 * Copyright (c) 2003 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include <sys/types.h>
#include <sys/mount.h>
#include <sys/prctl.h>
#include <sys/resource.h>

int __clone2(int (*fn)(void *), void *child_stack_base,
             size_t stack_size, int flags, void *arg, ...);

#include <sys/socket.h>
#include <sys/times.h>
#include <sys/statfs.h>
#include <sys/sysinfo.h>
#include <sys/utsname.h>
//#include <sys/user.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <qemu-common.h>
#include <sys/eventfd.h>
#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/utsname.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/mtio.h>
#include "linux_loop.h"
#include "cpu-uname.h"

#include "qemu-common.h"
#if defined(CONFIG_USE_NPTL)
#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
    CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
#else
/* XXX: Hardcode the above values.  */
#define CLONE_NPTL_FLAGS2 0
#endif

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
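/*
 * Note: each _syscallN(type, name, ...) macro above expands to a small
 * static wrapper that invokes the raw host syscall(2) with __NR_<name>,
 * so a wrapper exists even when the host libc does not export one.
 * For example, _syscall2(int, sys_getpriority, int, which, int, who)
 * expands to roughly:
 *
 *     static int sys_getpriority(int which, int who)
 *     {
 *         return syscall(__NR_sys_getpriority, which, who);
 *     }
 *
 * where __NR_sys_getpriority is #defined below to the host's
 * __NR_getpriority number.
 */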
#define __NR_sys_uname __NR_uname
#define __NR_sys_faccessat __NR_faccessat
#define __NR_sys_fchmodat __NR_fchmodat
#define __NR_sys_fchownat __NR_fchownat
#define __NR_sys_fstatat64 __NR_fstatat64
#define __NR_sys_futimesat __NR_futimesat
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_linkat __NR_linkat
#define __NR_sys_mkdirat __NR_mkdirat
#define __NR_sys_mknodat __NR_mknodat
#define __NR_sys_newfstatat __NR_newfstatat
#define __NR_sys_openat __NR_openat
#define __NR_sys_readlinkat __NR_readlinkat
#define __NR_sys_renameat __NR_renameat
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_symlinkat __NR_symlinkat
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_tgkill __NR_tgkill
#define __NR_sys_tkill __NR_tkill
#define __NR_sys_unlinkat __NR_unlinkat
#define __NR_sys_utimensat __NR_utimensat
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
#if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__)
#define __NR__llseek __NR_lseek
#endif

#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    return -ENOSYS;
}
#endif
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
_syscall2(int, sys_getpriority, int, which, int, who);
#if defined(TARGET_NR__llseek) && !defined (__x86_64__)
_syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
_syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
#endif
#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
_syscall2(int,sys_tkill,int,tid,int,sig)
#endif
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(CONFIG_USE_NPTL)
#if defined(TARGET_NR_futex) && defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#endif
static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
  { 0, 0, 0, 0 }
};
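/*
 * Each row of fcntl_flags_tbl is a (target_mask, target_bits, host_mask,
 * host_bits) tuple: target_to_host_bitmask() picks out target_bits under
 * target_mask and substitutes the corresponding host bits, so guest O_*
 * open flags are rewritten into the host encoding (the reverse direction
 * walks the same table).  The all-zero row terminates the table.
 */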
#define COPY_UTSNAME_FIELD(dest, src) \
  do { \
      /* __NEW_UTS_LEN doesn't include terminating null */ \
      (void) strncpy((dest), (src), __NEW_UTS_LEN); \
      (dest)[__NEW_UTS_LEN] = '\0'; \
  } while (0)

static int sys_uname(struct new_utsname *buf)
{
    struct utsname uts_buf;

    if (uname(&uts_buf) < 0)
        return (-1);

    /*
     * Just in case these have some differences, we
     * translate utsname to new_utsname (which is the
     * struct linux kernel uses).
     */

    bzero(buf, sizeof (*buf));
    COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
    COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
    COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
    COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
    COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
    COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
    return (0);

#undef COPY_UTSNAME_FIELD
}
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return (-1);
    }
    return strlen(buf)+1;
}
#ifdef CONFIG_ATFILE
/*
 * Host system seems to have atfile syscall stubs available.  We
 * now enable them one by one as specified by target syscall_nr.h.
 */

#ifdef TARGET_NR_faccessat
static int sys_faccessat(int dirfd, const char *pathname, int mode)
{
    return (faccessat(dirfd, pathname, mode, 0));
}
#endif
#ifdef TARGET_NR_fchmodat
static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
{
    return (fchmodat(dirfd, pathname, mode, 0));
}
#endif
#if defined(TARGET_NR_fchownat) && defined(USE_UID16)
static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
                        gid_t group, int flags)
{
    return (fchownat(dirfd, pathname, owner, group, flags));
}
#endif
#ifdef __NR_fstatat64
static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
                         int flags)
{
    return (fstatat(dirfd, pathname, buf, flags));
}
#endif
#ifdef __NR_newfstatat
static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
                          int flags)
{
    return (fstatat(dirfd, pathname, buf, flags));
}
#endif
#ifdef TARGET_NR_futimesat
static int sys_futimesat(int dirfd, const char *pathname,
                         const struct timeval times[2])
{
    return (futimesat(dirfd, pathname, times));
}
#endif
#ifdef TARGET_NR_linkat
static int sys_linkat(int olddirfd, const char *oldpath,
                      int newdirfd, const char *newpath, int flags)
{
    return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
}
#endif
#ifdef TARGET_NR_mkdirat
static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
{
    return (mkdirat(dirfd, pathname, mode));
}
#endif
#ifdef TARGET_NR_mknodat
static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
                       dev_t dev)
{
    return (mknodat(dirfd, pathname, mode, dev));
}
#endif
#ifdef TARGET_NR_openat
static int sys_openat(int dirfd, const char *pathname, int flags, ...)
{
    /*
     * open(2) has extra parameter 'mode' when called with
     * O_CREAT.
     */
    if ((flags & O_CREAT) != 0) {
        va_list ap;
        mode_t mode;

        /*
         * Get the 'mode' parameter and translate it to
         * host mode.
         */
        va_start(ap, flags);
        mode = va_arg(ap, mode_t);
        mode = target_to_host_bitmask(mode, fcntl_flags_tbl);
        va_end(ap);

        return (openat(dirfd, pathname, flags, mode));
    }
    return (openat(dirfd, pathname, flags));
}
#endif
#ifdef TARGET_NR_readlinkat
static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
{
    return (readlinkat(dirfd, pathname, buf, bufsiz));
}
#endif
#ifdef TARGET_NR_renameat
static int sys_renameat(int olddirfd, const char *oldpath,
                        int newdirfd, const char *newpath)
{
    return (renameat(olddirfd, oldpath, newdirfd, newpath));
}
#endif
#ifdef TARGET_NR_symlinkat
static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
{
    return (symlinkat(oldpath, newdirfd, newpath));
}
#endif
#ifdef TARGET_NR_unlinkat
static int sys_unlinkat(int dirfd, const char *pathname, int flags)
{
    return (unlinkat(dirfd, pathname, flags));
}
#endif
#else /* !CONFIG_ATFILE */

/*
 * Try direct syscalls instead
 */
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
_syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
#endif
#if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
_syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
#endif
#if defined(TARGET_NR_fchownat) && defined(__NR_fchownat) && defined(USE_UID16)
_syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
          uid_t,owner,gid_t,group,int,flags)
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
        defined(__NR_fstatat64)
_syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
          struct stat *,buf,int,flags)
#endif
#if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
_syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
          const struct timeval *,times)
#endif
#if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
        defined(__NR_newfstatat)
_syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
          struct stat *,buf,int,flags)
#endif
#if defined(TARGET_NR_linkat) && defined(__NR_linkat)
_syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
          int,newdirfd,const char *,newpath,int,flags)
#endif
#if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
_syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
#endif
#if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
_syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
          mode_t,mode,dev_t,dev)
#endif
#if defined(TARGET_NR_openat) && defined(__NR_openat)
_syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
#endif
#if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
_syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
          char *,buf,size_t,bufsize)
#endif
#if defined(TARGET_NR_renameat) && defined(__NR_renameat)
_syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
          int,newdirfd,const char *,newpath)
#endif
#if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
_syscall3(int,sys_symlinkat,const char *,oldpath,
          int,newdirfd,const char *,newpath)
#endif
#if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
_syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
#endif

#endif /* CONFIG_ATFILE */
#ifdef CONFIG_UTIMENSAT
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
#else
#if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#endif
#endif /* CONFIG_UTIMENSAT */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
    return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
    return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
    return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
extern int personality(int);
extern int flock(int, int);
extern int setfsuid(int);
extern int setfsgid(int);
extern int setgroups(int, gid_t *);

#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EIDRM]             = TARGET_EIDRM,
    [ECHRNG]            = TARGET_ECHRNG,
    [EL2NSYNC]          = TARGET_EL2NSYNC,
    [EL3HLT]            = TARGET_EL3HLT,
    [EL3RST]            = TARGET_EL3RST,
    [ELNRNG]            = TARGET_ELNRNG,
    [EUNATCH]           = TARGET_EUNATCH,
    [ENOCSI]            = TARGET_ENOCSI,
    [EL2HLT]            = TARGET_EL2HLT,
    [EDEADLK]           = TARGET_EDEADLK,
    [ENOLCK]            = TARGET_ENOLCK,
    [EBADE]             = TARGET_EBADE,
    [EBADR]             = TARGET_EBADR,
    [EXFULL]            = TARGET_EXFULL,
    [ENOANO]            = TARGET_ENOANO,
    [EBADRQC]           = TARGET_EBADRQC,
    [EBADSLT]           = TARGET_EBADSLT,
    [EBFONT]            = TARGET_EBFONT,
    [ENOSTR]            = TARGET_ENOSTR,
    [ENODATA]           = TARGET_ENODATA,
    [ETIME]             = TARGET_ETIME,
    [ENOSR]             = TARGET_ENOSR,
    [ENONET]            = TARGET_ENONET,
    [ENOPKG]            = TARGET_ENOPKG,
    [EREMOTE]           = TARGET_EREMOTE,
    [ENOLINK]           = TARGET_ENOLINK,
    [EADV]              = TARGET_EADV,
    [ESRMNT]            = TARGET_ESRMNT,
    [ECOMM]             = TARGET_ECOMM,
    [EPROTO]            = TARGET_EPROTO,
    [EDOTDOT]           = TARGET_EDOTDOT,
    [EMULTIHOP]         = TARGET_EMULTIHOP,
    [EBADMSG]           = TARGET_EBADMSG,
    [ENAMETOOLONG]      = TARGET_ENAMETOOLONG,
    [EOVERFLOW]         = TARGET_EOVERFLOW,
    [ENOTUNIQ]          = TARGET_ENOTUNIQ,
    [EBADFD]            = TARGET_EBADFD,
    [EREMCHG]           = TARGET_EREMCHG,
    [ELIBACC]           = TARGET_ELIBACC,
    [ELIBBAD]           = TARGET_ELIBBAD,
    [ELIBSCN]           = TARGET_ELIBSCN,
    [ELIBMAX]           = TARGET_ELIBMAX,
    [ELIBEXEC]          = TARGET_ELIBEXEC,
    [EILSEQ]            = TARGET_EILSEQ,
    [ENOSYS]            = TARGET_ENOSYS,
    [ELOOP]             = TARGET_ELOOP,
    [ERESTART]          = TARGET_ERESTART,
    [ESTRPIPE]          = TARGET_ESTRPIPE,
    [ENOTEMPTY]         = TARGET_ENOTEMPTY,
    [EUSERS]            = TARGET_EUSERS,
    [ENOTSOCK]          = TARGET_ENOTSOCK,
    [EDESTADDRREQ]      = TARGET_EDESTADDRREQ,
    [EMSGSIZE]          = TARGET_EMSGSIZE,
    [EPROTOTYPE]        = TARGET_EPROTOTYPE,
    [ENOPROTOOPT]       = TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT]   = TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT]   = TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP]        = TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT]      = TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT]      = TARGET_EAFNOSUPPORT,
    [EADDRINUSE]        = TARGET_EADDRINUSE,
    [EADDRNOTAVAIL]     = TARGET_EADDRNOTAVAIL,
    [ENETDOWN]          = TARGET_ENETDOWN,
    [ENETUNREACH]       = TARGET_ENETUNREACH,
    [ENETRESET]         = TARGET_ENETRESET,
    [ECONNABORTED]      = TARGET_ECONNABORTED,
    [ECONNRESET]        = TARGET_ECONNRESET,
    [ENOBUFS]           = TARGET_ENOBUFS,
    [EISCONN]           = TARGET_EISCONN,
    [ENOTCONN]          = TARGET_ENOTCONN,
    [EUCLEAN]           = TARGET_EUCLEAN,
    [ENOTNAM]           = TARGET_ENOTNAM,
    [ENAVAIL]           = TARGET_ENAVAIL,
    [EISNAM]            = TARGET_EISNAM,
    [EREMOTEIO]         = TARGET_EREMOTEIO,
    [ESHUTDOWN]         = TARGET_ESHUTDOWN,
    [ETOOMANYREFS]      = TARGET_ETOOMANYREFS,
    [ETIMEDOUT]         = TARGET_ETIMEDOUT,
    [ECONNREFUSED]      = TARGET_ECONNREFUSED,
    [EHOSTDOWN]         = TARGET_EHOSTDOWN,
    [EHOSTUNREACH]      = TARGET_EHOSTUNREACH,
    [EALREADY]          = TARGET_EALREADY,
    [EINPROGRESS]       = TARGET_EINPROGRESS,
    [ESTALE]            = TARGET_ESTALE,
    [ECANCELED]         = TARGET_ECANCELED,
    [ENOMEDIUM]         = TARGET_ENOMEDIUM,
    [EMEDIUMTYPE]       = TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY]            = TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED]       = TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED]       = TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED]      = TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD]        = TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE]   = TARGET_ENOTRECOVERABLE,
#endif
};
static inline int host_to_target_errno(int err)
{
    if(host_to_target_errno_table[err])
        return host_to_target_errno_table[err];
    return err;
}

static inline int target_to_host_errno(int err)
{
    if (target_to_host_errno_table[err])
        return target_to_host_errno_table[err];
    return err;
}

static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}

static inline int is_error(abi_long ret)
{
    return (abi_ulong)ret >= (abi_ulong)(-4096);
}

char *target_strerror(int err)
{
    return strerror(target_to_host_errno(err));
}
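/*
 * Convention used throughout this file: get_errno() converts the usual
 * "-1 plus errno" host result into a negative *target* errno, successful
 * results pass through unchanged, and is_error() recognises failures by
 * checking for the topmost 4096 values, mirroring the kernel/libc boundary.
 */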
static abi_ulong target_brk;
static abi_ulong target_original_brk;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
}

/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_ulong brk_page;
    abi_long mapped_addr;
    int new_alloc_size;

    if (new_brk < target_original_brk)
        return target_brk;

    brk_page = HOST_PAGE_ALIGN(target_brk);

    /* If the new brk is less than this, set it and we're done... */
    if (new_brk < brk_page) {
        target_brk = new_brk;
        return target_brk;
    }

    /* We need to allocate more memory after the brk... */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page + 1);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_FIXED|MAP_PRIVATE, 0, 0));

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    if (is_error(mapped_addr)) {
        return -TARGET_ENOMEM;
    }
#endif

    if (!is_error(mapped_addr)) {
        target_brk = new_brk;
    }
    return target_brk;
}
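/*
 * do_brk() keeps the guest heap consistent by page-aligning the current
 * break and, when the requested break crosses into a new host page,
 * mapping the extra range with target_mmap() as anonymous read/write
 * memory.  Only the Alpha/OSF/1 path reports ENOMEM; otherwise a failed
 * grow simply leaves target_brk unchanged, matching Linux brk() behaviour.
 */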
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
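/*
 * The fd_set helpers above repack descriptor bitmaps one bit at a time
 * because the guest's abi_ulong word size and byte order need not match
 * the host's fd_set layout; __get_user()/__put_user() take care of the
 * per-word byte swapping.
 */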
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapl(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapl(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapl(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapl(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapl(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapl(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapl(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapl(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapl(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapl(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapl(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapl(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapl(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapl(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapl(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapl(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapl(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapl(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
static inline rlim_t target_to_host_rlim(target_ulong target_rlim)
{
    if (target_rlim == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;
    else
        return tswapl(target_rlim);
}

static inline target_ulong host_to_target_rlim(rlim_t rlim)
{
    if (rlim == RLIM_INFINITY || rlim != (target_long)rlim)
        return TARGET_RLIM_INFINITY;
    else
        return tswapl(rlim);
}
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv, *tv_ptr;
    abi_long ret;

    if (rfd_addr) {
        if (copy_from_user_fdset(&rfds, rfd_addr, n))
            return -TARGET_EFAULT;
        rfds_ptr = &rfds;
    } else {
        rfds_ptr = NULL;
    }
    if (wfd_addr) {
        if (copy_from_user_fdset(&wfds, wfd_addr, n))
            return -TARGET_EFAULT;
        wfds_ptr = &wfds;
    } else {
        wfds_ptr = NULL;
    }
    if (efd_addr) {
        if (copy_from_user_fdset(&efds, efd_addr, n))
            return -TARGET_EFAULT;
        efds_ptr = &efds;
    } else {
        efds_ptr = NULL;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        tv_ptr = &tv;
    } else {
        tv_ptr = NULL;
    }

    ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
            return -TARGET_EFAULT;
    }

    return ret;
}
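/* Note that the fd sets and the timeout are written back to the guest
 * only when the host select() itself succeeded. */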
static abi_long do_pipe2(int host_pipe[], int flags)
{
    return pipe2(host_pipe, flags);
}

static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;

    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
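/*
 * On Alpha, MIPS and SH4 the classic pipe() syscall returns the second
 * descriptor in a register rather than through the user buffer, which is
 * why do_pipe() pokes host_pipe[1] straight into the CPU state for those
 * targets and returns host_pipe[0] as the syscall result.
 */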
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapl(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    target_saddr->sa_family = tswap16(addr->sa_family);
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
/* ??? Should this also swap msgh->name?  */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapl(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapl(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapl(target_cmsg->cmsg_len)
                  - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            gemu_log("Host cmsg overflow\n");
            break;
        }

        cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
            gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        } else {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                fd[i] = tswap32(target_fd[i]);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
/* ??? Should this also swap msgh->name?  */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapl(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapl(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));

        space += TARGET_CMSG_SPACE(len);
        if (space > msg_controllen) {
            space -= TARGET_CMSG_SPACE(len);
            gemu_log("Target cmsg overflow\n");
            break;
        }

        target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
        target_cmsg->cmsg_len = tswapl(TARGET_CMSG_LEN(len));

        if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
            gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, len);
        } else {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                target_fd[i] = tswap32(fd[i]);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapl(space);
    return 0;
}
/* do_setsockopt() Must return target values and target errnos. */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_ROUTER_ALERT:
        case IP_MTU_DISCOVER:
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case TARGET_SOL_SOCKET:
        switch (optname) {
            /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            break;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            break;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            break;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            break;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            break;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            break;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            break;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            break;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            break;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            break;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            break;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            break;
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            break;
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            break;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            break;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            break;
        case TARGET_SO_RCVTIMEO:
            optname = SO_RCVTIMEO;
            break;
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            break;
        default:
            goto unimplemented;
        }
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
    default:
    unimplemented:
        gemu_log("Unsupported setsockopt level=%d optname=%d \n", level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}
/* do_getsockopt() Must return target values and target errnos. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERCRED:
        case TARGET_SO_PEERNAME:
            /* These don't just return a single integer */
            goto unimplemented;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_ROUTER_ALERT:
        case IP_MTU_DISCOVER:
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
/*
 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
 * other lock functions have a return code of 0 for failure.
 */
static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
                           int count, int copy)
{
    struct target_iovec *target_vec;
    abi_ulong base;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
    if (!target_vec)
        return -TARGET_EFAULT;
    for(i = 0;i < count; i++) {
        base = tswapl(target_vec[i].iov_base);
        vec[i].iov_len = tswapl(target_vec[i].iov_len);
        if (vec[i].iov_len != 0) {
            vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
            /* Don't check lock_user return value. We must call writev even
               if a element has invalid base address. */
        } else {
            /* zero length pointer is ignored */
            vec[i].iov_base = NULL;
        }
    }
    unlock_user (target_vec, target_addr, 0);
    return 0;
}
static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                             int count, int copy)
{
    struct target_iovec *target_vec;
    abi_ulong base;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
    if (!target_vec)
        return -TARGET_EFAULT;
    for(i = 0;i < count; i++) {
        if (target_vec[i].iov_base) {
            base = tswapl(target_vec[i].iov_base);
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
    }
    unlock_user (target_vec, target_addr, 0);

    return 0;
}
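/*
 * lock_iovec() deliberately does not fail on a bad iov_base: the entry is
 * passed through (or left NULL for zero-length buffers) so that the
 * subsequent host readv/writev reports the fault exactly as the guest
 * kernel would.
 */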
/* do_socket() Must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
{
#if defined(TARGET_MIPS)
    switch(type) {
    case TARGET_SOCK_DGRAM:
        type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        type = SOCK_STREAM;
        break;
    case TARGET_SOCK_RAW:
        type = SOCK_RAW;
        break;
    case TARGET_SOCK_RDM:
        type = SOCK_RDM;
        break;
    case TARGET_SOCK_SEQPACKET:
        type = SOCK_SEQPACKET;
        break;
    case TARGET_SOCK_PACKET:
        type = SOCK_PACKET;
        break;
    }
#endif
    if (domain == PF_NETLINK)
        return -EAFNOSUPPORT; /* do not NETLINK socket connections possible */
    return get_errno(socket(domain, type, protocol));
}
/* do_bind() Must return target values and target errnos. */
static abi_long do_bind(int sockfd, abi_ulong target_addr,
                        socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(bind(sockfd, addr, addrlen));
}
/* do_connect() Must return target values and target errnos. */
static abi_long do_connect(int sockfd, abi_ulong target_addr,
                           socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen);

    ret = target_to_host_sockaddr(addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(connect(sockfd, addr, addrlen));
}
/* do_sendrecvmsg() Must return target values and target errnos. */
static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
{
    abi_long ret, len;
    struct target_msghdr *msgp;
    struct msghdr msg;
    int count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
                          msgp,
                          target_msg,
                          send ? 1 : 0))
        return -TARGET_EFAULT;
    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen);
        ret = target_to_host_sockaddr(msg.msg_name, tswapl(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret) {
            unlock_user_struct(msgp, target_msg, send ? 0 : 1);
            return ret;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    msg.msg_controllen = 2 * tswapl(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapl(msgp->msg_iovlen);
    vec = alloca(count * sizeof(struct iovec));
    target_vec = tswapl(msgp->msg_iov);
    lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        ret = target_to_host_cmsg(&msg, msgp);
        if (ret == 0)
            ret = get_errno(sendmsg(fd, &msg, flags));
    } else {
        ret = get_errno(recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            ret = host_to_target_cmsg(msgp, &msg);
            if (!is_error(ret))
                ret = len;
        }
    }

    unlock_iovec(vec, target_vec, count, !send);
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
    return ret;
}
/* do_accept() Must return target values and target errnos. */
static abi_long do_accept(int fd, abi_ulong target_addr,
                          abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (target_addr == 0)
        return get_errno(accept(fd, NULL, NULL));

    /* linux returns EINVAL if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EINVAL;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret = get_errno(accept(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}

/* do_getpeername() Must return target values and target errnos. */
static abi_long do_getpeername(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret = get_errno(getpeername(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}

/* do_getsockname() Must return target values and target errnos. */
static abi_long do_getsockname(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret = get_errno(getsockname(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
/* do_socketpair() Must return target values and target errnos. */
static abi_long do_socketpair(int domain, int type, int protocol,
                              abi_ulong target_tab_addr)
{
    int tab[2];
    abi_long ret;

    ret = get_errno(socketpair(domain, type, protocol, tab));
    if (!is_error(ret)) {
        if (put_user_s32(tab[0], target_tab_addr)
            || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
/* do_sendto() Must return target values and target errnos. */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        addr = alloca(addrlen);
        ret = target_to_host_sockaddr(addr, target_addr, addrlen);
        if (ret) {
            unlock_user(host_msg, msg, 0);
            return ret;
        }
        ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(send(fd, host_msg, len, flags));
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
/* do_recvfrom() Must return target values and target errnos. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        ret = get_errno(recv(fd, host_msg, len, flags));
    }
    if (!is_error(ret)) {
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr, addrlen);
            if (put_user_u32(addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        unlock_user(host_msg, msg, len);
    } else {
fail:
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
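/*
 * do_sendto() and do_recvfrom() double as plain send()/recv() when the
 * caller passes a zero target address, which is how the socketcall
 * SOCKOP_send/SOCKOP_recv entries below reuse them.
 */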
#ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    abi_long ret;
    const int n = sizeof(abi_ulong);

    switch(num) {
    case SOCKOP_socket:
        {
            abi_ulong domain, type, protocol;

            if (get_user_ual(domain, vptr)
                || get_user_ual(type, vptr + n)
                || get_user_ual(protocol, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_socket(domain, type, protocol);
        }
        break;
    case SOCKOP_bind:
        {
            abi_ulong sockfd;
            abi_ulong target_addr;
            socklen_t addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_bind(sockfd, target_addr, addrlen);
        }
        break;
    case SOCKOP_connect:
        {
            abi_ulong sockfd;
            abi_ulong target_addr;
            socklen_t addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_connect(sockfd, target_addr, addrlen);
        }
        break;
    case SOCKOP_listen:
        {
            abi_ulong sockfd, backlog;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(backlog, vptr + n))
                return -TARGET_EFAULT;

            ret = get_errno(listen(sockfd, backlog));
        }
        break;
    case SOCKOP_accept:
        {
            abi_ulong sockfd;
            abi_ulong target_addr, target_addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(target_addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_accept(sockfd, target_addr, target_addrlen);
        }
        break;
    case SOCKOP_getsockname:
        {
            abi_ulong sockfd;
            abi_ulong target_addr, target_addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(target_addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_getsockname(sockfd, target_addr, target_addrlen);
        }
        break;
    case SOCKOP_getpeername:
        {
            abi_ulong sockfd;
            abi_ulong target_addr, target_addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_ual(target_addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_getpeername(sockfd, target_addr, target_addrlen);
        }
        break;
    case SOCKOP_socketpair:
        {
            abi_ulong domain, type, protocol;
            abi_ulong tab;

            if (get_user_ual(domain, vptr)
                || get_user_ual(type, vptr + n)
                || get_user_ual(protocol, vptr + 2 * n)
                || get_user_ual(tab, vptr + 3 * n))
                return -TARGET_EFAULT;

            ret = do_socketpair(domain, type, protocol, tab);
        }
        break;
    case SOCKOP_send:
        {
            abi_ulong sockfd;
            abi_ulong msg;
            size_t len;
            abi_ulong flags;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(msg, vptr + n)
                || get_user_ual(len, vptr + 2 * n)
                || get_user_ual(flags, vptr + 3 * n))
                return -TARGET_EFAULT;

            ret = do_sendto(sockfd, msg, len, flags, 0, 0);
        }
        break;
    case SOCKOP_recv:
        {
            abi_ulong sockfd;
            abi_ulong msg;
            size_t len;
            abi_ulong flags;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(msg, vptr + n)
                || get_user_ual(len, vptr + 2 * n)
                || get_user_ual(flags, vptr + 3 * n))
                return -TARGET_EFAULT;

            ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
        }
        break;
    case SOCKOP_sendto:
        {
            abi_ulong sockfd;
            abi_ulong msg;
            size_t len;
            abi_ulong flags;
            abi_ulong addr;
            socklen_t addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(msg, vptr + n)
                || get_user_ual(len, vptr + 2 * n)
                || get_user_ual(flags, vptr + 3 * n)
                || get_user_ual(addr, vptr + 4 * n)
                || get_user_ual(addrlen, vptr + 5 * n))
                return -TARGET_EFAULT;

            ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
        }
        break;
    case SOCKOP_recvfrom:
        {
            abi_ulong sockfd;
            abi_ulong msg;
            size_t len;
            abi_ulong flags;
            abi_ulong addr;
            socklen_t addrlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(msg, vptr + n)
                || get_user_ual(len, vptr + 2 * n)
                || get_user_ual(flags, vptr + 3 * n)
                || get_user_ual(addr, vptr + 4 * n)
                || get_user_ual(addrlen, vptr + 5 * n))
                return -TARGET_EFAULT;

            ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
        }
        break;
    case SOCKOP_shutdown:
        {
            abi_ulong sockfd, how;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(how, vptr + n))
                return -TARGET_EFAULT;

            ret = get_errno(shutdown(sockfd, how));
        }
        break;
    case SOCKOP_sendmsg:
    case SOCKOP_recvmsg:
        {
            abi_ulong fd;
            abi_ulong target_msg;
            abi_ulong flags;

            if (get_user_ual(fd, vptr)
                || get_user_ual(target_msg, vptr + n)
                || get_user_ual(flags, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_sendrecvmsg(fd, target_msg, flags,
                                 (num == SOCKOP_sendmsg));
        }
        break;
    case SOCKOP_setsockopt:
        {
            abi_ulong sockfd;
            abi_ulong level;
            abi_ulong optname;
            abi_ulong optval;
            socklen_t optlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(level, vptr + n)
                || get_user_ual(optname, vptr + 2 * n)
                || get_user_ual(optval, vptr + 3 * n)
                || get_user_ual(optlen, vptr + 4 * n))
                return -TARGET_EFAULT;

            ret = do_setsockopt(sockfd, level, optname, optval, optlen);
        }
        break;
    case SOCKOP_getsockopt:
        {
            abi_ulong sockfd;
            abi_ulong level;
            abi_ulong optname;
            abi_ulong optval;
            socklen_t optlen;

            if (get_user_ual(sockfd, vptr)
                || get_user_ual(level, vptr + n)
                || get_user_ual(optname, vptr + 2 * n)
                || get_user_ual(optval, vptr + 3 * n)
                || get_user_ual(optlen, vptr + 4 * n))
                return -TARGET_EFAULT;

            ret = do_getsockopt(sockfd, level, optname, optval, optlen);
        }
        break;
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
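/*
 * What follows is the SysV IPC emulation: target-layout versions of
 * ipc_perm, semid_ds, seminfo, sembuf and msqid_ds plus converters that
 * byte-swap each field with tswapl()/__get_user()/__put_user() between
 * guest and host structures.
 */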
#define N_SHM_REGIONS 32

static struct shm_region {
    abi_ulong start;
    abi_ulong size;
} shm_regions[N_SHM_REGIONS];
struct target_ipc_perm
{
    abi_long __key;
    abi_ulong uid;
    abi_ulong gid;
    abi_ulong cuid;
    abi_ulong cgid;
    unsigned short int mode;
    unsigned short int __pad1;
    unsigned short int __seq;
    unsigned short int __pad2;
    abi_ulong __unused1;
    abi_ulong __unused2;
};

struct target_semid_ds
{
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;
    abi_ulong __unused1;
    abi_ulong sem_ctime;
    abi_ulong __unused2;
    abi_ulong sem_nsems;
    abi_ulong __unused3;
    abi_ulong __unused4;
};
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswapl(target_ip->__key);
    host_ip->uid = tswapl(target_ip->uid);
    host_ip->gid = tswapl(target_ip->gid);
    host_ip->cuid = tswapl(target_ip->cuid);
    host_ip->cgid = tswapl(target_ip->cgid);
    host_ip->mode = tswapl(target_ip->mode);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswapl(host_ip->__key);
    target_ip->uid = tswapl(host_ip->uid);
    target_ip->gid = tswapl(host_ip->gid);
    target_ip->cuid = tswapl(host_ip->cuid);
    target_ip->cgid = tswapl(host_ip->cgid);
    target_ip->mode = tswapl(host_ip->mode);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapl(target_sd->sem_nsems);
    host_sd->sem_otime = tswapl(target_sd->sem_otime);
    host_sd->sem_ctime = tswapl(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapl(host_sd->sem_nsems);
    target_sd->sem_otime = tswapl(host_sd->sem_otime);
    target_sd->sem_ctime = tswapl(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};

static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};

union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = malloc(nsems*sizeof(unsigned short));
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array)
        return -TARGET_EFAULT;

    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}
static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
                                               unsigned short **host_array)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    array = lock_user(VERIFY_WRITE, target_addr,
                      nsems*sizeof(unsigned short), 0);
    if (!array)
        return -TARGET_EFAULT;

    for(i=0; i<nsems; i++) {
        __put_user((*host_array)[i], &array[i]);
    }
    free(*host_array);
    unlock_user(array, target_addr, 1);

    return 0;
}
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 union target_semun target_su)
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;

        arg.val = tswapl(target_su.val);
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        target_su.val = tswapl(arg.val);

        err = target_to_host_semarray(semid, &array, target_su.array);
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);

        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);

        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);

        ret = get_errno(semctl(semid, semnum, cmd, NULL));
struct target_sembuf {
    unsigned short sem_num;

static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
    struct target_sembuf *target_sembuf;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops*sizeof(struct target_sembuf), 1);
        return -TARGET_EFAULT;
    for(i=0; i<nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    unlock_user(target_sembuf, target_addr, 0);
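
/* do_semop(): copy the guest sembuf array into host sembufs with
   target_to_host_sembuf() and issue the host semop() directly; the fields
   only need byte-order conversion, not layout changes. */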
static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
    struct sembuf sops[nsops];

    if (target_to_host_sembuf(sops, ptr, nsops))
        return -TARGET_EFAULT;
    return semop(semid, sops, nsops);
struct target_msqid_ds
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
    abi_ulong __msg_cbytes;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_md->msg_perm), target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapl(target_md->msg_stime);
    host_md->msg_rtime = tswapl(target_md->msg_rtime);
    host_md->msg_ctime = tswapl(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapl(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapl(target_md->msg_qnum);
    host_md->msg_qbytes = tswapl(target_md->msg_qbytes);
    host_md->msg_lspid = tswapl(target_md->msg_lspid);
    host_md->msg_lrpid = tswapl(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapl(host_md->msg_stime);
    target_md->msg_rtime = tswapl(host_md->msg_rtime);
    target_md->msg_ctime = tswapl(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapl(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapl(host_md->msg_qnum);
    target_md->msg_qbytes = tswapl(host_md->msg_qbytes);
    target_md->msg_lspid = tswapl(host_md->msg_lspid);
    target_md->msg_lrpid = tswapl(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
struct target_msginfo {
    unsigned short int msgseg;

static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

        if (target_to_host_msqid_ds(&dsarg, ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr, &dsarg))
            return -TARGET_EFAULT;

        ret = get_errno(msgctl(msgid, cmd, NULL));

        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
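
/* SysV message queues: do_msgsnd()/do_msgrcv() below allocate a temporary
   host msgbuf, byte-swap the mtype field and copy the message text between
   the guest msgbuf at msgp and the host buffer around the host call. */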
struct target_msgbuf {

static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 unsigned int msgsz, int msgflg)
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    host_mb = malloc(msgsz+sizeof(long));
    host_mb->mtype = (abi_long) tswapl(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
    unlock_user_struct(target_mb, msgp, 0);
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 unsigned int msgsz, abi_long msgtyp,
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    host_mb = malloc(msgsz+sizeof(long));
    ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapl(msgtyp), msgflg));
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    target_mb->mtype = tswapl(host_mb->mtype);
    unlock_user_struct(target_mb, msgp, 1);
struct target_shmid_ds
    struct target_ipc_perm shm_perm;
    abi_ulong shm_segsz;
    abi_ulong shm_atime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
    abi_ulong shm_dtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
    abi_ulong shm_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
    abi_ulong shm_nattch;
    unsigned long int __unused4;
    unsigned long int __unused5;
static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
struct target_shminfo {

static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
struct target_shm_info {
    abi_ulong swap_attempts;
    abi_ulong swap_successes;

static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;

        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;

        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;

        ret = get_errno(shmctl(shmid, cmd, NULL));
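
/* do_shmat(): query the segment size with IPC_STAT first, attach either at
   the guest-requested address or at a free area found by mmap_find_vma(),
   then record the mapping in shm_regions[] and mark the guest pages valid. */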
static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
    struct shmid_ds shm_info;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
        if (mmap_start == -1) {
            host_raddr = (void *)-1;
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    if (host_raddr == (void *)-1) {
        return get_errno((long)host_raddr);
    raddr = h2g((unsigned long)host_raddr);
    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY) ? 0 : PAGE_WRITE));
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (shm_regions[i].start == 0) {
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
static inline abi_long do_shmdt(abi_ulong shmaddr)
    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].start == shmaddr) {
            shm_regions[i].start = 0;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
    return get_errno(shmdt(g2h(shmaddr)));
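
/* do_ipc(): demultiplexer for the single ipc(2) syscall used on some targets;
   it routes SEMOP/SEMGET/SEMCTL, MSG* and SHM* sub-calls to the helpers above. */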
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(unsigned int call, int first,
                       int second, int third,
                       abi_long ptr, abi_long fifth)
    version = call >> 16;

        ret = do_semop(first, ptr, second);
        ret = get_errno(semget(first, second, third));
        ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
        ret = get_errno(msgget(first, second));
        ret = do_msgsnd(first, ptr, second, third);
        ret = do_msgctl(first, second, ptr);

            struct target_ipc_kludge {
            if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                ret = -TARGET_EFAULT;
            ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);
            unlock_user_struct(tmp, ptr, 0);
            ret = do_msgrcv(first, ptr, second, fifth, third);

            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            ret = -TARGET_EINVAL;
        ret = do_shmdt(ptr);

        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));

    /* IPC_* and SHM_* command values are the same on all linux platforms */
        ret = do_shmctl(first, second, third);
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
/* kernel structure types definitions */

#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
#include "syscall_types.h"
#undef STRUCT_SPECIAL

#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT_SPECIAL

typedef struct IOCTLEntry {
    unsigned int target_cmd;
    unsigned int host_cmd;
    const argtype arg_type[5];

#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

#define MAX_STRUCT_SIZE 4096

static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, { __VA_ARGS__ } },
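
/* do_ioctl(): look up the target command in ioctl_entries[], translate the
   argument according to the entry's thunk type description and access mode
   (IOC_R/IOC_W/IOC_RW), and forward the request to the host ioctl(). */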
/* ??? Implement proper locking for ioctls.  */
/* do_ioctl() Must return target values and target errnos. */
static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
    const IOCTLEntry *ie;
    const argtype *arg_type;
    uint8_t buf_temp[MAX_STRUCT_SIZE];

    if (ie->target_cmd == 0) {
        gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
        return -TARGET_ENOSYS;
    if (ie->target_cmd == cmd)

    arg_type = ie->arg_type;
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
    switch(arg_type[0]) {
        ret = get_errno(ioctl(fd, ie->host_cmd));
        ret = get_errno(ioctl(fd, ie->host_cmd, arg));
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);

            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));

            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);

        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
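
/* Terminal flag translation tables: each bitmask_transtbl entry gives a
   target mask/bits pair and the corresponding host mask/bits pair, consumed
   by target_to_host_bitmask()/host_to_target_bitmask() below. */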
static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
static void target_to_host_termios (void *dst, const void *src)
    struct host_termios *host = dst;
    const struct target_termios *target = src;

        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
static void host_to_target_termios (void *dst, const void *src)
    struct target_termios *target = dst;
    const struct host_termios *host = src;

        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
static bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
#if defined(TARGET_I386)

/* NOTE: there is really one LDT for all the threads */
static uint8_t *ldt_table;

static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
        return -TARGET_EFAULT;
    /* ??? Should this be byteswapped?  */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
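
/* write_ldt(): validate the guest descriptor passed in target_modify_ldt_ldt_s
   form, lazily allocate the emulated LDT with target_mmap(), and build the two
   32-bit descriptor words the same way the Linux kernel does before installing
   them in the table. */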
/* XXX: add locking support */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
    lm = (ldt_info.flags >> 7) & 1;
    if (contents == 3) {
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;

    /* allocate the LDT */
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
            read_exec_only == 1 &&
            limit_in_pages == 0 &&
            seg_not_present == 1 &&

    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        ((seg_not_present ^ 1) << 15) |
        (limit_in_pages << 23) |
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
/* specific and weird i386 syscalls */
static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
                              unsigned long bytecount)
        ret = read_ldt(ptr, bytecount);
        ret = write_ldt(env, ptr, bytecount, 1);
        ret = write_ldt(env, ptr, bytecount, 0);
        ret = -TARGET_ENOSYS;
#if defined(TARGET_I386) && defined(TARGET_ABI32)
static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        for (i = TARGET_GDT_ENTRY_TLS_MIN; i <= TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
    lm = (ldt_info.flags >> 7) & 1;
    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0 &&
             read_exec_only == 1 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&

    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        ((seg_not_present ^ 1) << 15) |
        (limit_in_pages << 23) |

    /* Install the new entry ...  */
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
    lm = (entry_2 >> 21) & 1;
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapl(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
#endif /* TARGET_I386 && TARGET_ABI32 */
#ifndef TARGET_ABI32
static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
    case TARGET_ARCH_SET_GS:
    case TARGET_ARCH_SET_FS:
        if (code == TARGET_ARCH_SET_GS)
        cpu_x86_load_seg(env, idx, 0);
        env->segs[idx].base = addr;
    case TARGET_ARCH_GET_GS:
    case TARGET_ARCH_GET_FS:
        if (code == TARGET_ARCH_GET_GS)
        val = env->segs[idx].base;
        if (put_user(val, addr, abi_ulong))
            return -TARGET_EFAULT;
        ret = -TARGET_EINVAL;
#endif /* defined(TARGET_I386) */
#if defined(CONFIG_USE_NPTL)

#define NEW_STACK_SIZE PTHREAD_STACK_MIN

static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    abi_ulong child_tidptr;
    abi_ulong parent_tidptr;

static void *clone_func(void *arg)
    new_thread_info *info = arg;

    ts = (TaskState *)thread_env->opaque;
    info->tid = gettid();
    env->host_tid = info->tid;
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);

/* this stack is the equivalent of the kernel stack associated with a
   thread/process */
#define NEW_STACK_SIZE 8192

static int clone_func(void *arg)
    CPUState *env = arg;
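
/* do_fork(): CLONE_VM requests are run as host pthreads (NPTL case) or with
   a host clone() on a fresh stack; plain fork()-style requests fall through
   to a real fork of the whole emulator, after which the child fixes up its
   TLS and TID pointers itself. */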
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
static int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
#if defined(CONFIG_USE_NPTL)
    unsigned int nptl_flags;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)env->opaque;
#if defined(CONFIG_USE_NPTL)
        new_thread_info info;
        pthread_attr_t attr;

        ts = qemu_mallocz(sizeof(TaskState) + NEW_STACK_SIZE);
        init_task_state(ts);
        new_stack = ts->stack;
        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
#if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
        /* Init regs that differ from the parent.  */
        cpu_clone_regs(new_env, newsp);
        new_env->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
#if defined(CONFIG_USE_NPTL)
        flags &= ~CLONE_NPTL_FLAGS2;
        if (nptl_flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        if (nptl_flags & CLONE_SETTLS)
            cpu_set_tls (new_env, newtls);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        if (nptl_flags & CLONE_CHILD_SETTID)
            info.child_tidptr = child_tidptr;
        if (nptl_flags & CLONE_PARENT_SETTID)
            info.parent_tidptr = parent_tidptr;

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstack(&attr, new_stack, NEW_STACK_SIZE);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        /* Wait for the child to initialize.  */
        pthread_cond_wait(&info.cond, &info.mutex);
        if (flags & CLONE_PARENT_SETTID)
            put_user_u32(ret, parent_tidptr);
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
        if (flags & CLONE_NPTL_FLAGS2)
        /* This is probably going to die very quickly, but do it anyway.  */
        ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
        ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
        /* if no CLONE_VM, we consider it is a fork */
        if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
            /* Child Process.  */
            cpu_clone_regs(env, newsp);
#if defined(CONFIG_USE_NPTL)
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)env->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
/* warning : doesn't handle linux specific flags... */
static int target_to_host_fcntl_cmd(int cmd)
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
    case TARGET_F_GETLK:
    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
    case TARGET_F_GETOWN:
    case TARGET_F_SETOWN:
    case TARGET_F_GETSIG:
    case TARGET_F_SETSIG:
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        return F_DUPFD_CLOEXEC;
    case TARGET_F_NOTIFY:
        return -TARGET_EINVAL;
    return -TARGET_EINVAL;
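
/* do_fcntl(): translate the command with target_to_host_fcntl_cmd(), convert
   struct flock/flock64 and the O_* status flags between guest and host layout
   where needed, and pass everything else to the host fcntl() unchanged. */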
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
    struct target_flock *target_fl;
    struct flock64 fl64;
    struct target_flock64 *target_fl64;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)

    case TARGET_F_GETLK:
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type = tswap16(target_fl->l_type);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapl(target_fl->l_start);
        fl.l_len = tswapl(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));
            if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
                return -TARGET_EFAULT;
            target_fl->l_type = tswap16(fl.l_type);
            target_fl->l_whence = tswap16(fl.l_whence);
            target_fl->l_start = tswapl(fl.l_start);
            target_fl->l_len = tswapl(fl.l_len);
            target_fl->l_pid = tswap32(fl.l_pid);
            unlock_user_struct(target_fl, arg, 1);

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type = tswap16(target_fl->l_type);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapl(target_fl->l_start);
        fl.l_len = tswapl(target_fl->l_len);
        fl.l_pid = tswap32(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl));

    case TARGET_F_GETLK64:
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        fl64.l_type = tswap16(target_fl64->l_type) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswapl(target_fl64->l_start);
        fl64.l_len = tswapl(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));
            if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
                return -TARGET_EFAULT;
            target_fl64->l_type = tswap16(fl64.l_type) >> 1;
            target_fl64->l_whence = tswap16(fl64.l_whence);
            target_fl64->l_start = tswapl(fl64.l_start);
            target_fl64->l_len = tswapl(fl64.l_len);
            target_fl64->l_pid = tswap32(fl64.l_pid);
            unlock_user_struct(target_fl64, arg, 1);

    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        fl64.l_type = tswap16(target_fl64->l_type) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswapl(target_fl64->l_start);
        fl64.l_len = tswapl(target_fl64->l_len);
        fl64.l_pid = tswap32(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, host_cmd, &fl64));

    case TARGET_F_GETFL:
        ret = get_errno(fcntl(fd, host_cmd, arg));
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);

    case TARGET_F_SETFL:
        ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
        ret = get_errno(fcntl(fd, host_cmd, arg));

        ret = get_errno(fcntl(fd, cmd, arg));
static inline int high2lowuid(int uid)

static inline int high2lowgid(int gid)

static inline int low2highuid(int uid)
    if ((int16_t)uid == -1)

static inline int low2highgid(int gid)
    if ((int16_t)gid == -1)

#endif /* USE_UID16 */
void syscall_init(void)
    const argtype *arg_type;

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT_SPECIAL

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                             (size << TARGET_IOC_SIZESHIFT);

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++)
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
#if TARGET_ABI_BITS == 32
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
    return ((uint64_t)word1 << 32) | word0;
#else /* TARGET_ABI_BITS == 32 */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
#endif /* TARGET_ABI_BITS != 32 */

#ifdef TARGET_NR_truncate64
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
    if (((CPUARMState *)cpu_env)->eabi)
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));

#ifdef TARGET_NR_ftruncate64
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
    if (((CPUARMState *)cpu_env)->eabi)
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
        return -TARGET_EFAULT;
    host_ts->tv_sec = tswapl(target_ts->tv_sec);
    host_ts->tv_nsec = tswapl(target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);

static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
        return -TARGET_EFAULT;
    target_ts->tv_sec = tswapl(host_ts->tv_sec);
    target_ts->tv_nsec = tswapl(host_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
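
/* host_to_target_stat64(): fill the guest stat64 buffer field by field; the
   ARM EABI variant uses the packed target_eabi_stat64 layout, other targets
   use target_stat or target_stat64 depending on the ABI width. */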
#if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);

#if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
        struct target_stat *target_st;
        struct target_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
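
/* do_futex(): forward guest futex operations to the host sys_futex(), swapping
   the compare value for FUTEX_WAIT and converting the timeout only for the
   commands that actually interpret it as a timespec. */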
#if defined(CONFIG_USE_NPTL)
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  However they're probably useless because guest atomic
   operations won't work either.  */
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
    struct timespec ts, *pts;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
            target_to_host_timespec(pts, timeout);
        return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_CMP_REQUEUE:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return get_errno(sys_futex(g2h(uaddr), op, val, pts,
                                   (base_op == FUTEX_CMP_REQUEUE
        return -TARGET_ENOSYS;
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
static int host_to_target_waitstatus(int status)
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)

int get_osversion(void)
    static int osversion;
    struct new_utsname buf;

    if (qemu_uname_release && *qemu_uname_release) {
        s = qemu_uname_release;
        if (sys_uname(&buf))
    for (i = 0; i < 3; i++) {
        while (*s >= '0' && *s <= '9') {
            tmp = (tmp << 8) + n;
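
/* do_syscall(): the main syscall dispatcher. Guest arguments arrive as
   abi_long values; each case locks the guest memory it needs, performs the
   host call, and leaves a result or a -TARGET_* errno in ret. */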
/* do_syscall() should always have a single exit point at the end so
   that actions, such as logging of syscall results, can be performed.
   All errnos that do_syscall() returns must be -TARGET_<errcode>. */
abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
                    abi_long arg2, abi_long arg3, abi_long arg4,
                    abi_long arg5, abi_long arg6)
    gemu_log("syscall %d", num);
    print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
:
4205 #ifdef CONFIG_USE_NPTL
4206 /* In old applications this may be used to implement _exit(2).
4207 However in threaded applictions it is used for thread termination,
4208 and _exit_group is used for application termination.
4209 Do thread termination if we have more then one thread. */
4210 /* FIXME: This probably breaks if a signal arrives. We should probably
4211 be disabling signals. */
4212 if (first_cpu
->next_cpu
) {
4220 while (p
&& p
!= (CPUState
*)cpu_env
) {
4221 lastp
= &p
->next_cpu
;
4224 /* If we didn't find the CPU for this thread then something is
4228 /* Remove the CPU from the list. */
4229 *lastp
= p
->next_cpu
;
4231 ts
= ((CPUState
*)cpu_env
)->opaque
;
4232 if (ts
->child_tidptr
) {
4233 put_user_u32(0, ts
->child_tidptr
);
4234 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
4237 /* TODO: Free CPU state. */
4244 gdb_exit(cpu_env
, arg1
);
4246 ret
= 0; /* avoid warning */
4248 case TARGET_NR_read
:
4252 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
4254 ret
= get_errno(read(arg1
, p
, arg3
));
4255 unlock_user(p
, arg2
, ret
);
4258 case TARGET_NR_write
:
4259 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
4261 ret
= get_errno(write(arg1
, p
, arg3
));
4262 unlock_user(p
, arg2
, 0);
4264 case TARGET_NR_open
:
4265 if (!(p
= lock_user_string(arg1
)))
4267 ret
= get_errno(open(path(p
),
4268 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
4270 unlock_user(p
, arg1
, 0);
4272 #if defined(TARGET_NR_openat) && defined(__NR_openat)
4273 case TARGET_NR_openat
:
4274 if (!(p
= lock_user_string(arg2
)))
4276 ret
= get_errno(sys_openat(arg1
,
4278 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
4280 unlock_user(p
, arg2
, 0);
4283 case TARGET_NR_close
:
4284 ret
= get_errno(close(arg1
));
4289 case TARGET_NR_fork
:
4290 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
4292 #ifdef TARGET_NR_waitpid
4293 case TARGET_NR_waitpid
:
4296 ret
= get_errno(waitpid(arg1
, &status
, arg3
));
4297 if (!is_error(ret
) && arg2
4298 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
4303 #ifdef TARGET_NR_waitid
4304 case TARGET_NR_waitid
:
4308 ret
= get_errno(waitid(arg1
, arg2
, &info
, arg4
));
4309 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
4310 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
4312 host_to_target_siginfo(p
, &info
);
4313 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
4318 #ifdef TARGET_NR_creat /* not on alpha */
4319 case TARGET_NR_creat
:
4320 if (!(p
= lock_user_string(arg1
)))
4322 ret
= get_errno(creat(p
, arg2
));
4323 unlock_user(p
, arg1
, 0);
4326 case TARGET_NR_link
:
4329 p
= lock_user_string(arg1
);
4330 p2
= lock_user_string(arg2
);
4332 ret
= -TARGET_EFAULT
;
4334 ret
= get_errno(link(p
, p2
));
4335 unlock_user(p2
, arg2
, 0);
4336 unlock_user(p
, arg1
, 0);
4339 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
4340 case TARGET_NR_linkat
:
4345 p
= lock_user_string(arg2
);
4346 p2
= lock_user_string(arg4
);
4348 ret
= -TARGET_EFAULT
;
4350 ret
= get_errno(sys_linkat(arg1
, p
, arg3
, p2
, arg5
));
4351 unlock_user(p
, arg2
, 0);
4352 unlock_user(p2
, arg4
, 0);
4356 case TARGET_NR_unlink
:
4357 if (!(p
= lock_user_string(arg1
)))
4359 ret
= get_errno(unlink(p
));
4360 unlock_user(p
, arg1
, 0);
4362 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
4363 case TARGET_NR_unlinkat
:
4364 if (!(p
= lock_user_string(arg2
)))
4366 ret
= get_errno(sys_unlinkat(arg1
, p
, arg3
));
4367 unlock_user(p
, arg2
, 0);
    case TARGET_NR_execve:
            char **argp, **envp;
            abi_ulong guest_argp;
            abi_ulong guest_envp;

            for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
                if (get_user_ual(addr, gp))
            for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
                if (get_user_ual(addr, gp))

            argp = alloca((argc + 1) * sizeof(void *));
            envp = alloca((envc + 1) * sizeof(void *));

            for (gp = guest_argp, q = argp; gp;
                 gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp))
                if (!(*q = lock_user_string(addr)))
            for (gp = guest_envp, q = envp; gp;
                 gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp))
                if (!(*q = lock_user_string(addr)))

            if (!(p = lock_user_string(arg1)))
            ret = get_errno(execve(p, argp, envp));
            unlock_user(p, arg1, 0);

            ret = -TARGET_EFAULT;
            for (gp = guest_argp, q = argp; *q;
                 gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp)
                unlock_user(*q, addr, 0);
            for (gp = guest_envp, q = envp; *q;
                 gp += sizeof(abi_ulong), q++) {
                if (get_user_ual(addr, gp)
                unlock_user(*q, addr, 0);
    case TARGET_NR_chdir:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(chdir(p));
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_time
    case TARGET_NR_time:
            ret = get_errno(time(&host_time));
                && put_user_sal(host_time, arg1))
    case TARGET_NR_mknod:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(mknod(p, arg2, arg3));
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
    case TARGET_NR_mknodat:
        if (!(p = lock_user_string(arg2)))
        ret = get_errno(sys_mknodat(arg1, p, arg3, arg4));
        unlock_user(p, arg2, 0);
    case TARGET_NR_chmod:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(chmod(p, arg2));
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_break
    case TARGET_NR_break:
#ifdef TARGET_NR_oldstat
    case TARGET_NR_oldstat:
:
4498 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
4500 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
4501 /* Alpha specific */
4502 case TARGET_NR_getxpid
:
4503 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
4504 ret
= get_errno(getpid());
4507 #ifdef TARGET_NR_getpid
4508 case TARGET_NR_getpid
:
4509 ret
= get_errno(getpid());
    case TARGET_NR_mount:
            /* need to look at the data field */
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            p3 = lock_user_string(arg3);
            if (!p || !p2 || !p3)
                ret = -TARGET_EFAULT;
                /* FIXME - arg5 should be locked, but it isn't clear how to
                 * do that since it's not guaranteed to be a NULL-terminated
                ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, NULL));
                ret = get_errno(mount(p, p2, p3, (unsigned long)arg4, g2h(arg5)));
            unlock_user(p, arg1, 0);
            unlock_user(p2, arg2, 0);
            unlock_user(p3, arg3, 0);
#ifdef TARGET_NR_umount
    case TARGET_NR_umount:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(umount(p));
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_stime /* not on alpha */
    case TARGET_NR_stime:
            if (get_user_sal(host_time, arg1))
            ret = get_errno(stime(&host_time));
    case TARGET_NR_ptrace:
#ifdef TARGET_NR_alarm /* not on alpha */
    case TARGET_NR_alarm:
#ifdef TARGET_NR_oldfstat
    case TARGET_NR_oldfstat:
#ifdef TARGET_NR_pause /* not on alpha */
    case TARGET_NR_pause:
        ret = get_errno(pause());
#ifdef TARGET_NR_utime
    case TARGET_NR_utime:
            struct utimbuf tbuf, *host_tbuf;
            struct target_utimbuf *target_tbuf;
                if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
                tbuf.actime = tswapl(target_tbuf->actime);
                tbuf.modtime = tswapl(target_tbuf->modtime);
                unlock_user_struct(target_tbuf, arg2, 0);
            if (!(p = lock_user_string(arg1)))
            ret = get_errno(utime(p, host_tbuf));
            unlock_user(p, arg1, 0);
    case TARGET_NR_utimes:
            struct timeval *tvp, tv[2];
                if (copy_from_user_timeval(&tv[0], arg2)
                    || copy_from_user_timeval(&tv[1],
                                              arg2 + sizeof(struct target_timeval)))
            if (!(p = lock_user_string(arg1)))
            ret = get_errno(utimes(p, tvp));
            unlock_user(p, arg1, 0);
#if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
    case TARGET_NR_futimesat:
            struct timeval *tvp, tv[2];
                if (copy_from_user_timeval(&tv[0], arg3)
                    || copy_from_user_timeval(&tv[1],
                                              arg3 + sizeof(struct target_timeval)))
            if (!(p = lock_user_string(arg2)))
            ret = get_errno(sys_futimesat(arg1, path(p), tvp));
            unlock_user(p, arg2, 0);
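    /* utime/utimes/futimesat convert the guest time structures into a host
       struct utimbuf / struct timeval[2] (byte-swapping via tswapl() and
       copy_from_user_timeval()) before invoking the host call on the locked
       path string. */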
#ifdef TARGET_NR_stty
    case TARGET_NR_stty:
#ifdef TARGET_NR_gtty
    case TARGET_NR_gtty:
    case TARGET_NR_access:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(access(path(p), arg2));
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
    case TARGET_NR_faccessat:
        if (!(p = lock_user_string(arg2)))
        ret = get_errno(sys_faccessat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
#ifdef TARGET_NR_nice /* not on alpha */
    case TARGET_NR_nice:
        ret = get_errno(nice(arg1));
#ifdef TARGET_NR_ftime
    case TARGET_NR_ftime:
    case TARGET_NR_sync:
    case TARGET_NR_kill:
        ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
    case TARGET_NR_rename:
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
                ret = -TARGET_EFAULT;
                ret = get_errno(rename(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
#if defined(TARGET_NR_renameat) && defined(__NR_renameat)
    case TARGET_NR_renameat:
            p = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
                ret = -TARGET_EFAULT;
                ret = get_errno(sys_renameat(arg1, p, arg3, p2));
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
    case TARGET_NR_mkdir:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(mkdir(p, arg2));
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
    case TARGET_NR_mkdirat:
        if (!(p = lock_user_string(arg2)))
        ret = get_errno(sys_mkdirat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
    case TARGET_NR_rmdir:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(rmdir(p));
        unlock_user(p, arg1, 0);
        ret = get_errno(dup(arg1));
    case TARGET_NR_pipe:
        ret = do_pipe(cpu_env, arg1, 0, 0);
#ifdef TARGET_NR_pipe2
    case TARGET_NR_pipe2:
        ret = do_pipe(cpu_env, arg1, arg2, 1);
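    /* pipe and pipe2 share do_pipe(); the final argument selects pipe2
       semantics, so the flags word in arg2 is only honoured there. */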
    case TARGET_NR_times:
            struct target_tms *tmsp;
            ret = get_errno(times(&tms));
                tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
                tmsp->tms_utime = tswapl(host_to_target_clock_t(tms.tms_utime));
                tmsp->tms_stime = tswapl(host_to_target_clock_t(tms.tms_stime));
                tmsp->tms_cutime = tswapl(host_to_target_clock_t(tms.tms_cutime));
                tmsp->tms_cstime = tswapl(host_to_target_clock_t(tms.tms_cstime));
                ret = host_to_target_clock_t(ret);
#ifdef TARGET_NR_prof
    case TARGET_NR_prof:
#ifdef TARGET_NR_signal
    case TARGET_NR_signal:
    case TARGET_NR_acct:
            ret = get_errno(acct(NULL));
            if (!(p = lock_user_string(arg1)))
            ret = get_errno(acct(path(p)));
            unlock_user(p, arg1, 0);
#ifdef TARGET_NR_umount2 /* not on alpha */
    case TARGET_NR_umount2:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(umount2(p, arg2));
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_lock
    case TARGET_NR_lock:
    case TARGET_NR_ioctl:
        ret = do_ioctl(arg1, arg2, arg3);
    case TARGET_NR_fcntl:
        ret = do_fcntl(arg1, arg2, arg3);
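    /* ioctl and fcntl are routed through do_ioctl()/do_fcntl(), which
       translate request numbers and argument structures between the target
       and host layouts. */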
#ifdef TARGET_NR_mpx
    case TARGET_NR_setpgid:
        ret = get_errno(setpgid(arg1, arg2));
#ifdef TARGET_NR_ulimit
    case TARGET_NR_ulimit:
#ifdef TARGET_NR_oldolduname
    case TARGET_NR_oldolduname:
    case TARGET_NR_umask:
        ret = get_errno(umask(arg1));
    case TARGET_NR_chroot:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(chroot(p));
        unlock_user(p, arg1, 0);
    case TARGET_NR_ustat:
    case TARGET_NR_dup2:
        ret = get_errno(dup2(arg1, arg2));
#if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
    case TARGET_NR_dup3:
        ret = get_errno(dup3(arg1, arg2, arg3));
#ifdef TARGET_NR_getppid /* not on alpha */
    case TARGET_NR_getppid:
        ret = get_errno(getppid());
    case TARGET_NR_getpgrp:
        ret = get_errno(getpgrp());
    case TARGET_NR_setsid:
        ret = get_errno(setsid());
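    /* The sigaction family below converts between the per-target layouts
       (old_sigaction, the MIPS variant, the Alpha rt variant) and the
       internal struct target_sigaction that do_sigaction() consumes. */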
4826 #ifdef TARGET_NR_sigaction
4827 case TARGET_NR_sigaction
:
4829 #if defined(TARGET_ALPHA)
4830 struct target_sigaction act
, oact
, *pact
= 0;
4831 struct target_old_sigaction
*old_act
;
4833 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
4835 act
._sa_handler
= old_act
->_sa_handler
;
4836 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
4837 act
.sa_flags
= old_act
->sa_flags
;
4838 act
.sa_restorer
= 0;
4839 unlock_user_struct(old_act
, arg2
, 0);
4842 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
4843 if (!is_error(ret
) && arg3
) {
4844 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
4846 old_act
->_sa_handler
= oact
._sa_handler
;
4847 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
4848 old_act
->sa_flags
= oact
.sa_flags
;
4849 unlock_user_struct(old_act
, arg3
, 1);
4851 #elif defined(TARGET_MIPS)
4852 struct target_sigaction act
, oact
, *pact
, *old_act
;
4855 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
4857 act
._sa_handler
= old_act
->_sa_handler
;
4858 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
4859 act
.sa_flags
= old_act
->sa_flags
;
4860 unlock_user_struct(old_act
, arg2
, 0);
4866 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
4868 if (!is_error(ret
) && arg3
) {
4869 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
4871 old_act
->_sa_handler
= oact
._sa_handler
;
4872 old_act
->sa_flags
= oact
.sa_flags
;
4873 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
4874 old_act
->sa_mask
.sig
[1] = 0;
4875 old_act
->sa_mask
.sig
[2] = 0;
4876 old_act
->sa_mask
.sig
[3] = 0;
4877 unlock_user_struct(old_act
, arg3
, 1);
4880 struct target_old_sigaction
*old_act
;
4881 struct target_sigaction act
, oact
, *pact
;
4883 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
4885 act
._sa_handler
= old_act
->_sa_handler
;
4886 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
4887 act
.sa_flags
= old_act
->sa_flags
;
4888 act
.sa_restorer
= old_act
->sa_restorer
;
4889 unlock_user_struct(old_act
, arg2
, 0);
4894 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
4895 if (!is_error(ret
) && arg3
) {
4896 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
4898 old_act
->_sa_handler
= oact
._sa_handler
;
4899 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
4900 old_act
->sa_flags
= oact
.sa_flags
;
4901 old_act
->sa_restorer
= oact
.sa_restorer
;
4902 unlock_user_struct(old_act
, arg3
, 1);
4908 case TARGET_NR_rt_sigaction
:
4910 #if defined(TARGET_ALPHA)
4911 struct target_sigaction act
, oact
, *pact
= 0;
4912 struct target_rt_sigaction
*rt_act
;
4913 /* ??? arg4 == sizeof(sigset_t). */
4915 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
4917 act
._sa_handler
= rt_act
->_sa_handler
;
4918 act
.sa_mask
= rt_act
->sa_mask
;
4919 act
.sa_flags
= rt_act
->sa_flags
;
4920 act
.sa_restorer
= arg5
;
4921 unlock_user_struct(rt_act
, arg2
, 0);
4924 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
4925 if (!is_error(ret
) && arg3
) {
4926 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
4928 rt_act
->_sa_handler
= oact
._sa_handler
;
4929 rt_act
->sa_mask
= oact
.sa_mask
;
4930 rt_act
->sa_flags
= oact
.sa_flags
;
4931 unlock_user_struct(rt_act
, arg3
, 1);
4934 struct target_sigaction
*act
;
4935 struct target_sigaction
*oact
;
4938 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
4943 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
4944 ret
= -TARGET_EFAULT
;
4945 goto rt_sigaction_fail
;
4949 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
4952 unlock_user_struct(act
, arg2
, 0);
4954 unlock_user_struct(oact
, arg3
, 1);
4958 #ifdef TARGET_NR_sgetmask /* not on alpha */
4959 case TARGET_NR_sgetmask
:
4962 abi_ulong target_set
;
4963 sigprocmask(0, NULL
, &cur_set
);
4964 host_to_target_old_sigset(&target_set
, &cur_set
);
4969 #ifdef TARGET_NR_ssetmask /* not on alpha */
4970 case TARGET_NR_ssetmask
:
4972 sigset_t set
, oset
, cur_set
;
4973 abi_ulong target_set
= arg1
;
4974 sigprocmask(0, NULL
, &cur_set
);
4975 target_to_host_old_sigset(&set
, &target_set
);
4976 sigorset(&set
, &set
, &cur_set
);
4977 sigprocmask(SIG_SETMASK
, &set
, &oset
);
4978 host_to_target_old_sigset(&target_set
, &oset
);
4983 #ifdef TARGET_NR_sigprocmask
4984 case TARGET_NR_sigprocmask
:
4986 #if defined(TARGET_ALPHA)
4987 sigset_t set
, oldset
;
4992 case TARGET_SIG_BLOCK
:
4995 case TARGET_SIG_UNBLOCK
:
4998 case TARGET_SIG_SETMASK
:
5002 ret
= -TARGET_EINVAL
;
5006 target_to_host_old_sigset(&set
, &mask
);
5008 ret
= get_errno(sigprocmask(how
, &set
, &oldset
));
5010 if (!is_error(ret
)) {
5011 host_to_target_old_sigset(&mask
, &oldset
);
            ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
5016 sigset_t set
, oldset
, *set_ptr
;
5021 case TARGET_SIG_BLOCK
:
5024 case TARGET_SIG_UNBLOCK
:
5027 case TARGET_SIG_SETMASK
:
5031 ret
= -TARGET_EINVAL
;
5034 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
5036 target_to_host_old_sigset(&set
, p
);
5037 unlock_user(p
, arg2
, 0);
5043 ret
= get_errno(sigprocmask(how
, set_ptr
, &oldset
));
5044 if (!is_error(ret
) && arg3
) {
5045 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
5047 host_to_target_old_sigset(p
, &oldset
);
5048 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
5054 case TARGET_NR_rt_sigprocmask
:
5057 sigset_t set
, oldset
, *set_ptr
;
5061 case TARGET_SIG_BLOCK
:
5064 case TARGET_SIG_UNBLOCK
:
5067 case TARGET_SIG_SETMASK
:
5071 ret
= -TARGET_EINVAL
;
5074 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
5076 target_to_host_sigset(&set
, p
);
5077 unlock_user(p
, arg2
, 0);
5083 ret
= get_errno(sigprocmask(how
, set_ptr
, &oldset
));
5084 if (!is_error(ret
) && arg3
) {
5085 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
5087 host_to_target_sigset(p
, &oldset
);
5088 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
5092 #ifdef TARGET_NR_sigpending
5093 case TARGET_NR_sigpending
:
5096 ret
= get_errno(sigpending(&set
));
5097 if (!is_error(ret
)) {
5098 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
5100 host_to_target_old_sigset(p
, &set
);
5101 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
5106 case TARGET_NR_rt_sigpending
:
5109 ret
= get_errno(sigpending(&set
));
5110 if (!is_error(ret
)) {
5111 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
5113 host_to_target_sigset(p
, &set
);
5114 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
5118 #ifdef TARGET_NR_sigsuspend
5119 case TARGET_NR_sigsuspend
:
5122 #if defined(TARGET_ALPHA)
5123 abi_ulong mask
= arg1
;
5124 target_to_host_old_sigset(&set
, &mask
);
5126 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
5128 target_to_host_old_sigset(&set
, p
);
5129 unlock_user(p
, arg1
, 0);
5131 ret
= get_errno(sigsuspend(&set
));
5135 case TARGET_NR_rt_sigsuspend
:
5138 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
5140 target_to_host_sigset(&set
, p
);
5141 unlock_user(p
, arg1
, 0);
5142 ret
= get_errno(sigsuspend(&set
));
5145 case TARGET_NR_rt_sigtimedwait
:
5148 struct timespec uts
, *puts
;
5151 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
5153 target_to_host_sigset(&set
, p
);
5154 unlock_user(p
, arg1
, 0);
5157 target_to_host_timespec(puts
, arg3
);
5161 ret
= get_errno(sigtimedwait(&set
, &uinfo
, puts
));
5162 if (!is_error(ret
) && arg2
) {
5163 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
), 0)))
5165 host_to_target_siginfo(p
, &uinfo
);
5166 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
5170 case TARGET_NR_rt_sigqueueinfo
:
5173 if (!(p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_sigset_t
), 1)))
5175 target_to_host_siginfo(&uinfo
, p
);
5176 unlock_user(p
, arg1
, 0);
5177 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        /* NOTE: ret is eax, so no transcoding needs to be done */
        ret = do_sigreturn(cpu_env);
    case TARGET_NR_rt_sigreturn:
        /* NOTE: ret is eax, so no transcoding needs to be done */
        ret = do_rt_sigreturn(cpu_env);
5190 case TARGET_NR_sethostname
:
5191 if (!(p
= lock_user_string(arg1
)))
5193 ret
= get_errno(sethostname(p
, arg2
));
5194 unlock_user(p
, arg1
, 0);
5196 case TARGET_NR_setrlimit
:
5198 int resource
= arg1
;
5199 struct target_rlimit
*target_rlim
;
5201 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
5203 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
5204 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
5205 unlock_user_struct(target_rlim
, arg2
, 0);
5206 ret
= get_errno(setrlimit(resource
, &rlim
));
5209 case TARGET_NR_getrlimit
:
5211 int resource
= arg1
;
5212 struct target_rlimit
*target_rlim
;
5215 ret
= get_errno(getrlimit(resource
, &rlim
));
5216 if (!is_error(ret
)) {
5217 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
5219 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
5220 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
5221 unlock_user_struct(target_rlim
, arg2
, 1);
5225 case TARGET_NR_getrusage
:
5227 struct rusage rusage
;
5228 ret
= get_errno(getrusage(arg1
, &rusage
));
5229 if (!is_error(ret
)) {
5230 host_to_target_rusage(arg2
, &rusage
);
5234 case TARGET_NR_gettimeofday
:
5237 ret
= get_errno(gettimeofday(&tv
, NULL
));
5238 if (!is_error(ret
)) {
5239 if (copy_to_user_timeval(arg1
, &tv
))
5244 case TARGET_NR_settimeofday
:
5247 if (copy_from_user_timeval(&tv
, arg1
))
5249 ret
= get_errno(settimeofday(&tv
, NULL
));
5252 #ifdef TARGET_NR_select
5253 case TARGET_NR_select
:
5255 struct target_sel_arg_struct
*sel
;
5256 abi_ulong inp
, outp
, exp
, tvp
;
5259 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1))
5261 nsel
= tswapl(sel
->n
);
5262 inp
= tswapl(sel
->inp
);
5263 outp
= tswapl(sel
->outp
);
5264 exp
= tswapl(sel
->exp
);
5265 tvp
= tswapl(sel
->tvp
);
5266 unlock_user_struct(sel
, arg1
, 0);
5267 ret
= do_select(nsel
, inp
, outp
, exp
, tvp
);
5271 #ifdef TARGET_NR_pselect6
5272 case TARGET_NR_pselect6
:
5273 goto unimplemented_nowarn
;
5275 case TARGET_NR_symlink
:
5278 p
= lock_user_string(arg1
);
5279 p2
= lock_user_string(arg2
);
5281 ret
= -TARGET_EFAULT
;
5283 ret
= get_errno(symlink(p
, p2
));
5284 unlock_user(p2
, arg2
, 0);
5285 unlock_user(p
, arg1
, 0);
5288 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
5289 case TARGET_NR_symlinkat
:
5292 p
= lock_user_string(arg1
);
5293 p2
= lock_user_string(arg3
);
5295 ret
= -TARGET_EFAULT
;
5297 ret
= get_errno(sys_symlinkat(p
, arg2
, p2
));
5298 unlock_user(p2
, arg3
, 0);
5299 unlock_user(p
, arg1
, 0);
#ifdef TARGET_NR_oldlstat
    case TARGET_NR_oldlstat:
    case TARGET_NR_readlink:
            p = lock_user_string(arg1);
            p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
                ret = -TARGET_EFAULT;
            if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
                char real[PATH_MAX];
                temp = realpath(exec_path, real);
                ret = (temp == NULL) ? get_errno(-1) : strlen(real);
                snprintf((char *)p2, arg3, "%s", real);
            ret = get_errno(readlink(path(p), p2, arg3));
            unlock_user(p2, arg2, ret);
            unlock_user(p, arg1, 0);
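            /* /proc/self/exe is special-cased above so the guest sees the
               path of the binary being emulated (exec_path) rather than the
               path of the qemu interpreter itself. */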
5328 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
5329 case TARGET_NR_readlinkat
:
5332 p
= lock_user_string(arg2
);
5333 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
5335 ret
= -TARGET_EFAULT
;
5337 ret
= get_errno(sys_readlinkat(arg1
, path(p
), p2
, arg4
));
5338 unlock_user(p2
, arg3
, ret
);
5339 unlock_user(p
, arg2
, 0);
5343 #ifdef TARGET_NR_uselib
5344 case TARGET_NR_uselib
:
5347 #ifdef TARGET_NR_swapon
5348 case TARGET_NR_swapon
:
5349 if (!(p
= lock_user_string(arg1
)))
5351 ret
= get_errno(swapon(p
, arg2
));
5352 unlock_user(p
, arg1
, 0);
5355 case TARGET_NR_reboot
:
5357 #ifdef TARGET_NR_readdir
5358 case TARGET_NR_readdir
:
5361 #ifdef TARGET_NR_mmap
5362 case TARGET_NR_mmap
:
5363 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE)
5366 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
5367 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
5375 unlock_user(v
, arg1
, 0);
5376 ret
= get_errno(target_mmap(v1
, v2
, v3
,
5377 target_to_host_bitmask(v4
, mmap_flags_tbl
),
5381 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
5382 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
5388 #ifdef TARGET_NR_mmap2
5389 case TARGET_NR_mmap2
:
5391 #define MMAP_SHIFT 12
5393 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
5394 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
5396 arg6
<< MMAP_SHIFT
));
5399 case TARGET_NR_munmap
:
5400 ret
= get_errno(target_munmap(arg1
, arg2
));
5402 case TARGET_NR_mprotect
:
5403 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
5405 #ifdef TARGET_NR_mremap
5406 case TARGET_NR_mremap
:
5407 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
5410 /* ??? msync/mlock/munlock are broken for softmmu. */
5411 #ifdef TARGET_NR_msync
5412 case TARGET_NR_msync
:
5413 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
5416 #ifdef TARGET_NR_mlock
5417 case TARGET_NR_mlock
:
5418 ret
= get_errno(mlock(g2h(arg1
), arg2
));
5421 #ifdef TARGET_NR_munlock
5422 case TARGET_NR_munlock
:
5423 ret
= get_errno(munlock(g2h(arg1
), arg2
));
5426 #ifdef TARGET_NR_mlockall
5427 case TARGET_NR_mlockall
:
5428 ret
= get_errno(mlockall(arg1
));
5431 #ifdef TARGET_NR_munlockall
5432 case TARGET_NR_munlockall
:
5433 ret
= get_errno(munlockall());
5436 case TARGET_NR_truncate
:
5437 if (!(p
= lock_user_string(arg1
)))
5439 ret
= get_errno(truncate(p
, arg2
));
5440 unlock_user(p
, arg1
, 0);
5442 case TARGET_NR_ftruncate
:
5443 ret
= get_errno(ftruncate(arg1
, arg2
));
5445 case TARGET_NR_fchmod
:
5446 ret
= get_errno(fchmod(arg1
, arg2
));
5448 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
5449 case TARGET_NR_fchmodat
:
5450 if (!(p
= lock_user_string(arg2
)))
5452 ret
= get_errno(sys_fchmodat(arg1
, p
, arg3
));
5453 unlock_user(p
, arg2
, 0);
5456 case TARGET_NR_getpriority
:
5457 /* libc does special remapping of the return value of
5458 * sys_getpriority() so it's just easiest to call
5459 * sys_getpriority() directly rather than through libc. */
5460 ret
= get_errno(sys_getpriority(arg1
, arg2
));
5462 case TARGET_NR_setpriority
:
5463 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
5465 #ifdef TARGET_NR_profil
5466 case TARGET_NR_profil
:
5469 case TARGET_NR_statfs
:
5470 if (!(p
= lock_user_string(arg1
)))
5472 ret
= get_errno(statfs(path(p
), &stfs
));
5473 unlock_user(p
, arg1
, 0);
5475 if (!is_error(ret
)) {
5476 struct target_statfs
*target_stfs
;
5478 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
5480 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
5481 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
5482 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
5483 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
5484 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
5485 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
5486 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
5487 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
5488 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
5489 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
5490 unlock_user_struct(target_stfs
, arg2
, 1);
5493 case TARGET_NR_fstatfs
:
5494 ret
= get_errno(fstatfs(arg1
, &stfs
));
5495 goto convert_statfs
;
5496 #ifdef TARGET_NR_statfs64
5497 case TARGET_NR_statfs64
:
5498 if (!(p
= lock_user_string(arg1
)))
5500 ret
= get_errno(statfs(path(p
), &stfs
));
5501 unlock_user(p
, arg1
, 0);
5503 if (!is_error(ret
)) {
5504 struct target_statfs64
*target_stfs
;
5506 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
5508 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
5509 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
5510 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
5511 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
5512 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
5513 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
5514 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
5515 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
5516 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
5517 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
5518 unlock_user_struct(target_stfs
, arg3
, 1);
5521 case TARGET_NR_fstatfs64
:
5522 ret
= get_errno(fstatfs(arg1
, &stfs
));
5523 goto convert_statfs64
;
5525 #ifdef TARGET_NR_ioperm
5526 case TARGET_NR_ioperm
:
5529 #ifdef TARGET_NR_socketcall
5530 case TARGET_NR_socketcall
:
5531 ret
= do_socketcall(arg1
, arg2
);
5534 #ifdef TARGET_NR_accept
5535 case TARGET_NR_accept
:
5536 ret
= do_accept(arg1
, arg2
, arg3
);
5539 #ifdef TARGET_NR_bind
5540 case TARGET_NR_bind
:
5541 ret
= do_bind(arg1
, arg2
, arg3
);
5544 #ifdef TARGET_NR_connect
5545 case TARGET_NR_connect
:
5546 ret
= do_connect(arg1
, arg2
, arg3
);
5549 #ifdef TARGET_NR_getpeername
5550 case TARGET_NR_getpeername
:
5551 ret
= do_getpeername(arg1
, arg2
, arg3
);
5554 #ifdef TARGET_NR_getsockname
5555 case TARGET_NR_getsockname
:
5556 ret
= do_getsockname(arg1
, arg2
, arg3
);
5559 #ifdef TARGET_NR_getsockopt
5560 case TARGET_NR_getsockopt
:
5561 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
5564 #ifdef TARGET_NR_listen
5565 case TARGET_NR_listen
:
5566 ret
= get_errno(listen(arg1
, arg2
));
5569 #ifdef TARGET_NR_recv
5570 case TARGET_NR_recv
:
5571 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
5574 #ifdef TARGET_NR_recvfrom
5575 case TARGET_NR_recvfrom
:
5576 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
5579 #ifdef TARGET_NR_recvmsg
5580 case TARGET_NR_recvmsg
:
5581 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
5584 #ifdef TARGET_NR_send
5585 case TARGET_NR_send
:
5586 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
5589 #ifdef TARGET_NR_sendmsg
5590 case TARGET_NR_sendmsg
:
5591 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
5594 #ifdef TARGET_NR_sendto
5595 case TARGET_NR_sendto
:
5596 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
5599 #ifdef TARGET_NR_shutdown
5600 case TARGET_NR_shutdown
:
5601 ret
= get_errno(shutdown(arg1
, arg2
));
5604 #ifdef TARGET_NR_socket
5605 case TARGET_NR_socket
:
5606 ret
= do_socket(arg1
, arg2
, arg3
);
5609 #ifdef TARGET_NR_socketpair
5610 case TARGET_NR_socketpair
:
5611 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
5614 #ifdef TARGET_NR_setsockopt
5615 case TARGET_NR_setsockopt
:
5616 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
5620 case TARGET_NR_syslog
:
5621 if (!(p
= lock_user_string(arg2
)))
5623 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
5624 unlock_user(p
, arg2
, 0);
5627 case TARGET_NR_setitimer
:
5629 struct itimerval value
, ovalue
, *pvalue
;
5633 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
5634 || copy_from_user_timeval(&pvalue
->it_value
,
5635 arg2
+ sizeof(struct target_timeval
)))
5640 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
5641 if (!is_error(ret
) && arg3
) {
5642 if (copy_to_user_timeval(arg3
,
5643 &ovalue
.it_interval
)
5644 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
5650 case TARGET_NR_getitimer
:
5652 struct itimerval value
;
5654 ret
= get_errno(getitimer(arg1
, &value
));
5655 if (!is_error(ret
) && arg2
) {
5656 if (copy_to_user_timeval(arg2
,
5658 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
5664 case TARGET_NR_stat
:
5665 if (!(p
= lock_user_string(arg1
)))
5667 ret
= get_errno(stat(path(p
), &st
));
5668 unlock_user(p
, arg1
, 0);
5670 case TARGET_NR_lstat
:
5671 if (!(p
= lock_user_string(arg1
)))
5673 ret
= get_errno(lstat(path(p
), &st
));
5674 unlock_user(p
, arg1
, 0);
5676 case TARGET_NR_fstat
:
5678 ret
= get_errno(fstat(arg1
, &st
));
5680 if (!is_error(ret
)) {
5681 struct target_stat
*target_st
;
5683 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
5685 memset(target_st
, 0, sizeof(*target_st
));
5686 __put_user(st
.st_dev
, &target_st
->st_dev
);
5687 __put_user(st
.st_ino
, &target_st
->st_ino
);
5688 __put_user(st
.st_mode
, &target_st
->st_mode
);
5689 __put_user(st
.st_uid
, &target_st
->st_uid
);
5690 __put_user(st
.st_gid
, &target_st
->st_gid
);
5691 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
5692 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
5693 __put_user(st
.st_size
, &target_st
->st_size
);
5694 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
5695 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
5696 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
5697 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
5698 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
5699 unlock_user_struct(target_st
, arg2
, 1);
5703 #ifdef TARGET_NR_olduname
5704 case TARGET_NR_olduname
:
5707 #ifdef TARGET_NR_iopl
5708 case TARGET_NR_iopl
:
5711 case TARGET_NR_vhangup
:
5712 ret
= get_errno(vhangup());
5714 #ifdef TARGET_NR_idle
5715 case TARGET_NR_idle
:
5718 #ifdef TARGET_NR_syscall
5719 case TARGET_NR_syscall
:
5720 ret
= do_syscall(cpu_env
,arg1
& 0xffff,arg2
,arg3
,arg4
,arg5
,arg6
,0);
5723 case TARGET_NR_wait4
:
5726 abi_long status_ptr
= arg2
;
5727 struct rusage rusage
, *rusage_ptr
;
5728 abi_ulong target_rusage
= arg4
;
5730 rusage_ptr
= &rusage
;
5733 ret
= get_errno(wait4(arg1
, &status
, arg3
, rusage_ptr
));
5734 if (!is_error(ret
)) {
5736 status
= host_to_target_waitstatus(status
);
5737 if (put_user_s32(status
, status_ptr
))
5741 host_to_target_rusage(target_rusage
, &rusage
);
5745 #ifdef TARGET_NR_swapoff
5746 case TARGET_NR_swapoff
:
5747 if (!(p
= lock_user_string(arg1
)))
5749 ret
= get_errno(swapoff(p
));
5750 unlock_user(p
, arg1
, 0);
5753 case TARGET_NR_sysinfo
:
5755 struct target_sysinfo
*target_value
;
5756 struct sysinfo value
;
5757 ret
= get_errno(sysinfo(&value
));
5758 if (!is_error(ret
) && arg1
)
5760 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
5762 __put_user(value
.uptime
, &target_value
->uptime
);
5763 __put_user(value
.loads
[0], &target_value
->loads
[0]);
5764 __put_user(value
.loads
[1], &target_value
->loads
[1]);
5765 __put_user(value
.loads
[2], &target_value
->loads
[2]);
5766 __put_user(value
.totalram
, &target_value
->totalram
);
5767 __put_user(value
.freeram
, &target_value
->freeram
);
5768 __put_user(value
.sharedram
, &target_value
->sharedram
);
5769 __put_user(value
.bufferram
, &target_value
->bufferram
);
5770 __put_user(value
.totalswap
, &target_value
->totalswap
);
5771 __put_user(value
.freeswap
, &target_value
->freeswap
);
5772 __put_user(value
.procs
, &target_value
->procs
);
5773 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
5774 __put_user(value
.freehigh
, &target_value
->freehigh
);
5775 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
5776 unlock_user_struct(target_value
, arg1
, 1);
5780 #ifdef TARGET_NR_ipc
5782 ret
= do_ipc(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
5785 #ifdef TARGET_NR_semget
5786 case TARGET_NR_semget
:
5787 ret
= get_errno(semget(arg1
, arg2
, arg3
));
5790 #ifdef TARGET_NR_semop
5791 case TARGET_NR_semop
:
5792 ret
= get_errno(do_semop(arg1
, arg2
, arg3
));
5795 #ifdef TARGET_NR_semctl
5796 case TARGET_NR_semctl
:
5797 ret
= do_semctl(arg1
, arg2
, arg3
, (union target_semun
)(abi_ulong
)arg4
);
5800 #ifdef TARGET_NR_msgctl
5801 case TARGET_NR_msgctl
:
5802 ret
= do_msgctl(arg1
, arg2
, arg3
);
5805 #ifdef TARGET_NR_msgget
5806 case TARGET_NR_msgget
:
5807 ret
= get_errno(msgget(arg1
, arg2
));
5810 #ifdef TARGET_NR_msgrcv
5811 case TARGET_NR_msgrcv
:
5812 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
5815 #ifdef TARGET_NR_msgsnd
5816 case TARGET_NR_msgsnd
:
5817 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
5820 #ifdef TARGET_NR_shmget
5821 case TARGET_NR_shmget
:
5822 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
5825 #ifdef TARGET_NR_shmctl
5826 case TARGET_NR_shmctl
:
5827 ret
= do_shmctl(arg1
, arg2
, arg3
);
5830 #ifdef TARGET_NR_shmat
5831 case TARGET_NR_shmat
:
5832 ret
= do_shmat(arg1
, arg2
, arg3
);
5835 #ifdef TARGET_NR_shmdt
5836 case TARGET_NR_shmdt
:
5837 ret
= do_shmdt(arg1
);
5840 case TARGET_NR_fsync
:
5841 ret
= get_errno(fsync(arg1
));
5843 case TARGET_NR_clone
:
5844 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
5845 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
5846 #elif defined(TARGET_CRIS)
5847 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg4
, arg5
));
5849 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
5852 #ifdef __NR_exit_group
5853 /* new thread calls */
5854 case TARGET_NR_exit_group
:
5858 gdb_exit(cpu_env
, arg1
);
5859 ret
= get_errno(exit_group(arg1
));
5862 case TARGET_NR_setdomainname
:
5863 if (!(p
= lock_user_string(arg1
)))
5865 ret
= get_errno(setdomainname(p
, arg2
));
5866 unlock_user(p
, arg1
, 0);
5868 case TARGET_NR_uname
:
5869 /* no need to transcode because we use the linux syscall */
5871 struct new_utsname
* buf
;
5873 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
5875 ret
= get_errno(sys_uname(buf
));
5876 if (!is_error(ret
)) {
                /* Overwrite the native machine name with whatever is being
5879 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
5880 /* Allow the user to override the reported release. */
5881 if (qemu_uname_release
&& *qemu_uname_release
)
5882 strcpy (buf
->release
, qemu_uname_release
);
5884 unlock_user_struct(buf
, arg1
, 1);
5888 case TARGET_NR_modify_ldt
:
5889 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
5891 #if !defined(TARGET_X86_64)
5892 case TARGET_NR_vm86old
:
5894 case TARGET_NR_vm86
:
5895 ret
= do_vm86(cpu_env
, arg1
, arg2
);
5899 case TARGET_NR_adjtimex
:
5901 #ifdef TARGET_NR_create_module
5902 case TARGET_NR_create_module
:
5904 case TARGET_NR_init_module
:
5905 case TARGET_NR_delete_module
:
5906 #ifdef TARGET_NR_get_kernel_syms
5907 case TARGET_NR_get_kernel_syms
:
5910 case TARGET_NR_quotactl
:
5912 case TARGET_NR_getpgid
:
5913 ret
= get_errno(getpgid(arg1
));
5915 case TARGET_NR_fchdir
:
5916 ret
= get_errno(fchdir(arg1
));
5918 #ifdef TARGET_NR_bdflush /* not on x86_64 */
5919 case TARGET_NR_bdflush
:
5922 #ifdef TARGET_NR_sysfs
5923 case TARGET_NR_sysfs
:
5926 case TARGET_NR_personality
:
5927 ret
= get_errno(personality(arg1
));
5929 #ifdef TARGET_NR_afs_syscall
5930 case TARGET_NR_afs_syscall
:
5933 #ifdef TARGET_NR__llseek /* Not on alpha */
5934 case TARGET_NR__llseek
:
5936 #if defined (__x86_64__)
5937 ret
= get_errno(lseek(arg1
, ((uint64_t )arg2
<< 32) | arg3
, arg5
));
5938 if (put_user_s64(ret
, arg4
))
5942 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
5943 if (put_user_s64(res
, arg4
))
5949 case TARGET_NR_getdents
:
5950 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
5952 struct target_dirent
*target_dirp
;
5953 struct linux_dirent
*dirp
;
5954 abi_long count
= arg3
;
5956 dirp
= malloc(count
);
5958 ret
= -TARGET_ENOMEM
;
5962 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
5963 if (!is_error(ret
)) {
5964 struct linux_dirent
*de
;
5965 struct target_dirent
*tde
;
5967 int reclen
, treclen
;
5968 int count1
, tnamelen
;
5972 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
5976 reclen
= de
->d_reclen
;
5977 treclen
= reclen
- (2 * (sizeof(long) - sizeof(abi_long
)));
5978 tde
->d_reclen
= tswap16(treclen
);
5979 tde
->d_ino
= tswapl(de
->d_ino
);
5980 tde
->d_off
= tswapl(de
->d_off
);
5981 tnamelen
= treclen
- (2 * sizeof(abi_long
) + 2);
5984 /* XXX: may not be correct */
5985 pstrcpy(tde
->d_name
, tnamelen
, de
->d_name
);
5986 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
5988 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
5992 unlock_user(target_dirp
, arg2
, ret
);
5998 struct linux_dirent
*dirp
;
5999 abi_long count
= arg3
;
6001 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
6003 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
6004 if (!is_error(ret
)) {
6005 struct linux_dirent
*de
;
6010 reclen
= de
->d_reclen
;
6013 de
->d_reclen
= tswap16(reclen
);
6014 tswapls(&de
->d_ino
);
6015 tswapls(&de
->d_off
);
6016 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
6020 unlock_user(dirp
, arg2
, ret
);
6024 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
6025 case TARGET_NR_getdents64
:
6027 struct linux_dirent64
*dirp
;
6028 abi_long count
= arg3
;
6029 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
6031 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
6032 if (!is_error(ret
)) {
6033 struct linux_dirent64
*de
;
6038 reclen
= de
->d_reclen
;
6041 de
->d_reclen
= tswap16(reclen
);
6042 tswap64s((uint64_t *)&de
->d_ino
);
6043 tswap64s((uint64_t *)&de
->d_off
);
6044 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
6048 unlock_user(dirp
, arg2
, ret
);
6051 #endif /* TARGET_NR_getdents64 */
6052 #ifdef TARGET_NR__newselect
6053 case TARGET_NR__newselect
:
6054 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
6057 #ifdef TARGET_NR_poll
6058 case TARGET_NR_poll
:
6060 struct target_pollfd
*target_pfd
;
6061 unsigned int nfds
= arg2
;
6066 target_pfd
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_pollfd
) * nfds
, 1);
6069 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
6070 for(i
= 0; i
< nfds
; i
++) {
6071 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
6072 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
6074 ret
= get_errno(poll(pfd
, nfds
, timeout
));
6075 if (!is_error(ret
)) {
6076 for(i
= 0; i
< nfds
; i
++) {
6077 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
6079 ret
+= nfds
* (sizeof(struct target_pollfd
)
6080 - sizeof(struct pollfd
));
6082 unlock_user(target_pfd
, arg1
, ret
);
6086 case TARGET_NR_flock
:
6087 /* NOTE: the flock constant seems to be the same for every
6089 ret
= get_errno(flock(arg1
, arg2
));
6091 case TARGET_NR_readv
:
6096 vec
= alloca(count
* sizeof(struct iovec
));
6097 if (lock_iovec(VERIFY_WRITE
, vec
, arg2
, count
, 0) < 0)
6099 ret
= get_errno(readv(arg1
, vec
, count
));
6100 unlock_iovec(vec
, arg2
, count
, 1);
6103 case TARGET_NR_writev
:
6108 vec
= alloca(count
* sizeof(struct iovec
));
6109 if (lock_iovec(VERIFY_READ
, vec
, arg2
, count
, 1) < 0)
6111 ret
= get_errno(writev(arg1
, vec
, count
));
6112 unlock_iovec(vec
, arg2
, count
, 0);
6115 case TARGET_NR_getsid
:
6116 ret
= get_errno(getsid(arg1
));
6118 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
6119 case TARGET_NR_fdatasync
:
6120 ret
= get_errno(fdatasync(arg1
));
6123 case TARGET_NR__sysctl
:
6124 /* We don't implement this, but ENOTDIR is always a safe
6126 ret
= -TARGET_ENOTDIR
;
6128 case TARGET_NR_sched_setparam
:
6130 struct sched_param
*target_schp
;
6131 struct sched_param schp
;
6133 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
6135 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
6136 unlock_user_struct(target_schp
, arg2
, 0);
6137 ret
= get_errno(sched_setparam(arg1
, &schp
));
6140 case TARGET_NR_sched_getparam
:
6142 struct sched_param
*target_schp
;
6143 struct sched_param schp
;
6144 ret
= get_errno(sched_getparam(arg1
, &schp
));
6145 if (!is_error(ret
)) {
6146 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
6148 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
6149 unlock_user_struct(target_schp
, arg2
, 1);
6153 case TARGET_NR_sched_setscheduler
:
6155 struct sched_param
*target_schp
;
6156 struct sched_param schp
;
6157 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
6159 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
6160 unlock_user_struct(target_schp
, arg3
, 0);
6161 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
6164 case TARGET_NR_sched_getscheduler
:
6165 ret
= get_errno(sched_getscheduler(arg1
));
6167 case TARGET_NR_sched_yield
:
6168 ret
= get_errno(sched_yield());
6170 case TARGET_NR_sched_get_priority_max
:
6171 ret
= get_errno(sched_get_priority_max(arg1
));
6173 case TARGET_NR_sched_get_priority_min
:
6174 ret
= get_errno(sched_get_priority_min(arg1
));
6176 case TARGET_NR_sched_rr_get_interval
:
6179 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
6180 if (!is_error(ret
)) {
6181 host_to_target_timespec(arg2
, &ts
);
6185 case TARGET_NR_nanosleep
:
6187 struct timespec req
, rem
;
6188 target_to_host_timespec(&req
, arg1
);
6189 ret
= get_errno(nanosleep(&req
, &rem
));
6190 if (is_error(ret
) && arg2
) {
6191 host_to_target_timespec(arg2
, &rem
);
6195 #ifdef TARGET_NR_query_module
6196 case TARGET_NR_query_module
:
6199 #ifdef TARGET_NR_nfsservctl
6200 case TARGET_NR_nfsservctl
:
6203 case TARGET_NR_prctl
:
6206 case PR_GET_PDEATHSIG
:
6209 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
6210 if (!is_error(ret
) && arg2
6211 && put_user_ual(deathsig
, arg2
))
6216 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
6220 #ifdef TARGET_NR_arch_prctl
6221 case TARGET_NR_arch_prctl
:
6222 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
6223 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
6229 #ifdef TARGET_NR_pread
6230 case TARGET_NR_pread
:
6232 if (((CPUARMState
*)cpu_env
)->eabi
)
6235 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
6237 ret
= get_errno(pread(arg1
, p
, arg3
, arg4
));
6238 unlock_user(p
, arg2
, ret
);
6240 case TARGET_NR_pwrite
:
6242 if (((CPUARMState
*)cpu_env
)->eabi
)
6245 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
6247 ret
= get_errno(pwrite(arg1
, p
, arg3
, arg4
));
6248 unlock_user(p
, arg2
, 0);
6251 #ifdef TARGET_NR_pread64
6252 case TARGET_NR_pread64
:
6253 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
6255 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
6256 unlock_user(p
, arg2
, ret
);
6258 case TARGET_NR_pwrite64
:
6259 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
6261 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
6262 unlock_user(p
, arg2
, 0);
6265 case TARGET_NR_getcwd
:
6266 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
6268 ret
= get_errno(sys_getcwd1(p
, arg2
));
6269 unlock_user(p
, arg1
, ret
);
6271 case TARGET_NR_capget
:
6273 case TARGET_NR_capset
:
6275 case TARGET_NR_sigaltstack
:
6276 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
6277 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
6278 defined(TARGET_M68K)
6279 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUState
*)cpu_env
));
6284 case TARGET_NR_sendfile
:
6286 #ifdef TARGET_NR_getpmsg
6287 case TARGET_NR_getpmsg
:
6290 #ifdef TARGET_NR_putpmsg
6291 case TARGET_NR_putpmsg
:
6294 #ifdef TARGET_NR_vfork
6295 case TARGET_NR_vfork
:
6296 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
6300 #ifdef TARGET_NR_ugetrlimit
6301 case TARGET_NR_ugetrlimit
:
6304 ret
= get_errno(getrlimit(arg1
, &rlim
));
6305 if (!is_error(ret
)) {
6306 struct target_rlimit
*target_rlim
;
6307 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
6309 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
6310 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
6311 unlock_user_struct(target_rlim
, arg2
, 1);
6316 #ifdef TARGET_NR_truncate64
6317 case TARGET_NR_truncate64
:
6318 if (!(p
= lock_user_string(arg1
)))
6320 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
6321 unlock_user(p
, arg1
, 0);
6324 #ifdef TARGET_NR_ftruncate64
6325 case TARGET_NR_ftruncate64
:
6326 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
6329 #ifdef TARGET_NR_stat64
6330 case TARGET_NR_stat64
:
6331 if (!(p
= lock_user_string(arg1
)))
6333 ret
= get_errno(stat(path(p
), &st
));
6334 unlock_user(p
, arg1
, 0);
6336 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
6339 #ifdef TARGET_NR_lstat64
6340 case TARGET_NR_lstat64
:
6341 if (!(p
= lock_user_string(arg1
)))
6343 ret
= get_errno(lstat(path(p
), &st
));
6344 unlock_user(p
, arg1
, 0);
6346 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
6349 #ifdef TARGET_NR_fstat64
6350 case TARGET_NR_fstat64
:
6351 ret
= get_errno(fstat(arg1
, &st
));
6353 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
6356 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
6357 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
6358 #ifdef TARGET_NR_fstatat64
6359 case TARGET_NR_fstatat64
:
6361 #ifdef TARGET_NR_newfstatat
6362 case TARGET_NR_newfstatat
:
6364 if (!(p
= lock_user_string(arg2
)))
6366 #ifdef __NR_fstatat64
6367 ret
= get_errno(sys_fstatat64(arg1
, path(p
), &st
, arg4
));
6369 ret
= get_errno(sys_newfstatat(arg1
, path(p
), &st
, arg4
));
6372 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
6376 case TARGET_NR_lchown
:
6377 if (!(p
= lock_user_string(arg1
)))
6379 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
6380 unlock_user(p
, arg1
, 0);
6382 case TARGET_NR_getuid
:
6383 ret
= get_errno(high2lowuid(getuid()));
6385 case TARGET_NR_getgid
:
6386 ret
= get_errno(high2lowgid(getgid()));
6388 case TARGET_NR_geteuid
:
6389 ret
= get_errno(high2lowuid(geteuid()));
6391 case TARGET_NR_getegid
:
6392 ret
= get_errno(high2lowgid(getegid()));
6394 case TARGET_NR_setreuid
:
6395 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
6397 case TARGET_NR_setregid
:
6398 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
6400 case TARGET_NR_getgroups
:
6402 int gidsetsize
= arg1
;
6403 uint16_t *target_grouplist
;
6407 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
6408 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
6409 if (gidsetsize
== 0)
6411 if (!is_error(ret
)) {
6412 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 2, 0);
6413 if (!target_grouplist
)
6415 for(i
= 0;i
< ret
; i
++)
6416 target_grouplist
[i
] = tswap16(grouplist
[i
]);
6417 unlock_user(target_grouplist
, arg2
, gidsetsize
* 2);
6421 case TARGET_NR_setgroups
:
6423 int gidsetsize
= arg1
;
6424 uint16_t *target_grouplist
;
6428 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
6429 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 2, 1);
6430 if (!target_grouplist
) {
6431 ret
= -TARGET_EFAULT
;
6434 for(i
= 0;i
< gidsetsize
; i
++)
6435 grouplist
[i
] = tswap16(target_grouplist
[i
]);
6436 unlock_user(target_grouplist
, arg2
, 0);
6437 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
6440 case TARGET_NR_fchown
:
6441 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
6443 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
6444 case TARGET_NR_fchownat
:
6445 if (!(p
= lock_user_string(arg2
)))
6447 ret
= get_errno(sys_fchownat(arg1
, p
, low2highuid(arg3
), low2highgid(arg4
), arg5
));
6448 unlock_user(p
, arg2
, 0);
6451 #ifdef TARGET_NR_setresuid
6452 case TARGET_NR_setresuid
:
6453 ret
= get_errno(setresuid(low2highuid(arg1
),
6455 low2highuid(arg3
)));
6458 #ifdef TARGET_NR_getresuid
6459 case TARGET_NR_getresuid
:
6461 uid_t ruid
, euid
, suid
;
6462 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
6463 if (!is_error(ret
)) {
6464 if (put_user_u16(high2lowuid(ruid
), arg1
)
6465 || put_user_u16(high2lowuid(euid
), arg2
)
6466 || put_user_u16(high2lowuid(suid
), arg3
))
6472 #ifdef TARGET_NR_getresgid
6473 case TARGET_NR_setresgid
:
6474 ret
= get_errno(setresgid(low2highgid(arg1
),
6476 low2highgid(arg3
)));
6479 #ifdef TARGET_NR_getresgid
6480 case TARGET_NR_getresgid
:
6482 gid_t rgid
, egid
, sgid
;
6483 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
6484 if (!is_error(ret
)) {
6485 if (put_user_u16(high2lowgid(rgid
), arg1
)
6486 || put_user_u16(high2lowgid(egid
), arg2
)
6487 || put_user_u16(high2lowgid(sgid
), arg3
))
6493 case TARGET_NR_chown
:
6494 if (!(p
= lock_user_string(arg1
)))
6496 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
6497 unlock_user(p
, arg1
, 0);
6499 case TARGET_NR_setuid
:
6500 ret
= get_errno(setuid(low2highuid(arg1
)));
6502 case TARGET_NR_setgid
:
6503 ret
= get_errno(setgid(low2highgid(arg1
)));
6505 case TARGET_NR_setfsuid
:
6506 ret
= get_errno(setfsuid(arg1
));
6508 case TARGET_NR_setfsgid
:
6509 ret
= get_errno(setfsgid(arg1
));
6511 #endif /* USE_UID16 */
6513 #ifdef TARGET_NR_lchown32
6514 case TARGET_NR_lchown32
:
6515 if (!(p
= lock_user_string(arg1
)))
6517 ret
= get_errno(lchown(p
, arg2
, arg3
));
6518 unlock_user(p
, arg1
, 0);
6521 #ifdef TARGET_NR_getuid32
6522 case TARGET_NR_getuid32
:
6523 ret
= get_errno(getuid());
6527 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
6528 /* Alpha specific */
6529 case TARGET_NR_getxuid
:
6533 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
6535 ret
= get_errno(getuid());
6538 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
6539 /* Alpha specific */
6540 case TARGET_NR_getxgid
:
6544 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
6546 ret
= get_errno(getgid());
6549 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
6550 /* Alpha specific */
6551 case TARGET_NR_osf_getsysinfo
:
6552 ret
= -TARGET_EOPNOTSUPP
;
6554 case TARGET_GSI_IEEE_FP_CONTROL
:
6556 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
6558 /* Copied from linux ieee_fpcr_to_swcr. */
6559 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
6560 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
6561 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
6562 | SWCR_TRAP_ENABLE_DZE
6563 | SWCR_TRAP_ENABLE_OVF
);
6564 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
6565 | SWCR_TRAP_ENABLE_INE
);
6566 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
6567 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
6569 if (put_user_u64 (swcr
, arg2
))
6575 /* case GSI_IEEE_STATE_AT_SIGNAL:
6576 -- Not implemented in linux kernel.
6578 -- Retrieves current unaligned access state; not much used.
6580 -- Retrieves implver information; surely not used.
6582 -- Grabs a copy of the HWRPB; surely not used.
6587 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
6588 /* Alpha specific */
6589 case TARGET_NR_osf_setsysinfo
:
6590 ret
= -TARGET_EOPNOTSUPP
;
6592 case TARGET_SSI_IEEE_FP_CONTROL
:
6593 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
6595 uint64_t swcr
, fpcr
, orig_fpcr
;
6597 if (get_user_u64 (swcr
, arg2
))
6599 orig_fpcr
= cpu_alpha_load_fpcr (cpu_env
);
6600 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
6602 /* Copied from linux ieee_swcr_to_fpcr. */
6603 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
6604 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
6605 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
6606 | SWCR_TRAP_ENABLE_DZE
6607 | SWCR_TRAP_ENABLE_OVF
)) << 48;
6608 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
6609 | SWCR_TRAP_ENABLE_INE
)) << 57;
6610 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
6611 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
6613 cpu_alpha_store_fpcr (cpu_env
, fpcr
);
6616 if (arg1
== TARGET_SSI_IEEE_RAISE_EXCEPTION
) {
6617 /* Old exceptions are not signaled. */
6618 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
6620 /* If any exceptions set by this call, and are unmasked,
6627 /* case SSI_NVPAIRS:
6628 -- Used with SSIN_UACPROC to enable unaligned accesses.
6629 case SSI_IEEE_STATE_AT_SIGNAL:
6630 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
6631 -- Not implemented in linux kernel
6636 #ifdef TARGET_NR_osf_sigprocmask
6637 /* Alpha specific. */
6638 case TARGET_NR_osf_sigprocmask
:
6642 sigset_t set
, oldset
;
6645 case TARGET_SIG_BLOCK
:
6648 case TARGET_SIG_UNBLOCK
:
6651 case TARGET_SIG_SETMASK
:
6655 ret
= -TARGET_EINVAL
;
6659 target_to_host_old_sigset(&set
, &mask
);
6660 sigprocmask(arg1
, &set
, &oldset
);
6661 host_to_target_old_sigset(&mask
, &oldset
);
6667 #ifdef TARGET_NR_getgid32
6668 case TARGET_NR_getgid32
:
6669 ret
= get_errno(getgid());
6672 #ifdef TARGET_NR_geteuid32
6673 case TARGET_NR_geteuid32
:
6674 ret
= get_errno(geteuid());
6677 #ifdef TARGET_NR_getegid32
6678 case TARGET_NR_getegid32
:
6679 ret
= get_errno(getegid());
6682 #ifdef TARGET_NR_setreuid32
6683 case TARGET_NR_setreuid32
:
6684 ret
= get_errno(setreuid(arg1
, arg2
));
6687 #ifdef TARGET_NR_setregid32
6688 case TARGET_NR_setregid32
:
6689 ret
= get_errno(setregid(arg1
, arg2
));
6692 #ifdef TARGET_NR_getgroups32
6693 case TARGET_NR_getgroups32
:
6695 int gidsetsize
= arg1
;
6696 uint32_t *target_grouplist
;
6700 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
6701 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
6702 if (gidsetsize
== 0)
6704 if (!is_error(ret
)) {
6705 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
6706 if (!target_grouplist
) {
6707 ret
= -TARGET_EFAULT
;
6710 for(i
= 0;i
< ret
; i
++)
6711 target_grouplist
[i
] = tswap32(grouplist
[i
]);
6712 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
6717 #ifdef TARGET_NR_setgroups32
6718 case TARGET_NR_setgroups32
:
6720 int gidsetsize
= arg1
;
6721 uint32_t *target_grouplist
;
6725 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
6726 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
6727 if (!target_grouplist
) {
6728 ret
= -TARGET_EFAULT
;
6731 for(i
= 0;i
< gidsetsize
; i
++)
6732 grouplist
[i
] = tswap32(target_grouplist
[i
]);
6733 unlock_user(target_grouplist
, arg2
, 0);
6734 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
6738 #ifdef TARGET_NR_fchown32
6739 case TARGET_NR_fchown32
:
6740 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
6743 #ifdef TARGET_NR_setresuid32
6744 case TARGET_NR_setresuid32
:
6745 ret
= get_errno(setresuid(arg1
, arg2
, arg3
));
6748 #ifdef TARGET_NR_getresuid32
6749 case TARGET_NR_getresuid32
:
6751 uid_t ruid
, euid
, suid
;
6752 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
6753 if (!is_error(ret
)) {
6754 if (put_user_u32(ruid
, arg1
)
6755 || put_user_u32(euid
, arg2
)
6756 || put_user_u32(suid
, arg3
))
6762 #ifdef TARGET_NR_setresgid32
6763 case TARGET_NR_setresgid32
:
6764 ret
= get_errno(setresgid(arg1
, arg2
, arg3
));
6767 #ifdef TARGET_NR_getresgid32
6768 case TARGET_NR_getresgid32
:
6770 gid_t rgid
, egid
, sgid
;
6771 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
6772 if (!is_error(ret
)) {
6773 if (put_user_u32(rgid
, arg1
)
6774 || put_user_u32(egid
, arg2
)
6775 || put_user_u32(sgid
, arg3
))
6781 #ifdef TARGET_NR_chown32
6782 case TARGET_NR_chown32
:
6783 if (!(p
= lock_user_string(arg1
)))
6785 ret
= get_errno(chown(p
, arg2
, arg3
));
6786 unlock_user(p
, arg1
, 0);
6789 #ifdef TARGET_NR_setuid32
6790 case TARGET_NR_setuid32
:
6791 ret
= get_errno(setuid(arg1
));
6794 #ifdef TARGET_NR_setgid32
6795 case TARGET_NR_setgid32
:
6796 ret
= get_errno(setgid(arg1
));
6799 #ifdef TARGET_NR_setfsuid32
6800 case TARGET_NR_setfsuid32
:
6801 ret
= get_errno(setfsuid(arg1
));
6804 #ifdef TARGET_NR_setfsgid32
6805 case TARGET_NR_setfsgid32
:
6806 ret
= get_errno(setfsgid(arg1
));
6810 case TARGET_NR_pivot_root
:
6812 #ifdef TARGET_NR_mincore
6813 case TARGET_NR_mincore
:
6816 ret
= -TARGET_EFAULT
;
6817 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
6819 if (!(p
= lock_user_string(arg3
)))
6821 ret
= get_errno(mincore(a
, arg2
, p
));
6822 unlock_user(p
, arg3
, ret
);
6824 unlock_user(a
, arg1
, 0);
6828 #ifdef TARGET_NR_arm_fadvise64_64
6829 case TARGET_NR_arm_fadvise64_64
:
6832 * arm_fadvise64_64 looks like fadvise64_64 but
6833 * with different argument order
6841 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
6842 #ifdef TARGET_NR_fadvise64_64
6843 case TARGET_NR_fadvise64_64
:
6845 #ifdef TARGET_NR_fadvise64
6846 case TARGET_NR_fadvise64
:
6850 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
6851 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
6852 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
6853 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
6857 ret
= -posix_fadvise(arg1
, arg2
, arg3
, arg4
);
6860 #ifdef TARGET_NR_madvise
6861 case TARGET_NR_madvise
:
        /* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
           This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok. */
6869 #if TARGET_ABI_BITS == 32
6870 case TARGET_NR_fcntl64
:
6874 struct target_flock64
*target_fl
;
6876 struct target_eabi_flock64
*target_efl
;
6879 cmd
= target_to_host_fcntl_cmd(arg2
);
6880 if (cmd
== -TARGET_EINVAL
)
6884 case TARGET_F_GETLK64
:
6886 if (((CPUARMState
*)cpu_env
)->eabi
) {
6887 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
6889 fl
.l_type
= tswap16(target_efl
->l_type
);
6890 fl
.l_whence
= tswap16(target_efl
->l_whence
);
6891 fl
.l_start
= tswap64(target_efl
->l_start
);
6892 fl
.l_len
= tswap64(target_efl
->l_len
);
6893 fl
.l_pid
= tswap32(target_efl
->l_pid
);
6894 unlock_user_struct(target_efl
, arg3
, 0);
6898 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
6900 fl
.l_type
= tswap16(target_fl
->l_type
);
6901 fl
.l_whence
= tswap16(target_fl
->l_whence
);
6902 fl
.l_start
= tswap64(target_fl
->l_start
);
6903 fl
.l_len
= tswap64(target_fl
->l_len
);
6904 fl
.l_pid
= tswap32(target_fl
->l_pid
);
6905 unlock_user_struct(target_fl
, arg3
, 0);
6907 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
6910 if (((CPUARMState
*)cpu_env
)->eabi
) {
6911 if (!lock_user_struct(VERIFY_WRITE
, target_efl
, arg3
, 0))
6913 target_efl
->l_type
= tswap16(fl
.l_type
);
6914 target_efl
->l_whence
= tswap16(fl
.l_whence
);
6915 target_efl
->l_start
= tswap64(fl
.l_start
);
6916 target_efl
->l_len
= tswap64(fl
.l_len
);
6917 target_efl
->l_pid
= tswap32(fl
.l_pid
);
6918 unlock_user_struct(target_efl
, arg3
, 1);
                } else
#endif
                {
                    if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
                        goto efault;
                    target_fl->l_type = tswap16(fl.l_type);
                    target_fl->l_whence = tswap16(fl.l_whence);
                    target_fl->l_start = tswap64(fl.l_start);
                    target_fl->l_len = tswap64(fl.l_len);
                    target_fl->l_pid = tswap32(fl.l_pid);
                    unlock_user_struct(target_fl, arg3, 1);
                }
            }
            break;
        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
#ifdef TARGET_ARM
            if (((CPUARMState *)cpu_env)->eabi) {
                if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_efl->l_type);
                fl.l_whence = tswap16(target_efl->l_whence);
                fl.l_start = tswap64(target_efl->l_start);
                fl.l_len = tswap64(target_efl->l_len);
                fl.l_pid = tswap32(target_efl->l_pid);
                unlock_user_struct(target_efl, arg3, 0);
            } else
#endif
            {
                if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_fl->l_type);
                fl.l_whence = tswap16(target_fl->l_whence);
                fl.l_start = tswap64(target_fl->l_start);
                fl.l_len = tswap64(target_fl->l_len);
                fl.l_pid = tswap32(target_fl->l_pid);
                unlock_user_struct(target_fl, arg3, 0);
            }
            ret = get_errno(fcntl(arg1, cmd, &fl));
            break;
        default:
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        break;
    }
#endif
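/* Editor's note (an assumption drawn from the struct definitions in
 * linux-user/syscall_defs.h, which are not part of this excerpt): two target
 * flock64 layouts exist because the ARM EABI inserts explicit padding so the
 * 64-bit fields are 8-byte aligned, while the old ABI packs them straight
 * after l_whence, roughly:
 *
 *     struct target_flock64 {           struct target_eabi_flock64 {
 *         short              l_type;        short              l_type;
 *         short              l_whence;      short              l_whence;
 *         unsigned long long l_start;       int                __pad;
 *         unsigned long long l_len;         unsigned long long l_start;
 *         int                l_pid;         unsigned long long l_len;
 *     };                                    int                l_pid;
 *                                       };
 *
 * which is why each direction of the fcntl64 conversion above needs both an
 * EABI and a non-EABI branch. */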
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        ret = 0;
        break;
#endif
#ifdef TARGET_NR_security
    case TARGET_NR_security:
        goto unimplemented;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        ret = TARGET_PAGE_SIZE;
        break;
#endif
    case TARGET_NR_gettid:
        ret = get_errno(gettid());
        break;
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32
#ifdef TARGET_ARM
        if (((CPUARMState *)cpu_env)->eabi) {
            /* EABI passes the 64-bit offset in an aligned register pair,
               so the useful arguments are shifted up by one. */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
#endif
        ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        break;
#endif
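/* Worked example for the 32-bit path above (editor's note): a guest offset of
 * 0x100002000 arrives as arg2 = 0x00002000 (low word) and arg3 = 0x1 (high
 * word), so ((off64_t)arg3 << 32) | arg2 reassembles 0x100002000 before the
 * host readahead() call. */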
#ifdef TARGET_NR_setxattr
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
    case TARGET_NR_fsetxattr:
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
    case TARGET_NR_fgetxattr:
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    case TARGET_NR_flistxattr:
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
    case TARGET_NR_fremovexattr:
        ret = -TARGET_EOPNOTSUPP;
        break;
#endif
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
        ((CPUMIPSState *) cpu_env)->tls_value = arg1;
        ret = 0;
        break;
#elif defined(TARGET_CRIS)
        if (arg1 & 0xff)
            ret = -TARGET_EINVAL;
        else {
            ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
            ret = 0;
        }
        break;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
        ret = do_set_thread_area(cpu_env, arg1);
        break;
#else
        goto unimplemented_nowarn;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        ret = do_get_thread_area(cpu_env, arg1);
#else
        goto unimplemented_nowarn;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        goto unimplemented_nowarn;
#endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        break;
    }
#endif
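/* Editor's sketch (an assumption about the host_to_target_timespec helper
 * defined earlier in this file, not shown in this excerpt): the conversion
 * used by the clock_* cases simply byte-swaps tv_sec/tv_nsec into a
 * struct target_timespec in guest memory, roughly:
 *
 *     struct target_timespec *tts;
 *     if (!lock_user_struct(VERIFY_WRITE, tts, target_addr, 0))
 *         return -TARGET_EFAULT;
 *     tts->tv_sec  = tswapl(host_ts->tv_sec);
 *     tts->tv_nsec = tswapl(host_ts->tv_nsec);
 *     unlock_user_struct(tts, target_addr, 1);
 */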
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        break;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        target_to_host_timespec(&ts, arg3);
        ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
        if (arg4)
            host_to_target_timespec(arg4, &ts);
        break;
    }
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        ret = get_errno(set_tid_address((int *)g2h(arg1)));
        break;
#endif
#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
    case TARGET_NR_tkill:
        ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
        break;
#endif
#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
    case TARGET_NR_tgkill:
        ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
                        target_to_host_signal(arg3)));
        break;
#endif
#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
        goto unimplemented_nowarn;
#endif
#if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
    case TARGET_NR_utimensat:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                target_to_host_timespec(ts, arg3);
                target_to_host_timespec(ts + 1, arg3 + sizeof(struct target_timespec));
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    ret = -TARGET_EFAULT;
                    goto fail;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        break;
#endif
#if defined(CONFIG_USE_NPTL)
    case TARGET_NR_futex:
        ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
        break;
#endif
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(sys_inotify_init());
        break;
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
    case TARGET_NR_inotify_init1:
        ret = get_errno(sys_inotify_init1(arg1));
        break;
#endif
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        break;
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
        break;
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr;

            p = lock_user_string(arg1 - 1);
            if (arg4 != 0)
                copy_from_user_mq_attr(&posix_mq_attr, arg4);
            ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
            unlock_user(p, arg1, 0);
        }
        break;
    case TARGET_NR_mq_unlink:
        p = lock_user_string(arg1 - 1);
        ret = get_errno(mq_unlink(p));
        unlock_user(p, arg1, 0);
        break;
    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
                host_to_target_timespec(arg5, &ts);
            } else {
                ret = get_errno(mq_send(arg1, p, arg3, arg4));
            }
            unlock_user(p, arg2, arg3);
        }
        break;
    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
                host_to_target_timespec(arg5, &ts);
            } else {
                ret = get_errno(mq_receive(arg1, p, arg3, &prio));
            }
            unlock_user(p, arg2, arg3);
            if (arg4 != 0)
                put_user_u32(prio, arg4);
        }
        break;
    /* Not implemented for now... */
    /* case TARGET_NR_mq_notify: */
    case TARGET_NR_mq_getsetattr:
        {
            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
            ret = 0;
            if (arg3 != 0) {
                ret = mq_getattr(arg1, &posix_mq_attr_out);
                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
            }
            if (arg2 != 0) {
                copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
                ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
            }
        }
        break;
#endif
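/* Editor's sketch (an assumption about the copy_from_user_mq_attr /
 * copy_to_user_mq_attr helpers defined earlier in the file): they translate
 * struct mq_attr field by field between guest and host byte order, roughly:
 *
 *     lock_user_struct(VERIFY_READ, target_mq_attr, addr, 1);
 *     __get_user(attr->mq_flags,   &target_mq_attr->mq_flags);
 *     __get_user(attr->mq_maxmsg,  &target_mq_attr->mq_maxmsg);
 *     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
 *     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
 *     unlock_user_struct(target_mq_attr, addr, 0);
 */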
#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            ret = get_errno(tee(arg1, arg2, arg3, arg4));
        }
        break;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                get_user_u64(loff_in, arg2);
                ploff_in = &loff_in;
            }
            if (arg4) {
                /* the output offset is passed in arg4, not arg2 */
                get_user_u64(loff_out, arg4);
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
        }
        break;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            int count = arg3;
            struct iovec *vec;

            vec = alloca(count * sizeof(struct iovec));
            if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
                goto efault;
            ret = get_errno(vmsplice(arg1, vec, count, arg4));
            unlock_iovec(vec, arg2, count, 0);
        }
        break;
#endif
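/* Editor's note (an assumption about the lock_iovec/unlock_iovec helpers used
 * above, which are defined earlier in this file): lock_iovec reads the guest
 * iovec array at arg2, byte-swaps each iov_len, and replaces each guest
 * iov_base with a host pointer obtained via lock_user(), so the host
 * vmsplice() can consume the vector directly; unlock_iovec then releases
 * those mappings, copying data back only when the iovec was locked for
 * writing. */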
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        break;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
        ret = get_errno(eventfd(arg1, arg2));
        break;
#endif
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
        break;
#endif
    default:
    unimplemented:
        gemu_log("qemu: Unsupported syscall: %d\n", num);
#if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
    unimplemented_nowarn:
#endif
        ret = -TARGET_ENOSYS;
        break;
    }
fail:
#ifdef DEBUG
    gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
#endif
    if (do_strace)
        print_syscall_ret(num, ret);
    return ret;
efault:
    ret = -TARGET_EFAULT;