4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
21 #define _ATFILE_SOURCE
34 #include <sys/types.h>
40 #include <sys/mount.h>
41 #include <sys/prctl.h>
42 #include <sys/resource.h>
47 #include <sys/socket.h>
51 #include <sys/times.h>
54 #include <sys/statfs.h>
56 #include <sys/sysinfo.h>
57 #include <sys/utsname.h>
58 //#include <sys/user.h>
59 #include <netinet/ip.h>
60 #include <netinet/tcp.h>
61 #include <qemu-common.h>
66 #define termios host_termios
67 #define winsize host_winsize
68 #define termio host_termio
69 #define sgttyb host_sgttyb /* same as target */
70 #define tchars host_tchars /* same as target */
71 #define ltchars host_ltchars /* same as target */
73 #include <linux/termios.h>
74 #include <linux/unistd.h>
75 #include <linux/utsname.h>
76 #include <linux/cdrom.h>
77 #include <linux/hdreg.h>
78 #include <linux/soundcard.h>
80 #include <linux/mtio.h>
82 #include "linux_loop.h"
85 #include "qemu-common.h"
88 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
89 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
91 /* XXX: Hardcode the above values. */
92 #define CLONE_NPTL_FLAGS2 0
97 //#include <linux/msdos_fs.h>
98 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
99 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
/* Generators for raw host-syscall stubs.  _syscallN(type, name, ...)
 * expands to a static function `name` that forwards its N arguments
 * straight to syscall(__NR_name, ...), bypassing the libc wrappers. */
#define _syscall0(type,name)		\
static type name (void)			\
{					\
	return syscall(__NR_##name);	\
}

#define _syscall1(type,name,type1,arg1)		\
static type name (type1 arg1)			\
{						\
	return syscall(__NR_##name, arg1);	\
}

#define _syscall2(type,name,type1,arg1,type2,arg2)	\
static type name (type1 arg1,type2 arg2)		\
{							\
	return syscall(__NR_##name, arg1, arg2);	\
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
static type name (type1 arg1,type2 arg2,type3 arg3)		\
{								\
	return syscall(__NR_##name, arg1, arg2, arg3);		\
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
{										\
	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
		  type5,arg5)							\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
{										\
	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
		  type5,arg5,type6,arg6)					\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
		  type6 arg6)							\
{										\
	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
}
/* Aliases mapping our sys_-prefixed stub names onto the host syscall
 * numbers, so the _syscallN generators above can emit stubs whose names
 * never collide with the host libc's own wrappers. */
#define __NR_sys_uname __NR_uname
#define __NR_sys_faccessat __NR_faccessat
#define __NR_sys_fchmodat __NR_fchmodat
#define __NR_sys_fchownat __NR_fchownat
#define __NR_sys_fstatat64 __NR_fstatat64
#define __NR_sys_futimesat __NR_futimesat
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_linkat __NR_linkat
#define __NR_sys_mkdirat __NR_mkdirat
#define __NR_sys_mknodat __NR_mknodat
#define __NR_sys_newfstatat __NR_newfstatat
#define __NR_sys_openat __NR_openat
#define __NR_sys_readlinkat __NR_readlinkat
#define __NR_sys_renameat __NR_renameat
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_symlinkat __NR_symlinkat
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_tgkill __NR_tgkill
#define __NR_sys_tkill __NR_tkill
#define __NR_sys_unlinkat __NR_unlinkat
#define __NR_sys_utimensat __NR_utimensat
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch

/* These hosts have no 32/64 split llseek; plain lseek is 64-bit there. */
#if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__)
#define __NR__llseek __NR_lseek
#endif
#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    return -ENOSYS;
}
#endif
199 #if TARGET_ABI_BITS == 32
200 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
202 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
203 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
205 _syscall2(int, sys_getpriority
, int, which
, int, who
);
206 #if defined(TARGET_NR__llseek) && !defined (__x86_64__)
207 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
208 loff_t
*, res
, uint
, wh
);
210 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
211 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
212 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
213 _syscall3(int,sys_tgkill
,int,tgid
,int,pid
,int,sig
)
215 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
216 _syscall2(int,sys_tkill
,int,tid
,int,sig
)
218 #ifdef __NR_exit_group
219 _syscall1(int,exit_group
,int,error_code
)
221 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
222 _syscall1(int,set_tid_address
,int *,tidptr
)
224 #if defined(USE_NPTL)
225 #if defined(TARGET_NR_futex) && defined(__NR_futex)
226 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
227 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
231 static bitmask_transtbl fcntl_flags_tbl
[] = {
232 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
233 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
234 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
235 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
236 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
237 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
238 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
239 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
240 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
241 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
242 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
243 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
244 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
245 #if defined(O_DIRECT)
246 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
/* Copy one utsname field, guaranteeing NUL termination: the kernel's
 * __NEW_UTS_LEN does not include the terminating null byte. */
#define COPY_UTSNAME_FIELD(dest, src) \
  do { \
      /* __NEW_UTS_LEN doesn't include terminating null */ \
      (void) strncpy((dest), (src), __NEW_UTS_LEN); \
      (dest)[__NEW_UTS_LEN] = '\0'; \
  } while (0)

/* Fill a kernel-layout new_utsname from the host's uname(2).
 * Returns 0 on success, -1 (with host errno set) on failure. */
static int sys_uname(struct new_utsname *buf)
{
  struct utsname uts_buf;

  if (uname(&uts_buf) < 0)
      return (-1);

  /*
   * Just in case these have some differences, we
   * translate utsname to new_utsname (which is the
   * struct linux kernel uses).
   */

  memset(buf, 0, sizeof(*buf));
  COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
  COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
  COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
  COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
  COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
#ifdef _GNU_SOURCE
  COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
#endif
  return (0);

#undef COPY_UTSNAME_FIELD
}
/* getcwd(2)-style helper: on success returns the length of the path
 * INCLUDING the trailing NUL (the kernel syscall convention); on
 * failure returns -1 with errno already set by getcwd(). */
static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}
/*
 * Host system seems to have atfile syscall stubs available. We
 * now enable them one by one as specified by target syscall_nr.h.
 */

#ifdef TARGET_NR_faccessat
static int sys_faccessat(int dirfd, const char *pathname, int mode)
{
  return (faccessat(dirfd, pathname, mode, 0));
}
#endif
#ifdef TARGET_NR_fchmodat
static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
{
  return (fchmodat(dirfd, pathname, mode, 0));
}
#endif
#if defined(TARGET_NR_fchownat) && defined(USE_UID16)
static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
    gid_t group, int flags)
{
  return (fchownat(dirfd, pathname, owner, group, flags));
}
#endif
#ifdef __NR_fstatat64
static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
    int flags)
{
  return (fstatat(dirfd, pathname, buf, flags));
}
#endif
#ifdef __NR_newfstatat
static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
    int flags)
{
  return (fstatat(dirfd, pathname, buf, flags));
}
#endif
#ifdef TARGET_NR_futimesat
static int sys_futimesat(int dirfd, const char *pathname,
    const struct timeval times[2])
{
  return (futimesat(dirfd, pathname, times));
}
#endif
#ifdef TARGET_NR_linkat
static int sys_linkat(int olddirfd, const char *oldpath,
    int newdirfd, const char *newpath, int flags)
{
  return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
}
#endif
#ifdef TARGET_NR_mkdirat
static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
{
  return (mkdirat(dirfd, pathname, mode));
}
#endif
#ifdef TARGET_NR_mknodat
static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
    dev_t dev)
{
  return (mknodat(dirfd, pathname, mode, dev));
}
#endif
#ifdef TARGET_NR_openat
static int sys_openat(int dirfd, const char *pathname, int flags, ...)
{
  /*
   * open(2) has extra parameter 'mode' when called with
   * flag O_CREAT.
   */
  if ((flags & O_CREAT) != 0) {
      va_list ap;
      mode_t mode;

      /*
       * Get the 'mode' parameter and translate it to
       * host bits.
       */
      va_start(ap, flags);
      mode = va_arg(ap, mode_t);
      mode = target_to_host_bitmask(mode, fcntl_flags_tbl);
      va_end(ap);

      return (openat(dirfd, pathname, flags, mode));
  }
  return (openat(dirfd, pathname, flags));
}
#endif
#ifdef TARGET_NR_readlinkat
static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
{
  return (readlinkat(dirfd, pathname, buf, bufsiz));
}
#endif
#ifdef TARGET_NR_renameat
static int sys_renameat(int olddirfd, const char *oldpath,
    int newdirfd, const char *newpath)
{
  return (renameat(olddirfd, oldpath, newdirfd, newpath));
}
#endif
#ifdef TARGET_NR_symlinkat
static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
{
  return (symlinkat(oldpath, newdirfd, newpath));
}
#endif
#ifdef TARGET_NR_unlinkat
static int sys_unlinkat(int dirfd, const char *pathname, int flags)
{
  return (unlinkat(dirfd, pathname, flags));
}
#endif
410 #else /* !CONFIG_ATFILE */
413 * Try direct syscalls instead
415 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
416 _syscall3(int,sys_faccessat
,int,dirfd
,const char *,pathname
,int,mode
)
418 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
419 _syscall3(int,sys_fchmodat
,int,dirfd
,const char *,pathname
, mode_t
,mode
)
421 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat) && defined(USE_UID16)
422 _syscall5(int,sys_fchownat
,int,dirfd
,const char *,pathname
,
423 uid_t
,owner
,gid_t
,group
,int,flags
)
425 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
426 defined(__NR_fstatat64)
427 _syscall4(int,sys_fstatat64
,int,dirfd
,const char *,pathname
,
428 struct stat
*,buf
,int,flags
)
430 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
431 _syscall3(int,sys_futimesat
,int,dirfd
,const char *,pathname
,
432 const struct timeval
*,times
)
434 #if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
435 defined(__NR_newfstatat)
436 _syscall4(int,sys_newfstatat
,int,dirfd
,const char *,pathname
,
437 struct stat
*,buf
,int,flags
)
439 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
440 _syscall5(int,sys_linkat
,int,olddirfd
,const char *,oldpath
,
441 int,newdirfd
,const char *,newpath
,int,flags
)
443 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
444 _syscall3(int,sys_mkdirat
,int,dirfd
,const char *,pathname
,mode_t
,mode
)
446 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
447 _syscall4(int,sys_mknodat
,int,dirfd
,const char *,pathname
,
448 mode_t
,mode
,dev_t
,dev
)
450 #if defined(TARGET_NR_openat) && defined(__NR_openat)
451 _syscall4(int,sys_openat
,int,dirfd
,const char *,pathname
,int,flags
,mode_t
,mode
)
453 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
454 _syscall4(int,sys_readlinkat
,int,dirfd
,const char *,pathname
,
455 char *,buf
,size_t,bufsize
)
457 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
458 _syscall4(int,sys_renameat
,int,olddirfd
,const char *,oldpath
,
459 int,newdirfd
,const char *,newpath
)
461 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
462 _syscall3(int,sys_symlinkat
,const char *,oldpath
,
463 int,newdirfd
,const char *,newpath
)
465 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
466 _syscall3(int,sys_unlinkat
,int,dirfd
,const char *,pathname
,int,flags
)
469 #endif /* CONFIG_ATFILE */
#ifdef CONFIG_UTIMENSAT
/* utimensat(2) wrapper: a NULL pathname means "operate on dirfd
 * itself", which maps to futimens(3) on the host. */
static int sys_utimensat(int dirfd, const char *pathname,
    const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
#else
#if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#endif
#endif /* CONFIG_UTIMENSAT  */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY  */
516 extern int personality(int);
517 extern int flock(int, int);
518 extern int setfsuid(int);
519 extern int setfsgid(int);
520 extern int setgroups(int, gid_t
*);
522 #define ERRNO_TABLE_SIZE 1200
524 /* target_to_host_errno_table[] is initialized from
525 * host_to_target_errno_table[] in syscall_init(). */
526 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
530 * This list is the union of errno values overridden in asm-<arch>/errno.h
531 * minus the errnos that are not actually generic to all archs.
533 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
534 [EIDRM
] = TARGET_EIDRM
,
535 [ECHRNG
] = TARGET_ECHRNG
,
536 [EL2NSYNC
] = TARGET_EL2NSYNC
,
537 [EL3HLT
] = TARGET_EL3HLT
,
538 [EL3RST
] = TARGET_EL3RST
,
539 [ELNRNG
] = TARGET_ELNRNG
,
540 [EUNATCH
] = TARGET_EUNATCH
,
541 [ENOCSI
] = TARGET_ENOCSI
,
542 [EL2HLT
] = TARGET_EL2HLT
,
543 [EDEADLK
] = TARGET_EDEADLK
,
544 [ENOLCK
] = TARGET_ENOLCK
,
545 [EBADE
] = TARGET_EBADE
,
546 [EBADR
] = TARGET_EBADR
,
547 [EXFULL
] = TARGET_EXFULL
,
548 [ENOANO
] = TARGET_ENOANO
,
549 [EBADRQC
] = TARGET_EBADRQC
,
550 [EBADSLT
] = TARGET_EBADSLT
,
551 [EBFONT
] = TARGET_EBFONT
,
552 [ENOSTR
] = TARGET_ENOSTR
,
553 [ENODATA
] = TARGET_ENODATA
,
554 [ETIME
] = TARGET_ETIME
,
555 [ENOSR
] = TARGET_ENOSR
,
556 [ENONET
] = TARGET_ENONET
,
557 [ENOPKG
] = TARGET_ENOPKG
,
558 [EREMOTE
] = TARGET_EREMOTE
,
559 [ENOLINK
] = TARGET_ENOLINK
,
560 [EADV
] = TARGET_EADV
,
561 [ESRMNT
] = TARGET_ESRMNT
,
562 [ECOMM
] = TARGET_ECOMM
,
563 [EPROTO
] = TARGET_EPROTO
,
564 [EDOTDOT
] = TARGET_EDOTDOT
,
565 [EMULTIHOP
] = TARGET_EMULTIHOP
,
566 [EBADMSG
] = TARGET_EBADMSG
,
567 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
568 [EOVERFLOW
] = TARGET_EOVERFLOW
,
569 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
570 [EBADFD
] = TARGET_EBADFD
,
571 [EREMCHG
] = TARGET_EREMCHG
,
572 [ELIBACC
] = TARGET_ELIBACC
,
573 [ELIBBAD
] = TARGET_ELIBBAD
,
574 [ELIBSCN
] = TARGET_ELIBSCN
,
575 [ELIBMAX
] = TARGET_ELIBMAX
,
576 [ELIBEXEC
] = TARGET_ELIBEXEC
,
577 [EILSEQ
] = TARGET_EILSEQ
,
578 [ENOSYS
] = TARGET_ENOSYS
,
579 [ELOOP
] = TARGET_ELOOP
,
580 [ERESTART
] = TARGET_ERESTART
,
581 [ESTRPIPE
] = TARGET_ESTRPIPE
,
582 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
583 [EUSERS
] = TARGET_EUSERS
,
584 [ENOTSOCK
] = TARGET_ENOTSOCK
,
585 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
586 [EMSGSIZE
] = TARGET_EMSGSIZE
,
587 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
588 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
589 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
590 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
591 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
592 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
593 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
594 [EADDRINUSE
] = TARGET_EADDRINUSE
,
595 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
596 [ENETDOWN
] = TARGET_ENETDOWN
,
597 [ENETUNREACH
] = TARGET_ENETUNREACH
,
598 [ENETRESET
] = TARGET_ENETRESET
,
599 [ECONNABORTED
] = TARGET_ECONNABORTED
,
600 [ECONNRESET
] = TARGET_ECONNRESET
,
601 [ENOBUFS
] = TARGET_ENOBUFS
,
602 [EISCONN
] = TARGET_EISCONN
,
603 [ENOTCONN
] = TARGET_ENOTCONN
,
604 [EUCLEAN
] = TARGET_EUCLEAN
,
605 [ENOTNAM
] = TARGET_ENOTNAM
,
606 [ENAVAIL
] = TARGET_ENAVAIL
,
607 [EISNAM
] = TARGET_EISNAM
,
608 [EREMOTEIO
] = TARGET_EREMOTEIO
,
609 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
610 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
611 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
612 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
613 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
614 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
615 [EALREADY
] = TARGET_EALREADY
,
616 [EINPROGRESS
] = TARGET_EINPROGRESS
,
617 [ESTALE
] = TARGET_ESTALE
,
618 [ECANCELED
] = TARGET_ECANCELED
,
619 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
620 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
622 [ENOKEY
] = TARGET_ENOKEY
,
625 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
628 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
631 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
634 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
636 #ifdef ENOTRECOVERABLE
637 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
641 static inline int host_to_target_errno(int err
)
643 if(host_to_target_errno_table
[err
])
644 return host_to_target_errno_table
[err
];
648 static inline int target_to_host_errno(int err
)
650 if (target_to_host_errno_table
[err
])
651 return target_to_host_errno_table
[err
];
655 static inline abi_long
get_errno(abi_long ret
)
658 return -host_to_target_errno(errno
);
663 static inline int is_error(abi_long ret
)
665 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
/* Human-readable message for a TARGET errno, via the host strerror(). */
char *target_strerror(int err)
{
    return strerror(target_to_host_errno(err));
}
673 static abi_ulong target_brk
;
674 static abi_ulong target_original_brk
;
676 void target_set_brk(abi_ulong new_brk
)
678 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
681 /* do_brk() must return target values and target errnos. */
682 abi_long
do_brk(abi_ulong new_brk
)
685 abi_long mapped_addr
;
690 if (new_brk
< target_original_brk
)
693 brk_page
= HOST_PAGE_ALIGN(target_brk
);
695 /* If the new brk is less than this, set it and we're done... */
696 if (new_brk
< brk_page
) {
697 target_brk
= new_brk
;
701 /* We need to allocate more memory after the brk... */
702 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
+ 1);
703 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
704 PROT_READ
|PROT_WRITE
,
705 MAP_ANON
|MAP_FIXED
|MAP_PRIVATE
, 0, 0));
707 if (!is_error(mapped_addr
))
708 target_brk
= new_brk
;
713 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
714 abi_ulong target_fds_addr
,
718 abi_ulong b
, *target_fds
;
720 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
721 if (!(target_fds
= lock_user(VERIFY_READ
,
723 sizeof(abi_ulong
) * nw
,
725 return -TARGET_EFAULT
;
729 for (i
= 0; i
< nw
; i
++) {
730 /* grab the abi_ulong */
731 __get_user(b
, &target_fds
[i
]);
732 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
733 /* check the bit inside the abi_ulong */
740 unlock_user(target_fds
, target_fds_addr
, 0);
745 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
751 abi_ulong
*target_fds
;
753 nw
= (n
+ TARGET_ABI_BITS
- 1) / TARGET_ABI_BITS
;
754 if (!(target_fds
= lock_user(VERIFY_WRITE
,
756 sizeof(abi_ulong
) * nw
,
758 return -TARGET_EFAULT
;
761 for (i
= 0; i
< nw
; i
++) {
763 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
764 v
|= ((FD_ISSET(k
, fds
) != 0) << j
);
767 __put_user(v
, &target_fds
[i
]);
770 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
775 #if defined(__alpha__)
781 static inline abi_long
host_to_target_clock_t(long ticks
)
783 #if HOST_HZ == TARGET_HZ
786 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
790 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
791 const struct rusage
*rusage
)
793 struct target_rusage
*target_rusage
;
795 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
796 return -TARGET_EFAULT
;
797 target_rusage
->ru_utime
.tv_sec
= tswapl(rusage
->ru_utime
.tv_sec
);
798 target_rusage
->ru_utime
.tv_usec
= tswapl(rusage
->ru_utime
.tv_usec
);
799 target_rusage
->ru_stime
.tv_sec
= tswapl(rusage
->ru_stime
.tv_sec
);
800 target_rusage
->ru_stime
.tv_usec
= tswapl(rusage
->ru_stime
.tv_usec
);
801 target_rusage
->ru_maxrss
= tswapl(rusage
->ru_maxrss
);
802 target_rusage
->ru_ixrss
= tswapl(rusage
->ru_ixrss
);
803 target_rusage
->ru_idrss
= tswapl(rusage
->ru_idrss
);
804 target_rusage
->ru_isrss
= tswapl(rusage
->ru_isrss
);
805 target_rusage
->ru_minflt
= tswapl(rusage
->ru_minflt
);
806 target_rusage
->ru_majflt
= tswapl(rusage
->ru_majflt
);
807 target_rusage
->ru_nswap
= tswapl(rusage
->ru_nswap
);
808 target_rusage
->ru_inblock
= tswapl(rusage
->ru_inblock
);
809 target_rusage
->ru_oublock
= tswapl(rusage
->ru_oublock
);
810 target_rusage
->ru_msgsnd
= tswapl(rusage
->ru_msgsnd
);
811 target_rusage
->ru_msgrcv
= tswapl(rusage
->ru_msgrcv
);
812 target_rusage
->ru_nsignals
= tswapl(rusage
->ru_nsignals
);
813 target_rusage
->ru_nvcsw
= tswapl(rusage
->ru_nvcsw
);
814 target_rusage
->ru_nivcsw
= tswapl(rusage
->ru_nivcsw
);
815 unlock_user_struct(target_rusage
, target_addr
, 1);
820 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
821 abi_ulong target_tv_addr
)
823 struct target_timeval
*target_tv
;
825 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
826 return -TARGET_EFAULT
;
828 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
829 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
831 unlock_user_struct(target_tv
, target_tv_addr
, 0);
836 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
837 const struct timeval
*tv
)
839 struct target_timeval
*target_tv
;
841 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
842 return -TARGET_EFAULT
;
844 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
845 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
847 unlock_user_struct(target_tv
, target_tv_addr
, 1);
852 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
853 abi_ulong target_mq_attr_addr
)
855 struct target_mq_attr
*target_mq_attr
;
857 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
858 target_mq_attr_addr
, 1))
859 return -TARGET_EFAULT
;
861 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
862 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
863 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
864 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
866 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
871 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
872 const struct mq_attr
*attr
)
874 struct target_mq_attr
*target_mq_attr
;
876 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
877 target_mq_attr_addr
, 0))
878 return -TARGET_EFAULT
;
880 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
881 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
882 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
883 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
885 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
890 /* do_select() must return target values and target errnos. */
891 static abi_long
do_select(int n
,
892 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
893 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
895 fd_set rfds
, wfds
, efds
;
896 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
897 struct timeval tv
, *tv_ptr
;
901 if (copy_from_user_fdset(&rfds
, rfd_addr
, n
))
902 return -TARGET_EFAULT
;
908 if (copy_from_user_fdset(&wfds
, wfd_addr
, n
))
909 return -TARGET_EFAULT
;
915 if (copy_from_user_fdset(&efds
, efd_addr
, n
))
916 return -TARGET_EFAULT
;
922 if (target_tv_addr
) {
923 if (copy_from_user_timeval(&tv
, target_tv_addr
))
924 return -TARGET_EFAULT
;
930 ret
= get_errno(select(n
, rfds_ptr
, wfds_ptr
, efds_ptr
, tv_ptr
));
932 if (!is_error(ret
)) {
933 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
934 return -TARGET_EFAULT
;
935 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
936 return -TARGET_EFAULT
;
937 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
938 return -TARGET_EFAULT
;
940 if (target_tv_addr
&& copy_to_user_timeval(target_tv_addr
, &tv
))
941 return -TARGET_EFAULT
;
947 static abi_long
do_pipe2(int host_pipe
[], int flags
)
950 return pipe2(host_pipe
, flags
);
956 static abi_long
do_pipe(void *cpu_env
, int pipedes
, int flags
)
960 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
963 return get_errno(ret
);
964 #if defined(TARGET_MIPS)
965 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
967 #elif defined(TARGET_SH4)
968 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
971 if (put_user_s32(host_pipe
[0], pipedes
)
972 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
973 return -TARGET_EFAULT
;
975 return get_errno(ret
);
978 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
979 abi_ulong target_addr
,
982 struct target_ip_mreqn
*target_smreqn
;
984 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
986 return -TARGET_EFAULT
;
987 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
988 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
989 if (len
== sizeof(struct target_ip_mreqn
))
990 mreqn
->imr_ifindex
= tswapl(target_smreqn
->imr_ifindex
);
991 unlock_user(target_smreqn
, target_addr
, 0);
996 static inline abi_long
target_to_host_sockaddr(struct sockaddr
*addr
,
997 abi_ulong target_addr
,
1000 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1001 sa_family_t sa_family
;
1002 struct target_sockaddr
*target_saddr
;
1004 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1006 return -TARGET_EFAULT
;
1008 sa_family
= tswap16(target_saddr
->sa_family
);
1010 /* Oops. The caller might send a incomplete sun_path; sun_path
1011 * must be terminated by \0 (see the manual page), but
1012 * unfortunately it is quite common to specify sockaddr_un
1013 * length as "strlen(x->sun_path)" while it should be
1014 * "strlen(...) + 1". We'll fix that here if needed.
1015 * Linux kernel has a similar feature.
1018 if (sa_family
== AF_UNIX
) {
1019 if (len
< unix_maxlen
&& len
> 0) {
1020 char *cp
= (char*)target_saddr
;
1022 if ( cp
[len
-1] && !cp
[len
] )
1025 if (len
> unix_maxlen
)
1029 memcpy(addr
, target_saddr
, len
);
1030 addr
->sa_family
= sa_family
;
1031 unlock_user(target_saddr
, target_addr
, 0);
1036 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1037 struct sockaddr
*addr
,
1040 struct target_sockaddr
*target_saddr
;
1042 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1044 return -TARGET_EFAULT
;
1045 memcpy(target_saddr
, addr
, len
);
1046 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1047 unlock_user(target_saddr
, target_addr
, len
);
1052 /* ??? Should this also swap msgh->name? */
1053 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1054 struct target_msghdr
*target_msgh
)
1056 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1057 abi_long msg_controllen
;
1058 abi_ulong target_cmsg_addr
;
1059 struct target_cmsghdr
*target_cmsg
;
1060 socklen_t space
= 0;
1062 msg_controllen
= tswapl(target_msgh
->msg_controllen
);
1063 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1065 target_cmsg_addr
= tswapl(target_msgh
->msg_control
);
1066 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1068 return -TARGET_EFAULT
;
1070 while (cmsg
&& target_cmsg
) {
1071 void *data
= CMSG_DATA(cmsg
);
1072 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1074 int len
= tswapl(target_cmsg
->cmsg_len
)
1075 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1077 space
+= CMSG_SPACE(len
);
1078 if (space
> msgh
->msg_controllen
) {
1079 space
-= CMSG_SPACE(len
);
1080 gemu_log("Host cmsg overflow\n");
1084 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1085 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1086 cmsg
->cmsg_len
= CMSG_LEN(len
);
1088 if (cmsg
->cmsg_level
!= TARGET_SOL_SOCKET
|| cmsg
->cmsg_type
!= SCM_RIGHTS
) {
1089 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1090 memcpy(data
, target_data
, len
);
1092 int *fd
= (int *)data
;
1093 int *target_fd
= (int *)target_data
;
1094 int i
, numfds
= len
/ sizeof(int);
1096 for (i
= 0; i
< numfds
; i
++)
1097 fd
[i
] = tswap32(target_fd
[i
]);
1100 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1101 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
);
1103 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1105 msgh
->msg_controllen
= space
;
1109 /* ??? Should this also swap msgh->name? */
1110 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1111 struct msghdr
*msgh
)
/* Convert host-layout control messages (received via recvmsg) into the
   guest's cmsg layout at target_msgh->msg_control.  Only SOL_SOCKET /
   SCM_RIGHTS payloads (fd arrays) are byte-swapped per element; any other
   ancillary type is copied verbatim with a warning. */
1113 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1114 abi_long msg_controllen
;
1115 abi_ulong target_cmsg_addr
;
1116 struct target_cmsghdr
*target_cmsg
;
1117 socklen_t space
= 0;
1119 msg_controllen
= tswapl(target_msgh
->msg_controllen
);
/* Guest buffer too small for even one header: nothing to convert. */
1120 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1122 target_cmsg_addr
= tswapl(target_msgh
->msg_control
);
1123 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1125 return -TARGET_EFAULT
;
1127 while (cmsg
&& target_cmsg
) {
1128 void *data
= CMSG_DATA(cmsg
);
1129 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
/* Payload length excludes the (host) cmsg header. */
1131 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1133 space
+= TARGET_CMSG_SPACE(len
);
/* Would overflow the guest's control buffer: back out and stop. */
1134 if (space
> msg_controllen
) {
1135 space
-= TARGET_CMSG_SPACE(len
);
1136 gemu_log("Target cmsg overflow\n");
1140 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1141 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1142 target_cmsg
->cmsg_len
= tswapl(TARGET_CMSG_LEN(len
));
1144 if (cmsg
->cmsg_level
!= TARGET_SOL_SOCKET
|| cmsg
->cmsg_type
!= SCM_RIGHTS
) {
1145 gemu_log("Unsupported ancillary data: %d/%d\n", cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1146 memcpy(target_data
, data
, len
);
/* SCM_RIGHTS: swap each passed file descriptor individually. */
1148 int *fd
= (int *)data
;
1149 int *target_fd
= (int *)target_data
;
1150 int i
, numfds
= len
/ sizeof(int);
1152 for (i
= 0; i
< numfds
; i
++)
1153 target_fd
[i
] = tswap32(fd
[i
]);
1156 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1157 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
);
1159 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
/* Report back how many bytes of control data were actually produced. */
1161 target_msgh
->msg_controllen
= tswapl(space
);
1165 /* do_setsockopt() Must return target values and target errnos. */
1166 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
1167 abi_ulong optval_addr
, socklen_t optlen
)
/* Translate a guest setsockopt() into the host call: fetch the option
   value from guest memory, convert level/optname constants where the
   target and host disagree, and forward.  Unknown options fail with
   -TARGET_ENOPROTOOPT. */
1171 struct ip_mreqn
*ip_mreq
;
1172 struct ip_mreq_source
*ip_mreq_source
;
1176 /* TCP options all take an 'int' value. */
1177 if (optlen
< sizeof(uint32_t))
1178 return -TARGET_EINVAL
;
1180 if (get_user_u32(val
, optval_addr
))
1181 return -TARGET_EFAULT
;
1182 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1189 case IP_ROUTER_ALERT
:
1193 case IP_MTU_DISCOVER
:
1199 case IP_MULTICAST_TTL
:
1200 case IP_MULTICAST_LOOP
:
/* These IP options accept either a full int or a single byte. */
1202 if (optlen
>= sizeof(uint32_t)) {
1203 if (get_user_u32(val
, optval_addr
))
1204 return -TARGET_EFAULT
;
1205 } else if (optlen
>= 1) {
1206 if (get_user_u8(val
, optval_addr
))
1207 return -TARGET_EFAULT
;
1209 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
1211 case IP_ADD_MEMBERSHIP
:
1212 case IP_DROP_MEMBERSHIP
:
/* Membership requests may be ip_mreq or the larger ip_mreqn. */
1213 if (optlen
< sizeof (struct target_ip_mreq
) ||
1214 optlen
> sizeof (struct target_ip_mreqn
))
1215 return -TARGET_EINVAL
;
1217 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
1218 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
1219 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
1222 case IP_BLOCK_SOURCE
:
1223 case IP_UNBLOCK_SOURCE
:
1224 case IP_ADD_SOURCE_MEMBERSHIP
:
1225 case IP_DROP_SOURCE_MEMBERSHIP
:
1226 if (optlen
!= sizeof (struct target_ip_mreq_source
))
1227 return -TARGET_EINVAL
;
/* NOTE(review): lock_user() can return NULL on a bad guest address;
   the result is passed to setsockopt() unchecked here — verify against
   the -TARGET_EFAULT convention used by the other paths. */
1229 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
1230 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
1231 unlock_user (ip_mreq_source
, optval_addr
, 0);
1238 case TARGET_SOL_SOCKET
:
1240 /* Options with 'int' argument. */
1241 case TARGET_SO_DEBUG
:
1244 case TARGET_SO_REUSEADDR
:
1245 optname
= SO_REUSEADDR
;
1247 case TARGET_SO_TYPE
:
1250 case TARGET_SO_ERROR
:
1253 case TARGET_SO_DONTROUTE
:
1254 optname
= SO_DONTROUTE
;
1256 case TARGET_SO_BROADCAST
:
1257 optname
= SO_BROADCAST
;
1259 case TARGET_SO_SNDBUF
:
1260 optname
= SO_SNDBUF
;
1262 case TARGET_SO_RCVBUF
:
1263 optname
= SO_RCVBUF
;
1265 case TARGET_SO_KEEPALIVE
:
1266 optname
= SO_KEEPALIVE
;
1268 case TARGET_SO_OOBINLINE
:
1269 optname
= SO_OOBINLINE
;
1271 case TARGET_SO_NO_CHECK
:
1272 optname
= SO_NO_CHECK
;
1274 case TARGET_SO_PRIORITY
:
1275 optname
= SO_PRIORITY
;
1278 case TARGET_SO_BSDCOMPAT
:
1279 optname
= SO_BSDCOMPAT
;
1282 case TARGET_SO_PASSCRED
:
1283 optname
= SO_PASSCRED
;
1285 case TARGET_SO_TIMESTAMP
:
1286 optname
= SO_TIMESTAMP
;
1288 case TARGET_SO_RCVLOWAT
:
1289 optname
= SO_RCVLOWAT
;
1291 case TARGET_SO_RCVTIMEO
:
1292 optname
= SO_RCVTIMEO
;
1294 case TARGET_SO_SNDTIMEO
:
1295 optname
= SO_SNDTIMEO
;
/* All SOL_SOCKET options above funnel into a single int-valued call. */
1301 if (optlen
< sizeof(uint32_t))
1302 return -TARGET_EINVAL
;
1304 if (get_user_u32(val
, optval_addr
))
1305 return -TARGET_EFAULT
;
1306 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
1310 gemu_log("Unsupported setsockopt level=%d optname=%d \n", level
, optname
);
1311 ret
= -TARGET_ENOPROTOOPT
;
1316 /* do_getsockopt() Must return target values and target errnos. */
1317 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
1318 abi_ulong optval_addr
, abi_ulong optlen
)
/* Translate a guest getsockopt(): read the guest's length, do the host
   call into a local int, then write the (possibly truncated) value and
   updated length back to guest memory. */
1325 case TARGET_SOL_SOCKET
:
1328 case TARGET_SO_LINGER
:
1329 case TARGET_SO_RCVTIMEO
:
1330 case TARGET_SO_SNDTIMEO
:
1331 case TARGET_SO_PEERCRED
:
1332 case TARGET_SO_PEERNAME
:
1333 /* These don't just return a single integer */
1340 /* TCP options all take an 'int' value. */
1342 if (get_user_u32(len
, optlen
))
1343 return -TARGET_EFAULT
;
1345 return -TARGET_EINVAL
;
1347 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
/* Guest asked for >= 4 bytes: write a full u32, else a single byte. */
1353 if (put_user_u32(val
, optval_addr
))
1354 return -TARGET_EFAULT
;
1356 if (put_user_u8(val
, optval_addr
))
1357 return -TARGET_EFAULT
;
1359 if (put_user_u32(len
, optlen
))
1360 return -TARGET_EFAULT
;
1367 case IP_ROUTER_ALERT
:
1371 case IP_MTU_DISCOVER
:
1377 case IP_MULTICAST_TTL
:
1378 case IP_MULTICAST_LOOP
:
1379 if (get_user_u32(len
, optlen
))
1380 return -TARGET_EFAULT
;
1382 return -TARGET_EINVAL
;
1384 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
/* Byte-sized result only if it fits and the value is in byte range. */
1387 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
1389 if (put_user_u32(len
, optlen
)
1390 || put_user_u8(val
, optval_addr
))
1391 return -TARGET_EFAULT
;
1393 if (len
> sizeof(int))
1395 if (put_user_u32(len
, optlen
)
1396 || put_user_u32(val
, optval_addr
))
1397 return -TARGET_EFAULT
;
1401 ret
= -TARGET_ENOPROTOOPT
;
1407 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
1409 ret
= -TARGET_EOPNOTSUPP
;
1416 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
1417 * other lock functions have a return code of 0 for failure.
1419 static abi_long
lock_iovec(int type
, struct iovec
*vec
, abi_ulong target_addr
,
1420 int count
, int copy
)
/* Translate a guest iovec array at target_addr into host struct iovec
   entries in vec[], locking each buffer into host memory.  Returns 0 on
   success, -TARGET_EFAULT if the iovec array itself is unreadable. */
1422 struct target_iovec
*target_vec
;
1426 target_vec
= lock_user(VERIFY_READ
, target_addr
, count
* sizeof(struct target_iovec
), 1);
1428 return -TARGET_EFAULT
;
1429 for(i
= 0;i
< count
; i
++) {
1430 base
= tswapl(target_vec
[i
].iov_base
);
1431 vec
[i
].iov_len
= tswapl(target_vec
[i
].iov_len
);
1432 if (vec
[i
].iov_len
!= 0) {
1433 vec
[i
].iov_base
= lock_user(type
, base
, vec
[i
].iov_len
, copy
);
1434 /* Don't check lock_user return value. We must call writev even
1435 if a element has invalid base address. */
1437 /* zero length pointer is ignored */
1438 vec
[i
].iov_base
= NULL
;
1441 unlock_user (target_vec
, target_addr
, 0);
1445 static abi_long
unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
1446 int count
, int copy
)
/* Release the host-side buffers locked by lock_iovec(); when copy is
   nonzero the full iov_len of each buffer is written back to the guest. */
1448 struct target_iovec
*target_vec
;
1452 target_vec
= lock_user(VERIFY_READ
, target_addr
, count
* sizeof(struct target_iovec
), 1);
1454 return -TARGET_EFAULT
;
1455 for(i
= 0;i
< count
; i
++) {
1456 if (target_vec
[i
].iov_base
) {
1457 base
= tswapl(target_vec
[i
].iov_base
);
1458 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
1461 unlock_user (target_vec
, target_addr
, 0);
1466 /* do_socket() Must return target values and target errnos. */
1467 static abi_long
do_socket(int domain
, int type
, int protocol
)
/* On MIPS the SOCK_* constants differ from the host's, so map them
   before calling socket().  PF_NETLINK is deliberately refused. */
1469 #if defined(TARGET_MIPS)
1471 case TARGET_SOCK_DGRAM
:
1474 case TARGET_SOCK_STREAM
:
1477 case TARGET_SOCK_RAW
:
1480 case TARGET_SOCK_RDM
:
1483 case TARGET_SOCK_SEQPACKET
:
1484 type
= SOCK_SEQPACKET
;
1486 case TARGET_SOCK_PACKET
:
1491 if (domain
== PF_NETLINK
)
1492 return -EAFNOSUPPORT
; /* do not NETLINK socket connections possible */
1493 return get_errno(socket(domain
, type
, protocol
));
1496 /* do_bind() Must return target values and target errnos. */
1497 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
1503 return -TARGET_EINVAL
;
1505 addr
= alloca(addrlen
+1);
1507 target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1508 return get_errno(bind(sockfd
, addr
, addrlen
));
1511 /* do_connect() Must return target values and target errnos. */
1512 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
1518 return -TARGET_EINVAL
;
1520 addr
= alloca(addrlen
);
1522 target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1523 return get_errno(connect(sockfd
, addr
, addrlen
));
1526 /* do_sendrecvmsg() Must return target values and target errnos. */
1527 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
1528 int flags
, int send
)
/* Shared implementation of sendmsg(2)/recvmsg(2): builds a host msghdr
   from the guest's target_msghdr (name, iovec array, control data),
   performs the call, and for recvmsg converts control data back.
   'send' selects direction. */
1531 struct target_msghdr
*msgp
;
1535 abi_ulong target_vec
;
1538 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
1542 return -TARGET_EFAULT
;
1543 if (msgp
->msg_name
) {
1544 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
1545 msg
.msg_name
= alloca(msg
.msg_namelen
);
1546 target_to_host_sockaddr(msg
.msg_name
, tswapl(msgp
->msg_name
),
1549 msg
.msg_name
= NULL
;
1550 msg
.msg_namelen
= 0;
/* NOTE(review): control buffer is sized at twice the guest length,
   presumably to leave headroom for layout differences — confirm. */
1552 msg
.msg_controllen
= 2 * tswapl(msgp
->msg_controllen
);
1553 msg
.msg_control
= alloca(msg
.msg_controllen
);
1554 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
1556 count
= tswapl(msgp
->msg_iovlen
);
1557 vec
= alloca(count
* sizeof(struct iovec
));
1558 target_vec
= tswapl(msgp
->msg_iov
);
1559 lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
, vec
, target_vec
, count
, send
);
1560 msg
.msg_iovlen
= count
;
1564 ret
= target_to_host_cmsg(&msg
, msgp
);
1566 ret
= get_errno(sendmsg(fd
, &msg
, flags
));
1568 ret
= get_errno(recvmsg(fd
, &msg
, flags
));
1569 if (!is_error(ret
)) {
1571 ret
= host_to_target_cmsg(msgp
, &msg
);
/* Write back iovec contents only for the receive direction. */
1576 unlock_iovec(vec
, target_vec
, count
, !send
);
1577 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
1581 /* do_accept() Must return target values and target errnos. */
1582 static abi_long
do_accept(int fd
, abi_ulong target_addr
,
1583 abi_ulong target_addrlen_addr
)
/* accept(2) wrapper: reads the guest's addrlen, accepts into a host
   buffer, then copies the peer address and updated length back out. */
1589 if (get_user_u32(addrlen
, target_addrlen_addr
))
1590 return -TARGET_EFAULT
;
1593 return -TARGET_EINVAL
;
1595 addr
= alloca(addrlen
);
1597 ret
= get_errno(accept(fd
, addr
, &addrlen
));
1598 if (!is_error(ret
)) {
1599 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
1600 if (put_user_u32(addrlen
, target_addrlen_addr
))
1601 ret
= -TARGET_EFAULT
;
1606 /* do_getpeername() Must return target values and target errnos. */
1607 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
1608 abi_ulong target_addrlen_addr
)
/* getpeername(2) wrapper: same read-len / call / write-back shape as
   do_accept above. */
1614 if (get_user_u32(addrlen
, target_addrlen_addr
))
1615 return -TARGET_EFAULT
;
1618 return -TARGET_EINVAL
;
1620 addr
= alloca(addrlen
);
1622 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
1623 if (!is_error(ret
)) {
1624 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
1625 if (put_user_u32(addrlen
, target_addrlen_addr
))
1626 ret
= -TARGET_EFAULT
;
1631 /* do_getsockname() Must return target values and target errnos. */
1632 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
1633 abi_ulong target_addrlen_addr
)
1639 if (target_addr
== 0)
1640 return get_errno(accept(fd
, NULL
, NULL
));
1642 if (get_user_u32(addrlen
, target_addrlen_addr
))
1643 return -TARGET_EFAULT
;
1646 return -TARGET_EINVAL
;
1648 addr
= alloca(addrlen
);
1650 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
1651 if (!is_error(ret
)) {
1652 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
1653 if (put_user_u32(addrlen
, target_addrlen_addr
))
1654 ret
= -TARGET_EFAULT
;
1659 /* do_socketpair() Must return target values and target errnos. */
1660 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
1661 abi_ulong target_tab_addr
)
/* socketpair(2) wrapper: on success, stores the two host fds into the
   guest's int[2] at target_tab_addr. */
1666 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
1667 if (!is_error(ret
)) {
1668 if (put_user_s32(tab
[0], target_tab_addr
)
1669 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
1670 ret
= -TARGET_EFAULT
;
1675 /* do_sendto() Must return target values and target errnos. */
1676 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
1677 abi_ulong target_addr
, socklen_t addrlen
)
1684 return -TARGET_EINVAL
;
1686 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
1688 return -TARGET_EFAULT
;
1690 addr
= alloca(addrlen
);
1691 target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1692 ret
= get_errno(sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
1694 ret
= get_errno(send(fd
, host_msg
, len
, flags
));
1696 unlock_user(host_msg
, msg
, 0);
1700 /* do_recvfrom() Must return target values and target errnos. */
1701 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
1702 abi_ulong target_addr
,
1703 abi_ulong target_addrlen
)
/* Implements recvfrom(2) when target_addr != 0, plain recv(2) otherwise.
   The guest buffer is locked for write and released with the number of
   bytes actually received (0 on failure). */
1710 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
1712 return -TARGET_EFAULT
;
1714 if (get_user_u32(addrlen
, target_addrlen
)) {
1715 ret
= -TARGET_EFAULT
;
1719 ret
= -TARGET_EINVAL
;
1722 addr
= alloca(addrlen
);
1723 ret
= get_errno(recvfrom(fd
, host_msg
, len
, flags
, addr
, &addrlen
));
1725 addr
= NULL
; /* To keep compiler quiet. */
1726 ret
= get_errno(recv(fd
, host_msg
, len
, flags
));
1728 if (!is_error(ret
)) {
/* On success, copy the sender address (recvfrom only) and unlock the
   payload with its received length so it is written back to the guest. */
1730 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
1731 if (put_user_u32(addrlen
, target_addrlen
)) {
1732 ret
= -TARGET_EFAULT
;
1736 unlock_user(host_msg
, msg
, len
);
1739 unlock_user(host_msg
, msg
, 0);
1744 #ifdef TARGET_NR_socketcall
1745 /* do_socketcall() Must return target values and target errnos. */
1746 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
/* Demultiplexer for the socketcall(2) syscall: 'num' selects the
   operation and 'vptr' points at a guest array of abi_ulong arguments,
   unpacked here and forwarded to the matching do_* helper. */
1749 const int n
= sizeof(abi_ulong
);
1754 int domain
, type
, protocol
;
1756 if (get_user_s32(domain
, vptr
)
1757 || get_user_s32(type
, vptr
+ n
)
1758 || get_user_s32(protocol
, vptr
+ 2 * n
))
1759 return -TARGET_EFAULT
;
1761 ret
= do_socket(domain
, type
, protocol
);
1767 abi_ulong target_addr
;
1770 if (get_user_s32(sockfd
, vptr
)
1771 || get_user_ual(target_addr
, vptr
+ n
)
1772 || get_user_u32(addrlen
, vptr
+ 2 * n
))
1773 return -TARGET_EFAULT
;
1775 ret
= do_bind(sockfd
, target_addr
, addrlen
);
1778 case SOCKOP_connect
:
1781 abi_ulong target_addr
;
1784 if (get_user_s32(sockfd
, vptr
)
1785 || get_user_ual(target_addr
, vptr
+ n
)
1786 || get_user_u32(addrlen
, vptr
+ 2 * n
))
1787 return -TARGET_EFAULT
;
1789 ret
= do_connect(sockfd
, target_addr
, addrlen
);
1794 int sockfd
, backlog
;
1796 if (get_user_s32(sockfd
, vptr
)
1797 || get_user_s32(backlog
, vptr
+ n
))
1798 return -TARGET_EFAULT
;
1800 ret
= get_errno(listen(sockfd
, backlog
));
1806 abi_ulong target_addr
, target_addrlen
;
1808 if (get_user_s32(sockfd
, vptr
)
1809 || get_user_ual(target_addr
, vptr
+ n
)
1810 || get_user_u32(target_addrlen
, vptr
+ 2 * n
))
1811 return -TARGET_EFAULT
;
1813 ret
= do_accept(sockfd
, target_addr
, target_addrlen
);
1816 case SOCKOP_getsockname
:
1819 abi_ulong target_addr
, target_addrlen
;
1821 if (get_user_s32(sockfd
, vptr
)
1822 || get_user_ual(target_addr
, vptr
+ n
)
1823 || get_user_u32(target_addrlen
, vptr
+ 2 * n
))
1824 return -TARGET_EFAULT
;
1826 ret
= do_getsockname(sockfd
, target_addr
, target_addrlen
);
1829 case SOCKOP_getpeername
:
1832 abi_ulong target_addr
, target_addrlen
;
1834 if (get_user_s32(sockfd
, vptr
)
1835 || get_user_ual(target_addr
, vptr
+ n
)
1836 || get_user_u32(target_addrlen
, vptr
+ 2 * n
))
1837 return -TARGET_EFAULT
;
1839 ret
= do_getpeername(sockfd
, target_addr
, target_addrlen
);
1842 case SOCKOP_socketpair
:
1844 int domain
, type
, protocol
;
1847 if (get_user_s32(domain
, vptr
)
1848 || get_user_s32(type
, vptr
+ n
)
1849 || get_user_s32(protocol
, vptr
+ 2 * n
)
1850 || get_user_ual(tab
, vptr
+ 3 * n
))
1851 return -TARGET_EFAULT
;
1853 ret
= do_socketpair(domain
, type
, protocol
, tab
);
/* send/recv without an address reuse the sendto/recvfrom helpers with
   a zero address and length. */
1863 if (get_user_s32(sockfd
, vptr
)
1864 || get_user_ual(msg
, vptr
+ n
)
1865 || get_user_ual(len
, vptr
+ 2 * n
)
1866 || get_user_s32(flags
, vptr
+ 3 * n
))
1867 return -TARGET_EFAULT
;
1869 ret
= do_sendto(sockfd
, msg
, len
, flags
, 0, 0);
1879 if (get_user_s32(sockfd
, vptr
)
1880 || get_user_ual(msg
, vptr
+ n
)
1881 || get_user_ual(len
, vptr
+ 2 * n
)
1882 || get_user_s32(flags
, vptr
+ 3 * n
))
1883 return -TARGET_EFAULT
;
1885 ret
= do_recvfrom(sockfd
, msg
, len
, flags
, 0, 0);
1897 if (get_user_s32(sockfd
, vptr
)
1898 || get_user_ual(msg
, vptr
+ n
)
1899 || get_user_ual(len
, vptr
+ 2 * n
)
1900 || get_user_s32(flags
, vptr
+ 3 * n
)
1901 || get_user_ual(addr
, vptr
+ 4 * n
)
1902 || get_user_u32(addrlen
, vptr
+ 5 * n
))
1903 return -TARGET_EFAULT
;
1905 ret
= do_sendto(sockfd
, msg
, len
, flags
, addr
, addrlen
);
1908 case SOCKOP_recvfrom
:
1917 if (get_user_s32(sockfd
, vptr
)
1918 || get_user_ual(msg
, vptr
+ n
)
1919 || get_user_ual(len
, vptr
+ 2 * n
)
1920 || get_user_s32(flags
, vptr
+ 3 * n
)
1921 || get_user_ual(addr
, vptr
+ 4 * n
)
1922 || get_user_u32(addrlen
, vptr
+ 5 * n
))
1923 return -TARGET_EFAULT
;
1925 ret
= do_recvfrom(sockfd
, msg
, len
, flags
, addr
, addrlen
);
1928 case SOCKOP_shutdown
:
1932 if (get_user_s32(sockfd
, vptr
)
1933 || get_user_s32(how
, vptr
+ n
))
1934 return -TARGET_EFAULT
;
1936 ret
= get_errno(shutdown(sockfd
, how
));
1939 case SOCKOP_sendmsg
:
1940 case SOCKOP_recvmsg
:
1943 abi_ulong target_msg
;
1946 if (get_user_s32(fd
, vptr
)
1947 || get_user_ual(target_msg
, vptr
+ n
)
1948 || get_user_s32(flags
, vptr
+ 2 * n
))
1949 return -TARGET_EFAULT
;
1951 ret
= do_sendrecvmsg(fd
, target_msg
, flags
,
1952 (num
== SOCKOP_sendmsg
));
1955 case SOCKOP_setsockopt
:
1963 if (get_user_s32(sockfd
, vptr
)
1964 || get_user_s32(level
, vptr
+ n
)
1965 || get_user_s32(optname
, vptr
+ 2 * n
)
1966 || get_user_ual(optval
, vptr
+ 3 * n
)
1967 || get_user_u32(optlen
, vptr
+ 4 * n
))
1968 return -TARGET_EFAULT
;
1970 ret
= do_setsockopt(sockfd
, level
, optname
, optval
, optlen
);
1973 case SOCKOP_getsockopt
:
1981 if (get_user_s32(sockfd
, vptr
)
1982 || get_user_s32(level
, vptr
+ n
)
1983 || get_user_s32(optname
, vptr
+ 2 * n
)
1984 || get_user_ual(optval
, vptr
+ 3 * n
)
1985 || get_user_u32(optlen
, vptr
+ 4 * n
))
1986 return -TARGET_EFAULT
;
1988 ret
= do_getsockopt(sockfd
, level
, optname
, optval
, optlen
);
1992 gemu_log("Unsupported socketcall: %d\n", num
);
1993 ret
= -TARGET_ENOSYS
;
2000 #define N_SHM_REGIONS 32
/* Fixed-size table tracking guest shmat() mappings.
   NOTE(review): the member fields are not visible in this extract. */
2002 static struct shm_region
{
2005 } shm_regions
[N_SHM_REGIONS
];
/* Guest-layout equivalent of struct ipc_perm (SysV IPC permissions).
   NOTE(review): the leading key/uid/gid fields are not visible here. */
2007 struct target_ipc_perm
2014 unsigned short int mode
;
2015 unsigned short int __pad1
;
2016 unsigned short int __seq
;
2017 unsigned short int __pad2
;
2018 abi_ulong __unused1
;
2019 abi_ulong __unused2
;
/* Guest-layout equivalent of struct semid_ds. */
2022 struct target_semid_ds
2024 struct target_ipc_perm sem_perm
;
2025 abi_ulong sem_otime
;
2026 abi_ulong __unused1
;
2027 abi_ulong sem_ctime
;
2028 abi_ulong __unused2
;
2029 abi_ulong sem_nsems
;
2030 abi_ulong __unused3
;
2031 abi_ulong __unused4
;
2034 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
2035 abi_ulong target_addr
)
/* Copy the sem_perm member of a guest target_semid_ds at target_addr
   into a host struct ipc_perm, byte-swapping each field.
   Returns 0 / -TARGET_EFAULT. */
2037 struct target_ipc_perm
*target_ip
;
2038 struct target_semid_ds
*target_sd
;
2040 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2041 return -TARGET_EFAULT
;
2042 target_ip
=&(target_sd
->sem_perm
);
2043 host_ip
->__key
= tswapl(target_ip
->__key
);
2044 host_ip
->uid
= tswapl(target_ip
->uid
);
2045 host_ip
->gid
= tswapl(target_ip
->gid
);
2046 host_ip
->cuid
= tswapl(target_ip
->cuid
);
2047 host_ip
->cgid
= tswapl(target_ip
->cgid
);
2048 host_ip
->mode
= tswapl(target_ip
->mode
);
2049 unlock_user_struct(target_sd
, target_addr
, 0);
2053 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
2054 struct ipc_perm
*host_ip
)
/* Inverse of target_to_host_ipc_perm: write a host ipc_perm into the
   sem_perm member of the guest structure at target_addr. */
2056 struct target_ipc_perm
*target_ip
;
2057 struct target_semid_ds
*target_sd
;
2059 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2060 return -TARGET_EFAULT
;
2061 target_ip
= &(target_sd
->sem_perm
);
2062 target_ip
->__key
= tswapl(host_ip
->__key
);
2063 target_ip
->uid
= tswapl(host_ip
->uid
);
2064 target_ip
->gid
= tswapl(host_ip
->gid
);
2065 target_ip
->cuid
= tswapl(host_ip
->cuid
);
2066 target_ip
->cgid
= tswapl(host_ip
->cgid
);
2067 target_ip
->mode
= tswapl(host_ip
->mode
);
2068 unlock_user_struct(target_sd
, target_addr
, 1);
2072 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
2073 abi_ulong target_addr
)
/* Convert a guest target_semid_ds into a host struct semid_ds.
   NOTE(review): the early-return on the ipc_perm conversion below leaves
   the lock_user_struct() mapping unlocked-never — verify this cannot
   leak a page lock on the error path. */
2075 struct target_semid_ds
*target_sd
;
2077 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2078 return -TARGET_EFAULT
;
2079 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
2080 return -TARGET_EFAULT
;
2081 host_sd
->sem_nsems
= tswapl(target_sd
->sem_nsems
);
2082 host_sd
->sem_otime
= tswapl(target_sd
->sem_otime
);
2083 host_sd
->sem_ctime
= tswapl(target_sd
->sem_ctime
);
2084 unlock_user_struct(target_sd
, target_addr
, 0);
2088 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
2089 struct semid_ds
*host_sd
)
2091 struct target_semid_ds
*target_sd
;
2093 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2094 return -TARGET_EFAULT
;
2095 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
2096 return -TARGET_EFAULT
;;
2097 target_sd
->sem_nsems
= tswapl(host_sd
->sem_nsems
);
2098 target_sd
->sem_otime
= tswapl(host_sd
->sem_otime
);
2099 target_sd
->sem_ctime
= tswapl(host_sd
->sem_ctime
);
2100 unlock_user_struct(target_sd
, target_addr
, 1);
2104 struct target_seminfo
{
2117 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
2118 struct seminfo
*host_seminfo
)
/* Copy a host struct seminfo (SEM_INFO/IPC_INFO result) field by field
   into the guest structure at target_addr. */
2120 struct target_seminfo
*target_seminfo
;
2121 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
2122 return -TARGET_EFAULT
;
2123 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
2124 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
2125 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
2126 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
2127 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
2128 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
2129 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
2130 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
2131 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
2132 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
2133 unlock_user_struct(target_seminfo
, target_addr
, 1);
2139 struct semid_ds
*buf
;
2140 unsigned short *array
;
2141 struct seminfo
*__buf
;
2144 union target_semun
{
2151 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
2152 abi_ulong target_addr
)
2155 unsigned short *array
;
2157 struct semid_ds semid_ds
;
2160 semun
.buf
= &semid_ds
;
2162 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2164 return get_errno(ret
);
2166 nsems
= semid_ds
.sem_nsems
;
2168 *host_array
= malloc(nsems
*sizeof(unsigned short));
2169 array
= lock_user(VERIFY_READ
, target_addr
,
2170 nsems
*sizeof(unsigned short), 1);
2172 return -TARGET_EFAULT
;
2174 for(i
=0; i
<nsems
; i
++) {
2175 __get_user((*host_array
)[i
], &array
[i
]);
2177 unlock_user(array
, target_addr
, 0);
2182 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
2183 unsigned short **host_array
)
/* Write a GETALL semaphore value array back to guest memory; the set
   size is re-queried via IPC_STAT so only nsems entries are copied. */
2186 unsigned short *array
;
2188 struct semid_ds semid_ds
;
2191 semun
.buf
= &semid_ds
;
2193 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
2195 return get_errno(ret
);
2197 nsems
= semid_ds
.sem_nsems
;
2199 array
= lock_user(VERIFY_WRITE
, target_addr
,
2200 nsems
*sizeof(unsigned short), 0);
2202 return -TARGET_EFAULT
;
2204 for(i
=0; i
<nsems
; i
++) {
2205 __put_user((*host_array
)[i
], &array
[i
]);
2208 unlock_user(array
, target_addr
, 1);
2213 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
2214 union target_semun target_su
)
/* semctl(2) wrapper: marshals the semun argument in the direction each
   command requires (value, array, semid_ds or seminfo), issues the host
   call, and writes results back to guest memory. */
2217 struct semid_ds dsarg
;
2218 unsigned short *array
;
2219 struct seminfo seminfo
;
2220 abi_long ret
= -TARGET_EINVAL
;
2227 arg
.val
= tswapl(target_su
.val
);
2228 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2229 target_su
.val
= tswapl(arg
.val
);
2233 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
2237 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2238 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
2245 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
2249 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2250 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
2256 arg
.__buf
= &seminfo
;
2257 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
2258 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
/* Commands with no semun argument (e.g. IPC_RMID, GETVAL...). */
2266 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
2273 struct target_sembuf
{
2274 unsigned short sem_num
;
2279 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
2280 abi_ulong target_addr
,
/* Copy an array of nsops guest sembuf operations into host layout. */
2283 struct target_sembuf
*target_sembuf
;
2286 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
2287 nsops
*sizeof(struct target_sembuf
), 1);
2289 return -TARGET_EFAULT
;
2291 for(i
=0; i
<nsops
; i
++) {
2292 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
2293 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
2294 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
2297 unlock_user(target_sembuf
, target_addr
, 0);
2302 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
2304 struct sembuf sops
[nsops
];
2306 if (target_to_host_sembuf(sops
, ptr
, nsops
))
2307 return -TARGET_EFAULT
;
2309 return semop(semid
, sops
, nsops
);
/* Guest-layout equivalent of struct msqid_ds; the padding words exist
   only on 32-bit ABIs to mirror the kernel's 64-bit time split. */
2312 struct target_msqid_ds
2314 struct target_ipc_perm msg_perm
;
2315 abi_ulong msg_stime
;
2316 #if TARGET_ABI_BITS == 32
2317 abi_ulong __unused1
;
2319 abi_ulong msg_rtime
;
2320 #if TARGET_ABI_BITS == 32
2321 abi_ulong __unused2
;
2323 abi_ulong msg_ctime
;
2324 #if TARGET_ABI_BITS == 32
2325 abi_ulong __unused3
;
2327 abi_ulong __msg_cbytes
;
2329 abi_ulong msg_qbytes
;
2330 abi_ulong msg_lspid
;
2331 abi_ulong msg_lrpid
;
2332 abi_ulong __unused4
;
2333 abi_ulong __unused5
;
2336 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
2337 abi_ulong target_addr
)
/* Convert a guest target_msqid_ds at target_addr into a host
   struct msqid_ds, byte-swapping every field. */
2339 struct target_msqid_ds
*target_md
;
2341 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
2342 return -TARGET_EFAULT
;
2343 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
2344 return -TARGET_EFAULT
;
2345 host_md
->msg_stime
= tswapl(target_md
->msg_stime
);
2346 host_md
->msg_rtime
= tswapl(target_md
->msg_rtime
);
2347 host_md
->msg_ctime
= tswapl(target_md
->msg_ctime
);
2348 host_md
->__msg_cbytes
= tswapl(target_md
->__msg_cbytes
);
2349 host_md
->msg_qnum
= tswapl(target_md
->msg_qnum
);
2350 host_md
->msg_qbytes
= tswapl(target_md
->msg_qbytes
);
2351 host_md
->msg_lspid
= tswapl(target_md
->msg_lspid
);
2352 host_md
->msg_lrpid
= tswapl(target_md
->msg_lrpid
);
2353 unlock_user_struct(target_md
, target_addr
, 0);
2357 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
2358 struct msqid_ds
*host_md
)
/* Inverse of target_to_host_msqid_ds: write a host msqid_ds back to the
   guest structure at target_addr. */
2360 struct target_msqid_ds
*target_md
;
2362 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
2363 return -TARGET_EFAULT
;
2364 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
2365 return -TARGET_EFAULT
;
2366 target_md
->msg_stime
= tswapl(host_md
->msg_stime
);
2367 target_md
->msg_rtime
= tswapl(host_md
->msg_rtime
);
2368 target_md
->msg_ctime
= tswapl(host_md
->msg_ctime
);
2369 target_md
->__msg_cbytes
= tswapl(host_md
->__msg_cbytes
);
2370 target_md
->msg_qnum
= tswapl(host_md
->msg_qnum
);
2371 target_md
->msg_qbytes
= tswapl(host_md
->msg_qbytes
);
2372 target_md
->msg_lspid
= tswapl(host_md
->msg_lspid
);
2373 target_md
->msg_lrpid
= tswapl(host_md
->msg_lrpid
);
2374 unlock_user_struct(target_md
, target_addr
, 1);
2378 struct target_msginfo
{
2386 unsigned short int msgseg
;
2389 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
2390 struct msginfo
*host_msginfo
)
/* Copy a host struct msginfo (MSG_INFO/IPC_INFO result) into the guest
   structure at target_addr, field by field. */
2392 struct target_msginfo
*target_msginfo
;
2393 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
2394 return -TARGET_EFAULT
;
2395 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
2396 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
2397 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
2398 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
2399 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
2400 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
2401 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
2402 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
2403 unlock_user_struct(target_msginfo
, target_addr
, 1);
2407 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
/* msgctl(2) wrapper: marshals the msqid_ds / msginfo argument in the
   direction each command requires and forwards to the host. */
2409 struct msqid_ds dsarg
;
2410 struct msginfo msginfo
;
2411 abi_long ret
= -TARGET_EINVAL
;
2419 if (target_to_host_msqid_ds(&dsarg
,ptr
))
2420 return -TARGET_EFAULT
;
2421 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
2422 if (host_to_target_msqid_ds(ptr
,&dsarg
))
2423 return -TARGET_EFAULT
;
2426 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
/* IPC_INFO/MSG_INFO return a msginfo through the msqid_ds pointer. */
2430 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
2431 if (host_to_target_msginfo(ptr
, &msginfo
))
2432 return -TARGET_EFAULT
;
2439 struct target_msgbuf
{
2444 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
2445 unsigned int msgsz
, int msgflg
)
2447 struct target_msgbuf
*target_mb
;
2448 struct msgbuf
*host_mb
;
2451 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
2452 return -TARGET_EFAULT
;
2453 host_mb
= malloc(msgsz
+sizeof(long));
2454 host_mb
->mtype
= (abi_long
) tswapl(target_mb
->mtype
);
2455 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
2456 ret
= get_errno(msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
2458 unlock_user_struct(target_mb
, msgp
, 0);
2463 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
2464 unsigned int msgsz
, abi_long msgtyp
,
/* msgrcv(2) wrapper: receives into a temporary host buffer, then copies
   mtype and the received mtext bytes back into the guest msgbuf.
   NOTE(review): host_mb is allocated with an unchecked malloc() here,
   mirroring do_msgsnd's original defect — verify. */
2467 struct target_msgbuf
*target_mb
;
2469 struct msgbuf
*host_mb
;
2472 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
2473 return -TARGET_EFAULT
;
2475 host_mb
= malloc(msgsz
+sizeof(long));
2476 ret
= get_errno(msgrcv(msqid
, host_mb
, msgsz
, tswapl(msgtyp
), msgflg
));
2479 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
2480 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
2481 if (!target_mtext
) {
2482 ret
= -TARGET_EFAULT
;
2485 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
2486 unlock_user(target_mtext
, target_mtext_addr
, ret
);
2489 target_mb
->mtype
= tswapl(host_mb
->mtype
);
2494 unlock_user_struct(target_mb
, msgp
, 1);
/* Guest-layout equivalent of struct shmid_ds; padding words exist only
   on 32-bit ABIs. */
2498 struct target_shmid_ds
2500 struct target_ipc_perm shm_perm
;
2501 abi_ulong shm_segsz
;
2502 abi_ulong shm_atime
;
2503 #if TARGET_ABI_BITS == 32
2504 abi_ulong __unused1
;
2506 abi_ulong shm_dtime
;
2507 #if TARGET_ABI_BITS == 32
2508 abi_ulong __unused2
;
2510 abi_ulong shm_ctime
;
2511 #if TARGET_ABI_BITS == 32
2512 abi_ulong __unused3
;
2516 abi_ulong shm_nattch
;
2517 unsigned long int __unused4
;
2518 unsigned long int __unused5
;
2521 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
2522 abi_ulong target_addr
)
/* Convert a guest target_shmid_ds at target_addr into a host
   struct shmid_ds. */
2524 struct target_shmid_ds
*target_sd
;
2526 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
2527 return -TARGET_EFAULT
;
2528 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
2529 return -TARGET_EFAULT
;
2530 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
2531 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
2532 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
2533 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
2534 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
2535 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
2536 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
2537 unlock_user_struct(target_sd
, target_addr
, 0);
2541 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
2542 struct shmid_ds
*host_sd
)
/* Inverse of target_to_host_shmid_ds: write a host shmid_ds into the
   guest structure at target_addr. */
2544 struct target_shmid_ds
*target_sd
;
2546 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
2547 return -TARGET_EFAULT
;
2548 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
2549 return -TARGET_EFAULT
;
2550 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
2551 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
2552 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
2553 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
2554 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
2555 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
2556 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
2557 unlock_user_struct(target_sd
, target_addr
, 1);
2561 struct target_shminfo
{
2569 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
2570 struct shminfo
*host_shminfo
)
/* Copy a host struct shminfo (IPC_INFO result) into the guest structure
   at target_addr. */
2572 struct target_shminfo
*target_shminfo
;
2573 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
2574 return -TARGET_EFAULT
;
2575 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
2576 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
2577 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
2578 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
2579 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
2580 unlock_user_struct(target_shminfo
, target_addr
, 1);
2584 struct target_shm_info
{
2589 abi_ulong swap_attempts
;
2590 abi_ulong swap_successes
;
2593 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
2594 struct shm_info
*host_shm_info
)
2596 struct target_shm_info
*target_shm_info
;
2597 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
2598 return -TARGET_EFAULT
;
2599 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
2600 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
2601 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
2602 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
2603 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
2604 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
2605 unlock_user_struct(target_shm_info
, target_addr
, 1);
2609 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
2611 struct shmid_ds dsarg
;
2612 struct shminfo shminfo
;
2613 struct shm_info shm_info
;
2614 abi_long ret
= -TARGET_EINVAL
;
2622 if (target_to_host_shmid_ds(&dsarg
, buf
))
2623 return -TARGET_EFAULT
;
2624 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
2625 if (host_to_target_shmid_ds(buf
, &dsarg
))
2626 return -TARGET_EFAULT
;
2629 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
2630 if (host_to_target_shminfo(buf
, &shminfo
))
2631 return -TARGET_EFAULT
;
2634 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
2635 if (host_to_target_shm_info(buf
, &shm_info
))
2636 return -TARGET_EFAULT
;
2641 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
2648 static inline abi_ulong
do_shmat(int shmid
, abi_ulong shmaddr
, int shmflg
)
2652 struct shmid_ds shm_info
;
2655 /* find out the length of the shared memory segment */
2656 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
2657 if (is_error(ret
)) {
2658 /* can't get length, bail out */
2665 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
2667 abi_ulong mmap_start
;
2669 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
2671 if (mmap_start
== -1) {
2673 host_raddr
= (void *)-1;
2675 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
2678 if (host_raddr
== (void *)-1) {
2680 return get_errno((long)host_raddr
);
2682 raddr
=h2g((unsigned long)host_raddr
);
2684 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
2685 PAGE_VALID
| PAGE_READ
|
2686 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
2688 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
2689 if (shm_regions
[i
].start
== 0) {
2690 shm_regions
[i
].start
= raddr
;
2691 shm_regions
[i
].size
= shm_info
.shm_segsz
;
2701 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
2705 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
2706 if (shm_regions
[i
].start
== shmaddr
) {
2707 shm_regions
[i
].start
= 0;
2708 page_set_flags(shmaddr
, shm_regions
[i
].size
, 0);
2713 return get_errno(shmdt(g2h(shmaddr
)));
2716 #ifdef TARGET_NR_ipc
2717 /* ??? This only works with linear mappings. */
2718 /* do_ipc() must return target values and target errnos. */
2719 static abi_long
do_ipc(unsigned int call
, int first
,
2720 int second
, int third
,
2721 abi_long ptr
, abi_long fifth
)
2726 version
= call
>> 16;
2731 ret
= do_semop(first
, ptr
, second
);
2735 ret
= get_errno(semget(first
, second
, third
));
2739 ret
= do_semctl(first
, second
, third
, (union target_semun
)(abi_ulong
) ptr
);
2743 ret
= get_errno(msgget(first
, second
));
2747 ret
= do_msgsnd(first
, ptr
, second
, third
);
2751 ret
= do_msgctl(first
, second
, ptr
);
2758 struct target_ipc_kludge
{
2763 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
2764 ret
= -TARGET_EFAULT
;
2768 ret
= do_msgrcv(first
, tmp
->msgp
, second
, tmp
->msgtyp
, third
);
2770 unlock_user_struct(tmp
, ptr
, 0);
2774 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
2783 raddr
= do_shmat(first
, ptr
, second
);
2784 if (is_error(raddr
))
2785 return get_errno(raddr
);
2786 if (put_user_ual(raddr
, third
))
2787 return -TARGET_EFAULT
;
2791 ret
= -TARGET_EINVAL
;
2796 ret
= do_shmdt(ptr
);
2800 /* IPC_* flag values are the same on all linux platforms */
2801 ret
= get_errno(shmget(first
, second
, third
));
2804 /* IPC_* and SHM_* command values are the same on all linux platforms */
2806 ret
= do_shmctl(first
, second
, third
);
2809 gemu_log("Unsupported ipc call: %d (version %d)\n", call
, version
);
2810 ret
= -TARGET_ENOSYS
;
2817 /* kernel structure types definitions */
2820 #define STRUCT(name, ...) STRUCT_ ## name,
2821 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
2823 #include "syscall_types.h"
2826 #undef STRUCT_SPECIAL
2828 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
2829 #define STRUCT_SPECIAL(name)
2830 #include "syscall_types.h"
2832 #undef STRUCT_SPECIAL
2834 typedef struct IOCTLEntry
{
2835 unsigned int target_cmd
;
2836 unsigned int host_cmd
;
2839 const argtype arg_type
[5];
2842 #define IOC_R 0x0001
2843 #define IOC_W 0x0002
2844 #define IOC_RW (IOC_R | IOC_W)
2846 #define MAX_STRUCT_SIZE 4096
2848 static IOCTLEntry ioctl_entries
[] = {
2849 #define IOCTL(cmd, access, ...) \
2850 { TARGET_ ## cmd, cmd, #cmd, access, { __VA_ARGS__ } },
2855 /* ??? Implement proper locking for ioctls. */
2856 /* do_ioctl() Must return target values and target errnos. */
2857 static abi_long
do_ioctl(int fd
, abi_long cmd
, abi_long arg
)
2859 const IOCTLEntry
*ie
;
2860 const argtype
*arg_type
;
2862 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
2868 if (ie
->target_cmd
== 0) {
2869 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
2870 return -TARGET_ENOSYS
;
2872 if (ie
->target_cmd
== cmd
)
2876 arg_type
= ie
->arg_type
;
2878 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
2880 switch(arg_type
[0]) {
2883 ret
= get_errno(ioctl(fd
, ie
->host_cmd
));
2888 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, arg
));
2892 target_size
= thunk_type_size(arg_type
, 0);
2893 switch(ie
->access
) {
2895 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
2896 if (!is_error(ret
)) {
2897 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
2899 return -TARGET_EFAULT
;
2900 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
2901 unlock_user(argptr
, arg
, target_size
);
2905 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
2907 return -TARGET_EFAULT
;
2908 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
2909 unlock_user(argptr
, arg
, 0);
2910 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
2914 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
2916 return -TARGET_EFAULT
;
2917 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
2918 unlock_user(argptr
, arg
, 0);
2919 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
2920 if (!is_error(ret
)) {
2921 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
2923 return -TARGET_EFAULT
;
2924 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
2925 unlock_user(argptr
, arg
, target_size
);
2931 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
2932 (long)cmd
, arg_type
[0]);
2933 ret
= -TARGET_ENOSYS
;
2939 static const bitmask_transtbl iflag_tbl
[] = {
2940 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
2941 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
2942 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
2943 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
2944 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
2945 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
2946 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
2947 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
2948 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
2949 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
2950 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
2951 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
2952 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
2953 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
2957 static const bitmask_transtbl oflag_tbl
[] = {
2958 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
2959 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
2960 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
2961 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
2962 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
2963 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
2964 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
2965 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
2966 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
2967 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
2968 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
2969 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
2970 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
2971 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
2972 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
2973 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
2974 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
2975 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
2976 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
2977 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
2978 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
2979 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
2980 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
2981 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
2985 static const bitmask_transtbl cflag_tbl
[] = {
2986 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
2987 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
2988 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
2989 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
2990 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
2991 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
2992 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
2993 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
2994 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
2995 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
2996 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
2997 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
2998 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
2999 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
3000 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
3001 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
3002 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
3003 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
3004 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
3005 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
3006 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
3007 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
3008 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
3009 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
3010 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
3011 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
3012 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
3013 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
3014 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
3015 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
3016 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
3020 static const bitmask_transtbl lflag_tbl
[] = {
3021 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
3022 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
3023 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
3024 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
3025 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
3026 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
3027 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
3028 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
3029 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
3030 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
3031 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
3032 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
3033 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
3034 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
3035 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
3039 static void target_to_host_termios (void *dst
, const void *src
)
3041 struct host_termios
*host
= dst
;
3042 const struct target_termios
*target
= src
;
3045 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
3047 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
3049 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
3051 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
3052 host
->c_line
= target
->c_line
;
3054 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
3055 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
3056 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
3057 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
3058 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
3059 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
3060 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
3061 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
3062 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
3063 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
3064 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
3065 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
3066 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
3067 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
3068 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
3069 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
3070 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
3071 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
3074 static void host_to_target_termios (void *dst
, const void *src
)
3076 struct target_termios
*target
= dst
;
3077 const struct host_termios
*host
= src
;
3080 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
3082 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
3084 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
3086 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
3087 target
->c_line
= host
->c_line
;
3089 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
3090 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
3091 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
3092 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
3093 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
3094 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
3095 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
3096 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
3097 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
3098 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
3099 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
3100 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
3101 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
3102 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
3103 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
3104 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
3105 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
3106 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
3109 static const StructEntry struct_termios_def
= {
3110 .convert
= { host_to_target_termios
, target_to_host_termios
},
3111 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
3112 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
3115 static bitmask_transtbl mmap_flags_tbl
[] = {
3116 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
3117 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
3118 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
3119 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
3120 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
3121 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
3122 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
3123 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
3127 #if defined(TARGET_I386)
3129 /* NOTE: there is really one LDT for all the threads */
3130 static uint8_t *ldt_table
;
3132 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
3139 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
3140 if (size
> bytecount
)
3142 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
3144 return -TARGET_EFAULT
;
3145 /* ??? Should this by byteswapped? */
3146 memcpy(p
, ldt_table
, size
);
3147 unlock_user(p
, ptr
, size
);
3151 /* XXX: add locking support */
3152 static abi_long
write_ldt(CPUX86State
*env
,
3153 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
3155 struct target_modify_ldt_ldt_s ldt_info
;
3156 struct target_modify_ldt_ldt_s
*target_ldt_info
;
3157 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
3158 int seg_not_present
, useable
, lm
;
3159 uint32_t *lp
, entry_1
, entry_2
;
3161 if (bytecount
!= sizeof(ldt_info
))
3162 return -TARGET_EINVAL
;
3163 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
3164 return -TARGET_EFAULT
;
3165 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
3166 ldt_info
.base_addr
= tswapl(target_ldt_info
->base_addr
);
3167 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
3168 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
3169 unlock_user_struct(target_ldt_info
, ptr
, 0);
3171 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
3172 return -TARGET_EINVAL
;
3173 seg_32bit
= ldt_info
.flags
& 1;
3174 contents
= (ldt_info
.flags
>> 1) & 3;
3175 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
3176 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
3177 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
3178 useable
= (ldt_info
.flags
>> 6) & 1;
3182 lm
= (ldt_info
.flags
>> 7) & 1;
3184 if (contents
== 3) {
3186 return -TARGET_EINVAL
;
3187 if (seg_not_present
== 0)
3188 return -TARGET_EINVAL
;
3190 /* allocate the LDT */
3192 env
->ldt
.base
= target_mmap(0,
3193 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
3194 PROT_READ
|PROT_WRITE
,
3195 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
3196 if (env
->ldt
.base
== -1)
3197 return -TARGET_ENOMEM
;
3198 memset(g2h(env
->ldt
.base
), 0,
3199 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
3200 env
->ldt
.limit
= 0xffff;
3201 ldt_table
= g2h(env
->ldt
.base
);
3204 /* NOTE: same code as Linux kernel */
3205 /* Allow LDTs to be cleared by the user. */
3206 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
3209 read_exec_only
== 1 &&
3211 limit_in_pages
== 0 &&
3212 seg_not_present
== 1 &&
3220 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
3221 (ldt_info
.limit
& 0x0ffff);
3222 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
3223 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
3224 (ldt_info
.limit
& 0xf0000) |
3225 ((read_exec_only
^ 1) << 9) |
3227 ((seg_not_present
^ 1) << 15) |
3229 (limit_in_pages
<< 23) |
3233 entry_2
|= (useable
<< 20);
3235 /* Install the new entry ... */
3237 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
3238 lp
[0] = tswap32(entry_1
);
3239 lp
[1] = tswap32(entry_2
);
3243 /* specific and weird i386 syscalls */
3244 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
3245 unsigned long bytecount
)
3251 ret
= read_ldt(ptr
, bytecount
);
3254 ret
= write_ldt(env
, ptr
, bytecount
, 1);
3257 ret
= write_ldt(env
, ptr
, bytecount
, 0);
3260 ret
= -TARGET_ENOSYS
;
3266 #if defined(TARGET_I386) && defined(TARGET_ABI32)
3267 static abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
3269 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
3270 struct target_modify_ldt_ldt_s ldt_info
;
3271 struct target_modify_ldt_ldt_s
*target_ldt_info
;
3272 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
3273 int seg_not_present
, useable
, lm
;
3274 uint32_t *lp
, entry_1
, entry_2
;
3277 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
3278 if (!target_ldt_info
)
3279 return -TARGET_EFAULT
;
3280 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
3281 ldt_info
.base_addr
= tswapl(target_ldt_info
->base_addr
);
3282 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
3283 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
3284 if (ldt_info
.entry_number
== -1) {
3285 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
3286 if (gdt_table
[i
] == 0) {
3287 ldt_info
.entry_number
= i
;
3288 target_ldt_info
->entry_number
= tswap32(i
);
3293 unlock_user_struct(target_ldt_info
, ptr
, 1);
3295 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
3296 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
3297 return -TARGET_EINVAL
;
3298 seg_32bit
= ldt_info
.flags
& 1;
3299 contents
= (ldt_info
.flags
>> 1) & 3;
3300 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
3301 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
3302 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
3303 useable
= (ldt_info
.flags
>> 6) & 1;
3307 lm
= (ldt_info
.flags
>> 7) & 1;
3310 if (contents
== 3) {
3311 if (seg_not_present
== 0)
3312 return -TARGET_EINVAL
;
3315 /* NOTE: same code as Linux kernel */
3316 /* Allow LDTs to be cleared by the user. */
3317 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
3318 if ((contents
== 0 &&
3319 read_exec_only
== 1 &&
3321 limit_in_pages
== 0 &&
3322 seg_not_present
== 1 &&
3330 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
3331 (ldt_info
.limit
& 0x0ffff);
3332 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
3333 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
3334 (ldt_info
.limit
& 0xf0000) |
3335 ((read_exec_only
^ 1) << 9) |
3337 ((seg_not_present
^ 1) << 15) |
3339 (limit_in_pages
<< 23) |
3344 /* Install the new entry ... */
3346 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
3347 lp
[0] = tswap32(entry_1
);
3348 lp
[1] = tswap32(entry_2
);
3352 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
3354 struct target_modify_ldt_ldt_s
*target_ldt_info
;
3355 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
3356 uint32_t base_addr
, limit
, flags
;
3357 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
3358 int seg_not_present
, useable
, lm
;
3359 uint32_t *lp
, entry_1
, entry_2
;
3361 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
3362 if (!target_ldt_info
)
3363 return -TARGET_EFAULT
;
3364 idx
= tswap32(target_ldt_info
->entry_number
);
3365 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
3366 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
3367 unlock_user_struct(target_ldt_info
, ptr
, 1);
3368 return -TARGET_EINVAL
;
3370 lp
= (uint32_t *)(gdt_table
+ idx
);
3371 entry_1
= tswap32(lp
[0]);
3372 entry_2
= tswap32(lp
[1]);
3374 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
3375 contents
= (entry_2
>> 10) & 3;
3376 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
3377 seg_32bit
= (entry_2
>> 22) & 1;
3378 limit_in_pages
= (entry_2
>> 23) & 1;
3379 useable
= (entry_2
>> 20) & 1;
3383 lm
= (entry_2
>> 21) & 1;
3385 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
3386 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
3387 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
3388 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
3389 base_addr
= (entry_1
>> 16) |
3390 (entry_2
& 0xff000000) |
3391 ((entry_2
& 0xff) << 16);
3392 target_ldt_info
->base_addr
= tswapl(base_addr
);
3393 target_ldt_info
->limit
= tswap32(limit
);
3394 target_ldt_info
->flags
= tswap32(flags
);
3395 unlock_user_struct(target_ldt_info
, ptr
, 1);
3398 #endif /* TARGET_I386 && TARGET_ABI32 */
3400 #ifndef TARGET_ABI32
3401 static abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
3408 case TARGET_ARCH_SET_GS
:
3409 case TARGET_ARCH_SET_FS
:
3410 if (code
== TARGET_ARCH_SET_GS
)
3414 cpu_x86_load_seg(env
, idx
, 0);
3415 env
->segs
[idx
].base
= addr
;
3417 case TARGET_ARCH_GET_GS
:
3418 case TARGET_ARCH_GET_FS
:
3419 if (code
== TARGET_ARCH_GET_GS
)
3423 val
= env
->segs
[idx
].base
;
3424 if (put_user(val
, addr
, abi_ulong
))
3425 return -TARGET_EFAULT
;
3428 ret
= -TARGET_EINVAL
;
3435 #endif /* defined(TARGET_I386) */
3437 #if defined(USE_NPTL)
3439 #define NEW_STACK_SIZE PTHREAD_STACK_MIN
3441 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
3444 pthread_mutex_t mutex
;
3445 pthread_cond_t cond
;
3448 abi_ulong child_tidptr
;
3449 abi_ulong parent_tidptr
;
3453 static void *clone_func(void *arg
)
3455 new_thread_info
*info
= arg
;
3461 ts
= (TaskState
*)thread_env
->opaque
;
3462 info
->tid
= gettid();
3463 env
->host_tid
= info
->tid
;
3465 if (info
->child_tidptr
)
3466 put_user_u32(info
->tid
, info
->child_tidptr
);
3467 if (info
->parent_tidptr
)
3468 put_user_u32(info
->tid
, info
->parent_tidptr
);
3469 /* Enable signals. */
3470 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
3471 /* Signal to the parent that we're ready. */
3472 pthread_mutex_lock(&info
->mutex
);
3473 pthread_cond_broadcast(&info
->cond
);
3474 pthread_mutex_unlock(&info
->mutex
);
3475 /* Wait until the parent has finshed initializing the tls state. */
3476 pthread_mutex_lock(&clone_lock
);
3477 pthread_mutex_unlock(&clone_lock
);
3483 /* this stack is the equivalent of the kernel stack associated with a
3485 #define NEW_STACK_SIZE 8192
3487 static int clone_func(void *arg
)
3489 CPUState
*env
= arg
;
3496 /* do_fork() Must return host values and target errnos (unlike most
3497 do_*() functions). */
3498 static int do_fork(CPUState
*env
, unsigned int flags
, abi_ulong newsp
,
3499 abi_ulong parent_tidptr
, target_ulong newtls
,
3500 abi_ulong child_tidptr
)
3506 #if defined(USE_NPTL)
3507 unsigned int nptl_flags
;
3511 /* Emulate vfork() with fork() */
3512 if (flags
& CLONE_VFORK
)
3513 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
3515 if (flags
& CLONE_VM
) {
3516 TaskState
*parent_ts
= (TaskState
*)env
->opaque
;
3517 #if defined(USE_NPTL)
3518 new_thread_info info
;
3519 pthread_attr_t attr
;
3521 ts
= qemu_mallocz(sizeof(TaskState
) + NEW_STACK_SIZE
);
3522 init_task_state(ts
);
3523 new_stack
= ts
->stack
;
3524 /* we create a new CPU instance. */
3525 new_env
= cpu_copy(env
);
3526 /* Init regs that differ from the parent. */
3527 cpu_clone_regs(new_env
, newsp
);
3528 new_env
->opaque
= ts
;
3529 ts
->bprm
= parent_ts
->bprm
;
3530 ts
->info
= parent_ts
->info
;
3531 #if defined(USE_NPTL)
3533 flags
&= ~CLONE_NPTL_FLAGS2
;
3535 if (nptl_flags
& CLONE_CHILD_CLEARTID
) {
3536 ts
->child_tidptr
= child_tidptr
;
3539 if (nptl_flags
& CLONE_SETTLS
)
3540 cpu_set_tls (new_env
, newtls
);
3542 /* Grab a mutex so that thread setup appears atomic. */
3543 pthread_mutex_lock(&clone_lock
);
3545 memset(&info
, 0, sizeof(info
));
3546 pthread_mutex_init(&info
.mutex
, NULL
);
3547 pthread_mutex_lock(&info
.mutex
);
3548 pthread_cond_init(&info
.cond
, NULL
);
3550 if (nptl_flags
& CLONE_CHILD_SETTID
)
3551 info
.child_tidptr
= child_tidptr
;
3552 if (nptl_flags
& CLONE_PARENT_SETTID
)
3553 info
.parent_tidptr
= parent_tidptr
;
3555 ret
= pthread_attr_init(&attr
);
3556 ret
= pthread_attr_setstack(&attr
, new_stack
, NEW_STACK_SIZE
);
3557 /* It is not safe to deliver signals until the child has finished
3558 initializing, so temporarily block all signals. */
3559 sigfillset(&sigmask
);
3560 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
3562 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
3563 /* TODO: Free new CPU state if thread creation failed. */
3565 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
3566 pthread_attr_destroy(&attr
);
3568 /* Wait for the child to initialize. */
3569 pthread_cond_wait(&info
.cond
, &info
.mutex
);
3571 if (flags
& CLONE_PARENT_SETTID
)
3572 put_user_u32(ret
, parent_tidptr
);
3576 pthread_mutex_unlock(&info
.mutex
);
3577 pthread_cond_destroy(&info
.cond
);
3578 pthread_mutex_destroy(&info
.mutex
);
3579 pthread_mutex_unlock(&clone_lock
);
3581 if (flags
& CLONE_NPTL_FLAGS2
)
3583 /* This is probably going to die very quickly, but do it anyway. */
3585 ret
= __clone2(clone_func
, new_stack
+ NEW_STACK_SIZE
, flags
, new_env
);
3587 ret
= clone(clone_func
, new_stack
+ NEW_STACK_SIZE
, flags
, new_env
);
3591 /* if no CLONE_VM, we consider it is a fork */
3592 if ((flags
& ~(CSIGNAL
| CLONE_NPTL_FLAGS2
)) != 0)
3597 /* Child Process. */
3598 cpu_clone_regs(env
, newsp
);
3600 #if defined(USE_NPTL)
3601 /* There is a race condition here. The parent process could
3602 theoretically read the TID in the child process before the child
3603 tid is set. This would require using either ptrace
3604 (not implemented) or having *_tidptr to point at a shared memory
3605 mapping. We can't repeat the spinlock hack used above because
3606 the child process gets its own copy of the lock. */
3607 if (flags
& CLONE_CHILD_SETTID
)
3608 put_user_u32(gettid(), child_tidptr
);
3609 if (flags
& CLONE_PARENT_SETTID
)
3610 put_user_u32(gettid(), parent_tidptr
);
3611 ts
= (TaskState
*)env
->opaque
;
3612 if (flags
& CLONE_SETTLS
)
3613 cpu_set_tls (env
, newtls
);
3614 if (flags
& CLONE_CHILD_CLEARTID
)
3615 ts
->child_tidptr
= child_tidptr
;
3624 /* warning : doesn't handle linux specific flags... */
3625 static int target_to_host_fcntl_cmd(int cmd
)
3628 case TARGET_F_DUPFD
:
3629 case TARGET_F_GETFD
:
3630 case TARGET_F_SETFD
:
3631 case TARGET_F_GETFL
:
3632 case TARGET_F_SETFL
:
3634 case TARGET_F_GETLK
:
3636 case TARGET_F_SETLK
:
3638 case TARGET_F_SETLKW
:
3640 case TARGET_F_GETOWN
:
3642 case TARGET_F_SETOWN
:
3644 case TARGET_F_GETSIG
:
3646 case TARGET_F_SETSIG
:
3648 #if TARGET_ABI_BITS == 32
3649 case TARGET_F_GETLK64
:
3651 case TARGET_F_SETLK64
:
3653 case TARGET_F_SETLKW64
:
3657 return -TARGET_EINVAL
;
3659 return -TARGET_EINVAL
;
3662 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
3665 struct target_flock
*target_fl
;
3666 struct flock64 fl64
;
3667 struct target_flock64
*target_fl64
;
3669 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
3671 if (host_cmd
== -TARGET_EINVAL
)
3675 case TARGET_F_GETLK
:
3676 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
3677 return -TARGET_EFAULT
;
3678 fl
.l_type
= tswap16(target_fl
->l_type
);
3679 fl
.l_whence
= tswap16(target_fl
->l_whence
);
3680 fl
.l_start
= tswapl(target_fl
->l_start
);
3681 fl
.l_len
= tswapl(target_fl
->l_len
);
3682 fl
.l_pid
= tswapl(target_fl
->l_pid
);
3683 unlock_user_struct(target_fl
, arg
, 0);
3684 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
3686 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg
, 0))
3687 return -TARGET_EFAULT
;
3688 target_fl
->l_type
= tswap16(fl
.l_type
);
3689 target_fl
->l_whence
= tswap16(fl
.l_whence
);
3690 target_fl
->l_start
= tswapl(fl
.l_start
);
3691 target_fl
->l_len
= tswapl(fl
.l_len
);
3692 target_fl
->l_pid
= tswapl(fl
.l_pid
);
3693 unlock_user_struct(target_fl
, arg
, 1);
3697 case TARGET_F_SETLK
:
3698 case TARGET_F_SETLKW
:
3699 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
3700 return -TARGET_EFAULT
;
3701 fl
.l_type
= tswap16(target_fl
->l_type
);
3702 fl
.l_whence
= tswap16(target_fl
->l_whence
);
3703 fl
.l_start
= tswapl(target_fl
->l_start
);
3704 fl
.l_len
= tswapl(target_fl
->l_len
);
3705 fl
.l_pid
= tswapl(target_fl
->l_pid
);
3706 unlock_user_struct(target_fl
, arg
, 0);
3707 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
3710 case TARGET_F_GETLK64
:
3711 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
3712 return -TARGET_EFAULT
;
3713 fl64
.l_type
= tswap16(target_fl64
->l_type
) >> 1;
3714 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
3715 fl64
.l_start
= tswapl(target_fl64
->l_start
);
3716 fl64
.l_len
= tswapl(target_fl64
->l_len
);
3717 fl64
.l_pid
= tswap16(target_fl64
->l_pid
);
3718 unlock_user_struct(target_fl64
, arg
, 0);
3719 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
3721 if (!lock_user_struct(VERIFY_WRITE
, target_fl64
, arg
, 0))
3722 return -TARGET_EFAULT
;
3723 target_fl64
->l_type
= tswap16(fl64
.l_type
) >> 1;
3724 target_fl64
->l_whence
= tswap16(fl64
.l_whence
);
3725 target_fl64
->l_start
= tswapl(fl64
.l_start
);
3726 target_fl64
->l_len
= tswapl(fl64
.l_len
);
3727 target_fl64
->l_pid
= tswapl(fl64
.l_pid
);
3728 unlock_user_struct(target_fl64
, arg
, 1);
3731 case TARGET_F_SETLK64
:
3732 case TARGET_F_SETLKW64
:
3733 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
3734 return -TARGET_EFAULT
;
3735 fl64
.l_type
= tswap16(target_fl64
->l_type
) >> 1;
3736 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
3737 fl64
.l_start
= tswapl(target_fl64
->l_start
);
3738 fl64
.l_len
= tswapl(target_fl64
->l_len
);
3739 fl64
.l_pid
= tswap16(target_fl64
->l_pid
);
3740 unlock_user_struct(target_fl64
, arg
, 0);
3741 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
3744 case TARGET_F_GETFL
:
3745 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
3747 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
3751 case TARGET_F_SETFL
:
3752 ret
= get_errno(fcntl(fd
, host_cmd
, target_to_host_bitmask(arg
, fcntl_flags_tbl
)));
3755 case TARGET_F_SETOWN
:
3756 case TARGET_F_GETOWN
:
3757 case TARGET_F_SETSIG
:
3758 case TARGET_F_GETSIG
:
3759 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
3763 ret
= get_errno(fcntl(fd
, cmd
, arg
));
/* Helpers for the legacy 16-bit uid/gid syscall variants (USE_UID16).
   Those syscalls carry ids in 16 bits: full-range ids that do not fit
   are reported as the overflow id 65534, and the 16-bit -1 (0xffff)
   "no change" sentinel widens back to a full-width -1. */

static inline int high2lowuid(int uid)
{
    /* Clamp a full-range uid into 16 bits (65534 == overflow uid). */
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

static inline int high2lowgid(int gid)
{
    /* Clamp a full-range gid into 16 bits (65534 == overflow gid). */
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

static inline int low2highuid(int uid)
{
    /* 16-bit -1 (0xffff) is the "unchanged" sentinel; widen it. */
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

static inline int low2highgid(int gid)
{
    /* 16-bit -1 (0xffff) is the "unchanged" sentinel; widen it. */
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}
3803 #endif /* USE_UID16 */
3805 void syscall_init(void)
3808 const argtype
*arg_type
;
3812 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
3813 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
3814 #include "syscall_types.h"
3816 #undef STRUCT_SPECIAL
3818 /* we patch the ioctl size if necessary. We rely on the fact that
3819 no ioctl has all the bits at '1' in the size field */
3821 while (ie
->target_cmd
!= 0) {
3822 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
3823 TARGET_IOC_SIZEMASK
) {
3824 arg_type
= ie
->arg_type
;
3825 if (arg_type
[0] != TYPE_PTR
) {
3826 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
3831 size
= thunk_type_size(arg_type
, 0);
3832 ie
->target_cmd
= (ie
->target_cmd
&
3833 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
3834 (size
<< TARGET_IOC_SIZESHIFT
);
3837 /* Build target_to_host_errno_table[] table from
3838 * host_to_target_errno_table[]. */
3839 for (i
=0; i
< ERRNO_TABLE_SIZE
; i
++)
3840 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
3842 /* automatic consistency check if same arch */
3843 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
3844 (defined(__x86_64__) && defined(TARGET_X86_64))
3845 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
3846 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
3847 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#if TARGET_ABI_BITS == 32
/* On 32-bit ABIs a 64-bit file offset arrives as two 32-bit syscall
   arguments; combine them in the order the target's endianness
   dictates. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS != 32 */
/* On 64-bit ABIs the offset already fits in the first argument; the
   second word is ignored. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* truncate64: combine the two 32-bit halves of the length and call the
 * host.  On ARM EABI the 64-bit value is register-pair aligned, which
 * inserts one padding argument before it, so shift the arguments
 * down first. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
#ifdef TARGET_ARM
    if (((CPUARMState *)cpu_env)->eabi) {
        /* EABI alignment: arg2 is padding, the pair is in arg3/arg4. */
        arg2 = arg3;
        arg3 = arg4;
    }
#endif
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* ftruncate64: same argument marshalling as target_truncate64 but the
 * first argument is the file descriptor.  On ARM EABI the 64-bit pair
 * is register-pair aligned, so skip the padding argument. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
#ifdef TARGET_ARM
    if (((CPUARMState *)cpu_env)->eabi) {
        /* EABI alignment: arg2 is padding, the pair is in arg3/arg4. */
        arg2 = arg3;
        arg3 = arg4;
    }
#endif
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
3904 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
3905 abi_ulong target_addr
)
3907 struct target_timespec
*target_ts
;
3909 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
3910 return -TARGET_EFAULT
;
3911 host_ts
->tv_sec
= tswapl(target_ts
->tv_sec
);
3912 host_ts
->tv_nsec
= tswapl(target_ts
->tv_nsec
);
3913 unlock_user_struct(target_ts
, target_addr
, 0);
3917 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
3918 struct timespec
*host_ts
)
3920 struct target_timespec
*target_ts
;
3922 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
3923 return -TARGET_EFAULT
;
3924 target_ts
->tv_sec
= tswapl(host_ts
->tv_sec
);
3925 target_ts
->tv_nsec
= tswapl(host_ts
->tv_nsec
);
3926 unlock_user_struct(target_ts
, target_addr
, 1);
#if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
/* Copy a host struct stat out to the guest's stat64 layout at
 * target_addr.  On ARM, EABI guests use a distinct target_eabi_stat64
 * layout.  __put_user handles the per-field byte swapping.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address. */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#ifdef TARGET_ARM
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        /* Layout quirk: duplicate the inode into the __st_ino slot. */
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if TARGET_LONG_BITS == 64
        struct target_stat *target_st;
#else
        struct target_stat64 *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        /* Layout quirk: duplicate the inode into the __st_ino slot. */
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
#if defined(USE_NPTL)
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  However they're probably useless because guest atomic
   operations won't work either.  */
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    switch ((op & FUTEX_CMD_MASK)) {
#else
    switch (op) {
#endif
    case FUTEX_WAIT:
        /* Only WAIT takes a timeout; convert it if present. */
        if (timeout) {
            pts = &ts;
            target_to_host_timespec(pts, timeout);
        } else {
            pts = NULL;
        }
        return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
                                   pts, NULL, 0));
    case FUTEX_WAKE:
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_FD:
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_REQUEUE:
        return get_errno(sys_futex(g2h(uaddr), op, val,
                                   NULL, g2h(uaddr2), 0));
    case FUTEX_CMP_REQUEUE:
        /* val3 is compared against the futex word, so swap it. */
        return get_errno(sys_futex(g2h(uaddr), op, val,
                                   NULL, g2h(uaddr2), tswap32(val3)));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
static int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Low 7 bits hold the terminating signal; translate it. */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Bits 8-15 hold the stopping signal; translate it. */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    /* Normal exit: status bits are signal-free, pass through. */
    return status;
}
4054 int get_osversion(void)
4056 static int osversion
;
4057 struct new_utsname buf
;
4062 if (qemu_uname_release
&& *qemu_uname_release
) {
4063 s
= qemu_uname_release
;
4065 if (sys_uname(&buf
))
4070 for (i
= 0; i
< 3; i
++) {
4072 while (*s
>= '0' && *s
<= '9') {
4077 tmp
= (tmp
<< 8) + n
;
4085 /* do_syscall() should always have a single exit point at the end so
4086 that actions, such as logging of syscall results, can be performed.
4087 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
4088 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
4089 abi_long arg2
, abi_long arg3
, abi_long arg4
,
4090 abi_long arg5
, abi_long arg6
)
4098 gemu_log("syscall %d", num
);
4101 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
4104 case TARGET_NR_exit
:
4106 /* In old applications this may be used to implement _exit(2).
4107 However in threaded applictions it is used for thread termination,
4108 and _exit_group is used for application termination.
4109 Do thread termination if we have more then one thread. */
4110 /* FIXME: This probably breaks if a signal arrives. We should probably
4111 be disabling signals. */
4112 if (first_cpu
->next_cpu
) {
4120 while (p
&& p
!= (CPUState
*)cpu_env
) {
4121 lastp
= &p
->next_cpu
;
4124 /* If we didn't find the CPU for this thread then something is
4128 /* Remove the CPU from the list. */
4129 *lastp
= p
->next_cpu
;
4131 ts
= ((CPUState
*)cpu_env
)->opaque
;
4132 if (ts
->child_tidptr
) {
4133 put_user_u32(0, ts
->child_tidptr
);
4134 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
4137 /* TODO: Free CPU state. */
4144 gdb_exit(cpu_env
, arg1
);
4146 ret
= 0; /* avoid warning */
4148 case TARGET_NR_read
:
4152 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
4154 ret
= get_errno(read(arg1
, p
, arg3
));
4155 unlock_user(p
, arg2
, ret
);
4158 case TARGET_NR_write
:
4159 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
4161 ret
= get_errno(write(arg1
, p
, arg3
));
4162 unlock_user(p
, arg2
, 0);
4164 case TARGET_NR_open
:
4165 if (!(p
= lock_user_string(arg1
)))
4167 ret
= get_errno(open(path(p
),
4168 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
4170 unlock_user(p
, arg1
, 0);
4172 #if defined(TARGET_NR_openat) && defined(__NR_openat)
4173 case TARGET_NR_openat
:
4174 if (!(p
= lock_user_string(arg2
)))
4176 ret
= get_errno(sys_openat(arg1
,
4178 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
4180 unlock_user(p
, arg2
, 0);
4183 case TARGET_NR_close
:
4184 ret
= get_errno(close(arg1
));
4189 case TARGET_NR_fork
:
4190 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
4192 #ifdef TARGET_NR_waitpid
4193 case TARGET_NR_waitpid
:
4196 ret
= get_errno(waitpid(arg1
, &status
, arg3
));
4197 if (!is_error(ret
) && arg2
4198 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
4203 #ifdef TARGET_NR_waitid
4204 case TARGET_NR_waitid
:
4208 ret
= get_errno(waitid(arg1
, arg2
, &info
, arg4
));
4209 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
4210 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
4212 host_to_target_siginfo(p
, &info
);
4213 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
4218 #ifdef TARGET_NR_creat /* not on alpha */
4219 case TARGET_NR_creat
:
4220 if (!(p
= lock_user_string(arg1
)))
4222 ret
= get_errno(creat(p
, arg2
));
4223 unlock_user(p
, arg1
, 0);
4226 case TARGET_NR_link
:
4229 p
= lock_user_string(arg1
);
4230 p2
= lock_user_string(arg2
);
4232 ret
= -TARGET_EFAULT
;
4234 ret
= get_errno(link(p
, p2
));
4235 unlock_user(p2
, arg2
, 0);
4236 unlock_user(p
, arg1
, 0);
4239 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
4240 case TARGET_NR_linkat
:
4245 p
= lock_user_string(arg2
);
4246 p2
= lock_user_string(arg4
);
4248 ret
= -TARGET_EFAULT
;
4250 ret
= get_errno(sys_linkat(arg1
, p
, arg3
, p2
, arg5
));
4251 unlock_user(p
, arg2
, 0);
4252 unlock_user(p2
, arg4
, 0);
4256 case TARGET_NR_unlink
:
4257 if (!(p
= lock_user_string(arg1
)))
4259 ret
= get_errno(unlink(p
));
4260 unlock_user(p
, arg1
, 0);
4262 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
4263 case TARGET_NR_unlinkat
:
4264 if (!(p
= lock_user_string(arg2
)))
4266 ret
= get_errno(sys_unlinkat(arg1
, p
, arg3
));
4267 unlock_user(p
, arg2
, 0);
4270 case TARGET_NR_execve
:
4272 char **argp
, **envp
;
4275 abi_ulong guest_argp
;
4276 abi_ulong guest_envp
;
4282 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
4283 if (get_user_ual(addr
, gp
))
4291 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
4292 if (get_user_ual(addr
, gp
))
4299 argp
= alloca((argc
+ 1) * sizeof(void *));
4300 envp
= alloca((envc
+ 1) * sizeof(void *));
4302 for (gp
= guest_argp
, q
= argp
; gp
;
4303 gp
+= sizeof(abi_ulong
), q
++) {
4304 if (get_user_ual(addr
, gp
))
4308 if (!(*q
= lock_user_string(addr
)))
4313 for (gp
= guest_envp
, q
= envp
; gp
;
4314 gp
+= sizeof(abi_ulong
), q
++) {
4315 if (get_user_ual(addr
, gp
))
4319 if (!(*q
= lock_user_string(addr
)))
4324 if (!(p
= lock_user_string(arg1
)))
4326 ret
= get_errno(execve(p
, argp
, envp
));
4327 unlock_user(p
, arg1
, 0);
4332 ret
= -TARGET_EFAULT
;
4335 for (gp
= guest_argp
, q
= argp
; *q
;
4336 gp
+= sizeof(abi_ulong
), q
++) {
4337 if (get_user_ual(addr
, gp
)
4340 unlock_user(*q
, addr
, 0);
4342 for (gp
= guest_envp
, q
= envp
; *q
;
4343 gp
+= sizeof(abi_ulong
), q
++) {
4344 if (get_user_ual(addr
, gp
)
4347 unlock_user(*q
, addr
, 0);
4351 case TARGET_NR_chdir
:
4352 if (!(p
= lock_user_string(arg1
)))
4354 ret
= get_errno(chdir(p
));
4355 unlock_user(p
, arg1
, 0);
4357 #ifdef TARGET_NR_time
4358 case TARGET_NR_time
:
4361 ret
= get_errno(time(&host_time
));
4364 && put_user_sal(host_time
, arg1
))
4369 case TARGET_NR_mknod
:
4370 if (!(p
= lock_user_string(arg1
)))
4372 ret
= get_errno(mknod(p
, arg2
, arg3
));
4373 unlock_user(p
, arg1
, 0);
4375 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
4376 case TARGET_NR_mknodat
:
4377 if (!(p
= lock_user_string(arg2
)))
4379 ret
= get_errno(sys_mknodat(arg1
, p
, arg3
, arg4
));
4380 unlock_user(p
, arg2
, 0);
4383 case TARGET_NR_chmod
:
4384 if (!(p
= lock_user_string(arg1
)))
4386 ret
= get_errno(chmod(p
, arg2
));
4387 unlock_user(p
, arg1
, 0);
4389 #ifdef TARGET_NR_break
4390 case TARGET_NR_break
:
4393 #ifdef TARGET_NR_oldstat
4394 case TARGET_NR_oldstat
:
4397 case TARGET_NR_lseek
:
4398 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
4400 #ifdef TARGET_NR_getxpid
4401 case TARGET_NR_getxpid
:
4403 case TARGET_NR_getpid
:
4405 ret
= get_errno(getpid());
4407 case TARGET_NR_mount
:
4409 /* need to look at the data field */
4411 p
= lock_user_string(arg1
);
4412 p2
= lock_user_string(arg2
);
4413 p3
= lock_user_string(arg3
);
4414 if (!p
|| !p2
|| !p3
)
4415 ret
= -TARGET_EFAULT
;
4417 /* FIXME - arg5 should be locked, but it isn't clear how to
4418 * do that since it's not guaranteed to be a NULL-terminated
4421 ret
= get_errno(mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
)));
4422 unlock_user(p
, arg1
, 0);
4423 unlock_user(p2
, arg2
, 0);
4424 unlock_user(p3
, arg3
, 0);
4427 #ifdef TARGET_NR_umount
4428 case TARGET_NR_umount
:
4429 if (!(p
= lock_user_string(arg1
)))
4431 ret
= get_errno(umount(p
));
4432 unlock_user(p
, arg1
, 0);
4435 #ifdef TARGET_NR_stime /* not on alpha */
4436 case TARGET_NR_stime
:
4439 if (get_user_sal(host_time
, arg1
))
4441 ret
= get_errno(stime(&host_time
));
4445 case TARGET_NR_ptrace
:
4447 #ifdef TARGET_NR_alarm /* not on alpha */
4448 case TARGET_NR_alarm
:
4452 #ifdef TARGET_NR_oldfstat
4453 case TARGET_NR_oldfstat
:
4456 #ifdef TARGET_NR_pause /* not on alpha */
4457 case TARGET_NR_pause
:
4458 ret
= get_errno(pause());
4461 #ifdef TARGET_NR_utime
4462 case TARGET_NR_utime
:
4464 struct utimbuf tbuf
, *host_tbuf
;
4465 struct target_utimbuf
*target_tbuf
;
4467 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
4469 tbuf
.actime
= tswapl(target_tbuf
->actime
);
4470 tbuf
.modtime
= tswapl(target_tbuf
->modtime
);
4471 unlock_user_struct(target_tbuf
, arg2
, 0);
4476 if (!(p
= lock_user_string(arg1
)))
4478 ret
= get_errno(utime(p
, host_tbuf
));
4479 unlock_user(p
, arg1
, 0);
4483 case TARGET_NR_utimes
:
4485 struct timeval
*tvp
, tv
[2];
4487 if (copy_from_user_timeval(&tv
[0], arg2
)
4488 || copy_from_user_timeval(&tv
[1],
4489 arg2
+ sizeof(struct target_timeval
)))
4495 if (!(p
= lock_user_string(arg1
)))
4497 ret
= get_errno(utimes(p
, tvp
));
4498 unlock_user(p
, arg1
, 0);
4501 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
4502 case TARGET_NR_futimesat
:
4504 struct timeval
*tvp
, tv
[2];
4506 if (copy_from_user_timeval(&tv
[0], arg3
)
4507 || copy_from_user_timeval(&tv
[1],
4508 arg3
+ sizeof(struct target_timeval
)))
4514 if (!(p
= lock_user_string(arg2
)))
4516 ret
= get_errno(sys_futimesat(arg1
, path(p
), tvp
));
4517 unlock_user(p
, arg2
, 0);
4521 #ifdef TARGET_NR_stty
4522 case TARGET_NR_stty
:
4525 #ifdef TARGET_NR_gtty
4526 case TARGET_NR_gtty
:
4529 case TARGET_NR_access
:
4530 if (!(p
= lock_user_string(arg1
)))
4532 ret
= get_errno(access(p
, arg2
));
4533 unlock_user(p
, arg1
, 0);
4535 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
4536 case TARGET_NR_faccessat
:
4537 if (!(p
= lock_user_string(arg2
)))
4539 ret
= get_errno(sys_faccessat(arg1
, p
, arg3
));
4540 unlock_user(p
, arg2
, 0);
4543 #ifdef TARGET_NR_nice /* not on alpha */
4544 case TARGET_NR_nice
:
4545 ret
= get_errno(nice(arg1
));
4548 #ifdef TARGET_NR_ftime
4549 case TARGET_NR_ftime
:
4552 case TARGET_NR_sync
:
4556 case TARGET_NR_kill
:
4557 ret
= get_errno(kill(arg1
, target_to_host_signal(arg2
)));
4559 case TARGET_NR_rename
:
4562 p
= lock_user_string(arg1
);
4563 p2
= lock_user_string(arg2
);
4565 ret
= -TARGET_EFAULT
;
4567 ret
= get_errno(rename(p
, p2
));
4568 unlock_user(p2
, arg2
, 0);
4569 unlock_user(p
, arg1
, 0);
4572 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
4573 case TARGET_NR_renameat
:
4576 p
= lock_user_string(arg2
);
4577 p2
= lock_user_string(arg4
);
4579 ret
= -TARGET_EFAULT
;
4581 ret
= get_errno(sys_renameat(arg1
, p
, arg3
, p2
));
4582 unlock_user(p2
, arg4
, 0);
4583 unlock_user(p
, arg2
, 0);
4587 case TARGET_NR_mkdir
:
4588 if (!(p
= lock_user_string(arg1
)))
4590 ret
= get_errno(mkdir(p
, arg2
));
4591 unlock_user(p
, arg1
, 0);
4593 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
4594 case TARGET_NR_mkdirat
:
4595 if (!(p
= lock_user_string(arg2
)))
4597 ret
= get_errno(sys_mkdirat(arg1
, p
, arg3
));
4598 unlock_user(p
, arg2
, 0);
4601 case TARGET_NR_rmdir
:
4602 if (!(p
= lock_user_string(arg1
)))
4604 ret
= get_errno(rmdir(p
));
4605 unlock_user(p
, arg1
, 0);
4608 ret
= get_errno(dup(arg1
));
4610 case TARGET_NR_pipe
:
4611 ret
= do_pipe(cpu_env
, arg1
, 0);
4613 #ifdef TARGET_NR_pipe2
4614 case TARGET_NR_pipe2
:
4615 ret
= do_pipe(cpu_env
, arg1
, arg2
);
4618 case TARGET_NR_times
:
4620 struct target_tms
*tmsp
;
4622 ret
= get_errno(times(&tms
));
4624 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
4627 tmsp
->tms_utime
= tswapl(host_to_target_clock_t(tms
.tms_utime
));
4628 tmsp
->tms_stime
= tswapl(host_to_target_clock_t(tms
.tms_stime
));
4629 tmsp
->tms_cutime
= tswapl(host_to_target_clock_t(tms
.tms_cutime
));
4630 tmsp
->tms_cstime
= tswapl(host_to_target_clock_t(tms
.tms_cstime
));
4633 ret
= host_to_target_clock_t(ret
);
4636 #ifdef TARGET_NR_prof
4637 case TARGET_NR_prof
:
4640 #ifdef TARGET_NR_signal
4641 case TARGET_NR_signal
:
4644 case TARGET_NR_acct
:
4646 ret
= get_errno(acct(NULL
));
4648 if (!(p
= lock_user_string(arg1
)))
4650 ret
= get_errno(acct(path(p
)));
4651 unlock_user(p
, arg1
, 0);
4654 #ifdef TARGET_NR_umount2 /* not on alpha */
4655 case TARGET_NR_umount2
:
4656 if (!(p
= lock_user_string(arg1
)))
4658 ret
= get_errno(umount2(p
, arg2
));
4659 unlock_user(p
, arg1
, 0);
4662 #ifdef TARGET_NR_lock
4663 case TARGET_NR_lock
:
4666 case TARGET_NR_ioctl
:
4667 ret
= do_ioctl(arg1
, arg2
, arg3
);
4669 case TARGET_NR_fcntl
:
4670 ret
= do_fcntl(arg1
, arg2
, arg3
);
4672 #ifdef TARGET_NR_mpx
4676 case TARGET_NR_setpgid
:
4677 ret
= get_errno(setpgid(arg1
, arg2
));
4679 #ifdef TARGET_NR_ulimit
4680 case TARGET_NR_ulimit
:
4683 #ifdef TARGET_NR_oldolduname
4684 case TARGET_NR_oldolduname
:
4687 case TARGET_NR_umask
:
4688 ret
= get_errno(umask(arg1
));
4690 case TARGET_NR_chroot
:
4691 if (!(p
= lock_user_string(arg1
)))
4693 ret
= get_errno(chroot(p
));
4694 unlock_user(p
, arg1
, 0);
4696 case TARGET_NR_ustat
:
4698 case TARGET_NR_dup2
:
4699 ret
= get_errno(dup2(arg1
, arg2
));
4701 #ifdef TARGET_NR_getppid /* not on alpha */
4702 case TARGET_NR_getppid
:
4703 ret
= get_errno(getppid());
4706 case TARGET_NR_getpgrp
:
4707 ret
= get_errno(getpgrp());
4709 case TARGET_NR_setsid
:
4710 ret
= get_errno(setsid());
4712 #ifdef TARGET_NR_sigaction
4713 case TARGET_NR_sigaction
:
4715 #if !defined(TARGET_MIPS)
4716 struct target_old_sigaction
*old_act
;
4717 struct target_sigaction act
, oact
, *pact
;
4719 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
4721 act
._sa_handler
= old_act
->_sa_handler
;
4722 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
4723 act
.sa_flags
= old_act
->sa_flags
;
4724 act
.sa_restorer
= old_act
->sa_restorer
;
4725 unlock_user_struct(old_act
, arg2
, 0);
4730 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
4731 if (!is_error(ret
) && arg3
) {
4732 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
4734 old_act
->_sa_handler
= oact
._sa_handler
;
4735 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
4736 old_act
->sa_flags
= oact
.sa_flags
;
4737 old_act
->sa_restorer
= oact
.sa_restorer
;
4738 unlock_user_struct(old_act
, arg3
, 1);
4741 struct target_sigaction act
, oact
, *pact
, *old_act
;
4744 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
4746 act
._sa_handler
= old_act
->_sa_handler
;
4747 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
4748 act
.sa_flags
= old_act
->sa_flags
;
4749 unlock_user_struct(old_act
, arg2
, 0);
4755 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
4757 if (!is_error(ret
) && arg3
) {
4758 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
4760 old_act
->_sa_handler
= oact
._sa_handler
;
4761 old_act
->sa_flags
= oact
.sa_flags
;
4762 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
4763 old_act
->sa_mask
.sig
[1] = 0;
4764 old_act
->sa_mask
.sig
[2] = 0;
4765 old_act
->sa_mask
.sig
[3] = 0;
4766 unlock_user_struct(old_act
, arg3
, 1);
4772 case TARGET_NR_rt_sigaction
:
4774 struct target_sigaction
*act
;
4775 struct target_sigaction
*oact
;
4778 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
4783 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
4784 ret
= -TARGET_EFAULT
;
4785 goto rt_sigaction_fail
;
4789 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
4792 unlock_user_struct(act
, arg2
, 0);
4794 unlock_user_struct(oact
, arg3
, 1);
4797 #ifdef TARGET_NR_sgetmask /* not on alpha */
4798 case TARGET_NR_sgetmask
:
4801 abi_ulong target_set
;
4802 sigprocmask(0, NULL
, &cur_set
);
4803 host_to_target_old_sigset(&target_set
, &cur_set
);
4808 #ifdef TARGET_NR_ssetmask /* not on alpha */
4809 case TARGET_NR_ssetmask
:
4811 sigset_t set
, oset
, cur_set
;
4812 abi_ulong target_set
= arg1
;
4813 sigprocmask(0, NULL
, &cur_set
);
4814 target_to_host_old_sigset(&set
, &target_set
);
4815 sigorset(&set
, &set
, &cur_set
);
4816 sigprocmask(SIG_SETMASK
, &set
, &oset
);
4817 host_to_target_old_sigset(&target_set
, &oset
);
4822 #ifdef TARGET_NR_sigprocmask
4823 case TARGET_NR_sigprocmask
:
4826 sigset_t set
, oldset
, *set_ptr
;
4830 case TARGET_SIG_BLOCK
:
4833 case TARGET_SIG_UNBLOCK
:
4836 case TARGET_SIG_SETMASK
:
4840 ret
= -TARGET_EINVAL
;
4843 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
4845 target_to_host_old_sigset(&set
, p
);
4846 unlock_user(p
, arg2
, 0);
4852 ret
= get_errno(sigprocmask(arg1
, set_ptr
, &oldset
));
4853 if (!is_error(ret
) && arg3
) {
4854 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
4856 host_to_target_old_sigset(p
, &oldset
);
4857 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
4862 case TARGET_NR_rt_sigprocmask
:
4865 sigset_t set
, oldset
, *set_ptr
;
4869 case TARGET_SIG_BLOCK
:
4872 case TARGET_SIG_UNBLOCK
:
4875 case TARGET_SIG_SETMASK
:
4879 ret
= -TARGET_EINVAL
;
4882 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
4884 target_to_host_sigset(&set
, p
);
4885 unlock_user(p
, arg2
, 0);
4891 ret
= get_errno(sigprocmask(how
, set_ptr
, &oldset
));
4892 if (!is_error(ret
) && arg3
) {
4893 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
4895 host_to_target_sigset(p
, &oldset
);
4896 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
4900 #ifdef TARGET_NR_sigpending
4901 case TARGET_NR_sigpending
:
4904 ret
= get_errno(sigpending(&set
));
4905 if (!is_error(ret
)) {
4906 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
4908 host_to_target_old_sigset(p
, &set
);
4909 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
4914 case TARGET_NR_rt_sigpending
:
4917 ret
= get_errno(sigpending(&set
));
4918 if (!is_error(ret
)) {
4919 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
4921 host_to_target_sigset(p
, &set
);
4922 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
4926 #ifdef TARGET_NR_sigsuspend
4927 case TARGET_NR_sigsuspend
:
4930 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
4932 target_to_host_old_sigset(&set
, p
);
4933 unlock_user(p
, arg1
, 0);
4934 ret
= get_errno(sigsuspend(&set
));
4938 case TARGET_NR_rt_sigsuspend
:
4941 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
4943 target_to_host_sigset(&set
, p
);
4944 unlock_user(p
, arg1
, 0);
4945 ret
= get_errno(sigsuspend(&set
));
4948 case TARGET_NR_rt_sigtimedwait
:
4951 struct timespec uts
, *puts
;
4954 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
4956 target_to_host_sigset(&set
, p
);
4957 unlock_user(p
, arg1
, 0);
4960 target_to_host_timespec(puts
, arg3
);
4964 ret
= get_errno(sigtimedwait(&set
, &uinfo
, puts
));
4965 if (!is_error(ret
) && arg2
) {
4966 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
), 0)))
4968 host_to_target_siginfo(p
, &uinfo
);
4969 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
4973 case TARGET_NR_rt_sigqueueinfo
:
4976 if (!(p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_sigset_t
), 1)))
4978 target_to_host_siginfo(&uinfo
, p
);
4979 unlock_user(p
, arg1
, 0);
4980 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
4983 #ifdef TARGET_NR_sigreturn
4984 case TARGET_NR_sigreturn
:
4985 /* NOTE: ret is eax, so not transcoding must be done */
4986 ret
= do_sigreturn(cpu_env
);
4989 case TARGET_NR_rt_sigreturn
:
4990 /* NOTE: ret is eax, so not transcoding must be done */
4991 ret
= do_rt_sigreturn(cpu_env
);
4993 case TARGET_NR_sethostname
:
4994 if (!(p
= lock_user_string(arg1
)))
4996 ret
= get_errno(sethostname(p
, arg2
));
4997 unlock_user(p
, arg1
, 0);
4999 case TARGET_NR_setrlimit
:
5001 /* XXX: convert resource ? */
5002 int resource
= arg1
;
5003 struct target_rlimit
*target_rlim
;
5005 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
5007 rlim
.rlim_cur
= tswapl(target_rlim
->rlim_cur
);
5008 rlim
.rlim_max
= tswapl(target_rlim
->rlim_max
);
5009 unlock_user_struct(target_rlim
, arg2
, 0);
5010 ret
= get_errno(setrlimit(resource
, &rlim
));
5013 case TARGET_NR_getrlimit
:
5015 /* XXX: convert resource ? */
5016 int resource
= arg1
;
5017 struct target_rlimit
*target_rlim
;
5020 ret
= get_errno(getrlimit(resource
, &rlim
));
5021 if (!is_error(ret
)) {
5022 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
5024 rlim
.rlim_cur
= tswapl(target_rlim
->rlim_cur
);
5025 rlim
.rlim_max
= tswapl(target_rlim
->rlim_max
);
5026 unlock_user_struct(target_rlim
, arg2
, 1);
5030 case TARGET_NR_getrusage
:
5032 struct rusage rusage
;
5033 ret
= get_errno(getrusage(arg1
, &rusage
));
5034 if (!is_error(ret
)) {
5035 host_to_target_rusage(arg2
, &rusage
);
5039 case TARGET_NR_gettimeofday
:
5042 ret
= get_errno(gettimeofday(&tv
, NULL
));
5043 if (!is_error(ret
)) {
5044 if (copy_to_user_timeval(arg1
, &tv
))
5049 case TARGET_NR_settimeofday
:
5052 if (copy_from_user_timeval(&tv
, arg1
))
5054 ret
= get_errno(settimeofday(&tv
, NULL
));
5057 #ifdef TARGET_NR_select
5058 case TARGET_NR_select
:
5060 struct target_sel_arg_struct
*sel
;
5061 abi_ulong inp
, outp
, exp
, tvp
;
5064 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1))
5066 nsel
= tswapl(sel
->n
);
5067 inp
= tswapl(sel
->inp
);
5068 outp
= tswapl(sel
->outp
);
5069 exp
= tswapl(sel
->exp
);
5070 tvp
= tswapl(sel
->tvp
);
5071 unlock_user_struct(sel
, arg1
, 0);
5072 ret
= do_select(nsel
, inp
, outp
, exp
, tvp
);
5076 case TARGET_NR_symlink
:
5079 p
= lock_user_string(arg1
);
5080 p2
= lock_user_string(arg2
);
5082 ret
= -TARGET_EFAULT
;
5084 ret
= get_errno(symlink(p
, p2
));
5085 unlock_user(p2
, arg2
, 0);
5086 unlock_user(p
, arg1
, 0);
5089 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
5090 case TARGET_NR_symlinkat
:
5093 p
= lock_user_string(arg1
);
5094 p2
= lock_user_string(arg3
);
5096 ret
= -TARGET_EFAULT
;
5098 ret
= get_errno(sys_symlinkat(p
, arg2
, p2
));
5099 unlock_user(p2
, arg3
, 0);
5100 unlock_user(p
, arg1
, 0);
5104 #ifdef TARGET_NR_oldlstat
5105 case TARGET_NR_oldlstat
:
5108 case TARGET_NR_readlink
:
5111 p
= lock_user_string(arg1
);
5112 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
5114 ret
= -TARGET_EFAULT
;
5116 if (strncmp((const char *)p
, "/proc/self/exe", 14) == 0) {
5117 char real
[PATH_MAX
];
5118 temp
= realpath(exec_path
,real
);
5119 ret
= (temp
==NULL
) ? get_errno(-1) : strlen(real
) ;
5120 snprintf((char *)p2
, arg3
, "%s", real
);
5123 ret
= get_errno(readlink(path(p
), p2
, arg3
));
5125 unlock_user(p2
, arg2
, ret
);
5126 unlock_user(p
, arg1
, 0);
5129 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
5130 case TARGET_NR_readlinkat
:
5133 p
= lock_user_string(arg2
);
5134 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
5136 ret
= -TARGET_EFAULT
;
5138 ret
= get_errno(sys_readlinkat(arg1
, path(p
), p2
, arg4
));
5139 unlock_user(p2
, arg3
, ret
);
5140 unlock_user(p
, arg2
, 0);
5144 #ifdef TARGET_NR_uselib
5145 case TARGET_NR_uselib
:
5148 #ifdef TARGET_NR_swapon
5149 case TARGET_NR_swapon
:
5150 if (!(p
= lock_user_string(arg1
)))
5152 ret
= get_errno(swapon(p
, arg2
));
5153 unlock_user(p
, arg1
, 0);
5156 case TARGET_NR_reboot
:
5158 #ifdef TARGET_NR_readdir
5159 case TARGET_NR_readdir
:
5162 #ifdef TARGET_NR_mmap
5163 case TARGET_NR_mmap
:
5164 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE)
5167 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
5168 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
5176 unlock_user(v
, arg1
, 0);
5177 ret
= get_errno(target_mmap(v1
, v2
, v3
,
5178 target_to_host_bitmask(v4
, mmap_flags_tbl
),
5182 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
5183 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
5189 #ifdef TARGET_NR_mmap2
5190 case TARGET_NR_mmap2
:
5192 #define MMAP_SHIFT 12
5194 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
5195 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
5197 arg6
<< MMAP_SHIFT
));
5200 case TARGET_NR_munmap
:
5201 ret
= get_errno(target_munmap(arg1
, arg2
));
5203 case TARGET_NR_mprotect
:
5204 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
5206 #ifdef TARGET_NR_mremap
5207 case TARGET_NR_mremap
:
5208 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
5211 /* ??? msync/mlock/munlock are broken for softmmu. */
5212 #ifdef TARGET_NR_msync
5213 case TARGET_NR_msync
:
5214 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
5217 #ifdef TARGET_NR_mlock
5218 case TARGET_NR_mlock
:
5219 ret
= get_errno(mlock(g2h(arg1
), arg2
));
5222 #ifdef TARGET_NR_munlock
5223 case TARGET_NR_munlock
:
5224 ret
= get_errno(munlock(g2h(arg1
), arg2
));
5227 #ifdef TARGET_NR_mlockall
5228 case TARGET_NR_mlockall
:
5229 ret
= get_errno(mlockall(arg1
));
5232 #ifdef TARGET_NR_munlockall
5233 case TARGET_NR_munlockall
:
5234 ret
= get_errno(munlockall());
5237 case TARGET_NR_truncate
:
5238 if (!(p
= lock_user_string(arg1
)))
5240 ret
= get_errno(truncate(p
, arg2
));
5241 unlock_user(p
, arg1
, 0);
5243 case TARGET_NR_ftruncate
:
5244 ret
= get_errno(ftruncate(arg1
, arg2
));
5246 case TARGET_NR_fchmod
:
5247 ret
= get_errno(fchmod(arg1
, arg2
));
5249 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
5250 case TARGET_NR_fchmodat
:
5251 if (!(p
= lock_user_string(arg2
)))
5253 ret
= get_errno(sys_fchmodat(arg1
, p
, arg3
));
5254 unlock_user(p
, arg2
, 0);
5257 case TARGET_NR_getpriority
:
5258 /* libc does special remapping of the return value of
5259 * sys_getpriority() so it's just easiest to call
5260 * sys_getpriority() directly rather than through libc. */
5261 ret
= sys_getpriority(arg1
, arg2
);
5263 case TARGET_NR_setpriority
:
5264 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
5266 #ifdef TARGET_NR_profil
5267 case TARGET_NR_profil
:
5270 case TARGET_NR_statfs
:
5271 if (!(p
= lock_user_string(arg1
)))
5273 ret
= get_errno(statfs(path(p
), &stfs
));
5274 unlock_user(p
, arg1
, 0);
5276 if (!is_error(ret
)) {
5277 struct target_statfs
*target_stfs
;
5279 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
5281 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
5282 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
5283 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
5284 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
5285 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
5286 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
5287 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
5288 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
5289 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
5290 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
5291 unlock_user_struct(target_stfs
, arg2
, 1);
5294 case TARGET_NR_fstatfs
:
5295 ret
= get_errno(fstatfs(arg1
, &stfs
));
5296 goto convert_statfs
;
5297 #ifdef TARGET_NR_statfs64
5298 case TARGET_NR_statfs64
:
5299 if (!(p
= lock_user_string(arg1
)))
5301 ret
= get_errno(statfs(path(p
), &stfs
));
5302 unlock_user(p
, arg1
, 0);
5304 if (!is_error(ret
)) {
5305 struct target_statfs64
*target_stfs
;
5307 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
5309 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
5310 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
5311 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
5312 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
5313 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
5314 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
5315 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
5316 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
5317 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
5318 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
5319 unlock_user_struct(target_stfs
, arg3
, 1);
5322 case TARGET_NR_fstatfs64
:
5323 ret
= get_errno(fstatfs(arg1
, &stfs
));
5324 goto convert_statfs64
;
5326 #ifdef TARGET_NR_ioperm
5327 case TARGET_NR_ioperm
:
5330 #ifdef TARGET_NR_socketcall
5331 case TARGET_NR_socketcall
:
5332 ret
= do_socketcall(arg1
, arg2
);
5335 #ifdef TARGET_NR_accept
5336 case TARGET_NR_accept
:
5337 ret
= do_accept(arg1
, arg2
, arg3
);
5340 #ifdef TARGET_NR_bind
5341 case TARGET_NR_bind
:
5342 ret
= do_bind(arg1
, arg2
, arg3
);
5345 #ifdef TARGET_NR_connect
5346 case TARGET_NR_connect
:
5347 ret
= do_connect(arg1
, arg2
, arg3
);
5350 #ifdef TARGET_NR_getpeername
5351 case TARGET_NR_getpeername
:
5352 ret
= do_getpeername(arg1
, arg2
, arg3
);
5355 #ifdef TARGET_NR_getsockname
5356 case TARGET_NR_getsockname
:
5357 ret
= do_getsockname(arg1
, arg2
, arg3
);
5360 #ifdef TARGET_NR_getsockopt
5361 case TARGET_NR_getsockopt
:
5362 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
5365 #ifdef TARGET_NR_listen
5366 case TARGET_NR_listen
:
5367 ret
= get_errno(listen(arg1
, arg2
));
5370 #ifdef TARGET_NR_recv
5371 case TARGET_NR_recv
:
5372 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
5375 #ifdef TARGET_NR_recvfrom
5376 case TARGET_NR_recvfrom
:
5377 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
5380 #ifdef TARGET_NR_recvmsg
5381 case TARGET_NR_recvmsg
:
5382 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
5385 #ifdef TARGET_NR_send
5386 case TARGET_NR_send
:
5387 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
5390 #ifdef TARGET_NR_sendmsg
5391 case TARGET_NR_sendmsg
:
5392 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
5395 #ifdef TARGET_NR_sendto
5396 case TARGET_NR_sendto
:
5397 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
5400 #ifdef TARGET_NR_shutdown
5401 case TARGET_NR_shutdown
:
5402 ret
= get_errno(shutdown(arg1
, arg2
));
5405 #ifdef TARGET_NR_socket
5406 case TARGET_NR_socket
:
5407 ret
= do_socket(arg1
, arg2
, arg3
);
5410 #ifdef TARGET_NR_socketpair
5411 case TARGET_NR_socketpair
:
5412 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
5415 #ifdef TARGET_NR_setsockopt
5416 case TARGET_NR_setsockopt
:
5417 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
5421 case TARGET_NR_syslog
:
5422 if (!(p
= lock_user_string(arg2
)))
5424 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
5425 unlock_user(p
, arg2
, 0);
5428 case TARGET_NR_setitimer
:
5430 struct itimerval value
, ovalue
, *pvalue
;
5434 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
5435 || copy_from_user_timeval(&pvalue
->it_value
,
5436 arg2
+ sizeof(struct target_timeval
)))
5441 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
5442 if (!is_error(ret
) && arg3
) {
5443 if (copy_to_user_timeval(arg3
,
5444 &ovalue
.it_interval
)
5445 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
5451 case TARGET_NR_getitimer
:
5453 struct itimerval value
;
5455 ret
= get_errno(getitimer(arg1
, &value
));
5456 if (!is_error(ret
) && arg2
) {
5457 if (copy_to_user_timeval(arg2
,
5459 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
5465 case TARGET_NR_stat
:
5466 if (!(p
= lock_user_string(arg1
)))
5468 ret
= get_errno(stat(path(p
), &st
));
5469 unlock_user(p
, arg1
, 0);
5471 case TARGET_NR_lstat
:
5472 if (!(p
= lock_user_string(arg1
)))
5474 ret
= get_errno(lstat(path(p
), &st
));
5475 unlock_user(p
, arg1
, 0);
5477 case TARGET_NR_fstat
:
5479 ret
= get_errno(fstat(arg1
, &st
));
5481 if (!is_error(ret
)) {
5482 struct target_stat
*target_st
;
5484 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
5486 __put_user(st
.st_dev
, &target_st
->st_dev
);
5487 __put_user(st
.st_ino
, &target_st
->st_ino
);
5488 __put_user(st
.st_mode
, &target_st
->st_mode
);
5489 __put_user(st
.st_uid
, &target_st
->st_uid
);
5490 __put_user(st
.st_gid
, &target_st
->st_gid
);
5491 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
5492 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
5493 __put_user(st
.st_size
, &target_st
->st_size
);
5494 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
5495 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
5496 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
5497 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
5498 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
5499 unlock_user_struct(target_st
, arg2
, 1);
5503 #ifdef TARGET_NR_olduname
5504 case TARGET_NR_olduname
:
5507 #ifdef TARGET_NR_iopl
5508 case TARGET_NR_iopl
:
5511 case TARGET_NR_vhangup
:
5512 ret
= get_errno(vhangup());
5514 #ifdef TARGET_NR_idle
5515 case TARGET_NR_idle
:
5518 #ifdef TARGET_NR_syscall
5519 case TARGET_NR_syscall
:
5520 ret
= do_syscall(cpu_env
,arg1
& 0xffff,arg2
,arg3
,arg4
,arg5
,arg6
,0);
5523 case TARGET_NR_wait4
:
5526 abi_long status_ptr
= arg2
;
5527 struct rusage rusage
, *rusage_ptr
;
5528 abi_ulong target_rusage
= arg4
;
5530 rusage_ptr
= &rusage
;
5533 ret
= get_errno(wait4(arg1
, &status
, arg3
, rusage_ptr
));
5534 if (!is_error(ret
)) {
5536 status
= host_to_target_waitstatus(status
);
5537 if (put_user_s32(status
, status_ptr
))
5541 host_to_target_rusage(target_rusage
, &rusage
);
5545 #ifdef TARGET_NR_swapoff
5546 case TARGET_NR_swapoff
:
5547 if (!(p
= lock_user_string(arg1
)))
5549 ret
= get_errno(swapoff(p
));
5550 unlock_user(p
, arg1
, 0);
5553 case TARGET_NR_sysinfo
:
5555 struct target_sysinfo
*target_value
;
5556 struct sysinfo value
;
5557 ret
= get_errno(sysinfo(&value
));
5558 if (!is_error(ret
) && arg1
)
5560 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
5562 __put_user(value
.uptime
, &target_value
->uptime
);
5563 __put_user(value
.loads
[0], &target_value
->loads
[0]);
5564 __put_user(value
.loads
[1], &target_value
->loads
[1]);
5565 __put_user(value
.loads
[2], &target_value
->loads
[2]);
5566 __put_user(value
.totalram
, &target_value
->totalram
);
5567 __put_user(value
.freeram
, &target_value
->freeram
);
5568 __put_user(value
.sharedram
, &target_value
->sharedram
);
5569 __put_user(value
.bufferram
, &target_value
->bufferram
);
5570 __put_user(value
.totalswap
, &target_value
->totalswap
);
5571 __put_user(value
.freeswap
, &target_value
->freeswap
);
5572 __put_user(value
.procs
, &target_value
->procs
);
5573 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
5574 __put_user(value
.freehigh
, &target_value
->freehigh
);
5575 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
5576 unlock_user_struct(target_value
, arg1
, 1);
5580 #ifdef TARGET_NR_ipc
5582 ret
= do_ipc(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
5585 #ifdef TARGET_NR_semget
5586 case TARGET_NR_semget
:
5587 ret
= get_errno(semget(arg1
, arg2
, arg3
));
5590 #ifdef TARGET_NR_semop
5591 case TARGET_NR_semop
:
5592 ret
= get_errno(do_semop(arg1
, arg2
, arg3
));
5595 #ifdef TARGET_NR_semctl
5596 case TARGET_NR_semctl
:
5597 ret
= do_semctl(arg1
, arg2
, arg3
, (union target_semun
)(abi_ulong
)arg4
);
5600 #ifdef TARGET_NR_msgctl
5601 case TARGET_NR_msgctl
:
5602 ret
= do_msgctl(arg1
, arg2
, arg3
);
5605 #ifdef TARGET_NR_msgget
5606 case TARGET_NR_msgget
:
5607 ret
= get_errno(msgget(arg1
, arg2
));
5610 #ifdef TARGET_NR_msgrcv
5611 case TARGET_NR_msgrcv
:
5612 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
5615 #ifdef TARGET_NR_msgsnd
5616 case TARGET_NR_msgsnd
:
5617 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
5620 #ifdef TARGET_NR_shmget
5621 case TARGET_NR_shmget
:
5622 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
5625 #ifdef TARGET_NR_shmctl
5626 case TARGET_NR_shmctl
:
5627 ret
= do_shmctl(arg1
, arg2
, arg3
);
5630 #ifdef TARGET_NR_shmat
5631 case TARGET_NR_shmat
:
5632 ret
= do_shmat(arg1
, arg2
, arg3
);
5635 #ifdef TARGET_NR_shmdt
5636 case TARGET_NR_shmdt
:
5637 ret
= do_shmdt(arg1
);
5640 case TARGET_NR_fsync
:
5641 ret
= get_errno(fsync(arg1
));
5643 case TARGET_NR_clone
:
5644 #if defined(TARGET_SH4)
5645 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
5646 #elif defined(TARGET_CRIS)
5647 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg4
, arg5
));
5649 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
5652 #ifdef __NR_exit_group
5653 /* new thread calls */
5654 case TARGET_NR_exit_group
:
5658 gdb_exit(cpu_env
, arg1
);
5659 ret
= get_errno(exit_group(arg1
));
5662 case TARGET_NR_setdomainname
:
5663 if (!(p
= lock_user_string(arg1
)))
5665 ret
= get_errno(setdomainname(p
, arg2
));
5666 unlock_user(p
, arg1
, 0);
5668 case TARGET_NR_uname
:
5669 /* no need to transcode because we use the linux syscall */
5671 struct new_utsname
* buf
;
5673 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
5675 ret
= get_errno(sys_uname(buf
));
5676 if (!is_error(ret
)) {
5677 /* Overwrite the native machine name with whatever is being
5679 strcpy (buf
->machine
, UNAME_MACHINE
);
5680 /* Allow the user to override the reported release. */
5681 if (qemu_uname_release
&& *qemu_uname_release
)
5682 strcpy (buf
->release
, qemu_uname_release
);
5684 unlock_user_struct(buf
, arg1
, 1);
5688 case TARGET_NR_modify_ldt
:
5689 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
5691 #if !defined(TARGET_X86_64)
5692 case TARGET_NR_vm86old
:
5694 case TARGET_NR_vm86
:
5695 ret
= do_vm86(cpu_env
, arg1
, arg2
);
5699 case TARGET_NR_adjtimex
:
5701 #ifdef TARGET_NR_create_module
5702 case TARGET_NR_create_module
:
5704 case TARGET_NR_init_module
:
5705 case TARGET_NR_delete_module
:
5706 #ifdef TARGET_NR_get_kernel_syms
5707 case TARGET_NR_get_kernel_syms
:
5710 case TARGET_NR_quotactl
:
5712 case TARGET_NR_getpgid
:
5713 ret
= get_errno(getpgid(arg1
));
5715 case TARGET_NR_fchdir
:
5716 ret
= get_errno(fchdir(arg1
));
5718 #ifdef TARGET_NR_bdflush /* not on x86_64 */
5719 case TARGET_NR_bdflush
:
5722 #ifdef TARGET_NR_sysfs
5723 case TARGET_NR_sysfs
:
5726 case TARGET_NR_personality
:
5727 ret
= get_errno(personality(arg1
));
5729 #ifdef TARGET_NR_afs_syscall
5730 case TARGET_NR_afs_syscall
:
5733 #ifdef TARGET_NR__llseek /* Not on alpha */
5734 case TARGET_NR__llseek
:
5736 #if defined (__x86_64__)
5737 ret
= get_errno(lseek(arg1
, ((uint64_t )arg2
<< 32) | arg3
, arg5
));
5738 if (put_user_s64(ret
, arg4
))
5742 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
5743 if (put_user_s64(res
, arg4
))
5749 case TARGET_NR_getdents
:
5750 #if TARGET_ABI_BITS != 32
5752 #elif TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
5754 struct target_dirent
*target_dirp
;
5755 struct linux_dirent
*dirp
;
5756 abi_long count
= arg3
;
5758 dirp
= malloc(count
);
5760 ret
= -TARGET_ENOMEM
;
5764 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
5765 if (!is_error(ret
)) {
5766 struct linux_dirent
*de
;
5767 struct target_dirent
*tde
;
5769 int reclen
, treclen
;
5770 int count1
, tnamelen
;
5774 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
5778 reclen
= de
->d_reclen
;
5779 treclen
= reclen
- (2 * (sizeof(long) - sizeof(abi_long
)));
5780 tde
->d_reclen
= tswap16(treclen
);
5781 tde
->d_ino
= tswapl(de
->d_ino
);
5782 tde
->d_off
= tswapl(de
->d_off
);
5783 tnamelen
= treclen
- (2 * sizeof(abi_long
) + 2);
5786 /* XXX: may not be correct */
5787 pstrcpy(tde
->d_name
, tnamelen
, de
->d_name
);
5788 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
5790 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
5794 unlock_user(target_dirp
, arg2
, ret
);
5800 struct linux_dirent
*dirp
;
5801 abi_long count
= arg3
;
5803 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
5805 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
5806 if (!is_error(ret
)) {
5807 struct linux_dirent
*de
;
5812 reclen
= de
->d_reclen
;
5815 de
->d_reclen
= tswap16(reclen
);
5816 tswapls(&de
->d_ino
);
5817 tswapls(&de
->d_off
);
5818 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
5822 unlock_user(dirp
, arg2
, ret
);
5826 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
5827 case TARGET_NR_getdents64
:
5829 struct linux_dirent64
*dirp
;
5830 abi_long count
= arg3
;
5831 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
5833 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
5834 if (!is_error(ret
)) {
5835 struct linux_dirent64
*de
;
5840 reclen
= de
->d_reclen
;
5843 de
->d_reclen
= tswap16(reclen
);
5844 tswap64s((uint64_t *)&de
->d_ino
);
5845 tswap64s((uint64_t *)&de
->d_off
);
5846 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
5850 unlock_user(dirp
, arg2
, ret
);
5853 #endif /* TARGET_NR_getdents64 */
5854 #ifdef TARGET_NR__newselect
5855 case TARGET_NR__newselect
:
5856 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
5859 #ifdef TARGET_NR_poll
5860 case TARGET_NR_poll
:
5862 struct target_pollfd
*target_pfd
;
5863 unsigned int nfds
= arg2
;
5868 target_pfd
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_pollfd
) * nfds
, 1);
5871 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
5872 for(i
= 0; i
< nfds
; i
++) {
5873 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
5874 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
5876 ret
= get_errno(poll(pfd
, nfds
, timeout
));
5877 if (!is_error(ret
)) {
5878 for(i
= 0; i
< nfds
; i
++) {
5879 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
5881 ret
+= nfds
* (sizeof(struct target_pollfd
)
5882 - sizeof(struct pollfd
));
5884 unlock_user(target_pfd
, arg1
, ret
);
5888 case TARGET_NR_flock
:
5889 /* NOTE: the flock constant seems to be the same for every
5891 ret
= get_errno(flock(arg1
, arg2
));
5893 case TARGET_NR_readv
:
5898 vec
= alloca(count
* sizeof(struct iovec
));
5899 if (lock_iovec(VERIFY_WRITE
, vec
, arg2
, count
, 0) < 0)
5901 ret
= get_errno(readv(arg1
, vec
, count
));
5902 unlock_iovec(vec
, arg2
, count
, 1);
5905 case TARGET_NR_writev
:
5910 vec
= alloca(count
* sizeof(struct iovec
));
5911 if (lock_iovec(VERIFY_READ
, vec
, arg2
, count
, 1) < 0)
5913 ret
= get_errno(writev(arg1
, vec
, count
));
5914 unlock_iovec(vec
, arg2
, count
, 0);
5917 case TARGET_NR_getsid
:
5918 ret
= get_errno(getsid(arg1
));
5920 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
5921 case TARGET_NR_fdatasync
:
5922 ret
= get_errno(fdatasync(arg1
));
5925 case TARGET_NR__sysctl
:
5926 /* We don't implement this, but ENOTDIR is always a safe
5928 ret
= -TARGET_ENOTDIR
;
5930 case TARGET_NR_sched_setparam
:
5932 struct sched_param
*target_schp
;
5933 struct sched_param schp
;
5935 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
5937 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
5938 unlock_user_struct(target_schp
, arg2
, 0);
5939 ret
= get_errno(sched_setparam(arg1
, &schp
));
5942 case TARGET_NR_sched_getparam
:
5944 struct sched_param
*target_schp
;
5945 struct sched_param schp
;
5946 ret
= get_errno(sched_getparam(arg1
, &schp
));
5947 if (!is_error(ret
)) {
5948 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
5950 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
5951 unlock_user_struct(target_schp
, arg2
, 1);
5955 case TARGET_NR_sched_setscheduler
:
5957 struct sched_param
*target_schp
;
5958 struct sched_param schp
;
5959 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
5961 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
5962 unlock_user_struct(target_schp
, arg3
, 0);
5963 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
5966 case TARGET_NR_sched_getscheduler
:
5967 ret
= get_errno(sched_getscheduler(arg1
));
5969 case TARGET_NR_sched_yield
:
5970 ret
= get_errno(sched_yield());
5972 case TARGET_NR_sched_get_priority_max
:
5973 ret
= get_errno(sched_get_priority_max(arg1
));
5975 case TARGET_NR_sched_get_priority_min
:
5976 ret
= get_errno(sched_get_priority_min(arg1
));
5978 case TARGET_NR_sched_rr_get_interval
:
5981 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
5982 if (!is_error(ret
)) {
5983 host_to_target_timespec(arg2
, &ts
);
5987 case TARGET_NR_nanosleep
:
5989 struct timespec req
, rem
;
5990 target_to_host_timespec(&req
, arg1
);
5991 ret
= get_errno(nanosleep(&req
, &rem
));
5992 if (is_error(ret
) && arg2
) {
5993 host_to_target_timespec(arg2
, &rem
);
5997 #ifdef TARGET_NR_query_module
5998 case TARGET_NR_query_module
:
6001 #ifdef TARGET_NR_nfsservctl
6002 case TARGET_NR_nfsservctl
:
6005 case TARGET_NR_prctl
:
6008 case PR_GET_PDEATHSIG
:
6011 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
6012 if (!is_error(ret
) && arg2
6013 && put_user_ual(deathsig
, arg2
))
6018 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
6022 #ifdef TARGET_NR_arch_prctl
6023 case TARGET_NR_arch_prctl
:
6024 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
6025 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
6031 #ifdef TARGET_NR_pread
6032 case TARGET_NR_pread
:
6034 if (((CPUARMState
*)cpu_env
)->eabi
)
6037 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
6039 ret
= get_errno(pread(arg1
, p
, arg3
, arg4
));
6040 unlock_user(p
, arg2
, ret
);
6042 case TARGET_NR_pwrite
:
6044 if (((CPUARMState
*)cpu_env
)->eabi
)
6047 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
6049 ret
= get_errno(pwrite(arg1
, p
, arg3
, arg4
));
6050 unlock_user(p
, arg2
, 0);
6053 #ifdef TARGET_NR_pread64
6054 case TARGET_NR_pread64
:
6055 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
6057 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
6058 unlock_user(p
, arg2
, ret
);
6060 case TARGET_NR_pwrite64
:
6061 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
6063 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
6064 unlock_user(p
, arg2
, 0);
6067 case TARGET_NR_getcwd
:
6068 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
6070 ret
= get_errno(sys_getcwd1(p
, arg2
));
6071 unlock_user(p
, arg1
, ret
);
6073 case TARGET_NR_capget
:
6075 case TARGET_NR_capset
:
6077 case TARGET_NR_sigaltstack
:
6078 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
6079 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA)
6080 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUState
*)cpu_env
));
6085 case TARGET_NR_sendfile
:
6087 #ifdef TARGET_NR_getpmsg
6088 case TARGET_NR_getpmsg
:
6091 #ifdef TARGET_NR_putpmsg
6092 case TARGET_NR_putpmsg
:
6095 #ifdef TARGET_NR_vfork
6096 case TARGET_NR_vfork
:
6097 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
6101 #ifdef TARGET_NR_ugetrlimit
6102 case TARGET_NR_ugetrlimit
:
6105 ret
= get_errno(getrlimit(arg1
, &rlim
));
6106 if (!is_error(ret
)) {
6107 struct target_rlimit
*target_rlim
;
6108 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
6110 target_rlim
->rlim_cur
= tswapl(rlim
.rlim_cur
);
6111 target_rlim
->rlim_max
= tswapl(rlim
.rlim_max
);
6112 unlock_user_struct(target_rlim
, arg2
, 1);
6117 #ifdef TARGET_NR_truncate64
6118 case TARGET_NR_truncate64
:
6119 if (!(p
= lock_user_string(arg1
)))
6121 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
6122 unlock_user(p
, arg1
, 0);
6125 #ifdef TARGET_NR_ftruncate64
6126 case TARGET_NR_ftruncate64
:
6127 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
6130 #ifdef TARGET_NR_stat64
6131 case TARGET_NR_stat64
:
6132 if (!(p
= lock_user_string(arg1
)))
6134 ret
= get_errno(stat(path(p
), &st
));
6135 unlock_user(p
, arg1
, 0);
6137 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
6140 #ifdef TARGET_NR_lstat64
6141 case TARGET_NR_lstat64
:
6142 if (!(p
= lock_user_string(arg1
)))
6144 ret
= get_errno(lstat(path(p
), &st
));
6145 unlock_user(p
, arg1
, 0);
6147 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
6150 #ifdef TARGET_NR_fstat64
6151 case TARGET_NR_fstat64
:
6152 ret
= get_errno(fstat(arg1
, &st
));
6154 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
6157 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
6158 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
6159 #ifdef TARGET_NR_fstatat64
6160 case TARGET_NR_fstatat64
:
6162 #ifdef TARGET_NR_newfstatat
6163 case TARGET_NR_newfstatat
:
6165 if (!(p
= lock_user_string(arg2
)))
6167 #ifdef __NR_fstatat64
6168 ret
= get_errno(sys_fstatat64(arg1
, path(p
), &st
, arg4
));
6170 ret
= get_errno(sys_newfstatat(arg1
, path(p
), &st
, arg4
));
6173 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
6177 case TARGET_NR_lchown
:
6178 if (!(p
= lock_user_string(arg1
)))
6180 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
6181 unlock_user(p
, arg1
, 0);
6183 case TARGET_NR_getuid
:
6184 ret
= get_errno(high2lowuid(getuid()));
6186 case TARGET_NR_getgid
:
6187 ret
= get_errno(high2lowgid(getgid()));
6189 case TARGET_NR_geteuid
:
6190 ret
= get_errno(high2lowuid(geteuid()));
6192 case TARGET_NR_getegid
:
6193 ret
= get_errno(high2lowgid(getegid()));
6195 case TARGET_NR_setreuid
:
6196 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
6198 case TARGET_NR_setregid
:
6199 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
6201 case TARGET_NR_getgroups
:
6203 int gidsetsize
= arg1
;
6204 uint16_t *target_grouplist
;
6208 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
6209 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
6210 if (gidsetsize
== 0)
6212 if (!is_error(ret
)) {
6213 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 2, 0);
6214 if (!target_grouplist
)
6216 for(i
= 0;i
< ret
; i
++)
6217 target_grouplist
[i
] = tswap16(grouplist
[i
]);
6218 unlock_user(target_grouplist
, arg2
, gidsetsize
* 2);
6222 case TARGET_NR_setgroups
:
6224 int gidsetsize
= arg1
;
6225 uint16_t *target_grouplist
;
6229 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
6230 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 2, 1);
6231 if (!target_grouplist
) {
6232 ret
= -TARGET_EFAULT
;
6235 for(i
= 0;i
< gidsetsize
; i
++)
6236 grouplist
[i
] = tswap16(target_grouplist
[i
]);
6237 unlock_user(target_grouplist
, arg2
, 0);
6238 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
6241 case TARGET_NR_fchown
:
6242 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
6244 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
6245 case TARGET_NR_fchownat
:
6246 if (!(p
= lock_user_string(arg2
)))
6248 ret
= get_errno(sys_fchownat(arg1
, p
, low2highuid(arg3
), low2highgid(arg4
), arg5
));
6249 unlock_user(p
, arg2
, 0);
6252 #ifdef TARGET_NR_setresuid
6253 case TARGET_NR_setresuid
:
6254 ret
= get_errno(setresuid(low2highuid(arg1
),
6256 low2highuid(arg3
)));
6259 #ifdef TARGET_NR_getresuid
6260 case TARGET_NR_getresuid
:
6262 uid_t ruid
, euid
, suid
;
6263 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
6264 if (!is_error(ret
)) {
6265 if (put_user_u16(high2lowuid(ruid
), arg1
)
6266 || put_user_u16(high2lowuid(euid
), arg2
)
6267 || put_user_u16(high2lowuid(suid
), arg3
))
6273 #ifdef TARGET_NR_getresgid
6274 case TARGET_NR_setresgid
:
6275 ret
= get_errno(setresgid(low2highgid(arg1
),
6277 low2highgid(arg3
)));
6280 #ifdef TARGET_NR_getresgid
6281 case TARGET_NR_getresgid
:
6283 gid_t rgid
, egid
, sgid
;
6284 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
6285 if (!is_error(ret
)) {
6286 if (put_user_u16(high2lowgid(rgid
), arg1
)
6287 || put_user_u16(high2lowgid(egid
), arg2
)
6288 || put_user_u16(high2lowgid(sgid
), arg3
))
6294 case TARGET_NR_chown
:
6295 if (!(p
= lock_user_string(arg1
)))
6297 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
6298 unlock_user(p
, arg1
, 0);
6300 case TARGET_NR_setuid
:
6301 ret
= get_errno(setuid(low2highuid(arg1
)));
6303 case TARGET_NR_setgid
:
6304 ret
= get_errno(setgid(low2highgid(arg1
)));
6306 case TARGET_NR_setfsuid
:
6307 ret
= get_errno(setfsuid(arg1
));
6309 case TARGET_NR_setfsgid
:
6310 ret
= get_errno(setfsgid(arg1
));
6312 #endif /* USE_UID16 */
6314 #ifdef TARGET_NR_lchown32
6315 case TARGET_NR_lchown32
:
6316 if (!(p
= lock_user_string(arg1
)))
6318 ret
= get_errno(lchown(p
, arg2
, arg3
));
6319 unlock_user(p
, arg1
, 0);
6322 #ifdef TARGET_NR_getuid32
6323 case TARGET_NR_getuid32
:
6324 ret
= get_errno(getuid());
6328 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
6329 /* Alpha specific */
6330 case TARGET_NR_getxuid
:
6334 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
6336 ret
= get_errno(getuid());
6339 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
6340 /* Alpha specific */
6341 case TARGET_NR_getxgid
:
6345 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
6347 ret
= get_errno(getgid());
6351 #ifdef TARGET_NR_getgid32
6352 case TARGET_NR_getgid32
:
6353 ret
= get_errno(getgid());
6356 #ifdef TARGET_NR_geteuid32
6357 case TARGET_NR_geteuid32
:
6358 ret
= get_errno(geteuid());
6361 #ifdef TARGET_NR_getegid32
6362 case TARGET_NR_getegid32
:
6363 ret
= get_errno(getegid());
6366 #ifdef TARGET_NR_setreuid32
6367 case TARGET_NR_setreuid32
:
6368 ret
= get_errno(setreuid(arg1
, arg2
));
6371 #ifdef TARGET_NR_setregid32
6372 case TARGET_NR_setregid32
:
6373 ret
= get_errno(setregid(arg1
, arg2
));
6376 #ifdef TARGET_NR_getgroups32
6377 case TARGET_NR_getgroups32
:
6379 int gidsetsize
= arg1
;
6380 uint32_t *target_grouplist
;
6384 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
6385 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
6386 if (gidsetsize
== 0)
6388 if (!is_error(ret
)) {
6389 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
6390 if (!target_grouplist
) {
6391 ret
= -TARGET_EFAULT
;
6394 for(i
= 0;i
< ret
; i
++)
6395 target_grouplist
[i
] = tswap32(grouplist
[i
]);
6396 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
6401 #ifdef TARGET_NR_setgroups32
6402 case TARGET_NR_setgroups32
:
6404 int gidsetsize
= arg1
;
6405 uint32_t *target_grouplist
;
6409 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
6410 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
6411 if (!target_grouplist
) {
6412 ret
= -TARGET_EFAULT
;
6415 for(i
= 0;i
< gidsetsize
; i
++)
6416 grouplist
[i
] = tswap32(target_grouplist
[i
]);
6417 unlock_user(target_grouplist
, arg2
, 0);
6418 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
6422 #ifdef TARGET_NR_fchown32
6423 case TARGET_NR_fchown32
:
6424 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
6427 #ifdef TARGET_NR_setresuid32
6428 case TARGET_NR_setresuid32
:
6429 ret
= get_errno(setresuid(arg1
, arg2
, arg3
));
6432 #ifdef TARGET_NR_getresuid32
6433 case TARGET_NR_getresuid32
:
6435 uid_t ruid
, euid
, suid
;
6436 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
6437 if (!is_error(ret
)) {
6438 if (put_user_u32(ruid
, arg1
)
6439 || put_user_u32(euid
, arg2
)
6440 || put_user_u32(suid
, arg3
))
6446 #ifdef TARGET_NR_setresgid32
6447 case TARGET_NR_setresgid32
:
6448 ret
= get_errno(setresgid(arg1
, arg2
, arg3
));
6451 #ifdef TARGET_NR_getresgid32
6452 case TARGET_NR_getresgid32
:
6454 gid_t rgid
, egid
, sgid
;
6455 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
6456 if (!is_error(ret
)) {
6457 if (put_user_u32(rgid
, arg1
)
6458 || put_user_u32(egid
, arg2
)
6459 || put_user_u32(sgid
, arg3
))
6465 #ifdef TARGET_NR_chown32
6466 case TARGET_NR_chown32
:
6467 if (!(p
= lock_user_string(arg1
)))
6469 ret
= get_errno(chown(p
, arg2
, arg3
));
6470 unlock_user(p
, arg1
, 0);
6473 #ifdef TARGET_NR_setuid32
6474 case TARGET_NR_setuid32
:
6475 ret
= get_errno(setuid(arg1
));
6478 #ifdef TARGET_NR_setgid32
6479 case TARGET_NR_setgid32
:
6480 ret
= get_errno(setgid(arg1
));
6483 #ifdef TARGET_NR_setfsuid32
6484 case TARGET_NR_setfsuid32
:
6485 ret
= get_errno(setfsuid(arg1
));
6488 #ifdef TARGET_NR_setfsgid32
6489 case TARGET_NR_setfsgid32
:
6490 ret
= get_errno(setfsgid(arg1
));
6494 case TARGET_NR_pivot_root
:
6496 #ifdef TARGET_NR_mincore
6497 case TARGET_NR_mincore
:
6500 ret
= -TARGET_EFAULT
;
6501 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
6503 if (!(p
= lock_user_string(arg3
)))
6505 ret
= get_errno(mincore(a
, arg2
, p
));
6506 unlock_user(p
, arg3
, ret
);
6508 unlock_user(a
, arg1
, 0);
6512 #ifdef TARGET_NR_arm_fadvise64_64
6513 case TARGET_NR_arm_fadvise64_64
:
6516 * arm_fadvise64_64 looks like fadvise64_64 but
6517 * with different argument order
6525 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64)
6526 #ifdef TARGET_NR_fadvise64_64
6527 case TARGET_NR_fadvise64_64
:
6529 /* This is a hint, so ignoring and returning success is ok. */
6533 #ifdef TARGET_NR_madvise
6534 case TARGET_NR_madvise
:
6535 /* A straight passthrough may not be safe because qemu sometimes
6536 turns private flie-backed mappings into anonymous mappings.
6537 This will break MADV_DONTNEED.
6538 This is a hint, so ignoring and returning success is ok. */
6542 #if TARGET_ABI_BITS == 32
6543 case TARGET_NR_fcntl64
:
6547 struct target_flock64
*target_fl
;
6549 struct target_eabi_flock64
*target_efl
;
6552 cmd
= target_to_host_fcntl_cmd(arg2
);
6553 if (cmd
== -TARGET_EINVAL
)
6557 case TARGET_F_GETLK64
:
6559 if (((CPUARMState
*)cpu_env
)->eabi
) {
6560 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
6562 fl
.l_type
= tswap16(target_efl
->l_type
);
6563 fl
.l_whence
= tswap16(target_efl
->l_whence
);
6564 fl
.l_start
= tswap64(target_efl
->l_start
);
6565 fl
.l_len
= tswap64(target_efl
->l_len
);
6566 fl
.l_pid
= tswapl(target_efl
->l_pid
);
6567 unlock_user_struct(target_efl
, arg3
, 0);
6571 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
6573 fl
.l_type
= tswap16(target_fl
->l_type
);
6574 fl
.l_whence
= tswap16(target_fl
->l_whence
);
6575 fl
.l_start
= tswap64(target_fl
->l_start
);
6576 fl
.l_len
= tswap64(target_fl
->l_len
);
6577 fl
.l_pid
= tswapl(target_fl
->l_pid
);
6578 unlock_user_struct(target_fl
, arg3
, 0);
6580 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
6583 if (((CPUARMState
*)cpu_env
)->eabi
) {
6584 if (!lock_user_struct(VERIFY_WRITE
, target_efl
, arg3
, 0))
6586 target_efl
->l_type
= tswap16(fl
.l_type
);
6587 target_efl
->l_whence
= tswap16(fl
.l_whence
);
6588 target_efl
->l_start
= tswap64(fl
.l_start
);
6589 target_efl
->l_len
= tswap64(fl
.l_len
);
6590 target_efl
->l_pid
= tswapl(fl
.l_pid
);
6591 unlock_user_struct(target_efl
, arg3
, 1);
6595 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg3
, 0))
6597 target_fl
->l_type
= tswap16(fl
.l_type
);
6598 target_fl
->l_whence
= tswap16(fl
.l_whence
);
6599 target_fl
->l_start
= tswap64(fl
.l_start
);
6600 target_fl
->l_len
= tswap64(fl
.l_len
);
6601 target_fl
->l_pid
= tswapl(fl
.l_pid
);
6602 unlock_user_struct(target_fl
, arg3
, 1);
6607 case TARGET_F_SETLK64
:
6608 case TARGET_F_SETLKW64
:
6610 if (((CPUARMState
*)cpu_env
)->eabi
) {
6611 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
6613 fl
.l_type
= tswap16(target_efl
->l_type
);
6614 fl
.l_whence
= tswap16(target_efl
->l_whence
);
6615 fl
.l_start
= tswap64(target_efl
->l_start
);
6616 fl
.l_len
= tswap64(target_efl
->l_len
);
6617 fl
.l_pid
= tswapl(target_efl
->l_pid
);
6618 unlock_user_struct(target_efl
, arg3
, 0);
6622 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
6624 fl
.l_type
= tswap16(target_fl
->l_type
);
6625 fl
.l_whence
= tswap16(target_fl
->l_whence
);
6626 fl
.l_start
= tswap64(target_fl
->l_start
);
6627 fl
.l_len
= tswap64(target_fl
->l_len
);
6628 fl
.l_pid
= tswapl(target_fl
->l_pid
);
6629 unlock_user_struct(target_fl
, arg3
, 0);
6631 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
6634 ret
= do_fcntl(arg1
, arg2
, arg3
);
6640 #ifdef TARGET_NR_cacheflush
6641 case TARGET_NR_cacheflush
:
6642 /* self-modifying code is handled automatically, so nothing needed */
6646 #ifdef TARGET_NR_security
6647 case TARGET_NR_security
:
6650 #ifdef TARGET_NR_getpagesize
6651 case TARGET_NR_getpagesize
:
6652 ret
= TARGET_PAGE_SIZE
;
6655 case TARGET_NR_gettid
:
6656 ret
= get_errno(gettid());
6658 #ifdef TARGET_NR_readahead
6659 case TARGET_NR_readahead
:
6660 #if TARGET_ABI_BITS == 32
6662 if (((CPUARMState
*)cpu_env
)->eabi
)
6669 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
6671 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
6675 #ifdef TARGET_NR_setxattr
6676 case TARGET_NR_setxattr
:
6677 case TARGET_NR_lsetxattr
:
6678 case TARGET_NR_fsetxattr
:
6679 case TARGET_NR_getxattr
:
6680 case TARGET_NR_lgetxattr
:
6681 case TARGET_NR_fgetxattr
:
6682 case TARGET_NR_listxattr
:
6683 case TARGET_NR_llistxattr
:
6684 case TARGET_NR_flistxattr
:
6685 case TARGET_NR_removexattr
:
6686 case TARGET_NR_lremovexattr
:
6687 case TARGET_NR_fremovexattr
:
6688 ret
= -TARGET_EOPNOTSUPP
;
6691 #ifdef TARGET_NR_set_thread_area
6692 case TARGET_NR_set_thread_area
:
6693 #if defined(TARGET_MIPS)
6694 ((CPUMIPSState
*) cpu_env
)->tls_value
= arg1
;
6697 #elif defined(TARGET_CRIS)
6699 ret
= -TARGET_EINVAL
;
6701 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
6705 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
6706 ret
= do_set_thread_area(cpu_env
, arg1
);
6709 goto unimplemented_nowarn
;
6712 #ifdef TARGET_NR_get_thread_area
6713 case TARGET_NR_get_thread_area
:
6714 #if defined(TARGET_I386) && defined(TARGET_ABI32)
6715 ret
= do_get_thread_area(cpu_env
, arg1
);
6717 goto unimplemented_nowarn
;
6720 #ifdef TARGET_NR_getdomainname
6721 case TARGET_NR_getdomainname
:
6722 goto unimplemented_nowarn
;
6725 #ifdef TARGET_NR_clock_gettime
6726 case TARGET_NR_clock_gettime
:
6729 ret
= get_errno(clock_gettime(arg1
, &ts
));
6730 if (!is_error(ret
)) {
6731 host_to_target_timespec(arg2
, &ts
);
6736 #ifdef TARGET_NR_clock_getres
6737 case TARGET_NR_clock_getres
:
6740 ret
= get_errno(clock_getres(arg1
, &ts
));
6741 if (!is_error(ret
)) {
6742 host_to_target_timespec(arg2
, &ts
);
6747 #ifdef TARGET_NR_clock_nanosleep
6748 case TARGET_NR_clock_nanosleep
:
6751 target_to_host_timespec(&ts
, arg3
);
6752 ret
= get_errno(clock_nanosleep(arg1
, arg2
, &ts
, arg4
? &ts
: NULL
));
6754 host_to_target_timespec(arg4
, &ts
);
6759 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
6760 case TARGET_NR_set_tid_address
:
6761 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
6765 #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
6766 case TARGET_NR_tkill
:
6767 ret
= get_errno(sys_tkill((int)arg1
, target_to_host_signal(arg2
)));
6771 #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
6772 case TARGET_NR_tgkill
:
6773 ret
= get_errno(sys_tgkill((int)arg1
, (int)arg2
,
6774 target_to_host_signal(arg3
)));
6778 #ifdef TARGET_NR_set_robust_list
6779 case TARGET_NR_set_robust_list
:
6780 goto unimplemented_nowarn
;
6783 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
6784 case TARGET_NR_utimensat
:
6786 struct timespec
*tsp
, ts
[2];
6790 target_to_host_timespec(ts
, arg3
);
6791 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
6795 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
6797 if (!(p
= lock_user_string(arg2
))) {
6798 ret
= -TARGET_EFAULT
;
6801 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
6802 unlock_user(p
, arg2
, 0);
6807 #if defined(USE_NPTL)
6808 case TARGET_NR_futex
:
6809 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6812 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
6813 case TARGET_NR_inotify_init
:
6814 ret
= get_errno(sys_inotify_init());
6817 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
6818 case TARGET_NR_inotify_add_watch
:
6819 p
= lock_user_string(arg2
);
6820 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
6821 unlock_user(p
, arg2
, 0);
6824 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
6825 case TARGET_NR_inotify_rm_watch
:
6826 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
6830 #ifdef TARGET_NR_mq_open
6831 case TARGET_NR_mq_open
:
6833 struct mq_attr posix_mq_attr
;
6835 p
= lock_user_string(arg1
- 1);
6837 copy_from_user_mq_attr (&posix_mq_attr
, arg4
);
6838 ret
= get_errno(mq_open(p
, arg2
, arg3
, &posix_mq_attr
));
6839 unlock_user (p
, arg1
, 0);
6843 case TARGET_NR_mq_unlink
:
6844 p
= lock_user_string(arg1
- 1);
6845 ret
= get_errno(mq_unlink(p
));
6846 unlock_user (p
, arg1
, 0);
6849 case TARGET_NR_mq_timedsend
:
6853 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
6855 target_to_host_timespec(&ts
, arg5
);
6856 ret
= get_errno(mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
6857 host_to_target_timespec(arg5
, &ts
);
6860 ret
= get_errno(mq_send(arg1
, p
, arg3
, arg4
));
6861 unlock_user (p
, arg2
, arg3
);
6865 case TARGET_NR_mq_timedreceive
:
6870 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
6872 target_to_host_timespec(&ts
, arg5
);
6873 ret
= get_errno(mq_timedreceive(arg1
, p
, arg3
, &prio
, &ts
));
6874 host_to_target_timespec(arg5
, &ts
);
6877 ret
= get_errno(mq_receive(arg1
, p
, arg3
, &prio
));
6878 unlock_user (p
, arg2
, arg3
);
6880 put_user_u32(prio
, arg4
);
6884 /* Not implemented for now... */
6885 /* case TARGET_NR_mq_notify: */
6888 case TARGET_NR_mq_getsetattr
:
6890 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
6893 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
6894 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
6897 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
6898 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
6905 #ifdef CONFIG_SPLICE
6906 #ifdef TARGET_NR_tee
6909 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
6913 #ifdef TARGET_NR_splice
6914 case TARGET_NR_splice
:
6916 loff_t loff_in
, loff_out
;
6917 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
6919 get_user_u64(loff_in
, arg2
);
6920 ploff_in
= &loff_in
;
6923 get_user_u64(loff_out
, arg2
);
6924 ploff_out
= &loff_out
;
6926 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
6930 #ifdef TARGET_NR_vmsplice
6931 case TARGET_NR_vmsplice
:
6936 vec
= alloca(count
* sizeof(struct iovec
));
6937 if (lock_iovec(VERIFY_READ
, vec
, arg2
, count
, 1) < 0)
6939 ret
= get_errno(vmsplice(arg1
, vec
, count
, arg4
));
6940 unlock_iovec(vec
, arg2
, count
, 0);
6944 #endif /* CONFIG_SPLICE */
6947 gemu_log("qemu: Unsupported syscall: %d\n", num
);
6948 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
6949 unimplemented_nowarn
:
6951 ret
= -TARGET_ENOSYS
;
6956 gemu_log(" = %ld\n", ret
);
6959 print_syscall_ret(num
, ret
);
6962 ret
= -TARGET_EFAULT
;