/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include <sys/types.h>
#include <sys/mount.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/times.h>
#include <sys/statfs.h>
#include <sys/sysinfo.h>
#include <sys/utsname.h>
//#include <sys/user.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <qemu-common.h>
#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/utsname.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/mtio.h>
#include "linux_loop.h"

#include "qemu-common.h"
#if defined(USE_NPTL)
#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
    CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
#else
/* XXX: Hardcode the above values.  */
#define CLONE_NPTL_FLAGS2 0
#endif

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH  _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
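
/*
 * For example, the declaration
 *
 *     _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp,
 *               uint, count)
 *
 * expands to a thin static wrapper of the form
 *
 *     static int sys_getdents (uint fd, struct linux_dirent * dirp, uint count)
 *     {
 *         return syscall(__NR_sys_getdents, fd, dirp, count);
 *     }
 *
 * i.e. the arguments are passed straight to the host syscall(2), relying on
 * the __NR_sys_* aliases defined below.
 */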
#define __NR_sys_uname __NR_uname
#define __NR_sys_faccessat __NR_faccessat
#define __NR_sys_fchmodat __NR_fchmodat
#define __NR_sys_fchownat __NR_fchownat
#define __NR_sys_fstatat64 __NR_fstatat64
#define __NR_sys_futimesat __NR_futimesat
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_linkat __NR_linkat
#define __NR_sys_mkdirat __NR_mkdirat
#define __NR_sys_mknodat __NR_mknodat
#define __NR_sys_newfstatat __NR_newfstatat
#define __NR_sys_openat __NR_openat
#define __NR_sys_readlinkat __NR_readlinkat
#define __NR_sys_renameat __NR_renameat
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_symlinkat __NR_symlinkat
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_tgkill __NR_tgkill
#define __NR_sys_tkill __NR_tkill
#define __NR_sys_unlinkat __NR_unlinkat
#define __NR_sys_utimensat __NR_utimensat
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
#if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__)
#define __NR__llseek __NR_lseek
#endif

#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    return -ENOSYS;
}
#endif
#if TARGET_ABI_BITS == 32
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
_syscall2(int, sys_getpriority, int, which, int, who);
#if defined(TARGET_NR__llseek) && !defined (__x86_64__)
_syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
_syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
#endif
#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
_syscall2(int,sys_tkill,int,tid,int,sig)
#endif
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(USE_NPTL)
#if defined(TARGET_NR_futex) && defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#endif
static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
  { 0, 0, 0, 0 }
};
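
/*
 * Each entry maps (target mask, target bits) to (host mask, host bits), so
 * translating a guest flags word is a single call, e.g.
 *
 *     host_flags = target_to_host_bitmask(target_flags, fcntl_flags_tbl);
 *
 * mirroring the call made in sys_openat() below for the O_CREAT mode
 * argument.
 */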
#define COPY_UTSNAME_FIELD(dest, src) \
  do { \
      /* __NEW_UTS_LEN doesn't include terminating null */ \
      (void) strncpy((dest), (src), __NEW_UTS_LEN); \
      (dest)[__NEW_UTS_LEN] = '\0'; \
  } while (0)

static int sys_uname(struct new_utsname *buf)
{
  struct utsname uts_buf;

  if (uname(&uts_buf) < 0)
      return (-1);

  /*
   * Just in case these have some differences, we
   * translate utsname to new_utsname (which is the
   * struct linux kernel uses).
   */

  bzero(buf, sizeof (*buf));
  COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
  COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
  COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
  COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
  COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
  COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
  return (0);
#undef COPY_UTSNAME_FIELD
}

static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}
/*
 * Host system seems to have atfile syscall stubs available.  We
 * now enable them one by one as specified by target syscall_nr.h.
 */
#ifdef CONFIG_ATFILE

#ifdef TARGET_NR_faccessat
static int sys_faccessat(int dirfd, const char *pathname, int mode)
{
  return (faccessat(dirfd, pathname, mode, 0));
}
#endif
#ifdef TARGET_NR_fchmodat
static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
{
  return (fchmodat(dirfd, pathname, mode, 0));
}
#endif
#if defined(TARGET_NR_fchownat) && defined(USE_UID16)
static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
    gid_t group, int flags)
{
  return (fchownat(dirfd, pathname, owner, group, flags));
}
#endif
#ifdef __NR_fstatat64
static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
    int flags)
{
  return (fstatat(dirfd, pathname, buf, flags));
}
#endif
#ifdef __NR_newfstatat
static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
    int flags)
{
  return (fstatat(dirfd, pathname, buf, flags));
}
#endif
#ifdef TARGET_NR_futimesat
static int sys_futimesat(int dirfd, const char *pathname,
    const struct timeval times[2])
{
  return (futimesat(dirfd, pathname, times));
}
#endif
#ifdef TARGET_NR_linkat
static int sys_linkat(int olddirfd, const char *oldpath,
    int newdirfd, const char *newpath, int flags)
{
  return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
}
#endif
#ifdef TARGET_NR_mkdirat
static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
{
  return (mkdirat(dirfd, pathname, mode));
}
#endif
#ifdef TARGET_NR_mknodat
static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
    dev_t dev)
{
  return (mknodat(dirfd, pathname, mode, dev));
}
#endif
#ifdef TARGET_NR_openat
static int sys_openat(int dirfd, const char *pathname, int flags, ...)
{
  /*
   * open(2) has extra parameter 'mode' when called with
   * flag O_CREAT.
   */
  if ((flags & O_CREAT) != 0) {
      va_list ap;
      mode_t mode;

      /*
       * Get the 'mode' parameter and translate it to
       * host bits.
       */
      va_start(ap, flags);
      mode = va_arg(ap, mode_t);
      mode = target_to_host_bitmask(mode, fcntl_flags_tbl);
      va_end(ap);

      return (openat(dirfd, pathname, flags, mode));
  }
  return (openat(dirfd, pathname, flags));
}
#endif
#ifdef TARGET_NR_readlinkat
static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
{
  return (readlinkat(dirfd, pathname, buf, bufsiz));
}
#endif
#ifdef TARGET_NR_renameat
static int sys_renameat(int olddirfd, const char *oldpath,
    int newdirfd, const char *newpath)
{
  return (renameat(olddirfd, oldpath, newdirfd, newpath));
}
#endif
#ifdef TARGET_NR_symlinkat
static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
{
  return (symlinkat(oldpath, newdirfd, newpath));
}
#endif
#ifdef TARGET_NR_unlinkat
static int sys_unlinkat(int dirfd, const char *pathname, int flags)
{
  return (unlinkat(dirfd, pathname, flags));
}
#endif
#else /* !CONFIG_ATFILE */

/*
 * Try direct syscalls instead
 */
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
_syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
#endif
#if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
_syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
#endif
#if defined(TARGET_NR_fchownat) && defined(__NR_fchownat) && defined(USE_UID16)
_syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
          uid_t,owner,gid_t,group,int,flags)
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
        defined(__NR_fstatat64)
_syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
          struct stat *,buf,int,flags)
#endif
#if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
_syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
          const struct timeval *,times)
#endif
#if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
        defined(__NR_newfstatat)
_syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
          struct stat *,buf,int,flags)
#endif
#if defined(TARGET_NR_linkat) && defined(__NR_linkat)
_syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
          int,newdirfd,const char *,newpath,int,flags)
#endif
#if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
_syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
#endif
#if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
_syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
          mode_t,mode,dev_t,dev)
#endif
#if defined(TARGET_NR_openat) && defined(__NR_openat)
_syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
#endif
#if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
_syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
          char *,buf,size_t,bufsize)
#endif
#if defined(TARGET_NR_renameat) && defined(__NR_renameat)
_syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
          int,newdirfd,const char *,newpath)
#endif
#if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
_syscall3(int,sys_symlinkat,const char *,oldpath,
          int,newdirfd,const char *,newpath)
#endif
#if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
_syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
#endif

#endif /* CONFIG_ATFILE */
#ifdef CONFIG_UTIMENSAT
static int sys_utimensat(int dirfd, const char *pathname,
    const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
#else
#if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#endif
#endif /* CONFIG_UTIMENSAT  */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY  */
extern int personality(int);
extern int flock(int, int);
extern int setfsuid(int);
extern int setfsgid(int);
extern int setgroups(int, gid_t *);

#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};
/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EIDRM]            = TARGET_EIDRM,
    [ECHRNG]           = TARGET_ECHRNG,
    [EL2NSYNC]         = TARGET_EL2NSYNC,
    [EL3HLT]           = TARGET_EL3HLT,
    [EL3RST]           = TARGET_EL3RST,
    [ELNRNG]           = TARGET_ELNRNG,
    [EUNATCH]          = TARGET_EUNATCH,
    [ENOCSI]           = TARGET_ENOCSI,
    [EL2HLT]           = TARGET_EL2HLT,
    [EDEADLK]          = TARGET_EDEADLK,
    [ENOLCK]           = TARGET_ENOLCK,
    [EBADE]            = TARGET_EBADE,
    [EBADR]            = TARGET_EBADR,
    [EXFULL]           = TARGET_EXFULL,
    [ENOANO]           = TARGET_ENOANO,
    [EBADRQC]          = TARGET_EBADRQC,
    [EBADSLT]          = TARGET_EBADSLT,
    [EBFONT]           = TARGET_EBFONT,
    [ENOSTR]           = TARGET_ENOSTR,
    [ENODATA]          = TARGET_ENODATA,
    [ETIME]            = TARGET_ETIME,
    [ENOSR]            = TARGET_ENOSR,
    [ENONET]           = TARGET_ENONET,
    [ENOPKG]           = TARGET_ENOPKG,
    [EREMOTE]          = TARGET_EREMOTE,
    [ENOLINK]          = TARGET_ENOLINK,
    [EADV]             = TARGET_EADV,
    [ESRMNT]           = TARGET_ESRMNT,
    [ECOMM]            = TARGET_ECOMM,
    [EPROTO]           = TARGET_EPROTO,
    [EDOTDOT]          = TARGET_EDOTDOT,
    [EMULTIHOP]        = TARGET_EMULTIHOP,
    [EBADMSG]          = TARGET_EBADMSG,
    [ENAMETOOLONG]     = TARGET_ENAMETOOLONG,
    [EOVERFLOW]        = TARGET_EOVERFLOW,
    [ENOTUNIQ]         = TARGET_ENOTUNIQ,
    [EBADFD]           = TARGET_EBADFD,
    [EREMCHG]          = TARGET_EREMCHG,
    [ELIBACC]          = TARGET_ELIBACC,
    [ELIBBAD]          = TARGET_ELIBBAD,
    [ELIBSCN]          = TARGET_ELIBSCN,
    [ELIBMAX]          = TARGET_ELIBMAX,
    [ELIBEXEC]         = TARGET_ELIBEXEC,
    [EILSEQ]           = TARGET_EILSEQ,
    [ENOSYS]           = TARGET_ENOSYS,
    [ELOOP]            = TARGET_ELOOP,
    [ERESTART]         = TARGET_ERESTART,
    [ESTRPIPE]         = TARGET_ESTRPIPE,
    [ENOTEMPTY]        = TARGET_ENOTEMPTY,
    [EUSERS]           = TARGET_EUSERS,
    [ENOTSOCK]         = TARGET_ENOTSOCK,
    [EDESTADDRREQ]     = TARGET_EDESTADDRREQ,
    [EMSGSIZE]         = TARGET_EMSGSIZE,
    [EPROTOTYPE]       = TARGET_EPROTOTYPE,
    [ENOPROTOOPT]      = TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT]  = TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT]  = TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP]       = TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT]     = TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT]     = TARGET_EAFNOSUPPORT,
    [EADDRINUSE]       = TARGET_EADDRINUSE,
    [EADDRNOTAVAIL]    = TARGET_EADDRNOTAVAIL,
    [ENETDOWN]         = TARGET_ENETDOWN,
    [ENETUNREACH]      = TARGET_ENETUNREACH,
    [ENETRESET]        = TARGET_ENETRESET,
    [ECONNABORTED]     = TARGET_ECONNABORTED,
    [ECONNRESET]       = TARGET_ECONNRESET,
    [ENOBUFS]          = TARGET_ENOBUFS,
    [EISCONN]          = TARGET_EISCONN,
    [ENOTCONN]         = TARGET_ENOTCONN,
    [EUCLEAN]          = TARGET_EUCLEAN,
    [ENOTNAM]          = TARGET_ENOTNAM,
    [ENAVAIL]          = TARGET_ENAVAIL,
    [EISNAM]           = TARGET_EISNAM,
    [EREMOTEIO]        = TARGET_EREMOTEIO,
    [ESHUTDOWN]        = TARGET_ESHUTDOWN,
    [ETOOMANYREFS]     = TARGET_ETOOMANYREFS,
    [ETIMEDOUT]        = TARGET_ETIMEDOUT,
    [ECONNREFUSED]     = TARGET_ECONNREFUSED,
    [EHOSTDOWN]        = TARGET_EHOSTDOWN,
    [EHOSTUNREACH]     = TARGET_EHOSTUNREACH,
    [EALREADY]         = TARGET_EALREADY,
    [EINPROGRESS]      = TARGET_EINPROGRESS,
    [ESTALE]           = TARGET_ESTALE,
    [ECANCELED]        = TARGET_ECANCELED,
    [ENOMEDIUM]        = TARGET_ENOMEDIUM,
    [EMEDIUMTYPE]      = TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY]           = TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED]      = TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED]      = TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED]     = TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD]       = TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE]  = TARGET_ENOTRECOVERABLE,
#endif
};
static inline int host_to_target_errno(int err)
{
    if (host_to_target_errno_table[err])
        return host_to_target_errno_table[err];
    return err;
}

static inline int target_to_host_errno(int err)
{
    if (target_to_host_errno_table[err])
        return target_to_host_errno_table[err];
    return err;
}

static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}

static inline int is_error(abi_long ret)
{
    return (abi_ulong)ret >= (abi_ulong)(-4096);
}

char *target_strerror(int err)
{
    return strerror(target_to_host_errno(err));
}
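
/*
 * Host syscall results are mapped back with get_errno()/is_error(): a host
 * failure (return value -1) becomes the negated target errno, e.g. a host
 * ENOSYS is reported to the guest as -TARGET_ENOSYS, and is_error() treats
 * any value in the top 4096 of the address space as an error code,
 * mirroring the kernel's own convention for syscall return values.
 */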
static abi_ulong target_brk;
static abi_ulong target_original_brk;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
}

/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_ulong brk_page;
    abi_long mapped_addr;
    int new_alloc_size;

    if (new_brk < target_original_brk)
        return target_brk;

    brk_page = HOST_PAGE_ALIGN(target_brk);

    /* If the new brk is less than this, set it and we're done... */
    if (new_brk < brk_page) {
        target_brk = new_brk;
        return target_brk;
    }

    /* We need to allocate more memory after the brk... */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page + 1);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_FIXED|MAP_PRIVATE, 0, 0));

    if (!is_error(mapped_addr))
        target_brk = new_brk;

    return target_brk;
}
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
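
/*
 * e.g. with HOST_HZ == 1024 (the alpha case above) and TARGET_HZ == 100,
 * 2048 host ticks are reported to the guest as (2048 * 100) / 1024 == 200
 * clock ticks.
 */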
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapl(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapl(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapl(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapl(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapl(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapl(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapl(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapl(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapl(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapl(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapl(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapl(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapl(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapl(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapl(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapl(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapl(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapl(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv, *tv_ptr;
    abi_long ret;

    if (rfd_addr) {
        if (copy_from_user_fdset(&rfds, rfd_addr, n))
            return -TARGET_EFAULT;
        rfds_ptr = &rfds;
    } else {
        rfds_ptr = NULL;
    }
    if (wfd_addr) {
        if (copy_from_user_fdset(&wfds, wfd_addr, n))
            return -TARGET_EFAULT;
        wfds_ptr = &wfds;
    } else {
        wfds_ptr = NULL;
    }
    if (efd_addr) {
        if (copy_from_user_fdset(&efds, efd_addr, n))
            return -TARGET_EFAULT;
        efds_ptr = &efds;
    } else {
        efds_ptr = NULL;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        tv_ptr = &tv;
    } else {
        tv_ptr = NULL;
    }

    ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
            return -TARGET_EFAULT;
    }

    return ret;
}
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}

static abi_long do_pipe(void *cpu_env, abi_ulong pipedes, int flags)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);
#if defined(TARGET_MIPS)
    ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
    ret = host_pipe[0];
#elif defined(TARGET_SH4)
    ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
    ret = host_pipe[0];
#else
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
#endif
    return get_errno(ret);
}
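
/*
 * MIPS and SH4 return the second descriptor in a guest register (the pipe
 * syscall has two return values on those ABIs); everybody else gets both
 * descriptors written back to the guest's int[2] array at 'pipedes'.
 */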
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapl(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}

static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}

static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    target_saddr->sa_family = tswap16(addr->sa_family);
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
/* ??? Should this also swap msgh->name?  */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapl(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapl(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapl(target_cmsg->cmsg_len)
                  - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            gemu_log("Host cmsg overflow\n");
            break;
        }

        cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
            gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        } else {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                fd[i] = tswap32(target_fd[i]);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}

/* ??? Should this also swap msgh->name?  */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapl(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapl(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));

        space += TARGET_CMSG_SPACE(len);
        if (space > msg_controllen) {
            space -= TARGET_CMSG_SPACE(len);
            gemu_log("Target cmsg overflow\n");
            break;
        }

        target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
        target_cmsg->cmsg_len = tswapl(TARGET_CMSG_LEN(len));

        if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
            gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, len);
        } else {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                target_fd[i] = tswap32(fd[i]);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapl(space);
    return 0;
}
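
/*
 * Only SOL_SOCKET/SCM_RIGHTS payloads get real translation: the payload is
 * an array of file descriptors, each byte-swapped individually.  E.g. a
 * guest sendmsg() passing 3 fds carries len == 3 * sizeof(int) bytes of
 * data, giving numfds == 3 in the loops above.  Anything else is copied
 * through verbatim with a warning.
 */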
/* do_setsockopt() Must return target values and target errnos. */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_ROUTER_ALERT:
        case IP_MTU_DISCOVER:
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case TARGET_SOL_SOCKET:
        switch (optname) {
            /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            break;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            break;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            break;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            break;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            break;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            break;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            break;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            break;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            break;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            break;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            break;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            break;
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            break;
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            break;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            break;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            break;
        case TARGET_SO_RCVTIMEO:
            optname = SO_RCVTIMEO;
            break;
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            break;
        default:
            goto unimplemented;
        }
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
    default:
    unimplemented:
        gemu_log("Unsupported setsockopt level=%d optname=%d \n", level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}
/* do_getsockopt() Must return target values and target errnos. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERCRED:
        case TARGET_SO_PEERNAME:
            /* These don't just return a single integer */
            goto unimplemented;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        lv = sizeof(int);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_ROUTER_ALERT:
        case IP_MTU_DISCOVER:
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(int);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
/*
 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
 * other lock functions have a return code of 0 for failure.
 */
static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
                           int count, int copy)
{
    struct target_iovec *target_vec;
    abi_ulong base;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
    if (!target_vec)
        return -TARGET_EFAULT;
    for(i = 0;i < count; i++) {
        base = tswapl(target_vec[i].iov_base);
        vec[i].iov_len = tswapl(target_vec[i].iov_len);
        if (vec[i].iov_len != 0) {
            vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
            /* Don't check lock_user return value. We must call writev even
               if a element has invalid base address. */
        } else {
            /* zero length pointer is ignored */
            vec[i].iov_base = NULL;
        }
    }
    unlock_user (target_vec, target_addr, 0);
    return 0;
}

static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                             int count, int copy)
{
    struct target_iovec *target_vec;
    abi_ulong base;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
    if (!target_vec)
        return -TARGET_EFAULT;
    for(i = 0;i < count; i++) {
        if (target_vec[i].iov_base) {
            base = tswapl(target_vec[i].iov_base);
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
    }
    unlock_user (target_vec, target_addr, 0);

    return 0;
}
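
/*
 * A sketch of the typical calling pattern (illustrative only, not taken
 * verbatim from this file): lock the guest iovec array, run the host
 * vectored call, then unlock with copy != 0 only when data was written
 * back to the guest, e.g.
 *
 *     struct iovec *vec = alloca(count * sizeof(struct iovec));
 *     lock_iovec(VERIFY_READ, vec, target_vec, count, 1);
 *     ret = get_errno(writev(fd, vec, count));
 *     unlock_iovec(vec, target_vec, count, 0);
 *
 * do_sendrecvmsg() below follows exactly this shape around sendmsg/recvmsg.
 */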
/* do_socket() Must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
{
#if defined(TARGET_MIPS)
    switch(type) {
    case TARGET_SOCK_DGRAM:
        type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        type = SOCK_STREAM;
        break;
    case TARGET_SOCK_RAW:
        type = SOCK_RAW;
        break;
    case TARGET_SOCK_RDM:
        type = SOCK_RDM;
        break;
    case TARGET_SOCK_SEQPACKET:
        type = SOCK_SEQPACKET;
        break;
    case TARGET_SOCK_PACKET:
        type = SOCK_PACKET;
        break;
    }
#endif
    if (domain == PF_NETLINK)
        return -EAFNOSUPPORT; /* do not NETLINK socket connections possible */
    return get_errno(socket(domain, type, protocol));
}

/* do_bind() Must return target values and target errnos. */
static abi_long do_bind(int sockfd, abi_ulong target_addr,
                        socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if (addrlen < 0)
        return -TARGET_EINVAL;

    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(bind(sockfd, addr, addrlen));
}

/* do_connect() Must return target values and target errnos. */
static abi_long do_connect(int sockfd, abi_ulong target_addr,
                           socklen_t addrlen)
{
    void *addr;
    abi_long ret;

    if (addrlen < 0)
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret = target_to_host_sockaddr(addr, target_addr, addrlen);
    if (ret)
        return ret;

    return get_errno(connect(sockfd, addr, addrlen));
}
/* do_sendrecvmsg() Must return target values and target errnos. */
static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
{
    abi_long ret, len;
    struct target_msghdr *msgp;
    struct msghdr msg;
    int count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
                          msgp,
                          target_msg,
                          send ? 1 : 0))
        return -TARGET_EFAULT;
    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen);
        ret = target_to_host_sockaddr(msg.msg_name, tswapl(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret) {
            unlock_user_struct(msgp, target_msg, send ? 0 : 1);
            return ret;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    msg.msg_controllen = 2 * tswapl(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapl(msgp->msg_iovlen);
    vec = alloca(count * sizeof(struct iovec));
    target_vec = tswapl(msgp->msg_iov);
    lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        ret = target_to_host_cmsg(&msg, msgp);
        if (ret == 0)
            ret = get_errno(sendmsg(fd, &msg, flags));
    } else {
        ret = get_errno(recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            ret = host_to_target_cmsg(msgp, &msg);
            if (!is_error(ret))
                ret = len;
        }
    }
    unlock_iovec(vec, target_vec, count, !send);
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
    return ret;
}

/* do_accept() Must return target values and target errnos. */
static abi_long do_accept(int fd, abi_ulong target_addr,
                          abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (target_addr == 0)
       return get_errno(accept(fd, NULL, NULL));

    /* linux returns EINVAL if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EINVAL;

    if ((int)addrlen < 0)
        return -TARGET_EINVAL;

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret = get_errno(accept(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}

/* do_getpeername() Must return target values and target errnos. */
static abi_long do_getpeername(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0)
        return -TARGET_EINVAL;

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret = get_errno(getpeername(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}

/* do_getsockname() Must return target values and target errnos. */
static abi_long do_getsockname(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0)
        return -TARGET_EINVAL;

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret = get_errno(getsockname(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}

/* do_socketpair() Must return target values and target errnos. */
static abi_long do_socketpair(int domain, int type, int protocol,
                              abi_ulong target_tab_addr)
{
    int tab[2];
    abi_long ret;

    ret = get_errno(socketpair(domain, type, protocol, tab));
    if (!is_error(ret)) {
        if (put_user_s32(tab[0], target_tab_addr)
            || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
1696 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
1697 abi_ulong target_addr
, socklen_t addrlen
)
1704 return -TARGET_EINVAL
;
1706 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
1708 return -TARGET_EFAULT
;
1710 addr
= alloca(addrlen
);
1711 ret
= target_to_host_sockaddr(addr
, target_addr
, addrlen
);
1713 unlock_user(host_msg
, msg
, 0);
1716 ret
= get_errno(sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
1718 ret
= get_errno(send(fd
, host_msg
, len
, flags
));
1720 unlock_user(host_msg
, msg
, 0);
1724 /* do_recvfrom() Must return target values and target errnos. */
1725 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
1726 abi_ulong target_addr
,
1727 abi_ulong target_addrlen
)
1734 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
1736 return -TARGET_EFAULT
;
1738 if (get_user_u32(addrlen
, target_addrlen
)) {
1739 ret
= -TARGET_EFAULT
;
1743 ret
= -TARGET_EINVAL
;
1746 addr
= alloca(addrlen
);
1747 ret
= get_errno(recvfrom(fd
, host_msg
, len
, flags
, addr
, &addrlen
));
1749 addr
= NULL
; /* To keep compiler quiet. */
1750 ret
= get_errno(recv(fd
, host_msg
, len
, flags
));
1752 if (!is_error(ret
)) {
1754 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
1755 if (put_user_u32(addrlen
, target_addrlen
)) {
1756 ret
= -TARGET_EFAULT
;
1760 unlock_user(host_msg
, msg
, len
);
1763 unlock_user(host_msg
, msg
, 0);
1768 #ifdef TARGET_NR_socketcall
1769 /* do_socketcall() Must return target values and target errnos. */
1770 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
1773 const int n
= sizeof(abi_ulong
);
1778 abi_ulong domain
, type
, protocol
;
1780 if (get_user_ual(domain
, vptr
)
1781 || get_user_ual(type
, vptr
+ n
)
1782 || get_user_ual(protocol
, vptr
+ 2 * n
))
1783 return -TARGET_EFAULT
;
1785 ret
= do_socket(domain
, type
, protocol
);
1791 abi_ulong target_addr
;
1794 if (get_user_ual(sockfd
, vptr
)
1795 || get_user_ual(target_addr
, vptr
+ n
)
1796 || get_user_ual(addrlen
, vptr
+ 2 * n
))
1797 return -TARGET_EFAULT
;
1799 ret
= do_bind(sockfd
, target_addr
, addrlen
);
1802 case SOCKOP_connect
:
1805 abi_ulong target_addr
;
1808 if (get_user_ual(sockfd
, vptr
)
1809 || get_user_ual(target_addr
, vptr
+ n
)
1810 || get_user_ual(addrlen
, vptr
+ 2 * n
))
1811 return -TARGET_EFAULT
;
1813 ret
= do_connect(sockfd
, target_addr
, addrlen
);
1818 abi_ulong sockfd
, backlog
;
1820 if (get_user_ual(sockfd
, vptr
)
1821 || get_user_ual(backlog
, vptr
+ n
))
1822 return -TARGET_EFAULT
;
1824 ret
= get_errno(listen(sockfd
, backlog
));
1830 abi_ulong target_addr
, target_addrlen
;
1832 if (get_user_ual(sockfd
, vptr
)
1833 || get_user_ual(target_addr
, vptr
+ n
)
1834 || get_user_ual(target_addrlen
, vptr
+ 2 * n
))
1835 return -TARGET_EFAULT
;
1837 ret
= do_accept(sockfd
, target_addr
, target_addrlen
);
1840 case SOCKOP_getsockname
:
1843 abi_ulong target_addr
, target_addrlen
;
1845 if (get_user_ual(sockfd
, vptr
)
1846 || get_user_ual(target_addr
, vptr
+ n
)
1847 || get_user_ual(target_addrlen
, vptr
+ 2 * n
))
1848 return -TARGET_EFAULT
;
1850 ret
= do_getsockname(sockfd
, target_addr
, target_addrlen
);
1853 case SOCKOP_getpeername
:
1856 abi_ulong target_addr
, target_addrlen
;
1858 if (get_user_ual(sockfd
, vptr
)
1859 || get_user_ual(target_addr
, vptr
+ n
)
1860 || get_user_ual(target_addrlen
, vptr
+ 2 * n
))
1861 return -TARGET_EFAULT
;
1863 ret
= do_getpeername(sockfd
, target_addr
, target_addrlen
);
1866 case SOCKOP_socketpair
:
1868 abi_ulong domain
, type
, protocol
;
1871 if (get_user_ual(domain
, vptr
)
1872 || get_user_ual(type
, vptr
+ n
)
1873 || get_user_ual(protocol
, vptr
+ 2 * n
)
1874 || get_user_ual(tab
, vptr
+ 3 * n
))
1875 return -TARGET_EFAULT
;
1877 ret
= do_socketpair(domain
, type
, protocol
, tab
);
1887 if (get_user_ual(sockfd
, vptr
)
1888 || get_user_ual(msg
, vptr
+ n
)
1889 || get_user_ual(len
, vptr
+ 2 * n
)
1890 || get_user_ual(flags
, vptr
+ 3 * n
))
1891 return -TARGET_EFAULT
;
1893 ret
= do_sendto(sockfd
, msg
, len
, flags
, 0, 0);
1903 if (get_user_ual(sockfd
, vptr
)
1904 || get_user_ual(msg
, vptr
+ n
)
1905 || get_user_ual(len
, vptr
+ 2 * n
)
1906 || get_user_ual(flags
, vptr
+ 3 * n
))
1907 return -TARGET_EFAULT
;
1909 ret
= do_recvfrom(sockfd
, msg
, len
, flags
, 0, 0);
1921 if (get_user_ual(sockfd
, vptr
)
1922 || get_user_ual(msg
, vptr
+ n
)
1923 || get_user_ual(len
, vptr
+ 2 * n
)
1924 || get_user_ual(flags
, vptr
+ 3 * n
)
1925 || get_user_ual(addr
, vptr
+ 4 * n
)
1926 || get_user_ual(addrlen
, vptr
+ 5 * n
))
1927 return -TARGET_EFAULT
;
1929 ret
= do_sendto(sockfd
, msg
, len
, flags
, addr
, addrlen
);
1932 case SOCKOP_recvfrom
:
1941 if (get_user_ual(sockfd
, vptr
)
1942 || get_user_ual(msg
, vptr
+ n
)
1943 || get_user_ual(len
, vptr
+ 2 * n
)
1944 || get_user_ual(flags
, vptr
+ 3 * n
)
1945 || get_user_ual(addr
, vptr
+ 4 * n
)
1946 || get_user_ual(addrlen
, vptr
+ 5 * n
))
1947 return -TARGET_EFAULT
;
1949 ret
= do_recvfrom(sockfd
, msg
, len
, flags
, addr
, addrlen
);
1952 case SOCKOP_shutdown
:
1954 abi_ulong sockfd
, how
;
1956 if (get_user_ual(sockfd
, vptr
)
1957 || get_user_ual(how
, vptr
+ n
))
1958 return -TARGET_EFAULT
;
1960 ret
= get_errno(shutdown(sockfd
, how
));
1963 case SOCKOP_sendmsg
:
1964 case SOCKOP_recvmsg
:
1967 abi_ulong target_msg
;
1970 if (get_user_ual(fd
, vptr
)
1971 || get_user_ual(target_msg
, vptr
+ n
)
1972 || get_user_ual(flags
, vptr
+ 2 * n
))
1973 return -TARGET_EFAULT
;
1975 ret
= do_sendrecvmsg(fd
, target_msg
, flags
,
1976 (num
== SOCKOP_sendmsg
));
1979 case SOCKOP_setsockopt
:
1987 if (get_user_ual(sockfd
, vptr
)
1988 || get_user_ual(level
, vptr
+ n
)
1989 || get_user_ual(optname
, vptr
+ 2 * n
)
1990 || get_user_ual(optval
, vptr
+ 3 * n
)
1991 || get_user_ual(optlen
, vptr
+ 4 * n
))
1992 return -TARGET_EFAULT
;
1994 ret
= do_setsockopt(sockfd
, level
, optname
, optval
, optlen
);
1997 case SOCKOP_getsockopt
:
2005 if (get_user_ual(sockfd
, vptr
)
2006 || get_user_ual(level
, vptr
+ n
)
2007 || get_user_ual(optname
, vptr
+ 2 * n
)
2008 || get_user_ual(optval
, vptr
+ 3 * n
)
2009 || get_user_ual(optlen
, vptr
+ 4 * n
))
2010 return -TARGET_EFAULT
;
2012 ret
= do_getsockopt(sockfd
, level
, optname
, optval
, optlen
);
2016 gemu_log("Unsupported socketcall: %d\n", num
);
2017 ret
= -TARGET_ENOSYS
;
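
/*
 * socketcall(2) multiplexes all socket operations through one syscall:
 * 'num' selects the operation and 'vptr' points to an array of abi_ulong
 * arguments in guest memory, read above as vptr + 0*n, vptr + 1*n, ...
 * For instance a guest connect(sockfd, addr, addrlen) arrives as
 * num == SOCKOP_connect with { sockfd, addr, addrlen } packed at vptr.
 */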
#define N_SHM_REGIONS 32

static struct shm_region {
    abi_ulong start;
    abi_ulong size;
} shm_regions[N_SHM_REGIONS];

struct target_ipc_perm
{
    abi_long __key;
    abi_ulong uid;
    abi_ulong gid;
    abi_ulong cuid;
    abi_ulong cgid;
    unsigned short int mode;
    unsigned short int __pad1;
    unsigned short int __seq;
    unsigned short int __pad2;
    abi_ulong __unused1;
    abi_ulong __unused2;
};

struct target_semid_ds
{
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;
    abi_ulong __unused1;
    abi_ulong sem_ctime;
    abi_ulong __unused2;
    abi_ulong sem_nsems;
    abi_ulong __unused3;
    abi_ulong __unused4;
};
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswapl(target_ip->__key);
    host_ip->uid = tswapl(target_ip->uid);
    host_ip->gid = tswapl(target_ip->gid);
    host_ip->cuid = tswapl(target_ip->cuid);
    host_ip->cgid = tswapl(target_ip->cgid);
    host_ip->mode = tswapl(target_ip->mode);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswapl(host_ip->__key);
    target_ip->uid = tswapl(host_ip->uid);
    target_ip->gid = tswapl(host_ip->gid);
    target_ip->cuid = tswapl(host_ip->cuid);
    target_ip->cgid = tswapl(host_ip->cgid);
    target_ip->mode = tswapl(host_ip->mode);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}

static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapl(target_sd->sem_nsems);
    host_sd->sem_otime = tswapl(target_sd->sem_otime);
    host_sd->sem_ctime = tswapl(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapl(host_sd->sem_nsems);
    target_sd->sem_otime = tswapl(host_sd->sem_otime);
    target_sd->sem_ctime = tswapl(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
{
2141 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
2142 struct seminfo
*host_seminfo
)
2144 struct target_seminfo
*target_seminfo
;
2145 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
2146 return -TARGET_EFAULT
;
2147 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
2148 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
2149 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
2150 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
2151 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
2152 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
2153 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
2154 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
2155 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
2156 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
2157 unlock_user_struct(target_seminfo
, target_addr
, 1);
2163 struct semid_ds
*buf
;
2164 unsigned short *array
;
2165 struct seminfo
*__buf
;
2168 union target_semun
{
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = malloc(nsems*sizeof(unsigned short));
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array)
        return -TARGET_EFAULT;

    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}

static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
                                               unsigned short **host_array)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    array = lock_user(VERIFY_WRITE, target_addr,
                      nsems*sizeof(unsigned short), 0);
    if (!array)
        return -TARGET_EFAULT;

    for(i=0; i<nsems; i++) {
        __put_user((*host_array)[i], &array[i]);
    }
    free(*host_array);
    unlock_user(array, target_addr, 1);

    return 0;
}
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 union target_semun target_su)
{
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        arg.val = tswapl(target_su.val);
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        target_su.val = tswapl(arg.val);
        break;
    case GETALL:
    case SETALL:
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
2297 struct target_sembuf
{
2298 unsigned short sem_num
;
2303 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
2304 abi_ulong target_addr
,
2307 struct target_sembuf
*target_sembuf
;
2310 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
2311 nsops
*sizeof(struct target_sembuf
), 1);
2313 return -TARGET_EFAULT
;
2315 for(i
=0; i
<nsops
; i
++) {
2316 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
2317 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
2318 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
2321 unlock_user(target_sembuf
, target_addr
, 0);
static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
{
    struct sembuf sops[nsops];

    if (target_to_host_sembuf(sops, ptr, nsops))
        return -TARGET_EFAULT;

    return semop(semid, sops, nsops);
}
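
/* The target msqid_ds layout follows the kernel's: on 32-bit ABIs each
   time field is followed by a padding word reserved for 64-bit times. */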
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};
static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapl(target_md->msg_stime);
    host_md->msg_rtime = tswapl(target_md->msg_rtime);
    host_md->msg_ctime = tswapl(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapl(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapl(target_md->msg_qnum);
    host_md->msg_qbytes = tswapl(target_md->msg_qbytes);
    host_md->msg_lspid = tswapl(target_md->msg_lspid);
    host_md->msg_lrpid = tswapl(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
    return 0;
}
static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapl(host_md->msg_stime);
    target_md->msg_rtime = tswapl(host_md->msg_rtime);
    target_md->msg_ctime = tswapl(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapl(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapl(host_md->msg_qnum);
    target_md->msg_qbytes = tswapl(host_md->msg_qbytes);
    target_md->msg_lspid = tswapl(host_md->msg_lspid);
    target_md->msg_lrpid = tswapl(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};

static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}
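
/* do_msgctl() mirrors do_semctl(): stat-like commands round-trip a host
   struct msqid_ds through the converters above, IPC_INFO/MSG_INFO only copy
   data back to the guest, and IPC_RMID needs no buffer at all. */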
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
struct target_msgbuf {
    abi_ulong mtype;
    char mtext[1];
};

static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 unsigned int msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    host_mb = malloc(msgsz+sizeof(long));
    host_mb->mtype = (abi_long) tswapl(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
    free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
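
/* For msgrcv() the received text is copied into guest memory just after the
   abi_ulong mtype field, so only mtype itself needs byte-swapping. */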
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 unsigned int msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = malloc(msgsz+sizeof(long));
    ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapl(msgtyp), msgflg));

    if (ret > 0) {
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapl(host_mb->mtype);
    free(host_mb);

end:
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);

    return ret;
}
struct target_shmid_ds
{
    struct target_ipc_perm shm_perm;
    abi_ulong shm_segsz;
    abi_ulong shm_atime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong shm_dtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong shm_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong shm_cpid;
    abi_ulong shm_lpid;
    abi_ulong shm_nattch;
    unsigned long int __unused4;
    unsigned long int __unused5;
};
static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};

static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};

static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}
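
/* do_shmctl() uses the same pattern: shmid_ds is converted both ways for
   IPC_STAT/IPC_SET, while IPC_INFO and SHM_INFO only produce output that is
   copied back with the shminfo/shm_info converters. */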
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
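
/* shmat() must place the segment inside the guest address space.  When the
   guest supplies no address, a free range is found with mmap_find_vma() and
   the host mapping is forced there with SHM_REMAP; the guest page flags are
   then updated so the emulator knows the range is valid and writable. */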
static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i, ret;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (shm_regions[i].start == 0) {
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    return raddr;
}
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;

    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].start == shmaddr) {
            shm_regions[i].start = 0;
            page_set_flags(shmaddr, shm_regions[i].size, 0);
            break;
        }
    }

    return get_errno(shmdt(g2h(shmaddr)));
}
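
/* On targets that multiplex System V IPC through a single ipc() syscall,
   do_ipc() decodes the call number and forwards to the helpers above.  The
   upper 16 bits of 'call' carry the version used by the msgrcv/shmat
   compatibility wrappers. */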
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(unsigned int call, int first,
                       int second, int third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl:
        ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
        break;

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;

    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, third);
        break;

    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
/* kernel structure types definitions */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
};
#undef STRUCT
#undef STRUCT_SPECIAL

#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

typedef struct IOCTLEntry {
    unsigned int target_cmd;
    unsigned int host_cmd;
    const char *name;
    int access;
    const argtype arg_type[5];
} IOCTLEntry;

#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

#define MAX_STRUCT_SIZE 4096

static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, { __VA_ARGS__ } },
#include "ioctls.h"
    { 0, 0, },
};
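
/* Each ioctl is described by an IOCTLEntry: the target and host command
   numbers, a printable name, an IOC_R/IOC_W access mode and a thunk type
   description of its argument, so the generic code below can convert the
   argument buffer in the right direction(s). */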
/* ??? Implement proper locking for ioctls.  */
/* do_ioctl() Must return target values and target errnos. */
static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
#if defined(DEBUG)
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
#endif
    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
        /* int argument */
        ret = get_errno(ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
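
/* Terminal flag translation uses bitmask_transtbl tables.  Each entry is
   { target_mask, target_bits, host_mask, host_bits }: if the bits selected
   by target_mask equal target_bits, host_bits is OR-ed into the result (and
   the reverse for host-to-target).  Multi-bit fields such as CBAUD, CSIZE
   or the delay fields therefore need one entry per possible value. */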
static const bitmask_transtbl iflag_tbl[] = {
        { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
        { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
        { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
        { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
        { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
        { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
        { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
        { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
        { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
        { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
        { TARGET_IXON, TARGET_IXON, IXON, IXON },
        { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
        { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
        { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
        { 0, 0, 0, 0 }
};

static const bitmask_transtbl oflag_tbl[] = {
        { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
        { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
        { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
        { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
        { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
        { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
        { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
        { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
        { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
        { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
        { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
        { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
        { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
        { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
        { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
        { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
        { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
        { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
        { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
        { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
        { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
        { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
        { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
        { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
        { 0, 0, 0, 0 }
};

static const bitmask_transtbl cflag_tbl[] = {
        { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
        { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
        { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
        { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
        { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
        { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
        { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
        { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
        { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
        { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
        { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
        { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
        { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
        { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
        { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
        { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
        { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
        { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
        { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
        { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
        { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
        { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
        { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
        { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
        { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
        { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
        { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
        { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
        { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
        { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
        { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
        { 0, 0, 0, 0 }
};

static const bitmask_transtbl lflag_tbl[] = {
        { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
        { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
        { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
        { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
        { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
        { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
        { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
        { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
        { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
        { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
        { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
        { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
        { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
        { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
        { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
        { 0, 0, 0, 0 }
};
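
/* The c_cc special-character array is indexed differently on some targets,
   so each entry is copied explicitly between the TARGET_V* and host V*
   slots rather than with a memcpy. */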
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}

static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}

static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};
static bitmask_transtbl mmap_flags_tbl[] = {
        { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
        { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
        { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
        { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
        { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
        { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
        { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
        { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
        { 0, 0, 0, 0 }
};
3151 #if defined(TARGET_I386)
3153 /* NOTE: there is really one LDT for all the threads */
3154 static uint8_t *ldt_table
;
3156 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
3163 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
3164 if (size
> bytecount
)
3166 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
3168 return -TARGET_EFAULT
;
3169 /* ??? Should this by byteswapped? */
3170 memcpy(p
, ldt_table
, size
);
3171 unlock_user(p
, ptr
, size
);
3175 /* XXX: add locking support */
3176 static abi_long
write_ldt(CPUX86State
*env
,
3177 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
3179 struct target_modify_ldt_ldt_s ldt_info
;
3180 struct target_modify_ldt_ldt_s
*target_ldt_info
;
3181 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
3182 int seg_not_present
, useable
, lm
;
3183 uint32_t *lp
, entry_1
, entry_2
;
3185 if (bytecount
!= sizeof(ldt_info
))
3186 return -TARGET_EINVAL
;
3187 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
3188 return -TARGET_EFAULT
;
3189 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
3190 ldt_info
.base_addr
= tswapl(target_ldt_info
->base_addr
);
3191 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
3192 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
3193 unlock_user_struct(target_ldt_info
, ptr
, 0);
3195 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
3196 return -TARGET_EINVAL
;
3197 seg_32bit
= ldt_info
.flags
& 1;
3198 contents
= (ldt_info
.flags
>> 1) & 3;
3199 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
3200 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
3201 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
3202 useable
= (ldt_info
.flags
>> 6) & 1;
3206 lm
= (ldt_info
.flags
>> 7) & 1;
3208 if (contents
== 3) {
3210 return -TARGET_EINVAL
;
3211 if (seg_not_present
== 0)
3212 return -TARGET_EINVAL
;
3214 /* allocate the LDT */
3216 env
->ldt
.base
= target_mmap(0,
3217 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
3218 PROT_READ
|PROT_WRITE
,
3219 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
3220 if (env
->ldt
.base
== -1)
3221 return -TARGET_ENOMEM
;
3222 memset(g2h(env
->ldt
.base
), 0,
3223 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
3224 env
->ldt
.limit
= 0xffff;
3225 ldt_table
= g2h(env
->ldt
.base
);
3228 /* NOTE: same code as Linux kernel */
3229 /* Allow LDTs to be cleared by the user. */
3230 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
3233 read_exec_only
== 1 &&
3235 limit_in_pages
== 0 &&
3236 seg_not_present
== 1 &&
3244 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
3245 (ldt_info
.limit
& 0x0ffff);
3246 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
3247 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
3248 (ldt_info
.limit
& 0xf0000) |
3249 ((read_exec_only
^ 1) << 9) |
3251 ((seg_not_present
^ 1) << 15) |
3253 (limit_in_pages
<< 23) |
3257 entry_2
|= (useable
<< 20);
3259 /* Install the new entry ... */
3261 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
3262 lp
[0] = tswap32(entry_1
);
3263 lp
[1] = tswap32(entry_2
);
3267 /* specific and weird i386 syscalls */
3268 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
3269 unsigned long bytecount
)
3275 ret
= read_ldt(ptr
, bytecount
);
3278 ret
= write_ldt(env
, ptr
, bytecount
, 1);
3281 ret
= write_ldt(env
, ptr
, bytecount
, 0);
3284 ret
= -TARGET_ENOSYS
;
3290 #if defined(TARGET_I386) && defined(TARGET_ABI32)
3291 static abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
3293 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
3294 struct target_modify_ldt_ldt_s ldt_info
;
3295 struct target_modify_ldt_ldt_s
*target_ldt_info
;
3296 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
3297 int seg_not_present
, useable
, lm
;
3298 uint32_t *lp
, entry_1
, entry_2
;
3301 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
3302 if (!target_ldt_info
)
3303 return -TARGET_EFAULT
;
3304 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
3305 ldt_info
.base_addr
= tswapl(target_ldt_info
->base_addr
);
3306 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
3307 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
3308 if (ldt_info
.entry_number
== -1) {
3309 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
3310 if (gdt_table
[i
] == 0) {
3311 ldt_info
.entry_number
= i
;
3312 target_ldt_info
->entry_number
= tswap32(i
);
3317 unlock_user_struct(target_ldt_info
, ptr
, 1);
3319 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
3320 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
3321 return -TARGET_EINVAL
;
3322 seg_32bit
= ldt_info
.flags
& 1;
3323 contents
= (ldt_info
.flags
>> 1) & 3;
3324 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
3325 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
3326 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
3327 useable
= (ldt_info
.flags
>> 6) & 1;
3331 lm
= (ldt_info
.flags
>> 7) & 1;
3334 if (contents
== 3) {
3335 if (seg_not_present
== 0)
3336 return -TARGET_EINVAL
;
3339 /* NOTE: same code as Linux kernel */
3340 /* Allow LDTs to be cleared by the user. */
3341 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
3342 if ((contents
== 0 &&
3343 read_exec_only
== 1 &&
3345 limit_in_pages
== 0 &&
3346 seg_not_present
== 1 &&
3354 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
3355 (ldt_info
.limit
& 0x0ffff);
3356 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
3357 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
3358 (ldt_info
.limit
& 0xf0000) |
3359 ((read_exec_only
^ 1) << 9) |
3361 ((seg_not_present
^ 1) << 15) |
3363 (limit_in_pages
<< 23) |
3368 /* Install the new entry ... */
3370 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
3371 lp
[0] = tswap32(entry_1
);
3372 lp
[1] = tswap32(entry_2
);
3376 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
3378 struct target_modify_ldt_ldt_s
*target_ldt_info
;
3379 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
3380 uint32_t base_addr
, limit
, flags
;
3381 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
3382 int seg_not_present
, useable
, lm
;
3383 uint32_t *lp
, entry_1
, entry_2
;
3385 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
3386 if (!target_ldt_info
)
3387 return -TARGET_EFAULT
;
3388 idx
= tswap32(target_ldt_info
->entry_number
);
3389 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
3390 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
3391 unlock_user_struct(target_ldt_info
, ptr
, 1);
3392 return -TARGET_EINVAL
;
3394 lp
= (uint32_t *)(gdt_table
+ idx
);
3395 entry_1
= tswap32(lp
[0]);
3396 entry_2
= tswap32(lp
[1]);
3398 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
3399 contents
= (entry_2
>> 10) & 3;
3400 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
3401 seg_32bit
= (entry_2
>> 22) & 1;
3402 limit_in_pages
= (entry_2
>> 23) & 1;
3403 useable
= (entry_2
>> 20) & 1;
3407 lm
= (entry_2
>> 21) & 1;
3409 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
3410 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
3411 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
3412 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
3413 base_addr
= (entry_1
>> 16) |
3414 (entry_2
& 0xff000000) |
3415 ((entry_2
& 0xff) << 16);
3416 target_ldt_info
->base_addr
= tswapl(base_addr
);
3417 target_ldt_info
->limit
= tswap32(limit
);
3418 target_ldt_info
->flags
= tswap32(flags
);
3419 unlock_user_struct(target_ldt_info
, ptr
, 1);
3422 #endif /* TARGET_I386 && TARGET_ABI32 */
3424 #ifndef TARGET_ABI32
3425 static abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
3432 case TARGET_ARCH_SET_GS
:
3433 case TARGET_ARCH_SET_FS
:
3434 if (code
== TARGET_ARCH_SET_GS
)
3438 cpu_x86_load_seg(env
, idx
, 0);
3439 env
->segs
[idx
].base
= addr
;
3441 case TARGET_ARCH_GET_GS
:
3442 case TARGET_ARCH_GET_FS
:
3443 if (code
== TARGET_ARCH_GET_GS
)
3447 val
= env
->segs
[idx
].base
;
3448 if (put_user(val
, addr
, abi_ulong
))
3449 return -TARGET_EFAULT
;
3452 ret
= -TARGET_EINVAL
;
3459 #endif /* defined(TARGET_I386) */
3461 #if defined(USE_NPTL)
3463 #define NEW_STACK_SIZE PTHREAD_STACK_MIN
3465 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
3468 pthread_mutex_t mutex
;
3469 pthread_cond_t cond
;
3472 abi_ulong child_tidptr
;
3473 abi_ulong parent_tidptr
;
3477 static void *clone_func(void *arg
)
3479 new_thread_info
*info
= arg
;
3485 ts
= (TaskState
*)thread_env
->opaque
;
3486 info
->tid
= gettid();
3487 env
->host_tid
= info
->tid
;
3489 if (info
->child_tidptr
)
3490 put_user_u32(info
->tid
, info
->child_tidptr
);
3491 if (info
->parent_tidptr
)
3492 put_user_u32(info
->tid
, info
->parent_tidptr
);
3493 /* Enable signals. */
3494 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
3495 /* Signal to the parent that we're ready. */
3496 pthread_mutex_lock(&info
->mutex
);
3497 pthread_cond_broadcast(&info
->cond
);
3498 pthread_mutex_unlock(&info
->mutex
);
3499 /* Wait until the parent has finshed initializing the tls state. */
3500 pthread_mutex_lock(&clone_lock
);
3501 pthread_mutex_unlock(&clone_lock
);
3507 /* this stack is the equivalent of the kernel stack associated with a
3509 #define NEW_STACK_SIZE 8192
3511 static int clone_func(void *arg
)
3513 CPUState
*env
= arg
;
3520 /* do_fork() Must return host values and target errnos (unlike most
3521 do_*() functions). */
3522 static int do_fork(CPUState
*env
, unsigned int flags
, abi_ulong newsp
,
3523 abi_ulong parent_tidptr
, target_ulong newtls
,
3524 abi_ulong child_tidptr
)
3530 #if defined(USE_NPTL)
3531 unsigned int nptl_flags
;
3535 /* Emulate vfork() with fork() */
3536 if (flags
& CLONE_VFORK
)
3537 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
3539 if (flags
& CLONE_VM
) {
3540 TaskState
*parent_ts
= (TaskState
*)env
->opaque
;
3541 #if defined(USE_NPTL)
3542 new_thread_info info
;
3543 pthread_attr_t attr
;
3545 ts
= qemu_mallocz(sizeof(TaskState
) + NEW_STACK_SIZE
);
3546 init_task_state(ts
);
3547 new_stack
= ts
->stack
;
3548 /* we create a new CPU instance. */
3549 new_env
= cpu_copy(env
);
3550 /* Init regs that differ from the parent. */
3551 cpu_clone_regs(new_env
, newsp
);
3552 new_env
->opaque
= ts
;
3553 ts
->bprm
= parent_ts
->bprm
;
3554 ts
->info
= parent_ts
->info
;
3555 #if defined(USE_NPTL)
3557 flags
&= ~CLONE_NPTL_FLAGS2
;
3559 if (nptl_flags
& CLONE_CHILD_CLEARTID
) {
3560 ts
->child_tidptr
= child_tidptr
;
3563 if (nptl_flags
& CLONE_SETTLS
)
3564 cpu_set_tls (new_env
, newtls
);
3566 /* Grab a mutex so that thread setup appears atomic. */
3567 pthread_mutex_lock(&clone_lock
);
3569 memset(&info
, 0, sizeof(info
));
3570 pthread_mutex_init(&info
.mutex
, NULL
);
3571 pthread_mutex_lock(&info
.mutex
);
3572 pthread_cond_init(&info
.cond
, NULL
);
3574 if (nptl_flags
& CLONE_CHILD_SETTID
)
3575 info
.child_tidptr
= child_tidptr
;
3576 if (nptl_flags
& CLONE_PARENT_SETTID
)
3577 info
.parent_tidptr
= parent_tidptr
;
3579 ret
= pthread_attr_init(&attr
);
3580 ret
= pthread_attr_setstack(&attr
, new_stack
, NEW_STACK_SIZE
);
3581 /* It is not safe to deliver signals until the child has finished
3582 initializing, so temporarily block all signals. */
3583 sigfillset(&sigmask
);
3584 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
3586 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
3587 /* TODO: Free new CPU state if thread creation failed. */
3589 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
3590 pthread_attr_destroy(&attr
);
3592 /* Wait for the child to initialize. */
3593 pthread_cond_wait(&info
.cond
, &info
.mutex
);
3595 if (flags
& CLONE_PARENT_SETTID
)
3596 put_user_u32(ret
, parent_tidptr
);
3600 pthread_mutex_unlock(&info
.mutex
);
3601 pthread_cond_destroy(&info
.cond
);
3602 pthread_mutex_destroy(&info
.mutex
);
3603 pthread_mutex_unlock(&clone_lock
);
3605 if (flags
& CLONE_NPTL_FLAGS2
)
3607 /* This is probably going to die very quickly, but do it anyway. */
3609 ret
= __clone2(clone_func
, new_stack
+ NEW_STACK_SIZE
, flags
, new_env
);
3611 ret
= clone(clone_func
, new_stack
+ NEW_STACK_SIZE
, flags
, new_env
);
3615 /* if no CLONE_VM, we consider it is a fork */
3616 if ((flags
& ~(CSIGNAL
| CLONE_NPTL_FLAGS2
)) != 0)
3621 /* Child Process. */
3622 cpu_clone_regs(env
, newsp
);
3624 #if defined(USE_NPTL)
3625 /* There is a race condition here. The parent process could
3626 theoretically read the TID in the child process before the child
3627 tid is set. This would require using either ptrace
3628 (not implemented) or having *_tidptr to point at a shared memory
3629 mapping. We can't repeat the spinlock hack used above because
3630 the child process gets its own copy of the lock. */
3631 if (flags
& CLONE_CHILD_SETTID
)
3632 put_user_u32(gettid(), child_tidptr
);
3633 if (flags
& CLONE_PARENT_SETTID
)
3634 put_user_u32(gettid(), parent_tidptr
);
3635 ts
= (TaskState
*)env
->opaque
;
3636 if (flags
& CLONE_SETTLS
)
3637 cpu_set_tls (env
, newtls
);
3638 if (flags
& CLONE_CHILD_CLEARTID
)
3639 ts
->child_tidptr
= child_tidptr
;
/* warning : doesn't handle linux specific flags... */
static int target_to_host_fcntl_cmd(int cmd)
{
    switch(cmd) {
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
        return cmd;
    case TARGET_F_GETLK:
        return F_GETLK;
    case TARGET_F_SETLK:
        return F_SETLK;
    case TARGET_F_SETLKW:
        return F_SETLKW;
    case TARGET_F_GETOWN:
        return F_GETOWN;
    case TARGET_F_SETOWN:
        return F_SETOWN;
    case TARGET_F_GETSIG:
        return F_GETSIG;
    case TARGET_F_SETSIG:
        return F_SETSIG;
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
        return F_GETLK64;
    case TARGET_F_SETLK64:
        return F_SETLK64;
    case TARGET_F_SETLKW64:
        return F_SETLKW64;
#endif
    default:
        return -TARGET_EINVAL;
    }
    return -TARGET_EINVAL;
}
3686 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
3689 struct target_flock
*target_fl
;
3690 struct flock64 fl64
;
3691 struct target_flock64
*target_fl64
;
3693 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
3695 if (host_cmd
== -TARGET_EINVAL
)
3699 case TARGET_F_GETLK
:
3700 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
3701 return -TARGET_EFAULT
;
3702 fl
.l_type
= tswap16(target_fl
->l_type
);
3703 fl
.l_whence
= tswap16(target_fl
->l_whence
);
3704 fl
.l_start
= tswapl(target_fl
->l_start
);
3705 fl
.l_len
= tswapl(target_fl
->l_len
);
3706 fl
.l_pid
= tswapl(target_fl
->l_pid
);
3707 unlock_user_struct(target_fl
, arg
, 0);
3708 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
3710 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg
, 0))
3711 return -TARGET_EFAULT
;
3712 target_fl
->l_type
= tswap16(fl
.l_type
);
3713 target_fl
->l_whence
= tswap16(fl
.l_whence
);
3714 target_fl
->l_start
= tswapl(fl
.l_start
);
3715 target_fl
->l_len
= tswapl(fl
.l_len
);
3716 target_fl
->l_pid
= tswapl(fl
.l_pid
);
3717 unlock_user_struct(target_fl
, arg
, 1);
3721 case TARGET_F_SETLK
:
3722 case TARGET_F_SETLKW
:
3723 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
3724 return -TARGET_EFAULT
;
3725 fl
.l_type
= tswap16(target_fl
->l_type
);
3726 fl
.l_whence
= tswap16(target_fl
->l_whence
);
3727 fl
.l_start
= tswapl(target_fl
->l_start
);
3728 fl
.l_len
= tswapl(target_fl
->l_len
);
3729 fl
.l_pid
= tswapl(target_fl
->l_pid
);
3730 unlock_user_struct(target_fl
, arg
, 0);
3731 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
3734 case TARGET_F_GETLK64
:
3735 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
3736 return -TARGET_EFAULT
;
3737 fl64
.l_type
= tswap16(target_fl64
->l_type
) >> 1;
3738 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
3739 fl64
.l_start
= tswapl(target_fl64
->l_start
);
3740 fl64
.l_len
= tswapl(target_fl64
->l_len
);
3741 fl64
.l_pid
= tswap16(target_fl64
->l_pid
);
3742 unlock_user_struct(target_fl64
, arg
, 0);
3743 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
3745 if (!lock_user_struct(VERIFY_WRITE
, target_fl64
, arg
, 0))
3746 return -TARGET_EFAULT
;
3747 target_fl64
->l_type
= tswap16(fl64
.l_type
) >> 1;
3748 target_fl64
->l_whence
= tswap16(fl64
.l_whence
);
3749 target_fl64
->l_start
= tswapl(fl64
.l_start
);
3750 target_fl64
->l_len
= tswapl(fl64
.l_len
);
3751 target_fl64
->l_pid
= tswapl(fl64
.l_pid
);
3752 unlock_user_struct(target_fl64
, arg
, 1);
3755 case TARGET_F_SETLK64
:
3756 case TARGET_F_SETLKW64
:
3757 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
3758 return -TARGET_EFAULT
;
3759 fl64
.l_type
= tswap16(target_fl64
->l_type
) >> 1;
3760 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
3761 fl64
.l_start
= tswapl(target_fl64
->l_start
);
3762 fl64
.l_len
= tswapl(target_fl64
->l_len
);
3763 fl64
.l_pid
= tswap16(target_fl64
->l_pid
);
3764 unlock_user_struct(target_fl64
, arg
, 0);
3765 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
3768 case TARGET_F_GETFL
:
3769 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
3771 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
3775 case TARGET_F_SETFL
:
3776 ret
= get_errno(fcntl(fd
, host_cmd
, target_to_host_bitmask(arg
, fcntl_flags_tbl
)));
3779 case TARGET_F_SETOWN
:
3780 case TARGET_F_GETOWN
:
3781 case TARGET_F_SETSIG
:
3782 case TARGET_F_GETSIG
:
3783 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
3787 ret
= get_errno(fcntl(fd
, cmd
, arg
));
3795 static inline int high2lowuid(int uid
)
3803 static inline int high2lowgid(int gid
)
3811 static inline int low2highuid(int uid
)
3813 if ((int16_t)uid
== -1)
3819 static inline int low2highgid(int gid
)
3821 if ((int16_t)gid
== -1)
3827 #endif /* USE_UID16 */
3829 void syscall_init(void)
3832 const argtype
*arg_type
;
3836 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
3837 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
3838 #include "syscall_types.h"
3840 #undef STRUCT_SPECIAL
3842 /* we patch the ioctl size if necessary. We rely on the fact that
3843 no ioctl has all the bits at '1' in the size field */
3845 while (ie
->target_cmd
!= 0) {
3846 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
3847 TARGET_IOC_SIZEMASK
) {
3848 arg_type
= ie
->arg_type
;
3849 if (arg_type
[0] != TYPE_PTR
) {
3850 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
3855 size
= thunk_type_size(arg_type
, 0);
3856 ie
->target_cmd
= (ie
->target_cmd
&
3857 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
3858 (size
<< TARGET_IOC_SIZESHIFT
);
3861 /* Build target_to_host_errno_table[] table from
3862 * host_to_target_errno_table[]. */
3863 for (i
=0; i
< ERRNO_TABLE_SIZE
; i
++)
3864 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
3866 /* automatic consistency check if same arch */
3867 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
3868 (defined(__x86_64__) && defined(TARGET_X86_64))
3869 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
3870 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
3871 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#if TARGET_ABI_BITS == 32
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */

#ifdef TARGET_NR_truncate64
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
#ifdef TARGET_ARM
    if (((CPUARMState *)cpu_env)->eabi) {
        arg2 = arg3;
        arg3 = arg4;
    }
#endif
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif

#ifdef TARGET_NR_ftruncate64
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
#ifdef TARGET_ARM
    if (((CPUARMState *)cpu_env)->eabi) {
        arg2 = arg3;
        arg3 = arg4;
    }
#endif
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
        return -TARGET_EFAULT;
    host_ts->tv_sec = tswapl(target_ts->tv_sec);
    host_ts->tv_nsec = tswapl(target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
        return -TARGET_EFAULT;
    target_ts->tv_sec = tswapl(host_ts->tv_sec);
    target_ts->tv_nsec = tswapl(host_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
3954 #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
3955 static inline abi_long
host_to_target_stat64(void *cpu_env
,
3956 abi_ulong target_addr
,
3957 struct stat
*host_st
)
3960 if (((CPUARMState
*)cpu_env
)->eabi
) {
3961 struct target_eabi_stat64
*target_st
;
3963 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
3964 return -TARGET_EFAULT
;
3965 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
3966 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
3967 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
3968 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
3969 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
3971 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
3972 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
3973 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
3974 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
3975 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
3976 __put_user(host_st
->st_size
, &target_st
->st_size
);
3977 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
3978 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
3979 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
3980 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
3981 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
3982 unlock_user_struct(target_st
, target_addr
, 1);
3986 #if TARGET_LONG_BITS == 64
3987 struct target_stat
*target_st
;
3989 struct target_stat64
*target_st
;
3992 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
3993 return -TARGET_EFAULT
;
3994 memset(target_st
, 0, sizeof(*target_st
));
3995 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
3996 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
3997 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
3998 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
4000 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
4001 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
4002 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
4003 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
4004 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
4005 /* XXX: better use of kernel struct */
4006 __put_user(host_st
->st_size
, &target_st
->st_size
);
4007 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
4008 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
4009 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
4010 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
4011 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
4012 unlock_user_struct(target_st
, target_addr
, 1);
4019 #if defined(USE_NPTL)
4020 /* ??? Using host futex calls even when target atomic operations
4021 are not really atomic probably breaks things. However implementing
4022 futexes locally would make futexes shared between multiple processes
4023 tricky. However they're probably useless because guest atomic
4024 operations won't work either. */
4025 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
4026 target_ulong uaddr2
, int val3
)
4028 struct timespec ts
, *pts
;
4030 /* ??? We assume FUTEX_* constants are the same on both host
4032 #ifdef FUTEX_CMD_MASK
4033 switch ((op
&FUTEX_CMD_MASK
)) {
4040 target_to_host_timespec(pts
, timeout
);
4044 return get_errno(sys_futex(g2h(uaddr
), op
, tswap32(val
),
4047 return get_errno(sys_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
4049 return get_errno(sys_futex(g2h(uaddr
), op
, val
, NULL
, g2h(uaddr2
), val3
));
4051 return get_errno(sys_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
4053 return get_errno(sys_futex(g2h(uaddr
), op
, val
,
4054 NULL
, g2h(uaddr2
), 0));
4055 case FUTEX_CMP_REQUEUE
:
4056 return get_errno(sys_futex(g2h(uaddr
), op
, val
,
4057 NULL
, g2h(uaddr2
), tswap32(val3
)));
4059 return -TARGET_ENOSYS
;
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
static int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
4078 int get_osversion(void)
4080 static int osversion
;
4081 struct new_utsname buf
;
4086 if (qemu_uname_release
&& *qemu_uname_release
) {
4087 s
= qemu_uname_release
;
4089 if (sys_uname(&buf
))
4094 for (i
= 0; i
< 3; i
++) {
4096 while (*s
>= '0' && *s
<= '9') {
4101 tmp
= (tmp
<< 8) + n
;
4109 /* do_syscall() should always have a single exit point at the end so
4110 that actions, such as logging of syscall results, can be performed.
4111 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
4112 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
4113 abi_long arg2
, abi_long arg3
, abi_long arg4
,
4114 abi_long arg5
, abi_long arg6
)
4122 gemu_log("syscall %d", num
);
4125 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
4128 case TARGET_NR_exit
:
4130 /* In old applications this may be used to implement _exit(2).
4131 However in threaded applictions it is used for thread termination,
4132 and _exit_group is used for application termination.
4133 Do thread termination if we have more then one thread. */
4134 /* FIXME: This probably breaks if a signal arrives. We should probably
4135 be disabling signals. */
4136 if (first_cpu
->next_cpu
) {
4144 while (p
&& p
!= (CPUState
*)cpu_env
) {
4145 lastp
= &p
->next_cpu
;
4148 /* If we didn't find the CPU for this thread then something is
4152 /* Remove the CPU from the list. */
4153 *lastp
= p
->next_cpu
;
4155 ts
= ((CPUState
*)cpu_env
)->opaque
;
4156 if (ts
->child_tidptr
) {
4157 put_user_u32(0, ts
->child_tidptr
);
4158 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
4161 /* TODO: Free CPU state. */
4168 gdb_exit(cpu_env
, arg1
);
4170 ret
= 0; /* avoid warning */
4172 case TARGET_NR_read
:
4176 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
4178 ret
= get_errno(read(arg1
, p
, arg3
));
4179 unlock_user(p
, arg2
, ret
);
4182 case TARGET_NR_write
:
4183 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
4185 ret
= get_errno(write(arg1
, p
, arg3
));
4186 unlock_user(p
, arg2
, 0);
4188 case TARGET_NR_open
:
4189 if (!(p
= lock_user_string(arg1
)))
4191 ret
= get_errno(open(path(p
),
4192 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
4194 unlock_user(p
, arg1
, 0);
4196 #if defined(TARGET_NR_openat) && defined(__NR_openat)
4197 case TARGET_NR_openat
:
4198 if (!(p
= lock_user_string(arg2
)))
4200 ret
= get_errno(sys_openat(arg1
,
4202 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
4204 unlock_user(p
, arg2
, 0);
4207 case TARGET_NR_close
:
4208 ret
= get_errno(close(arg1
));
4213 case TARGET_NR_fork
:
4214 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
4216 #ifdef TARGET_NR_waitpid
4217 case TARGET_NR_waitpid
:
4220 ret
= get_errno(waitpid(arg1
, &status
, arg3
));
4221 if (!is_error(ret
) && arg2
4222 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
4227 #ifdef TARGET_NR_waitid
4228 case TARGET_NR_waitid
:
4232 ret
= get_errno(waitid(arg1
, arg2
, &info
, arg4
));
4233 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
4234 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
4236 host_to_target_siginfo(p
, &info
);
4237 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
4242 #ifdef TARGET_NR_creat /* not on alpha */
4243 case TARGET_NR_creat
:
4244 if (!(p
= lock_user_string(arg1
)))
4246 ret
= get_errno(creat(p
, arg2
));
4247 unlock_user(p
, arg1
, 0);
4250 case TARGET_NR_link
:
4253 p
= lock_user_string(arg1
);
4254 p2
= lock_user_string(arg2
);
4256 ret
= -TARGET_EFAULT
;
4258 ret
= get_errno(link(p
, p2
));
4259 unlock_user(p2
, arg2
, 0);
4260 unlock_user(p
, arg1
, 0);
4263 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
4264 case TARGET_NR_linkat
:
4269 p
= lock_user_string(arg2
);
4270 p2
= lock_user_string(arg4
);
4272 ret
= -TARGET_EFAULT
;
4274 ret
= get_errno(sys_linkat(arg1
, p
, arg3
, p2
, arg5
));
4275 unlock_user(p
, arg2
, 0);
4276 unlock_user(p2
, arg4
, 0);
4280 case TARGET_NR_unlink
:
4281 if (!(p
= lock_user_string(arg1
)))
4283 ret
= get_errno(unlink(p
));
4284 unlock_user(p
, arg1
, 0);
4286 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
4287 case TARGET_NR_unlinkat
:
4288 if (!(p
= lock_user_string(arg2
)))
4290 ret
= get_errno(sys_unlinkat(arg1
, p
, arg3
));
4291 unlock_user(p
, arg2
, 0);
4294 case TARGET_NR_execve
:
4296 char **argp
, **envp
;
4299 abi_ulong guest_argp
;
4300 abi_ulong guest_envp
;
4306 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
4307 if (get_user_ual(addr
, gp
))
4315 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
4316 if (get_user_ual(addr
, gp
))
4323 argp
= alloca((argc
+ 1) * sizeof(void *));
4324 envp
= alloca((envc
+ 1) * sizeof(void *));
4326 for (gp
= guest_argp
, q
= argp
; gp
;
4327 gp
+= sizeof(abi_ulong
), q
++) {
4328 if (get_user_ual(addr
, gp
))
4332 if (!(*q
= lock_user_string(addr
)))
4337 for (gp
= guest_envp
, q
= envp
; gp
;
4338 gp
+= sizeof(abi_ulong
), q
++) {
4339 if (get_user_ual(addr
, gp
))
4343 if (!(*q
= lock_user_string(addr
)))
4348 if (!(p
= lock_user_string(arg1
)))
4350 ret
= get_errno(execve(p
, argp
, envp
));
4351 unlock_user(p
, arg1
, 0);
4356 ret
= -TARGET_EFAULT
;
4359 for (gp
= guest_argp
, q
= argp
; *q
;
4360 gp
+= sizeof(abi_ulong
), q
++) {
4361 if (get_user_ual(addr
, gp
)
4364 unlock_user(*q
, addr
, 0);
4366 for (gp
= guest_envp
, q
= envp
; *q
;
4367 gp
+= sizeof(abi_ulong
), q
++) {
4368 if (get_user_ual(addr
, gp
)
4371 unlock_user(*q
, addr
, 0);
4375 case TARGET_NR_chdir
:
4376 if (!(p
= lock_user_string(arg1
)))
4378 ret
= get_errno(chdir(p
));
4379 unlock_user(p
, arg1
, 0);
4381 #ifdef TARGET_NR_time
4382 case TARGET_NR_time
:
4385 ret
= get_errno(time(&host_time
));
4388 && put_user_sal(host_time
, arg1
))
4393 case TARGET_NR_mknod
:
4394 if (!(p
= lock_user_string(arg1
)))
4396 ret
= get_errno(mknod(p
, arg2
, arg3
));
4397 unlock_user(p
, arg1
, 0);
4399 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
4400 case TARGET_NR_mknodat
:
4401 if (!(p
= lock_user_string(arg2
)))
4403 ret
= get_errno(sys_mknodat(arg1
, p
, arg3
, arg4
));
4404 unlock_user(p
, arg2
, 0);
4407 case TARGET_NR_chmod
:
4408 if (!(p
= lock_user_string(arg1
)))
4410 ret
= get_errno(chmod(p
, arg2
));
4411 unlock_user(p
, arg1
, 0);
4413 #ifdef TARGET_NR_break
4414 case TARGET_NR_break
:
4417 #ifdef TARGET_NR_oldstat
4418 case TARGET_NR_oldstat
:
4421 case TARGET_NR_lseek
:
4422 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
4424 #ifdef TARGET_NR_getxpid
4425 case TARGET_NR_getxpid
:
4427 case TARGET_NR_getpid
:
4429 ret
= get_errno(getpid());
4431 case TARGET_NR_mount
:
4433 /* need to look at the data field */
4435 p
= lock_user_string(arg1
);
4436 p2
= lock_user_string(arg2
);
4437 p3
= lock_user_string(arg3
);
4438 if (!p
|| !p2
|| !p3
)
4439 ret
= -TARGET_EFAULT
;
4441 /* FIXME - arg5 should be locked, but it isn't clear how to
4442 * do that since it's not guaranteed to be a NULL-terminated
4445 ret
= get_errno(mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
)));
4446 unlock_user(p
, arg1
, 0);
4447 unlock_user(p2
, arg2
, 0);
4448 unlock_user(p3
, arg3
, 0);
4451 #ifdef TARGET_NR_umount
4452 case TARGET_NR_umount
:
4453 if (!(p
= lock_user_string(arg1
)))
4455 ret
= get_errno(umount(p
));
4456 unlock_user(p
, arg1
, 0);
4459 #ifdef TARGET_NR_stime /* not on alpha */
4460 case TARGET_NR_stime
:
4463 if (get_user_sal(host_time
, arg1
))
4465 ret
= get_errno(stime(&host_time
));
    case TARGET_NR_ptrace:
        goto unimplemented;
#ifdef TARGET_NR_alarm /* not on alpha */
    case TARGET_NR_alarm:
        ret = alarm(arg1);
        break;
#endif
#ifdef TARGET_NR_oldfstat
    case TARGET_NR_oldfstat:
        goto unimplemented;
#endif
#ifdef TARGET_NR_pause /* not on alpha */
    case TARGET_NR_pause:
        ret = get_errno(pause());
        break;
#endif
#ifdef TARGET_NR_utime
    case TARGET_NR_utime:
        {
            struct utimbuf tbuf, *host_tbuf;
            struct target_utimbuf *target_tbuf;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
                    goto efault;
                tbuf.actime = tswapl(target_tbuf->actime);
                tbuf.modtime = tswapl(target_tbuf->modtime);
                unlock_user_struct(target_tbuf, arg2, 0);
                host_tbuf = &tbuf;
            } else {
                host_tbuf = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                goto efault;
            ret = get_errno(utime(p, host_tbuf));
            unlock_user(p, arg1, 0);
        }
        break;
#endif
    case TARGET_NR_utimes:
        {
            struct timeval *tvp, tv[2];
            if (arg2) {
                if (copy_from_user_timeval(&tv[0], arg2)
                    || copy_from_user_timeval(&tv[1],
                                              arg2 + sizeof(struct target_timeval)))
                    goto efault;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                goto efault;
            ret = get_errno(utimes(p, tvp));
            unlock_user(p, arg1, 0);
        }
        break;
#if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
    case TARGET_NR_futimesat:
        {
            struct timeval *tvp, tv[2];
            if (arg3) {
                if (copy_from_user_timeval(&tv[0], arg3)
                    || copy_from_user_timeval(&tv[1],
                                              arg3 + sizeof(struct target_timeval)))
                    goto efault;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg2)))
                goto efault;
            ret = get_errno(sys_futimesat(arg1, path(p), tvp));
            unlock_user(p, arg2, 0);
        }
        break;
#endif
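    /* In both cases above, copy_from_user_timeval() converts a
     * struct target_timeval (guest layout and byte order) into a host
     * struct timeval.  When the guest passes a NULL times pointer, tvp stays
     * NULL, so utimes()/futimesat() fall back to the current time, matching
     * the usual kernel semantics. */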
4545 #ifdef TARGET_NR_stty
4546 case TARGET_NR_stty
:
4549 #ifdef TARGET_NR_gtty
4550 case TARGET_NR_gtty
:
4553 case TARGET_NR_access
:
4554 if (!(p
= lock_user_string(arg1
)))
4556 ret
= get_errno(access(path(p
), arg2
));
4557 unlock_user(p
, arg1
, 0);
4559 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
4560 case TARGET_NR_faccessat
:
4561 if (!(p
= lock_user_string(arg2
)))
4563 ret
= get_errno(sys_faccessat(arg1
, p
, arg3
));
4564 unlock_user(p
, arg2
, 0);
4567 #ifdef TARGET_NR_nice /* not on alpha */
4568 case TARGET_NR_nice
:
4569 ret
= get_errno(nice(arg1
));
4572 #ifdef TARGET_NR_ftime
4573 case TARGET_NR_ftime
:
4576 case TARGET_NR_sync
:
4580 case TARGET_NR_kill
:
4581 ret
= get_errno(kill(arg1
, target_to_host_signal(arg2
)));
4583 case TARGET_NR_rename
:
4586 p
= lock_user_string(arg1
);
4587 p2
= lock_user_string(arg2
);
4589 ret
= -TARGET_EFAULT
;
4591 ret
= get_errno(rename(p
, p2
));
4592 unlock_user(p2
, arg2
, 0);
4593 unlock_user(p
, arg1
, 0);
4596 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
4597 case TARGET_NR_renameat
:
4600 p
= lock_user_string(arg2
);
4601 p2
= lock_user_string(arg4
);
4603 ret
= -TARGET_EFAULT
;
4605 ret
= get_errno(sys_renameat(arg1
, p
, arg3
, p2
));
4606 unlock_user(p2
, arg4
, 0);
4607 unlock_user(p
, arg2
, 0);
4611 case TARGET_NR_mkdir
:
4612 if (!(p
= lock_user_string(arg1
)))
4614 ret
= get_errno(mkdir(p
, arg2
));
4615 unlock_user(p
, arg1
, 0);
4617 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
4618 case TARGET_NR_mkdirat
:
4619 if (!(p
= lock_user_string(arg2
)))
4621 ret
= get_errno(sys_mkdirat(arg1
, p
, arg3
));
4622 unlock_user(p
, arg2
, 0);
4625 case TARGET_NR_rmdir
:
4626 if (!(p
= lock_user_string(arg1
)))
4628 ret
= get_errno(rmdir(p
));
4629 unlock_user(p
, arg1
, 0);
4632 ret
= get_errno(dup(arg1
));
4634 case TARGET_NR_pipe
:
4635 ret
= do_pipe(cpu_env
, arg1
, 0);
4637 #ifdef TARGET_NR_pipe2
4638 case TARGET_NR_pipe2
:
4639 ret
= do_pipe(cpu_env
, arg1
, arg2
);
4642 case TARGET_NR_times
:
4644 struct target_tms
*tmsp
;
4646 ret
= get_errno(times(&tms
));
4648 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
4651 tmsp
->tms_utime
= tswapl(host_to_target_clock_t(tms
.tms_utime
));
4652 tmsp
->tms_stime
= tswapl(host_to_target_clock_t(tms
.tms_stime
));
4653 tmsp
->tms_cutime
= tswapl(host_to_target_clock_t(tms
.tms_cutime
));
4654 tmsp
->tms_cstime
= tswapl(host_to_target_clock_t(tms
.tms_cstime
));
4657 ret
= host_to_target_clock_t(ret
);
4660 #ifdef TARGET_NR_prof
4661 case TARGET_NR_prof
:
4664 #ifdef TARGET_NR_signal
4665 case TARGET_NR_signal
:
4668 case TARGET_NR_acct
:
4670 ret
= get_errno(acct(NULL
));
4672 if (!(p
= lock_user_string(arg1
)))
4674 ret
= get_errno(acct(path(p
)));
4675 unlock_user(p
, arg1
, 0);
4678 #ifdef TARGET_NR_umount2 /* not on alpha */
4679 case TARGET_NR_umount2
:
4680 if (!(p
= lock_user_string(arg1
)))
4682 ret
= get_errno(umount2(p
, arg2
));
4683 unlock_user(p
, arg1
, 0);
4686 #ifdef TARGET_NR_lock
4687 case TARGET_NR_lock
:
4690 case TARGET_NR_ioctl
:
4691 ret
= do_ioctl(arg1
, arg2
, arg3
);
4693 case TARGET_NR_fcntl
:
4694 ret
= do_fcntl(arg1
, arg2
, arg3
);
4696 #ifdef TARGET_NR_mpx
4700 case TARGET_NR_setpgid
:
4701 ret
= get_errno(setpgid(arg1
, arg2
));
4703 #ifdef TARGET_NR_ulimit
4704 case TARGET_NR_ulimit
:
4707 #ifdef TARGET_NR_oldolduname
4708 case TARGET_NR_oldolduname
:
4711 case TARGET_NR_umask
:
4712 ret
= get_errno(umask(arg1
));
4714 case TARGET_NR_chroot
:
4715 if (!(p
= lock_user_string(arg1
)))
4717 ret
= get_errno(chroot(p
));
4718 unlock_user(p
, arg1
, 0);
4720 case TARGET_NR_ustat
:
4722 case TARGET_NR_dup2
:
4723 ret
= get_errno(dup2(arg1
, arg2
));
4725 #ifdef TARGET_NR_getppid /* not on alpha */
4726 case TARGET_NR_getppid
:
4727 ret
= get_errno(getppid());
4730 case TARGET_NR_getpgrp
:
4731 ret
= get_errno(getpgrp());
4733 case TARGET_NR_setsid
:
4734 ret
= get_errno(setsid());
4736 #ifdef TARGET_NR_sigaction
4737 case TARGET_NR_sigaction
:
4739 #if !defined(TARGET_MIPS)
4740 struct target_old_sigaction
*old_act
;
4741 struct target_sigaction act
, oact
, *pact
;
4743 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
4745 act
._sa_handler
= old_act
->_sa_handler
;
4746 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
4747 act
.sa_flags
= old_act
->sa_flags
;
4748 act
.sa_restorer
= old_act
->sa_restorer
;
4749 unlock_user_struct(old_act
, arg2
, 0);
4754 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
4755 if (!is_error(ret
) && arg3
) {
4756 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
4758 old_act
->_sa_handler
= oact
._sa_handler
;
4759 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
4760 old_act
->sa_flags
= oact
.sa_flags
;
4761 old_act
->sa_restorer
= oact
.sa_restorer
;
4762 unlock_user_struct(old_act
, arg3
, 1);
4765 struct target_sigaction act
, oact
, *pact
, *old_act
;
4768 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
4770 act
._sa_handler
= old_act
->_sa_handler
;
4771 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
4772 act
.sa_flags
= old_act
->sa_flags
;
4773 unlock_user_struct(old_act
, arg2
, 0);
4779 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
4781 if (!is_error(ret
) && arg3
) {
4782 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
4784 old_act
->_sa_handler
= oact
._sa_handler
;
4785 old_act
->sa_flags
= oact
.sa_flags
;
4786 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
4787 old_act
->sa_mask
.sig
[1] = 0;
4788 old_act
->sa_mask
.sig
[2] = 0;
4789 old_act
->sa_mask
.sig
[3] = 0;
4790 unlock_user_struct(old_act
, arg3
, 1);
4796 case TARGET_NR_rt_sigaction
:
4798 struct target_sigaction
*act
;
4799 struct target_sigaction
*oact
;
4802 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
4807 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
4808 ret
= -TARGET_EFAULT
;
4809 goto rt_sigaction_fail
;
4813 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
4816 unlock_user_struct(act
, arg2
, 0);
4818 unlock_user_struct(oact
, arg3
, 1);
4821 #ifdef TARGET_NR_sgetmask /* not on alpha */
4822 case TARGET_NR_sgetmask
:
4825 abi_ulong target_set
;
4826 sigprocmask(0, NULL
, &cur_set
);
4827 host_to_target_old_sigset(&target_set
, &cur_set
);
4832 #ifdef TARGET_NR_ssetmask /* not on alpha */
4833 case TARGET_NR_ssetmask
:
4835 sigset_t set
, oset
, cur_set
;
4836 abi_ulong target_set
= arg1
;
4837 sigprocmask(0, NULL
, &cur_set
);
4838 target_to_host_old_sigset(&set
, &target_set
);
4839 sigorset(&set
, &set
, &cur_set
);
4840 sigprocmask(SIG_SETMASK
, &set
, &oset
);
4841 host_to_target_old_sigset(&target_set
, &oset
);
4846 #ifdef TARGET_NR_sigprocmask
4847 case TARGET_NR_sigprocmask
:
4850 sigset_t set
, oldset
, *set_ptr
;
4854 case TARGET_SIG_BLOCK
:
4857 case TARGET_SIG_UNBLOCK
:
4860 case TARGET_SIG_SETMASK
:
4864 ret
= -TARGET_EINVAL
;
4867 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
4869 target_to_host_old_sigset(&set
, p
);
4870 unlock_user(p
, arg2
, 0);
4876 ret
= get_errno(sigprocmask(arg1
, set_ptr
, &oldset
));
4877 if (!is_error(ret
) && arg3
) {
4878 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
4880 host_to_target_old_sigset(p
, &oldset
);
4881 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
4886 case TARGET_NR_rt_sigprocmask
:
4889 sigset_t set
, oldset
, *set_ptr
;
4893 case TARGET_SIG_BLOCK
:
4896 case TARGET_SIG_UNBLOCK
:
4899 case TARGET_SIG_SETMASK
:
4903 ret
= -TARGET_EINVAL
;
4906 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
4908 target_to_host_sigset(&set
, p
);
4909 unlock_user(p
, arg2
, 0);
4915 ret
= get_errno(sigprocmask(how
, set_ptr
, &oldset
));
4916 if (!is_error(ret
) && arg3
) {
4917 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
4919 host_to_target_sigset(p
, &oldset
);
4920 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
4924 #ifdef TARGET_NR_sigpending
4925 case TARGET_NR_sigpending
:
4928 ret
= get_errno(sigpending(&set
));
4929 if (!is_error(ret
)) {
4930 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
4932 host_to_target_old_sigset(p
, &set
);
4933 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
4938 case TARGET_NR_rt_sigpending
:
4941 ret
= get_errno(sigpending(&set
));
4942 if (!is_error(ret
)) {
4943 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
4945 host_to_target_sigset(p
, &set
);
4946 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
4950 #ifdef TARGET_NR_sigsuspend
4951 case TARGET_NR_sigsuspend
:
4954 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
4956 target_to_host_old_sigset(&set
, p
);
4957 unlock_user(p
, arg1
, 0);
4958 ret
= get_errno(sigsuspend(&set
));
4962 case TARGET_NR_rt_sigsuspend
:
4965 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
4967 target_to_host_sigset(&set
, p
);
4968 unlock_user(p
, arg1
, 0);
4969 ret
= get_errno(sigsuspend(&set
));
4972 case TARGET_NR_rt_sigtimedwait
:
4975 struct timespec uts
, *puts
;
4978 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
4980 target_to_host_sigset(&set
, p
);
4981 unlock_user(p
, arg1
, 0);
4984 target_to_host_timespec(puts
, arg3
);
4988 ret
= get_errno(sigtimedwait(&set
, &uinfo
, puts
));
4989 if (!is_error(ret
) && arg2
) {
4990 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
), 0)))
4992 host_to_target_siginfo(p
, &uinfo
);
4993 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
4997 case TARGET_NR_rt_sigqueueinfo
:
5000 if (!(p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_sigset_t
), 1)))
5002 target_to_host_siginfo(&uinfo
, p
);
5003 unlock_user(p
, arg1
, 0);
5004 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        /* NOTE: ret is eax, so no transcoding needs to be done */
        ret = do_sigreturn(cpu_env);
        break;
#endif
    case TARGET_NR_rt_sigreturn:
        /* NOTE: ret is eax, so no transcoding needs to be done */
        ret = do_rt_sigreturn(cpu_env);
        break;
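    /* do_sigreturn()/do_rt_sigreturn() restore the full guest register state
     * from the signal frame, so the value they return is already the guest's
     * own return-register contents; wrapping it in get_errno() would wrongly
     * re-encode it as a host errno. */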
5017 case TARGET_NR_sethostname
:
5018 if (!(p
= lock_user_string(arg1
)))
5020 ret
= get_errno(sethostname(p
, arg2
));
5021 unlock_user(p
, arg1
, 0);
5023 case TARGET_NR_setrlimit
:
5025 /* XXX: convert resource ? */
5026 int resource
= arg1
;
5027 struct target_rlimit
*target_rlim
;
5029 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
5031 rlim
.rlim_cur
= tswapl(target_rlim
->rlim_cur
);
5032 rlim
.rlim_max
= tswapl(target_rlim
->rlim_max
);
5033 unlock_user_struct(target_rlim
, arg2
, 0);
5034 ret
= get_errno(setrlimit(resource
, &rlim
));
5037 case TARGET_NR_getrlimit
:
5039 /* XXX: convert resource ? */
5040 int resource
= arg1
;
5041 struct target_rlimit
*target_rlim
;
5044 ret
= get_errno(getrlimit(resource
, &rlim
));
5045 if (!is_error(ret
)) {
5046 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
5048 target_rlim
->rlim_cur
= tswapl(rlim
.rlim_cur
);
5049 target_rlim
->rlim_max
= tswapl(rlim
.rlim_max
);
5050 unlock_user_struct(target_rlim
, arg2
, 1);
5054 case TARGET_NR_getrusage
:
5056 struct rusage rusage
;
5057 ret
= get_errno(getrusage(arg1
, &rusage
));
5058 if (!is_error(ret
)) {
5059 host_to_target_rusage(arg2
, &rusage
);
5063 case TARGET_NR_gettimeofday
:
5066 ret
= get_errno(gettimeofday(&tv
, NULL
));
5067 if (!is_error(ret
)) {
5068 if (copy_to_user_timeval(arg1
, &tv
))
5073 case TARGET_NR_settimeofday
:
5076 if (copy_from_user_timeval(&tv
, arg1
))
5078 ret
= get_errno(settimeofday(&tv
, NULL
));
5081 #ifdef TARGET_NR_select
5082 case TARGET_NR_select
:
5084 struct target_sel_arg_struct
*sel
;
5085 abi_ulong inp
, outp
, exp
, tvp
;
5088 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1))
5090 nsel
= tswapl(sel
->n
);
5091 inp
= tswapl(sel
->inp
);
5092 outp
= tswapl(sel
->outp
);
5093 exp
= tswapl(sel
->exp
);
5094 tvp
= tswapl(sel
->tvp
);
5095 unlock_user_struct(sel
, arg1
, 0);
5096 ret
= do_select(nsel
, inp
, outp
, exp
, tvp
);
5100 case TARGET_NR_symlink
:
5103 p
= lock_user_string(arg1
);
5104 p2
= lock_user_string(arg2
);
5106 ret
= -TARGET_EFAULT
;
5108 ret
= get_errno(symlink(p
, p2
));
5109 unlock_user(p2
, arg2
, 0);
5110 unlock_user(p
, arg1
, 0);
5113 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
5114 case TARGET_NR_symlinkat
:
5117 p
= lock_user_string(arg1
);
5118 p2
= lock_user_string(arg3
);
5120 ret
= -TARGET_EFAULT
;
5122 ret
= get_errno(sys_symlinkat(p
, arg2
, p2
));
5123 unlock_user(p2
, arg3
, 0);
5124 unlock_user(p
, arg1
, 0);
#ifdef TARGET_NR_oldlstat
    case TARGET_NR_oldlstat:
        goto unimplemented;
#endif
    case TARGET_NR_readlink:
        {
            void *p2, *temp;
            p = lock_user_string(arg1);
            p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else {
                if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
                    char real[PATH_MAX];
                    temp = realpath(exec_path, real);
                    ret = (temp == NULL) ? get_errno(-1) : strlen(real);
                    snprintf((char *)p2, arg3, "%s", real);
                } else
                    ret = get_errno(readlink(path(p), p2, arg3));
            }
            unlock_user(p2, arg2, ret);
            unlock_user(p, arg1, 0);
        }
        break;
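    /* The "/proc/self/exe" special case above makes the guest see the path of
     * the program being emulated (exec_path, resolved via realpath()) rather
     * than the path of the qemu binary that is actually running.  For a
     * hypothetical guest binary started as /usr/bin/foo, a guest call to
     * readlink("/proc/self/exe", ...) is therefore expected to yield
     * "/usr/bin/foo", with ret holding the string length just as readlink(2)
     * would report it; if realpath() fails, get_errno(-1) picks up the host
     * errno instead. */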
5153 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
5154 case TARGET_NR_readlinkat
:
5157 p
= lock_user_string(arg2
);
5158 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
5160 ret
= -TARGET_EFAULT
;
5162 ret
= get_errno(sys_readlinkat(arg1
, path(p
), p2
, arg4
));
5163 unlock_user(p2
, arg3
, ret
);
5164 unlock_user(p
, arg2
, 0);
5168 #ifdef TARGET_NR_uselib
5169 case TARGET_NR_uselib
:
5172 #ifdef TARGET_NR_swapon
5173 case TARGET_NR_swapon
:
5174 if (!(p
= lock_user_string(arg1
)))
5176 ret
= get_errno(swapon(p
, arg2
));
5177 unlock_user(p
, arg1
, 0);
5180 case TARGET_NR_reboot
:
5182 #ifdef TARGET_NR_readdir
5183 case TARGET_NR_readdir
:
5186 #ifdef TARGET_NR_mmap
5187 case TARGET_NR_mmap
:
5188 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE)
5191 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
5192 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
5200 unlock_user(v
, arg1
, 0);
5201 ret
= get_errno(target_mmap(v1
, v2
, v3
,
5202 target_to_host_bitmask(v4
, mmap_flags_tbl
),
5206 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
5207 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
5213 #ifdef TARGET_NR_mmap2
5214 case TARGET_NR_mmap2
:
5216 #define MMAP_SHIFT 12
5218 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
5219 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
5221 arg6
<< MMAP_SHIFT
));
5224 case TARGET_NR_munmap
:
5225 ret
= get_errno(target_munmap(arg1
, arg2
));
5227 case TARGET_NR_mprotect
:
5228 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
5230 #ifdef TARGET_NR_mremap
5231 case TARGET_NR_mremap
:
5232 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
5235 /* ??? msync/mlock/munlock are broken for softmmu. */
5236 #ifdef TARGET_NR_msync
5237 case TARGET_NR_msync
:
5238 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
5241 #ifdef TARGET_NR_mlock
5242 case TARGET_NR_mlock
:
5243 ret
= get_errno(mlock(g2h(arg1
), arg2
));
5246 #ifdef TARGET_NR_munlock
5247 case TARGET_NR_munlock
:
5248 ret
= get_errno(munlock(g2h(arg1
), arg2
));
5251 #ifdef TARGET_NR_mlockall
5252 case TARGET_NR_mlockall
:
5253 ret
= get_errno(mlockall(arg1
));
5256 #ifdef TARGET_NR_munlockall
5257 case TARGET_NR_munlockall
:
5258 ret
= get_errno(munlockall());
    case TARGET_NR_truncate:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(truncate(p, arg2));
        unlock_user(p, arg1, 0);
        break;
    case TARGET_NR_ftruncate:
        ret = get_errno(ftruncate(arg1, arg2));
        break;
    case TARGET_NR_fchmod:
        ret = get_errno(fchmod(arg1, arg2));
        break;
#if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
    case TARGET_NR_fchmodat:
        if (!(p = lock_user_string(arg2)))
            goto efault;
        ret = get_errno(sys_fchmodat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
        break;
#endif
    case TARGET_NR_getpriority:
        /* libc does special remapping of the return value of
         * sys_getpriority() so it's just easiest to call
         * sys_getpriority() directly rather than through libc. */
        ret = sys_getpriority(arg1, arg2);
        break;
    case TARGET_NR_setpriority:
        ret = get_errno(setpriority(arg1, arg2, arg3));
        break;
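    /* Background for the getpriority comment above: the raw syscall encodes
     * its result as 20 - nice (a value in 1..40) so that negative numbers
     * remain free for error reporting; glibc normally converts this back to
     * the familiar -20..19 range (e.g. a process at nice -5 comes back from
     * the raw syscall as 25).  Returning the unconverted value here lets the
     * guest's own libc perform that remapping itself. */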
5290 #ifdef TARGET_NR_profil
5291 case TARGET_NR_profil
:
5294 case TARGET_NR_statfs
:
5295 if (!(p
= lock_user_string(arg1
)))
5297 ret
= get_errno(statfs(path(p
), &stfs
));
5298 unlock_user(p
, arg1
, 0);
5300 if (!is_error(ret
)) {
5301 struct target_statfs
*target_stfs
;
5303 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
5305 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
5306 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
5307 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
5308 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
5309 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
5310 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
5311 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
5312 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
5313 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
5314 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
5315 unlock_user_struct(target_stfs
, arg2
, 1);
5318 case TARGET_NR_fstatfs
:
5319 ret
= get_errno(fstatfs(arg1
, &stfs
));
5320 goto convert_statfs
;
5321 #ifdef TARGET_NR_statfs64
5322 case TARGET_NR_statfs64
:
5323 if (!(p
= lock_user_string(arg1
)))
5325 ret
= get_errno(statfs(path(p
), &stfs
));
5326 unlock_user(p
, arg1
, 0);
5328 if (!is_error(ret
)) {
5329 struct target_statfs64
*target_stfs
;
5331 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
5333 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
5334 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
5335 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
5336 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
5337 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
5338 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
5339 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
5340 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
5341 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
5342 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
5343 unlock_user_struct(target_stfs
, arg3
, 1);
5346 case TARGET_NR_fstatfs64
:
5347 ret
= get_errno(fstatfs(arg1
, &stfs
));
5348 goto convert_statfs64
;
5350 #ifdef TARGET_NR_ioperm
5351 case TARGET_NR_ioperm
:
5354 #ifdef TARGET_NR_socketcall
5355 case TARGET_NR_socketcall
:
5356 ret
= do_socketcall(arg1
, arg2
);
5359 #ifdef TARGET_NR_accept
5360 case TARGET_NR_accept
:
5361 ret
= do_accept(arg1
, arg2
, arg3
);
5364 #ifdef TARGET_NR_bind
5365 case TARGET_NR_bind
:
5366 ret
= do_bind(arg1
, arg2
, arg3
);
5369 #ifdef TARGET_NR_connect
5370 case TARGET_NR_connect
:
5371 ret
= do_connect(arg1
, arg2
, arg3
);
5374 #ifdef TARGET_NR_getpeername
5375 case TARGET_NR_getpeername
:
5376 ret
= do_getpeername(arg1
, arg2
, arg3
);
5379 #ifdef TARGET_NR_getsockname
5380 case TARGET_NR_getsockname
:
5381 ret
= do_getsockname(arg1
, arg2
, arg3
);
5384 #ifdef TARGET_NR_getsockopt
5385 case TARGET_NR_getsockopt
:
5386 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
5389 #ifdef TARGET_NR_listen
5390 case TARGET_NR_listen
:
5391 ret
= get_errno(listen(arg1
, arg2
));
5394 #ifdef TARGET_NR_recv
5395 case TARGET_NR_recv
:
5396 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
5399 #ifdef TARGET_NR_recvfrom
5400 case TARGET_NR_recvfrom
:
5401 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
5404 #ifdef TARGET_NR_recvmsg
5405 case TARGET_NR_recvmsg
:
5406 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
5409 #ifdef TARGET_NR_send
5410 case TARGET_NR_send
:
5411 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
5414 #ifdef TARGET_NR_sendmsg
5415 case TARGET_NR_sendmsg
:
5416 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
5419 #ifdef TARGET_NR_sendto
5420 case TARGET_NR_sendto
:
5421 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
5424 #ifdef TARGET_NR_shutdown
5425 case TARGET_NR_shutdown
:
5426 ret
= get_errno(shutdown(arg1
, arg2
));
5429 #ifdef TARGET_NR_socket
5430 case TARGET_NR_socket
:
5431 ret
= do_socket(arg1
, arg2
, arg3
);
5434 #ifdef TARGET_NR_socketpair
5435 case TARGET_NR_socketpair
:
5436 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
5439 #ifdef TARGET_NR_setsockopt
5440 case TARGET_NR_setsockopt
:
5441 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
5445 case TARGET_NR_syslog
:
5446 if (!(p
= lock_user_string(arg2
)))
5448 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
5449 unlock_user(p
, arg2
, 0);
5452 case TARGET_NR_setitimer
:
5454 struct itimerval value
, ovalue
, *pvalue
;
5458 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
5459 || copy_from_user_timeval(&pvalue
->it_value
,
5460 arg2
+ sizeof(struct target_timeval
)))
5465 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
5466 if (!is_error(ret
) && arg3
) {
5467 if (copy_to_user_timeval(arg3
,
5468 &ovalue
.it_interval
)
5469 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
5475 case TARGET_NR_getitimer
:
5477 struct itimerval value
;
5479 ret
= get_errno(getitimer(arg1
, &value
));
5480 if (!is_error(ret
) && arg2
) {
5481 if (copy_to_user_timeval(arg2
,
5483 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
5489 case TARGET_NR_stat
:
5490 if (!(p
= lock_user_string(arg1
)))
5492 ret
= get_errno(stat(path(p
), &st
));
5493 unlock_user(p
, arg1
, 0);
5495 case TARGET_NR_lstat
:
5496 if (!(p
= lock_user_string(arg1
)))
5498 ret
= get_errno(lstat(path(p
), &st
));
5499 unlock_user(p
, arg1
, 0);
5501 case TARGET_NR_fstat
:
5503 ret
= get_errno(fstat(arg1
, &st
));
5505 if (!is_error(ret
)) {
5506 struct target_stat
*target_st
;
5508 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
5510 __put_user(st
.st_dev
, &target_st
->st_dev
);
5511 __put_user(st
.st_ino
, &target_st
->st_ino
);
5512 __put_user(st
.st_mode
, &target_st
->st_mode
);
5513 __put_user(st
.st_uid
, &target_st
->st_uid
);
5514 __put_user(st
.st_gid
, &target_st
->st_gid
);
5515 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
5516 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
5517 __put_user(st
.st_size
, &target_st
->st_size
);
5518 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
5519 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
5520 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
5521 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
5522 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
5523 unlock_user_struct(target_st
, arg2
, 1);
5527 #ifdef TARGET_NR_olduname
5528 case TARGET_NR_olduname
:
5531 #ifdef TARGET_NR_iopl
5532 case TARGET_NR_iopl
:
5535 case TARGET_NR_vhangup
:
5536 ret
= get_errno(vhangup());
5538 #ifdef TARGET_NR_idle
5539 case TARGET_NR_idle
:
5542 #ifdef TARGET_NR_syscall
5543 case TARGET_NR_syscall
:
5544 ret
= do_syscall(cpu_env
,arg1
& 0xffff,arg2
,arg3
,arg4
,arg5
,arg6
,0);
5547 case TARGET_NR_wait4
:
5550 abi_long status_ptr
= arg2
;
5551 struct rusage rusage
, *rusage_ptr
;
5552 abi_ulong target_rusage
= arg4
;
5554 rusage_ptr
= &rusage
;
5557 ret
= get_errno(wait4(arg1
, &status
, arg3
, rusage_ptr
));
5558 if (!is_error(ret
)) {
5560 status
= host_to_target_waitstatus(status
);
5561 if (put_user_s32(status
, status_ptr
))
5565 host_to_target_rusage(target_rusage
, &rusage
);
5569 #ifdef TARGET_NR_swapoff
5570 case TARGET_NR_swapoff
:
5571 if (!(p
= lock_user_string(arg1
)))
5573 ret
= get_errno(swapoff(p
));
5574 unlock_user(p
, arg1
, 0);
5577 case TARGET_NR_sysinfo
:
5579 struct target_sysinfo
*target_value
;
5580 struct sysinfo value
;
5581 ret
= get_errno(sysinfo(&value
));
5582 if (!is_error(ret
) && arg1
)
5584 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
5586 __put_user(value
.uptime
, &target_value
->uptime
);
5587 __put_user(value
.loads
[0], &target_value
->loads
[0]);
5588 __put_user(value
.loads
[1], &target_value
->loads
[1]);
5589 __put_user(value
.loads
[2], &target_value
->loads
[2]);
5590 __put_user(value
.totalram
, &target_value
->totalram
);
5591 __put_user(value
.freeram
, &target_value
->freeram
);
5592 __put_user(value
.sharedram
, &target_value
->sharedram
);
5593 __put_user(value
.bufferram
, &target_value
->bufferram
);
5594 __put_user(value
.totalswap
, &target_value
->totalswap
);
5595 __put_user(value
.freeswap
, &target_value
->freeswap
);
5596 __put_user(value
.procs
, &target_value
->procs
);
5597 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
5598 __put_user(value
.freehigh
, &target_value
->freehigh
);
5599 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
5600 unlock_user_struct(target_value
, arg1
, 1);
5604 #ifdef TARGET_NR_ipc
5606 ret
= do_ipc(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
5609 #ifdef TARGET_NR_semget
5610 case TARGET_NR_semget
:
5611 ret
= get_errno(semget(arg1
, arg2
, arg3
));
5614 #ifdef TARGET_NR_semop
5615 case TARGET_NR_semop
:
5616 ret
= get_errno(do_semop(arg1
, arg2
, arg3
));
5619 #ifdef TARGET_NR_semctl
5620 case TARGET_NR_semctl
:
5621 ret
= do_semctl(arg1
, arg2
, arg3
, (union target_semun
)(abi_ulong
)arg4
);
5624 #ifdef TARGET_NR_msgctl
5625 case TARGET_NR_msgctl
:
5626 ret
= do_msgctl(arg1
, arg2
, arg3
);
5629 #ifdef TARGET_NR_msgget
5630 case TARGET_NR_msgget
:
5631 ret
= get_errno(msgget(arg1
, arg2
));
5634 #ifdef TARGET_NR_msgrcv
5635 case TARGET_NR_msgrcv
:
5636 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
5639 #ifdef TARGET_NR_msgsnd
5640 case TARGET_NR_msgsnd
:
5641 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
5644 #ifdef TARGET_NR_shmget
5645 case TARGET_NR_shmget
:
5646 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
5649 #ifdef TARGET_NR_shmctl
5650 case TARGET_NR_shmctl
:
5651 ret
= do_shmctl(arg1
, arg2
, arg3
);
5654 #ifdef TARGET_NR_shmat
5655 case TARGET_NR_shmat
:
5656 ret
= do_shmat(arg1
, arg2
, arg3
);
5659 #ifdef TARGET_NR_shmdt
5660 case TARGET_NR_shmdt
:
5661 ret
= do_shmdt(arg1
);
5664 case TARGET_NR_fsync
:
5665 ret
= get_errno(fsync(arg1
));
5667 case TARGET_NR_clone
:
5668 #if defined(TARGET_SH4)
5669 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
5670 #elif defined(TARGET_CRIS)
5671 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg4
, arg5
));
5673 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
5676 #ifdef __NR_exit_group
5677 /* new thread calls */
5678 case TARGET_NR_exit_group
:
5682 gdb_exit(cpu_env
, arg1
);
5683 ret
= get_errno(exit_group(arg1
));
    case TARGET_NR_setdomainname:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(setdomainname(p, arg2));
        unlock_user(p, arg1, 0);
        break;
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
        {
            struct new_utsname *buf;

            if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
                goto efault;
            ret = get_errno(sys_uname(buf));
            if (!is_error(ret)) {
                /* Overwrite the native machine name with whatever is being
                   emulated. */
                strcpy(buf->machine, UNAME_MACHINE);
                /* Allow the user to override the reported release. */
                if (qemu_uname_release && *qemu_uname_release)
                    strcpy(buf->release, qemu_uname_release);
            }
            unlock_user_struct(buf, arg1, 1);
        }
        break;
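    /* UNAME_MACHINE is the machine string of the emulated target (for
     * example, "armv5tel" for the ARM target), and qemu_uname_release is
     * filled in from the -r/QEMU_UNAME option, so the guest can be shown an
     * arbitrary kernel release instead of the host's. */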
5712 case TARGET_NR_modify_ldt
:
5713 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
5715 #if !defined(TARGET_X86_64)
5716 case TARGET_NR_vm86old
:
5718 case TARGET_NR_vm86
:
5719 ret
= do_vm86(cpu_env
, arg1
, arg2
);
5723 case TARGET_NR_adjtimex
:
5725 #ifdef TARGET_NR_create_module
5726 case TARGET_NR_create_module
:
5728 case TARGET_NR_init_module
:
5729 case TARGET_NR_delete_module
:
5730 #ifdef TARGET_NR_get_kernel_syms
5731 case TARGET_NR_get_kernel_syms
:
5734 case TARGET_NR_quotactl
:
5736 case TARGET_NR_getpgid
:
5737 ret
= get_errno(getpgid(arg1
));
5739 case TARGET_NR_fchdir
:
5740 ret
= get_errno(fchdir(arg1
));
5742 #ifdef TARGET_NR_bdflush /* not on x86_64 */
5743 case TARGET_NR_bdflush
:
5746 #ifdef TARGET_NR_sysfs
5747 case TARGET_NR_sysfs
:
5750 case TARGET_NR_personality
:
5751 ret
= get_errno(personality(arg1
));
5753 #ifdef TARGET_NR_afs_syscall
5754 case TARGET_NR_afs_syscall
:
5757 #ifdef TARGET_NR__llseek /* Not on alpha */
5758 case TARGET_NR__llseek
:
5760 #if defined (__x86_64__)
5761 ret
= get_errno(lseek(arg1
, ((uint64_t )arg2
<< 32) | arg3
, arg5
));
5762 if (put_user_s64(ret
, arg4
))
5766 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
5767 if (put_user_s64(res
, arg4
))
5773 case TARGET_NR_getdents
:
5774 #if TARGET_ABI_BITS != 32
5776 #elif TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
5778 struct target_dirent
*target_dirp
;
5779 struct linux_dirent
*dirp
;
5780 abi_long count
= arg3
;
5782 dirp
= malloc(count
);
5784 ret
= -TARGET_ENOMEM
;
5788 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
5789 if (!is_error(ret
)) {
5790 struct linux_dirent
*de
;
5791 struct target_dirent
*tde
;
5793 int reclen
, treclen
;
5794 int count1
, tnamelen
;
5798 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
5802 reclen
= de
->d_reclen
;
5803 treclen
= reclen
- (2 * (sizeof(long) - sizeof(abi_long
)));
5804 tde
->d_reclen
= tswap16(treclen
);
5805 tde
->d_ino
= tswapl(de
->d_ino
);
5806 tde
->d_off
= tswapl(de
->d_off
);
5807 tnamelen
= treclen
- (2 * sizeof(abi_long
) + 2);
5810 /* XXX: may not be correct */
5811 pstrcpy(tde
->d_name
, tnamelen
, de
->d_name
);
5812 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
5814 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
5818 unlock_user(target_dirp
, arg2
, ret
);
5824 struct linux_dirent
*dirp
;
5825 abi_long count
= arg3
;
5827 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
5829 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
5830 if (!is_error(ret
)) {
5831 struct linux_dirent
*de
;
5836 reclen
= de
->d_reclen
;
5839 de
->d_reclen
= tswap16(reclen
);
5840 tswapls(&de
->d_ino
);
5841 tswapls(&de
->d_off
);
5842 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
5846 unlock_user(dirp
, arg2
, ret
);
5850 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
5851 case TARGET_NR_getdents64
:
5853 struct linux_dirent64
*dirp
;
5854 abi_long count
= arg3
;
5855 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
5857 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
5858 if (!is_error(ret
)) {
5859 struct linux_dirent64
*de
;
5864 reclen
= de
->d_reclen
;
5867 de
->d_reclen
= tswap16(reclen
);
5868 tswap64s((uint64_t *)&de
->d_ino
);
5869 tswap64s((uint64_t *)&de
->d_off
);
5870 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
5874 unlock_user(dirp
, arg2
, ret
);
5877 #endif /* TARGET_NR_getdents64 */
5878 #ifdef TARGET_NR__newselect
5879 case TARGET_NR__newselect
:
5880 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
5883 #ifdef TARGET_NR_poll
5884 case TARGET_NR_poll
:
5886 struct target_pollfd
*target_pfd
;
5887 unsigned int nfds
= arg2
;
5892 target_pfd
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_pollfd
) * nfds
, 1);
5895 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
5896 for(i
= 0; i
< nfds
; i
++) {
5897 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
5898 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
5900 ret
= get_errno(poll(pfd
, nfds
, timeout
));
5901 if (!is_error(ret
)) {
5902 for(i
= 0; i
< nfds
; i
++) {
5903 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
5905 ret
+= nfds
* (sizeof(struct target_pollfd
)
5906 - sizeof(struct pollfd
));
5908 unlock_user(target_pfd
, arg1
, ret
);
5912 case TARGET_NR_flock
:
5913 /* NOTE: the flock constant seems to be the same for every
5915 ret
= get_errno(flock(arg1
, arg2
));
5917 case TARGET_NR_readv
:
5922 vec
= alloca(count
* sizeof(struct iovec
));
5923 if (lock_iovec(VERIFY_WRITE
, vec
, arg2
, count
, 0) < 0)
5925 ret
= get_errno(readv(arg1
, vec
, count
));
5926 unlock_iovec(vec
, arg2
, count
, 1);
5929 case TARGET_NR_writev
:
5934 vec
= alloca(count
* sizeof(struct iovec
));
5935 if (lock_iovec(VERIFY_READ
, vec
, arg2
, count
, 1) < 0)
5937 ret
= get_errno(writev(arg1
, vec
, count
));
5938 unlock_iovec(vec
, arg2
, count
, 0);
5941 case TARGET_NR_getsid
:
5942 ret
= get_errno(getsid(arg1
));
#if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
    case TARGET_NR_fdatasync:
        ret = get_errno(fdatasync(arg1));
        break;
#endif
    case TARGET_NR__sysctl:
        /* We don't implement this, but ENOTDIR is always a safe
           return value. */
        ret = -TARGET_ENOTDIR;
        break;
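    /* ENOTDIR is what the kernel itself reports for a sysctl name that does
     * not exist, so guests treat the parameter as simply unavailable rather
     * than seeing a hard failure. */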
5954 case TARGET_NR_sched_setparam
:
5956 struct sched_param
*target_schp
;
5957 struct sched_param schp
;
5959 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
5961 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
5962 unlock_user_struct(target_schp
, arg2
, 0);
5963 ret
= get_errno(sched_setparam(arg1
, &schp
));
5966 case TARGET_NR_sched_getparam
:
5968 struct sched_param
*target_schp
;
5969 struct sched_param schp
;
5970 ret
= get_errno(sched_getparam(arg1
, &schp
));
5971 if (!is_error(ret
)) {
5972 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
5974 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
5975 unlock_user_struct(target_schp
, arg2
, 1);
5979 case TARGET_NR_sched_setscheduler
:
5981 struct sched_param
*target_schp
;
5982 struct sched_param schp
;
5983 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
5985 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
5986 unlock_user_struct(target_schp
, arg3
, 0);
5987 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
5990 case TARGET_NR_sched_getscheduler
:
5991 ret
= get_errno(sched_getscheduler(arg1
));
5993 case TARGET_NR_sched_yield
:
5994 ret
= get_errno(sched_yield());
5996 case TARGET_NR_sched_get_priority_max
:
5997 ret
= get_errno(sched_get_priority_max(arg1
));
5999 case TARGET_NR_sched_get_priority_min
:
6000 ret
= get_errno(sched_get_priority_min(arg1
));
6002 case TARGET_NR_sched_rr_get_interval
:
6005 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
6006 if (!is_error(ret
)) {
6007 host_to_target_timespec(arg2
, &ts
);
6011 case TARGET_NR_nanosleep
:
6013 struct timespec req
, rem
;
6014 target_to_host_timespec(&req
, arg1
);
6015 ret
= get_errno(nanosleep(&req
, &rem
));
6016 if (is_error(ret
) && arg2
) {
6017 host_to_target_timespec(arg2
, &rem
);
6021 #ifdef TARGET_NR_query_module
6022 case TARGET_NR_query_module
:
6025 #ifdef TARGET_NR_nfsservctl
6026 case TARGET_NR_nfsservctl
:
6029 case TARGET_NR_prctl
:
6032 case PR_GET_PDEATHSIG
:
6035 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
6036 if (!is_error(ret
) && arg2
6037 && put_user_ual(deathsig
, arg2
))
6042 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
6046 #ifdef TARGET_NR_arch_prctl
6047 case TARGET_NR_arch_prctl
:
6048 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
6049 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
6055 #ifdef TARGET_NR_pread
6056 case TARGET_NR_pread
:
6058 if (((CPUARMState
*)cpu_env
)->eabi
)
6061 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
6063 ret
= get_errno(pread(arg1
, p
, arg3
, arg4
));
6064 unlock_user(p
, arg2
, ret
);
6066 case TARGET_NR_pwrite
:
6068 if (((CPUARMState
*)cpu_env
)->eabi
)
6071 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
6073 ret
= get_errno(pwrite(arg1
, p
, arg3
, arg4
));
6074 unlock_user(p
, arg2
, 0);
6077 #ifdef TARGET_NR_pread64
6078 case TARGET_NR_pread64
:
6079 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
6081 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
6082 unlock_user(p
, arg2
, ret
);
6084 case TARGET_NR_pwrite64
:
6085 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
6087 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
6088 unlock_user(p
, arg2
, 0);
6091 case TARGET_NR_getcwd
:
6092 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
6094 ret
= get_errno(sys_getcwd1(p
, arg2
));
6095 unlock_user(p
, arg1
, ret
);
6097 case TARGET_NR_capget
:
6099 case TARGET_NR_capset
:
6101 case TARGET_NR_sigaltstack
:
6102 #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
6103 defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA)
6104 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUState
*)cpu_env
));
6109 case TARGET_NR_sendfile
:
6111 #ifdef TARGET_NR_getpmsg
6112 case TARGET_NR_getpmsg
:
6115 #ifdef TARGET_NR_putpmsg
6116 case TARGET_NR_putpmsg
:
6119 #ifdef TARGET_NR_vfork
6120 case TARGET_NR_vfork
:
6121 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
6125 #ifdef TARGET_NR_ugetrlimit
6126 case TARGET_NR_ugetrlimit
:
6129 ret
= get_errno(getrlimit(arg1
, &rlim
));
6130 if (!is_error(ret
)) {
6131 struct target_rlimit
*target_rlim
;
6132 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
6134 target_rlim
->rlim_cur
= tswapl(rlim
.rlim_cur
);
6135 target_rlim
->rlim_max
= tswapl(rlim
.rlim_max
);
6136 unlock_user_struct(target_rlim
, arg2
, 1);
6141 #ifdef TARGET_NR_truncate64
6142 case TARGET_NR_truncate64
:
6143 if (!(p
= lock_user_string(arg1
)))
6145 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
6146 unlock_user(p
, arg1
, 0);
6149 #ifdef TARGET_NR_ftruncate64
6150 case TARGET_NR_ftruncate64
:
6151 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
6154 #ifdef TARGET_NR_stat64
6155 case TARGET_NR_stat64
:
6156 if (!(p
= lock_user_string(arg1
)))
6158 ret
= get_errno(stat(path(p
), &st
));
6159 unlock_user(p
, arg1
, 0);
6161 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
6164 #ifdef TARGET_NR_lstat64
6165 case TARGET_NR_lstat64
:
6166 if (!(p
= lock_user_string(arg1
)))
6168 ret
= get_errno(lstat(path(p
), &st
));
6169 unlock_user(p
, arg1
, 0);
6171 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
6174 #ifdef TARGET_NR_fstat64
6175 case TARGET_NR_fstat64
:
6176 ret
= get_errno(fstat(arg1
, &st
));
6178 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
6181 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
6182 (defined(__NR_fstatat64) || defined(__NR_newfstatat))
6183 #ifdef TARGET_NR_fstatat64
6184 case TARGET_NR_fstatat64
:
6186 #ifdef TARGET_NR_newfstatat
6187 case TARGET_NR_newfstatat
:
6189 if (!(p
= lock_user_string(arg2
)))
6191 #ifdef __NR_fstatat64
6192 ret
= get_errno(sys_fstatat64(arg1
, path(p
), &st
, arg4
));
6194 ret
= get_errno(sys_newfstatat(arg1
, path(p
), &st
, arg4
));
6197 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
6201 case TARGET_NR_lchown
:
6202 if (!(p
= lock_user_string(arg1
)))
6204 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
6205 unlock_user(p
, arg1
, 0);
6207 case TARGET_NR_getuid
:
6208 ret
= get_errno(high2lowuid(getuid()));
6210 case TARGET_NR_getgid
:
6211 ret
= get_errno(high2lowgid(getgid()));
6213 case TARGET_NR_geteuid
:
6214 ret
= get_errno(high2lowuid(geteuid()));
6216 case TARGET_NR_getegid
:
6217 ret
= get_errno(high2lowgid(getegid()));
6219 case TARGET_NR_setreuid
:
6220 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
6222 case TARGET_NR_setregid
:
6223 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
6225 case TARGET_NR_getgroups
:
6227 int gidsetsize
= arg1
;
6228 uint16_t *target_grouplist
;
6232 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
6233 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
6234 if (gidsetsize
== 0)
6236 if (!is_error(ret
)) {
6237 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 2, 0);
6238 if (!target_grouplist
)
6240 for(i
= 0;i
< ret
; i
++)
6241 target_grouplist
[i
] = tswap16(grouplist
[i
]);
6242 unlock_user(target_grouplist
, arg2
, gidsetsize
* 2);
6246 case TARGET_NR_setgroups
:
6248 int gidsetsize
= arg1
;
6249 uint16_t *target_grouplist
;
6253 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
6254 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 2, 1);
6255 if (!target_grouplist
) {
6256 ret
= -TARGET_EFAULT
;
6259 for(i
= 0;i
< gidsetsize
; i
++)
6260 grouplist
[i
] = tswap16(target_grouplist
[i
]);
6261 unlock_user(target_grouplist
, arg2
, 0);
6262 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
6265 case TARGET_NR_fchown
:
6266 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
6268 #if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
6269 case TARGET_NR_fchownat
:
6270 if (!(p
= lock_user_string(arg2
)))
6272 ret
= get_errno(sys_fchownat(arg1
, p
, low2highuid(arg3
), low2highgid(arg4
), arg5
));
6273 unlock_user(p
, arg2
, 0);
6276 #ifdef TARGET_NR_setresuid
6277 case TARGET_NR_setresuid
:
6278 ret
= get_errno(setresuid(low2highuid(arg1
),
6280 low2highuid(arg3
)));
6283 #ifdef TARGET_NR_getresuid
6284 case TARGET_NR_getresuid
:
6286 uid_t ruid
, euid
, suid
;
6287 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
6288 if (!is_error(ret
)) {
6289 if (put_user_u16(high2lowuid(ruid
), arg1
)
6290 || put_user_u16(high2lowuid(euid
), arg2
)
6291 || put_user_u16(high2lowuid(suid
), arg3
))
6297 #ifdef TARGET_NR_getresgid
6298 case TARGET_NR_setresgid
:
6299 ret
= get_errno(setresgid(low2highgid(arg1
),
6301 low2highgid(arg3
)));
6304 #ifdef TARGET_NR_getresgid
6305 case TARGET_NR_getresgid
:
6307 gid_t rgid
, egid
, sgid
;
6308 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
6309 if (!is_error(ret
)) {
6310 if (put_user_u16(high2lowgid(rgid
), arg1
)
6311 || put_user_u16(high2lowgid(egid
), arg2
)
6312 || put_user_u16(high2lowgid(sgid
), arg3
))
6318 case TARGET_NR_chown
:
6319 if (!(p
= lock_user_string(arg1
)))
6321 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
6322 unlock_user(p
, arg1
, 0);
6324 case TARGET_NR_setuid
:
6325 ret
= get_errno(setuid(low2highuid(arg1
)));
6327 case TARGET_NR_setgid
:
6328 ret
= get_errno(setgid(low2highgid(arg1
)));
6330 case TARGET_NR_setfsuid
:
6331 ret
= get_errno(setfsuid(arg1
));
6333 case TARGET_NR_setfsgid
:
6334 ret
= get_errno(setfsgid(arg1
));
6336 #endif /* USE_UID16 */
6338 #ifdef TARGET_NR_lchown32
6339 case TARGET_NR_lchown32
:
6340 if (!(p
= lock_user_string(arg1
)))
6342 ret
= get_errno(lchown(p
, arg2
, arg3
));
6343 unlock_user(p
, arg1
, 0);
6346 #ifdef TARGET_NR_getuid32
6347 case TARGET_NR_getuid32
:
6348 ret
= get_errno(getuid());
6352 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
6353 /* Alpha specific */
6354 case TARGET_NR_getxuid
:
6358 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
6360 ret
= get_errno(getuid());
6363 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
6364 /* Alpha specific */
6365 case TARGET_NR_getxgid
:
6369 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
6371 ret
= get_errno(getgid());
6375 #ifdef TARGET_NR_getgid32
6376 case TARGET_NR_getgid32
:
6377 ret
= get_errno(getgid());
6380 #ifdef TARGET_NR_geteuid32
6381 case TARGET_NR_geteuid32
:
6382 ret
= get_errno(geteuid());
6385 #ifdef TARGET_NR_getegid32
6386 case TARGET_NR_getegid32
:
6387 ret
= get_errno(getegid());
6390 #ifdef TARGET_NR_setreuid32
6391 case TARGET_NR_setreuid32
:
6392 ret
= get_errno(setreuid(arg1
, arg2
));
6395 #ifdef TARGET_NR_setregid32
6396 case TARGET_NR_setregid32
:
6397 ret
= get_errno(setregid(arg1
, arg2
));
6400 #ifdef TARGET_NR_getgroups32
6401 case TARGET_NR_getgroups32
:
6403 int gidsetsize
= arg1
;
6404 uint32_t *target_grouplist
;
6408 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
6409 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
6410 if (gidsetsize
== 0)
6412 if (!is_error(ret
)) {
6413 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
6414 if (!target_grouplist
) {
6415 ret
= -TARGET_EFAULT
;
6418 for(i
= 0;i
< ret
; i
++)
6419 target_grouplist
[i
] = tswap32(grouplist
[i
]);
6420 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
6425 #ifdef TARGET_NR_setgroups32
6426 case TARGET_NR_setgroups32
:
6428 int gidsetsize
= arg1
;
6429 uint32_t *target_grouplist
;
6433 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
6434 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
6435 if (!target_grouplist
) {
6436 ret
= -TARGET_EFAULT
;
6439 for(i
= 0;i
< gidsetsize
; i
++)
6440 grouplist
[i
] = tswap32(target_grouplist
[i
]);
6441 unlock_user(target_grouplist
, arg2
, 0);
6442 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
6446 #ifdef TARGET_NR_fchown32
6447 case TARGET_NR_fchown32
:
6448 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
6451 #ifdef TARGET_NR_setresuid32
6452 case TARGET_NR_setresuid32
:
6453 ret
= get_errno(setresuid(arg1
, arg2
, arg3
));
6456 #ifdef TARGET_NR_getresuid32
6457 case TARGET_NR_getresuid32
:
6459 uid_t ruid
, euid
, suid
;
6460 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
6461 if (!is_error(ret
)) {
6462 if (put_user_u32(ruid
, arg1
)
6463 || put_user_u32(euid
, arg2
)
6464 || put_user_u32(suid
, arg3
))
6470 #ifdef TARGET_NR_setresgid32
6471 case TARGET_NR_setresgid32
:
6472 ret
= get_errno(setresgid(arg1
, arg2
, arg3
));
6475 #ifdef TARGET_NR_getresgid32
6476 case TARGET_NR_getresgid32
:
6478 gid_t rgid
, egid
, sgid
;
6479 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
6480 if (!is_error(ret
)) {
6481 if (put_user_u32(rgid
, arg1
)
6482 || put_user_u32(egid
, arg2
)
6483 || put_user_u32(sgid
, arg3
))
6489 #ifdef TARGET_NR_chown32
6490 case TARGET_NR_chown32
:
6491 if (!(p
= lock_user_string(arg1
)))
6493 ret
= get_errno(chown(p
, arg2
, arg3
));
6494 unlock_user(p
, arg1
, 0);
6497 #ifdef TARGET_NR_setuid32
6498 case TARGET_NR_setuid32
:
6499 ret
= get_errno(setuid(arg1
));
6502 #ifdef TARGET_NR_setgid32
6503 case TARGET_NR_setgid32
:
6504 ret
= get_errno(setgid(arg1
));
6507 #ifdef TARGET_NR_setfsuid32
6508 case TARGET_NR_setfsuid32
:
6509 ret
= get_errno(setfsuid(arg1
));
6512 #ifdef TARGET_NR_setfsgid32
6513 case TARGET_NR_setfsgid32
:
6514 ret
= get_errno(setfsgid(arg1
));
6518 case TARGET_NR_pivot_root
:
6520 #ifdef TARGET_NR_mincore
6521 case TARGET_NR_mincore
:
6524 ret
= -TARGET_EFAULT
;
6525 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
6527 if (!(p
= lock_user_string(arg3
)))
6529 ret
= get_errno(mincore(a
, arg2
, p
));
6530 unlock_user(p
, arg3
, ret
);
6532 unlock_user(a
, arg1
, 0);
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        {
            /*
             * arm_fadvise64_64 looks like fadvise64_64 but
             * with different argument order
             */
            abi_long temp;
            temp = arg3;
            arg3 = arg4;
            arg4 = temp;
        }
#endif
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
        /* This is a hint, so ignoring and returning success is ok. */
        ret = get_errno(0);
        break;
#endif
#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
           This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok. */
        ret = get_errno(0);
        break;
#endif
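    /* Concretely: when qemu has had to turn a private file-backed mapping
     * into an anonymous one (for example to cope with host/target page size
     * differences), a passed-through MADV_DONTNEED would zero-fill those
     * pages instead of re-reading them from the file, corrupting the guest's
     * view of the mapping.  Since madvise is only advisory, silently
     * reporting success is the safer choice. */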
6566 #if TARGET_ABI_BITS == 32
6567 case TARGET_NR_fcntl64
:
6571 struct target_flock64
*target_fl
;
6573 struct target_eabi_flock64
*target_efl
;
6576 cmd
= target_to_host_fcntl_cmd(arg2
);
6577 if (cmd
== -TARGET_EINVAL
)
6581 case TARGET_F_GETLK64
:
6583 if (((CPUARMState
*)cpu_env
)->eabi
) {
6584 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
6586 fl
.l_type
= tswap16(target_efl
->l_type
);
6587 fl
.l_whence
= tswap16(target_efl
->l_whence
);
6588 fl
.l_start
= tswap64(target_efl
->l_start
);
6589 fl
.l_len
= tswap64(target_efl
->l_len
);
6590 fl
.l_pid
= tswapl(target_efl
->l_pid
);
6591 unlock_user_struct(target_efl
, arg3
, 0);
6595 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
6597 fl
.l_type
= tswap16(target_fl
->l_type
);
6598 fl
.l_whence
= tswap16(target_fl
->l_whence
);
6599 fl
.l_start
= tswap64(target_fl
->l_start
);
6600 fl
.l_len
= tswap64(target_fl
->l_len
);
6601 fl
.l_pid
= tswapl(target_fl
->l_pid
);
6602 unlock_user_struct(target_fl
, arg3
, 0);
6604 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
6607 if (((CPUARMState
*)cpu_env
)->eabi
) {
6608 if (!lock_user_struct(VERIFY_WRITE
, target_efl
, arg3
, 0))
6610 target_efl
->l_type
= tswap16(fl
.l_type
);
6611 target_efl
->l_whence
= tswap16(fl
.l_whence
);
6612 target_efl
->l_start
= tswap64(fl
.l_start
);
6613 target_efl
->l_len
= tswap64(fl
.l_len
);
6614 target_efl
->l_pid
= tswapl(fl
.l_pid
);
6615 unlock_user_struct(target_efl
, arg3
, 1);
6619 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg3
, 0))
6621 target_fl
->l_type
= tswap16(fl
.l_type
);
6622 target_fl
->l_whence
= tswap16(fl
.l_whence
);
6623 target_fl
->l_start
= tswap64(fl
.l_start
);
6624 target_fl
->l_len
= tswap64(fl
.l_len
);
6625 target_fl
->l_pid
= tswapl(fl
.l_pid
);
6626 unlock_user_struct(target_fl
, arg3
, 1);
6631 case TARGET_F_SETLK64
:
6632 case TARGET_F_SETLKW64
:
6634 if (((CPUARMState
*)cpu_env
)->eabi
) {
6635 if (!lock_user_struct(VERIFY_READ
, target_efl
, arg3
, 1))
6637 fl
.l_type
= tswap16(target_efl
->l_type
);
6638 fl
.l_whence
= tswap16(target_efl
->l_whence
);
6639 fl
.l_start
= tswap64(target_efl
->l_start
);
6640 fl
.l_len
= tswap64(target_efl
->l_len
);
6641 fl
.l_pid
= tswapl(target_efl
->l_pid
);
6642 unlock_user_struct(target_efl
, arg3
, 0);
6646 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg3
, 1))
6648 fl
.l_type
= tswap16(target_fl
->l_type
);
6649 fl
.l_whence
= tswap16(target_fl
->l_whence
);
6650 fl
.l_start
= tswap64(target_fl
->l_start
);
6651 fl
.l_len
= tswap64(target_fl
->l_len
);
6652 fl
.l_pid
= tswapl(target_fl
->l_pid
);
6653 unlock_user_struct(target_fl
, arg3
, 0);
6655 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
6658 ret
= do_fcntl(arg1
, arg2
, arg3
);
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        ret = 0;
        break;
#endif
#ifdef TARGET_NR_security
    case TARGET_NR_security:
        goto unimplemented;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        ret = TARGET_PAGE_SIZE;
        break;
#endif
    case TARGET_NR_gettid:
        ret = get_errno(gettid());
        break;
6682 #ifdef TARGET_NR_readahead
6683 case TARGET_NR_readahead
:
6684 #if TARGET_ABI_BITS == 32
6686 if (((CPUARMState
*)cpu_env
)->eabi
)
6693 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
6695 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
#ifdef TARGET_NR_setxattr
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
    case TARGET_NR_fsetxattr:
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
    case TARGET_NR_fgetxattr:
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    case TARGET_NR_flistxattr:
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
    case TARGET_NR_fremovexattr:
        ret = -TARGET_EOPNOTSUPP;
        break;
#endif
6716 case TARGET_NR_set_thread_area
:
6717 #if defined(TARGET_MIPS)
6718 ((CPUMIPSState
*) cpu_env
)->tls_value
= arg1
;
6721 #elif defined(TARGET_CRIS)
6723 ret
= -TARGET_EINVAL
;
6725 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
6729 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
6730 ret
= do_set_thread_area(cpu_env
, arg1
);
6733 goto unimplemented_nowarn
;
6736 #ifdef TARGET_NR_get_thread_area
6737 case TARGET_NR_get_thread_area
:
6738 #if defined(TARGET_I386) && defined(TARGET_ABI32)
6739 ret
= do_get_thread_area(cpu_env
, arg1
);
6741 goto unimplemented_nowarn
;
6744 #ifdef TARGET_NR_getdomainname
6745 case TARGET_NR_getdomainname
:
6746 goto unimplemented_nowarn
;
6749 #ifdef TARGET_NR_clock_gettime
6750 case TARGET_NR_clock_gettime
:
6753 ret
= get_errno(clock_gettime(arg1
, &ts
));
6754 if (!is_error(ret
)) {
6755 host_to_target_timespec(arg2
, &ts
);
6760 #ifdef TARGET_NR_clock_getres
6761 case TARGET_NR_clock_getres
:
6764 ret
= get_errno(clock_getres(arg1
, &ts
));
6765 if (!is_error(ret
)) {
6766 host_to_target_timespec(arg2
, &ts
);
6771 #ifdef TARGET_NR_clock_nanosleep
6772 case TARGET_NR_clock_nanosleep
:
6775 target_to_host_timespec(&ts
, arg3
);
6776 ret
= get_errno(clock_nanosleep(arg1
, arg2
, &ts
, arg4
? &ts
: NULL
));
6778 host_to_target_timespec(arg4
, &ts
);
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        ret = get_errno(set_tid_address((int *)g2h(arg1)));
        break;
#endif

#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
    case TARGET_NR_tkill:
        ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
        break;
#endif

#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
    case TARGET_NR_tgkill:
        ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
                        target_to_host_signal(arg3)));
        break;
#endif

#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
        goto unimplemented_nowarn;
#endif
6807 #if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
6808 case TARGET_NR_utimensat
:
6810 struct timespec
*tsp
, ts
[2];
6814 target_to_host_timespec(ts
, arg3
);
6815 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
6819 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
6821 if (!(p
= lock_user_string(arg2
))) {
6822 ret
= -TARGET_EFAULT
;
6825 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
6826 unlock_user(p
, arg2
, 0);
6831 #if defined(USE_NPTL)
6832 case TARGET_NR_futex
:
6833 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6836 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
6837 case TARGET_NR_inotify_init
:
6838 ret
= get_errno(sys_inotify_init());
6841 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
6842 case TARGET_NR_inotify_add_watch
:
6843 p
= lock_user_string(arg2
);
6844 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
6845 unlock_user(p
, arg2
, 0);
6848 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
6849 case TARGET_NR_inotify_rm_watch
:
6850 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
6854 #ifdef TARGET_NR_mq_open
6855 case TARGET_NR_mq_open
:
6857 struct mq_attr posix_mq_attr
;
6859 p
= lock_user_string(arg1
- 1);
6861 copy_from_user_mq_attr (&posix_mq_attr
, arg4
);
6862 ret
= get_errno(mq_open(p
, arg2
, arg3
, &posix_mq_attr
));
6863 unlock_user (p
, arg1
, 0);
6867 case TARGET_NR_mq_unlink
:
6868 p
= lock_user_string(arg1
- 1);
6869 ret
= get_errno(mq_unlink(p
));
6870 unlock_user (p
, arg1
, 0);
6873 case TARGET_NR_mq_timedsend
:
6877 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
6879 target_to_host_timespec(&ts
, arg5
);
6880 ret
= get_errno(mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
6881 host_to_target_timespec(arg5
, &ts
);
6884 ret
= get_errno(mq_send(arg1
, p
, arg3
, arg4
));
6885 unlock_user (p
, arg2
, arg3
);
6889 case TARGET_NR_mq_timedreceive
:
6894 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
6896 target_to_host_timespec(&ts
, arg5
);
6897 ret
= get_errno(mq_timedreceive(arg1
, p
, arg3
, &prio
, &ts
));
6898 host_to_target_timespec(arg5
, &ts
);
6901 ret
= get_errno(mq_receive(arg1
, p
, arg3
, &prio
));
6902 unlock_user (p
, arg2
, arg3
);
6904 put_user_u32(prio
, arg4
);
6908 /* Not implemented for now... */
6909 /* case TARGET_NR_mq_notify: */
6912 case TARGET_NR_mq_getsetattr
:
6914 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
6917 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
6918 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
6921 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
6922 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
6929 #ifdef CONFIG_SPLICE
6930 #ifdef TARGET_NR_tee
6933 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
6937 #ifdef TARGET_NR_splice
6938 case TARGET_NR_splice
:
6940 loff_t loff_in
, loff_out
;
6941 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
6943 get_user_u64(loff_in
, arg2
);
6944 ploff_in
= &loff_in
;
6947 get_user_u64(loff_out
, arg2
);
6948 ploff_out
= &loff_out
;
6950 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
6954 #ifdef TARGET_NR_vmsplice
6955 case TARGET_NR_vmsplice
:
6960 vec
= alloca(count
* sizeof(struct iovec
));
6961 if (lock_iovec(VERIFY_READ
, vec
, arg2
, count
, 1) < 0)
6963 ret
= get_errno(vmsplice(arg1
, vec
, count
, arg4
));
6964 unlock_iovec(vec
, arg2
, count
, 0);
#endif /* CONFIG_SPLICE */
    default:
    unimplemented:
        gemu_log("qemu: Unsupported syscall: %d\n", num);
#if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
    unimplemented_nowarn:
#endif
        ret = -TARGET_ENOSYS;
        break;
    }
fail:
#ifdef DEBUG
    gemu_log(" = %ld\n", ret);
#endif
    if (do_strace)
        print_syscall_ret(num, ret);
    return ret;
efault:
    ret = -TARGET_EFAULT;
    goto fail;
}