 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
 *  MA 02110-1301, USA.
 */
#include <sys/types.h>
#include <sys/mount.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/times.h>
#include <sys/statfs.h>
#include <sys/sysinfo.h>
#include <sys/utsname.h>
//#include <sys/user.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <qemu-common.h>
#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/utsname.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/mtio.h>
#include "linux_loop.h"

#include "qemu-common.h"
#if defined(USE_NPTL)
#include <linux/futex.h>
#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
    CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
#else
/* XXX: Hardcode the above values.  */
#define CLONE_NPTL_FLAGS2 0
#endif
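
/*
 * CLONE_NPTL_FLAGS2 collects the clone() flags that only matter for NPTL
 * thread bookkeeping (TLS setup and the parent/child TID writes).  Without
 * NPTL support it is defined to 0, so masking a guest clone() request with
 * it simply drops those flags instead of forwarding them to the host.
 */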
#if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_SPARC) \
    || defined(TARGET_M68K) || defined(TARGET_SH4) || defined(TARGET_CRIS)
/* 16 bit uid wrappers emulation */
#define USE_UID16
#endif

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH  _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
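
/*
 * Usage note: each _syscallN(type, name, ...) invocation below expands to a
 * static wrapper that traps straight into the host kernel via syscall(2),
 * e.g. _syscall2(int, sys_getpriority, int, which, int, who) becomes
 *
 *     static int sys_getpriority(int which, int who)
 *     { return syscall(__NR_sys_getpriority, which, who); }
 *
 * This is used where the emulator must bypass the host libc wrapper or where
 * libc does not provide one at all.
 */
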
#define __NR_sys_uname __NR_uname
#define __NR_sys_faccessat __NR_faccessat
#define __NR_sys_fchmodat __NR_fchmodat
#define __NR_sys_fchownat __NR_fchownat
#define __NR_sys_fstatat64 __NR_fstatat64
#define __NR_sys_futimesat __NR_futimesat
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_linkat __NR_linkat
#define __NR_sys_mkdirat __NR_mkdirat
#define __NR_sys_mknodat __NR_mknodat
#define __NR_sys_newfstatat __NR_newfstatat
#define __NR_sys_openat __NR_openat
#define __NR_sys_readlinkat __NR_readlinkat
#define __NR_sys_renameat __NR_renameat
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_symlinkat __NR_symlinkat
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_tgkill __NR_tgkill
#define __NR_sys_tkill __NR_tkill
#define __NR_sys_unlinkat __NR_unlinkat
#define __NR_sys_utimensat __NR_utimensat
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
#if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__)
#define __NR__llseek __NR_lseek
#endif

#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    return -ENOSYS;
}
#endif
#if TARGET_ABI_BITS == 32
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
_syscall2(int, sys_getpriority, int, which, int, who);
#if !defined (__x86_64__)
_syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
_syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
#endif
#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
_syscall2(int,sys_tkill,int,tid,int,sig)
#endif
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(USE_NPTL)
#if defined(TARGET_NR_futex) && defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#endif
static bitmask_transtbl fcntl_flags_tbl[] = {
    { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
    { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
    { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
    { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
    { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
    { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
    { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
    { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
    { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
    { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
    { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
    { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
    { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#if defined(O_DIRECT)
    { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
    { 0, 0, 0, 0 }
};
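
/*
 * Each row of fcntl_flags_tbl is { target_mask, target_bits, host_mask,
 * host_bits }: target_to_host_bitmask() walks the table and, wherever the
 * guest value matches target_bits under target_mask, ORs in host_bits;
 * host_to_target_bitmask() performs the reverse mapping for values handed
 * back to the guest.  The all-zero row terminates the table.
 */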
#define COPY_UTSNAME_FIELD(dest, src) \
  do { \
      /* __NEW_UTS_LEN doesn't include terminating null */ \
      (void) strncpy((dest), (src), __NEW_UTS_LEN); \
      (dest)[__NEW_UTS_LEN] = '\0'; \
  } while (0)

static int sys_uname(struct new_utsname *buf)
{
  struct utsname uts_buf;

  if (uname(&uts_buf) < 0)
      return (-1);

  /*
   * Just in case these have some differences, we
   * translate utsname to new_utsname (which is the
   * struct the Linux kernel uses).
   */

  bzero(buf, sizeof (*buf));
  COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
  COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
  COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
  COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
  COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
#ifdef _GNU_SOURCE
  COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
#endif
  return (0);

#undef COPY_UTSNAME_FIELD
}

static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}
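
/*
 * Note on the return value above: the getcwd syscall the guest expects
 * reports the number of bytes placed in the buffer, including the
 * terminating NUL, while the libc getcwd() wrapper used here only returns a
 * pointer, hence the recomputed strlen(buf)+1.
 */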
#ifdef CONFIG_ATFILE
/*
 * Host system seems to have atfile syscall stubs available.  We
 * now enable them one by one as specified by target syscall_nr.h.
 */

#ifdef TARGET_NR_faccessat
static int sys_faccessat(int dirfd, const char *pathname, int mode)
{
  return (faccessat(dirfd, pathname, mode, 0));
}
#endif
#ifdef TARGET_NR_fchmodat
static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
{
  return (fchmodat(dirfd, pathname, mode, 0));
}
#endif
#if defined(TARGET_NR_fchownat) && defined(USE_UID16)
static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
    gid_t group, int flags)
{
  return (fchownat(dirfd, pathname, owner, group, flags));
}
#endif
#ifdef __NR_fstatat64
static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
    int flags)
{
  return (fstatat(dirfd, pathname, buf, flags));
}
#endif
#ifdef __NR_newfstatat
static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
    int flags)
{
  return (fstatat(dirfd, pathname, buf, flags));
}
#endif
#ifdef TARGET_NR_futimesat
static int sys_futimesat(int dirfd, const char *pathname,
    const struct timeval times[2])
{
  return (futimesat(dirfd, pathname, times));
}
#endif
#ifdef TARGET_NR_linkat
static int sys_linkat(int olddirfd, const char *oldpath,
    int newdirfd, const char *newpath, int flags)
{
  return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
}
#endif
#ifdef TARGET_NR_mkdirat
static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
{
  return (mkdirat(dirfd, pathname, mode));
}
#endif
#ifdef TARGET_NR_mknodat
static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
    dev_t dev)
{
  return (mknodat(dirfd, pathname, mode, dev));
}
#endif
#ifdef TARGET_NR_openat
static int sys_openat(int dirfd, const char *pathname, int flags, ...)
{
  /*
   * open(2) has extra parameter 'mode' when called with
   * flag O_CREAT.
   */
  if ((flags & O_CREAT) != 0) {
      va_list ap;
      mode_t mode;

      /*
       * Get the 'mode' parameter and translate it to
       * host bits.
       */
      va_start(ap, flags);
      mode = va_arg(ap, mode_t);
      mode = target_to_host_bitmask(mode, fcntl_flags_tbl);
      va_end(ap);

      return (openat(dirfd, pathname, flags, mode));
  }
  return (openat(dirfd, pathname, flags));
}
#endif
#ifdef TARGET_NR_readlinkat
static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
{
  return (readlinkat(dirfd, pathname, buf, bufsiz));
}
#endif
#ifdef TARGET_NR_renameat
static int sys_renameat(int olddirfd, const char *oldpath,
    int newdirfd, const char *newpath)
{
  return (renameat(olddirfd, oldpath, newdirfd, newpath));
}
#endif
#ifdef TARGET_NR_symlinkat
static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
{
  return (symlinkat(oldpath, newdirfd, newpath));
}
#endif
#ifdef TARGET_NR_unlinkat
static int sys_unlinkat(int dirfd, const char *pathname, int flags)
{
  return (unlinkat(dirfd, pathname, flags));
}
#endif
#ifdef TARGET_NR_utimensat
static int sys_utimensat(int dirfd, const char *pathname,
    const struct timespec times[2], int flags)
{
  return (utimensat(dirfd, pathname, times, flags));
}
#endif
#else /* !CONFIG_ATFILE */

/*
 * Try direct syscalls instead
 */
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
_syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
#endif
#if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
_syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
#endif
#if defined(TARGET_NR_fchownat) && defined(__NR_fchownat) && defined(USE_UID16)
_syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
          uid_t,owner,gid_t,group,int,flags)
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
        defined(__NR_fstatat64)
_syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
          struct stat *,buf,int,flags)
#endif
#if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
_syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
          const struct timeval *,times)
#endif
#if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
        defined(__NR_newfstatat)
_syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
          struct stat *,buf,int,flags)
#endif
#if defined(TARGET_NR_linkat) && defined(__NR_linkat)
_syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
          int,newdirfd,const char *,newpath,int,flags)
#endif
#if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
_syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
#endif
#if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
_syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
          mode_t,mode,dev_t,dev)
#endif
#if defined(TARGET_NR_openat) && defined(__NR_openat)
_syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
#endif
#if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
_syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
          char *,buf,size_t,bufsize)
#endif
#if defined(TARGET_NR_renameat) && defined(__NR_renameat)
_syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
          int,newdirfd,const char *,newpath)
#endif
#if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
_syscall3(int,sys_symlinkat,const char *,oldpath,
          int,newdirfd,const char *,newpath)
#endif
#if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
_syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
#endif
#if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#endif

#endif /* CONFIG_ATFILE */
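
/*
 * Summary of the two configurations above: with CONFIG_ATFILE the host libc
 * is assumed to provide the *at() family, so the sys_*at() helpers are thin
 * wrappers around those library calls; without it, the helpers are generated
 * by the _syscallN macros and invoke the raw host syscalls directly.  Either
 * way the rest of this file only needs the sys_*at() names.
 */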
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY  */
extern int personality(int);
extern int flock(int, int);
extern int setfsuid(int);
extern int setfsgid(int);
extern int setgroups(int, gid_t *);

#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};
/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EIDRM]            = TARGET_EIDRM,
    [ECHRNG]           = TARGET_ECHRNG,
    [EL2NSYNC]         = TARGET_EL2NSYNC,
    [EL3HLT]           = TARGET_EL3HLT,
    [EL3RST]           = TARGET_EL3RST,
    [ELNRNG]           = TARGET_ELNRNG,
    [EUNATCH]          = TARGET_EUNATCH,
    [ENOCSI]           = TARGET_ENOCSI,
    [EL2HLT]           = TARGET_EL2HLT,
    [EDEADLK]          = TARGET_EDEADLK,
    [ENOLCK]           = TARGET_ENOLCK,
    [EBADE]            = TARGET_EBADE,
    [EBADR]            = TARGET_EBADR,
    [EXFULL]           = TARGET_EXFULL,
    [ENOANO]           = TARGET_ENOANO,
    [EBADRQC]          = TARGET_EBADRQC,
    [EBADSLT]          = TARGET_EBADSLT,
    [EBFONT]           = TARGET_EBFONT,
    [ENOSTR]           = TARGET_ENOSTR,
    [ENODATA]          = TARGET_ENODATA,
    [ETIME]            = TARGET_ETIME,
    [ENOSR]            = TARGET_ENOSR,
    [ENONET]           = TARGET_ENONET,
    [ENOPKG]           = TARGET_ENOPKG,
    [EREMOTE]          = TARGET_EREMOTE,
    [ENOLINK]          = TARGET_ENOLINK,
    [EADV]             = TARGET_EADV,
    [ESRMNT]           = TARGET_ESRMNT,
    [ECOMM]            = TARGET_ECOMM,
    [EPROTO]           = TARGET_EPROTO,
    [EDOTDOT]          = TARGET_EDOTDOT,
    [EMULTIHOP]        = TARGET_EMULTIHOP,
    [EBADMSG]          = TARGET_EBADMSG,
    [ENAMETOOLONG]     = TARGET_ENAMETOOLONG,
    [EOVERFLOW]        = TARGET_EOVERFLOW,
    [ENOTUNIQ]         = TARGET_ENOTUNIQ,
    [EBADFD]           = TARGET_EBADFD,
    [EREMCHG]          = TARGET_EREMCHG,
    [ELIBACC]          = TARGET_ELIBACC,
    [ELIBBAD]          = TARGET_ELIBBAD,
    [ELIBSCN]          = TARGET_ELIBSCN,
    [ELIBMAX]          = TARGET_ELIBMAX,
    [ELIBEXEC]         = TARGET_ELIBEXEC,
    [EILSEQ]           = TARGET_EILSEQ,
    [ENOSYS]           = TARGET_ENOSYS,
    [ELOOP]            = TARGET_ELOOP,
    [ERESTART]         = TARGET_ERESTART,
    [ESTRPIPE]         = TARGET_ESTRPIPE,
    [ENOTEMPTY]        = TARGET_ENOTEMPTY,
    [EUSERS]           = TARGET_EUSERS,
    [ENOTSOCK]         = TARGET_ENOTSOCK,
    [EDESTADDRREQ]     = TARGET_EDESTADDRREQ,
    [EMSGSIZE]         = TARGET_EMSGSIZE,
    [EPROTOTYPE]       = TARGET_EPROTOTYPE,
    [ENOPROTOOPT]      = TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT]  = TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT]  = TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP]       = TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT]     = TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT]     = TARGET_EAFNOSUPPORT,
    [EADDRINUSE]       = TARGET_EADDRINUSE,
    [EADDRNOTAVAIL]    = TARGET_EADDRNOTAVAIL,
    [ENETDOWN]         = TARGET_ENETDOWN,
    [ENETUNREACH]      = TARGET_ENETUNREACH,
    [ENETRESET]        = TARGET_ENETRESET,
    [ECONNABORTED]     = TARGET_ECONNABORTED,
    [ECONNRESET]       = TARGET_ECONNRESET,
    [ENOBUFS]          = TARGET_ENOBUFS,
    [EISCONN]          = TARGET_EISCONN,
    [ENOTCONN]         = TARGET_ENOTCONN,
    [EUCLEAN]          = TARGET_EUCLEAN,
    [ENOTNAM]          = TARGET_ENOTNAM,
    [ENAVAIL]          = TARGET_ENAVAIL,
    [EISNAM]           = TARGET_EISNAM,
    [EREMOTEIO]        = TARGET_EREMOTEIO,
    [ESHUTDOWN]        = TARGET_ESHUTDOWN,
    [ETOOMANYREFS]     = TARGET_ETOOMANYREFS,
    [ETIMEDOUT]        = TARGET_ETIMEDOUT,
    [ECONNREFUSED]     = TARGET_ECONNREFUSED,
    [EHOSTDOWN]        = TARGET_EHOSTDOWN,
    [EHOSTUNREACH]     = TARGET_EHOSTUNREACH,
    [EALREADY]         = TARGET_EALREADY,
    [EINPROGRESS]      = TARGET_EINPROGRESS,
    [ESTALE]           = TARGET_ESTALE,
    [ECANCELED]        = TARGET_ECANCELED,
    [ENOMEDIUM]        = TARGET_ENOMEDIUM,
    [EMEDIUMTYPE]      = TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY]           = TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED]      = TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED]      = TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED]     = TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD]       = TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE]  = TARGET_ENOTRECOVERABLE,
#endif
};
static inline int host_to_target_errno(int err)
{
    if(host_to_target_errno_table[err])
        return host_to_target_errno_table[err];
    return err;
}

static inline int target_to_host_errno(int err)
{
    if (target_to_host_errno_table[err])
        return target_to_host_errno_table[err];
    return err;
}

static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}

static inline int is_error(abi_long ret)
{
    return (abi_ulong)ret >= (abi_ulong)(-4096);
}

char *target_strerror(int err)
{
    return strerror(target_to_host_errno(err));
}
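
/*
 * Error-handling convention used throughout this file: host syscalls report
 * failure by returning -1 with errno set, so get_errno() folds that into a
 * negative TARGET_Exxx value, while in-range results pass through untouched.
 * is_error() mirrors the kernel's own convention of treating values in the
 * last 4096 of the address space (i.e. -4095..-1) as error codes.
 */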
static abi_ulong target_brk;
static abi_ulong target_original_brk;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
}

/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_ulong brk_page;
    abi_long mapped_addr;
    int new_alloc_size;

    if (!new_brk)
        return target_brk;
    if (new_brk < target_original_brk)
        return target_brk;

    brk_page = HOST_PAGE_ALIGN(target_brk);

    /* If the new brk is less than this, set it and we're done... */
    if (new_brk < brk_page) {
        target_brk = new_brk;
        return target_brk;
    }

    /* We need to allocate more memory after the brk... */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page + 1);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_FIXED|MAP_PRIVATE, 0, 0));

    if (!is_error(mapped_addr))
        target_brk = new_brk;

    return target_brk;
}
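
/*
 * In other words, the guest heap is grown lazily: requests below the original
 * break are refused, a request below the page-aligned current break just
 * moves the recorded break, and only a request past the last mapped page
 * triggers an anonymous, fixed-address target_mmap() to back the new range.
 * Shrinking never unmaps anything here; the recorded break simply moves down.
 */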
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
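
/*
 * The fd_set conversion has to be done bit by bit rather than with a plain
 * memcpy: the guest bitmap is stored as abi_ulong words in guest byte order,
 * whose width (TARGET_ABI_BITS) and endianness may both differ from the
 * host's fd_set layout, so each descriptor bit is re-tested and re-set
 * individually.
 */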
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapl(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapl(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapl(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapl(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapl(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapl(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapl(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapl(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapl(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapl(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapl(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapl(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapl(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapl(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapl(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapl(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapl(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapl(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv, *tv_ptr;
    abi_long ret;

    if (rfd_addr) {
        if (copy_from_user_fdset(&rfds, rfd_addr, n))
            return -TARGET_EFAULT;
        rfds_ptr = &rfds;
    } else {
        rfds_ptr = NULL;
    }
    if (wfd_addr) {
        if (copy_from_user_fdset(&wfds, wfd_addr, n))
            return -TARGET_EFAULT;
        wfds_ptr = &wfds;
    } else {
        wfds_ptr = NULL;
    }
    if (efd_addr) {
        if (copy_from_user_fdset(&efds, efd_addr, n))
            return -TARGET_EFAULT;
        efds_ptr = &efds;
    } else {
        efds_ptr = NULL;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        tv_ptr = &tv;
    } else {
        tv_ptr = NULL;
    }

    ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
            return -TARGET_EFAULT;
    }

    return ret;
}
static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}

static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    target_saddr->sa_family = tswap16(addr->sa_family);
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
/* ??? Should this also swap msgh->name?  */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapl(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapl(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapl(target_cmsg->cmsg_len)
                  - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            gemu_log("Host cmsg overflow\n");
            break;
        }

        cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
            gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        } else {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                fd[i] = tswap32(target_fd[i]);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
/* ??? Should this also swap msgh->name?  */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapl(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapl(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));

        space += TARGET_CMSG_SPACE(len);
        if (space > msg_controllen) {
            space -= TARGET_CMSG_SPACE(len);
            gemu_log("Target cmsg overflow\n");
            break;
        }

        target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
        target_cmsg->cmsg_len = tswapl(TARGET_CMSG_LEN(len));

        if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
            gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, len);
        } else {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                target_fd[i] = tswap32(fd[i]);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapl(space);
    return 0;
}
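
/*
 * Ancillary data handling in the two helpers above: only SCM_RIGHTS messages
 * (file-descriptor passing) get a real translation, with each fd swapped
 * individually; any other cmsg type is copied through verbatim after logging
 * a warning, since its payload layout is not known to the emulator.
 */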
/* do_setsockopt() Must return target values and target errnos. */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;

    switch(level) {
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_ROUTER_ALERT:
        case IP_MTU_DISCOVER:
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        default:
            goto unimplemented;
        }
        break;
    case TARGET_SOL_SOCKET:
        switch (optname) {
            /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            break;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            break;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            break;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            break;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            break;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            break;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            break;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            break;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            break;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            break;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            break;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            break;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            break;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            break;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            break;
        case TARGET_SO_RCVTIMEO:
            optname = SO_RCVTIMEO;
            break;
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            break;
        default:
            goto unimplemented;
        }
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
    default:
    unimplemented:
        gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}
/* do_getsockopt() Must return target values and target errnos. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERCRED:
        case TARGET_SO_PEERNAME:
            /* These don't just return a single integer */
            goto unimplemented;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        lv = sizeof(int);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (is_error(ret))
            return ret;
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_ROUTER_ALERT:
        case IP_MTU_DISCOVER:
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(int);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (is_error(ret))
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
/*
 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
 * other lock functions have a return code of 0 for failure.
 */
static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
                           int count, int copy)
{
    struct target_iovec *target_vec;
    abi_ulong base;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
    if (!target_vec)
        return -TARGET_EFAULT;
    for(i = 0;i < count; i++) {
        base = tswapl(target_vec[i].iov_base);
        vec[i].iov_len = tswapl(target_vec[i].iov_len);
        if (vec[i].iov_len != 0) {
            vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
            /* Don't check lock_user return value. We must call writev even
               if a element has invalid base address. */
        } else {
            /* zero length pointer is ignored */
            vec[i].iov_base = NULL;
        }
    }
    unlock_user (target_vec, target_addr, 0);
    return 0;
}

static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                             int count, int copy)
{
    struct target_iovec *target_vec;
    abi_ulong base;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
    if (!target_vec)
        return -TARGET_EFAULT;
    for(i = 0;i < count; i++) {
        if (target_vec[i].iov_base) {
            base = tswapl(target_vec[i].iov_base);
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
    }
    unlock_user (target_vec, target_addr, 0);

    return 0;
}
/* do_socket() Must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
{
#if defined(TARGET_MIPS)
    switch(type) {
    case TARGET_SOCK_DGRAM:
        type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        type = SOCK_STREAM;
        break;
    case TARGET_SOCK_RAW:
        type = SOCK_RAW;
        break;
    case TARGET_SOCK_RDM:
        type = SOCK_RDM;
        break;
    case TARGET_SOCK_SEQPACKET:
        type = SOCK_SEQPACKET;
        break;
    case TARGET_SOCK_PACKET:
        type = SOCK_PACKET;
        break;
    }
#endif
    if (domain == PF_NETLINK)
        return -EAFNOSUPPORT; /* no NETLINK socket connections possible */
    return get_errno(socket(domain, type, protocol));
}
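
/*
 * The TARGET_MIPS special case above exists because MIPS Linux historically
 * numbers the SOCK_* constants differently from other ports (for example
 * SOCK_STREAM and SOCK_DGRAM are swapped), so the guest's socket type must
 * be translated to the host numbering before calling socket().
 */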
/* do_bind() Must return target values and target errnos. */
static abi_long do_bind(int sockfd, abi_ulong target_addr,
                        socklen_t addrlen)
{
    void *addr;

    if (addrlen < 0)
        return -TARGET_EINVAL;

    addr = alloca(addrlen+1);

    target_to_host_sockaddr(addr, target_addr, addrlen);
    return get_errno(bind(sockfd, addr, addrlen));
}

/* do_connect() Must return target values and target errnos. */
static abi_long do_connect(int sockfd, abi_ulong target_addr,
                           socklen_t addrlen)
{
    void *addr;

    if (addrlen < 0)
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    target_to_host_sockaddr(addr, target_addr, addrlen);
    return get_errno(connect(sockfd, addr, addrlen));
}
/* do_sendrecvmsg() Must return target values and target errnos. */
static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
                               int flags, int send)
{
    abi_long ret, len;
    struct target_msghdr *msgp;
    struct msghdr msg;
    int count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
                          msgp,
                          target_msg,
                          send ? 1 : 0))
        return -TARGET_EFAULT;
    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen);
        target_to_host_sockaddr(msg.msg_name, tswapl(msgp->msg_name),
                                msg.msg_namelen);
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    msg.msg_controllen = 2 * tswapl(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapl(msgp->msg_iovlen);
    vec = alloca(count * sizeof(struct iovec));
    target_vec = tswapl(msgp->msg_iov);
    lock_iovec(send ? VERIFY_READ : VERIFY_WRITE, vec, target_vec, count, send);
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        ret = target_to_host_cmsg(&msg, msgp);
        if (ret == 0)
            ret = get_errno(sendmsg(fd, &msg, flags));
    } else {
        ret = get_errno(recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            ret = host_to_target_cmsg(msgp, &msg);
            if (!is_error(ret))
                ret = len;
        }
    }
    unlock_iovec(vec, target_vec, count, !send);
    unlock_user_struct(msgp, target_msg, send ? 0 : 1);
    return ret;
}
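
/*
 * do_sendrecvmsg() therefore performs three separate translations per call:
 * the sockaddr in msg_name, the iovec array (locked element by element via
 * lock_iovec), and the control-message buffer (via the cmsg helpers above),
 * with the direction of each copy chosen by the 'send' flag.
 */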
/* do_accept() Must return target values and target errnos. */
static abi_long do_accept(int fd, abi_ulong target_addr,
                          abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if (addrlen < 0)
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret = get_errno(accept(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}

/* do_getpeername() Must return target values and target errnos. */
static abi_long do_getpeername(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if (addrlen < 0)
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret = get_errno(getpeername(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}

/* do_getsockname() Must return target values and target errnos. */
static abi_long do_getsockname(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    socklen_t addrlen;
    void *addr;
    abi_long ret;

    if (target_addr == 0)
        return get_errno(accept(fd, NULL, NULL));

    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if (addrlen < 0)
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret = get_errno(getsockname(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
    return ret;
}

/* do_socketpair() Must return target values and target errnos. */
static abi_long do_socketpair(int domain, int type, int protocol,
                              abi_ulong target_tab_addr)
{
    int tab[2];
    abi_long ret;

    ret = get_errno(socketpair(domain, type, protocol, tab));
    if (!is_error(ret)) {
        if (put_user_s32(tab[0], target_tab_addr)
            || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
            ret = -TARGET_EFAULT;
    }
    return ret;
}
/* do_sendto() Must return target values and target errnos. */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    abi_long ret;

    if ((int)addrlen < 0)
        return -TARGET_EINVAL;

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        addr = alloca(addrlen);
        target_to_host_sockaddr(addr, target_addr, addrlen);
        ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(send(fd, host_msg, len, flags));
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}

/* do_recvfrom() Must return target values and target errnos. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        ret = get_errno(recv(fd, host_msg, len, flags));
    }
    if (!is_error(ret)) {
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr, addrlen);
            if (put_user_u32(addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        unlock_user(host_msg, msg, len);
    } else {
fail:
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
#ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    abi_long ret;
    const int n = sizeof(abi_ulong);

    switch(num) {
    case SOCKOP_socket:
        {
            int domain, type, protocol;

            if (get_user_s32(domain, vptr)
                || get_user_s32(type, vptr + n)
                || get_user_s32(protocol, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_socket(domain, type, protocol);
        }
        break;
    case SOCKOP_bind:
        {
            int sockfd;
            abi_ulong target_addr;
            socklen_t addrlen;

            if (get_user_s32(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_u32(addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_bind(sockfd, target_addr, addrlen);
        }
        break;
    case SOCKOP_connect:
        {
            int sockfd;
            abi_ulong target_addr;
            socklen_t addrlen;

            if (get_user_s32(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_u32(addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_connect(sockfd, target_addr, addrlen);
        }
        break;
    case SOCKOP_listen:
        {
            int sockfd, backlog;

            if (get_user_s32(sockfd, vptr)
                || get_user_s32(backlog, vptr + n))
                return -TARGET_EFAULT;

            ret = get_errno(listen(sockfd, backlog));
        }
        break;
    case SOCKOP_accept:
        {
            int sockfd;
            abi_ulong target_addr, target_addrlen;

            if (get_user_s32(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_u32(target_addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_accept(sockfd, target_addr, target_addrlen);
        }
        break;
    case SOCKOP_getsockname:
        {
            int sockfd;
            abi_ulong target_addr, target_addrlen;

            if (get_user_s32(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_u32(target_addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_getsockname(sockfd, target_addr, target_addrlen);
        }
        break;
    case SOCKOP_getpeername:
        {
            int sockfd;
            abi_ulong target_addr, target_addrlen;

            if (get_user_s32(sockfd, vptr)
                || get_user_ual(target_addr, vptr + n)
                || get_user_u32(target_addrlen, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_getpeername(sockfd, target_addr, target_addrlen);
        }
        break;
    case SOCKOP_socketpair:
        {
            int domain, type, protocol;
            abi_ulong tab;

            if (get_user_s32(domain, vptr)
                || get_user_s32(type, vptr + n)
                || get_user_s32(protocol, vptr + 2 * n)
                || get_user_ual(tab, vptr + 3 * n))
                return -TARGET_EFAULT;

            ret = do_socketpair(domain, type, protocol, tab);
        }
        break;
    case SOCKOP_send:
        {
            int sockfd;
            abi_ulong msg;
            size_t len;
            int flags;

            if (get_user_s32(sockfd, vptr)
                || get_user_ual(msg, vptr + n)
                || get_user_ual(len, vptr + 2 * n)
                || get_user_s32(flags, vptr + 3 * n))
                return -TARGET_EFAULT;

            ret = do_sendto(sockfd, msg, len, flags, 0, 0);
        }
        break;
    case SOCKOP_recv:
        {
            int sockfd;
            abi_ulong msg;
            size_t len;
            int flags;

            if (get_user_s32(sockfd, vptr)
                || get_user_ual(msg, vptr + n)
                || get_user_ual(len, vptr + 2 * n)
                || get_user_s32(flags, vptr + 3 * n))
                return -TARGET_EFAULT;

            ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);
        }
        break;
    case SOCKOP_sendto:
        {
            int sockfd;
            abi_ulong msg;
            size_t len;
            int flags;
            abi_ulong addr;
            socklen_t addrlen;

            if (get_user_s32(sockfd, vptr)
                || get_user_ual(msg, vptr + n)
                || get_user_ual(len, vptr + 2 * n)
                || get_user_s32(flags, vptr + 3 * n)
                || get_user_ual(addr, vptr + 4 * n)
                || get_user_u32(addrlen, vptr + 5 * n))
                return -TARGET_EFAULT;

            ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);
        }
        break;
    case SOCKOP_recvfrom:
        {
            int sockfd;
            abi_ulong msg;
            size_t len;
            int flags;
            abi_ulong addr;
            socklen_t addrlen;

            if (get_user_s32(sockfd, vptr)
                || get_user_ual(msg, vptr + n)
                || get_user_ual(len, vptr + 2 * n)
                || get_user_s32(flags, vptr + 3 * n)
                || get_user_ual(addr, vptr + 4 * n)
                || get_user_u32(addrlen, vptr + 5 * n))
                return -TARGET_EFAULT;

            ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);
        }
        break;
    case SOCKOP_shutdown:
        {
            int sockfd, how;

            if (get_user_s32(sockfd, vptr)
                || get_user_s32(how, vptr + n))
                return -TARGET_EFAULT;

            ret = get_errno(shutdown(sockfd, how));
        }
        break;
    case SOCKOP_sendmsg:
    case SOCKOP_recvmsg:
        {
            int fd;
            abi_ulong target_msg;
            int flags;

            if (get_user_s32(fd, vptr)
                || get_user_ual(target_msg, vptr + n)
                || get_user_s32(flags, vptr + 2 * n))
                return -TARGET_EFAULT;

            ret = do_sendrecvmsg(fd, target_msg, flags,
                                 (num == SOCKOP_sendmsg));
        }
        break;
    case SOCKOP_setsockopt:
        {
            int sockfd;
            int level;
            int optname;
            abi_ulong optval;
            socklen_t optlen;

            if (get_user_s32(sockfd, vptr)
                || get_user_s32(level, vptr + n)
                || get_user_s32(optname, vptr + 2 * n)
                || get_user_ual(optval, vptr + 3 * n)
                || get_user_u32(optlen, vptr + 4 * n))
                return -TARGET_EFAULT;

            ret = do_setsockopt(sockfd, level, optname, optval, optlen);
        }
        break;
    case SOCKOP_getsockopt:
        {
            int sockfd;
            int level;
            int optname;
            abi_ulong optval;
            socklen_t optlen;

            if (get_user_s32(sockfd, vptr)
                || get_user_s32(level, vptr + n)
                || get_user_s32(optname, vptr + 2 * n)
                || get_user_ual(optval, vptr + 3 * n)
                || get_user_u32(optlen, vptr + 4 * n))
                return -TARGET_EFAULT;

            ret = do_getsockopt(sockfd, level, optname, optval, optlen);
        }
        break;
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
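
/*
 * For socketcall-based targets all socket arguments arrive packed as an
 * array of abi_ulong words in guest memory at vptr; each case above pulls
 * its arguments out with get_user_s32/get_user_ual/get_user_u32 at offsets
 * that are multiples of n = sizeof(abi_ulong) and then dispatches to the
 * same do_*() helpers used by the direct socket syscall entry points.
 */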
#ifdef TARGET_NR_ipc
#define N_SHM_REGIONS 32

static struct shm_region {
    abi_ulong start;
    abi_ulong size;
} shm_regions[N_SHM_REGIONS];

struct target_ipc_perm
{
    abi_long __key;
    abi_ulong uid;
    abi_ulong gid;
    abi_ulong cuid;
    abi_ulong cgid;
    unsigned short int mode;
    unsigned short int __pad1;
    unsigned short int __seq;
    unsigned short int __pad2;
    abi_ulong __unused1;
    abi_ulong __unused2;
};

struct target_semid_ds
{
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;
    abi_ulong __unused1;
    abi_ulong sem_ctime;
    abi_ulong __unused2;
    abi_ulong sem_nsems;
    abi_ulong __unused3;
    abi_ulong __unused4;
};
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswapl(target_ip->__key);
    host_ip->uid = tswapl(target_ip->uid);
    host_ip->gid = tswapl(target_ip->gid);
    host_ip->cuid = tswapl(target_ip->cuid);
    host_ip->cgid = tswapl(target_ip->cgid);
    host_ip->mode = tswapl(target_ip->mode);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswapl(host_ip->__key);
    target_ip->uid = tswapl(host_ip->uid);
    target_ip->gid = tswapl(host_ip->gid);
    target_ip->cuid = tswapl(host_ip->cuid);
    target_ip->cgid = tswapl(host_ip->cgid);
    target_ip->mode = tswapl(host_ip->mode);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapl(target_sd->sem_nsems);
    host_sd->sem_otime = tswapl(target_sd->sem_otime);
    host_sd->sem_ctime = tswapl(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapl(host_sd->sem_nsems);
    target_sd->sem_otime = tswapl(host_sd->sem_otime);
    target_sd->sem_ctime = tswapl(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}

struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};

static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}

union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};

union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};

static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = malloc(nsems*sizeof(unsigned short));
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array)
        return -TARGET_EFAULT;

    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}

static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
                                                unsigned short **host_array)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    array = lock_user(VERIFY_WRITE, target_addr,
                      nsems*sizeof(unsigned short), 0);
    if (!array)
        return -TARGET_EFAULT;

    for(i=0; i<nsems; i++) {
        __put_user((*host_array)[i], &array[i]);
    }
    free(*host_array);
    unlock_user(array, target_addr, 1);

    return 0;
}

static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 union target_semun target_su)
{
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;

    cmd &= 0xff;

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        arg.val = tswapl(target_su.val);
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        target_su.val = tswapl(arg.val);
        break;
    case GETALL:
    case SETALL:
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
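
/*
 * do_semctl() has to rebuild the semun argument on the host side: the guest
 * passes a union target_semun whose pointer members are guest addresses, so
 * depending on the command the helper either copies the plain integer,
 * converts a semid_ds/seminfo structure, or pulls the GETALL/SETALL array
 * through the semarray helpers before and after the host semctl() call.
 */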
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};

static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
                                             unsigned nsops)
{
    struct target_sembuf *target_sembuf;
    int i;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops*sizeof(struct target_sembuf), 1);
    if (!target_sembuf)
        return -TARGET_EFAULT;

    for(i=0; i<nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    }

    unlock_user(target_sembuf, target_addr, 0);

    return 0;
}

static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
{
    struct sembuf sops[nsops];

    if (target_to_host_sembuf(sops, ptr, nsops))
        return -TARGET_EFAULT;

    return semop(semid, sops, nsops);
}
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};
static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapl(target_md->msg_stime);
    host_md->msg_rtime = tswapl(target_md->msg_rtime);
    host_md->msg_ctime = tswapl(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapl(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapl(target_md->msg_qnum);
    host_md->msg_qbytes = tswapl(target_md->msg_qbytes);
    host_md->msg_lspid = tswapl(target_md->msg_lspid);
    host_md->msg_lrpid = tswapl(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapl(host_md->msg_stime);
    target_md->msg_rtime = tswapl(host_md->msg_rtime);
    target_md->msg_ctime = tswapl(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapl(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapl(host_md->msg_qnum);
    target_md->msg_qbytes = tswapl(host_md->msg_qbytes);
    target_md->msg_lspid = tswapl(host_md->msg_lspid);
    target_md->msg_lrpid = tswapl(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};

static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}
2336 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
2338 struct msqid_ds dsarg
;
2339 struct msginfo msginfo
;
2340 abi_long ret
= -TARGET_EINVAL
;
2348 if (target_to_host_msqid_ds(&dsarg
,ptr
))
2349 return -TARGET_EFAULT
;
2350 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
2351 if (host_to_target_msqid_ds(ptr
,&dsarg
))
2352 return -TARGET_EFAULT
;
2355 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
2359 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
2360 if (host_to_target_msginfo(ptr
, &msginfo
))
2361 return -TARGET_EFAULT
;
2368 struct target_msgbuf
{
2373 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
2374 unsigned int msgsz
, int msgflg
)
2376 struct target_msgbuf
*target_mb
;
2377 struct msgbuf
*host_mb
;
2380 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
2381 return -TARGET_EFAULT
;
2382 host_mb
= malloc(msgsz
+sizeof(long));
2383 host_mb
->mtype
= (abi_long
) tswapl(target_mb
->mtype
);
2384 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
2385 ret
= get_errno(msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
2387 unlock_user_struct(target_mb
, msgp
, 0);
2392 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
2393 unsigned int msgsz
, abi_long msgtyp
,
2396 struct target_msgbuf
*target_mb
;
2398 struct msgbuf
*host_mb
;
2401 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
2402 return -TARGET_EFAULT
;
2404 host_mb
= malloc(msgsz
+sizeof(long));
2405 ret
= get_errno(msgrcv(msqid
, host_mb
, msgsz
, tswapl(msgtyp
), msgflg
));
2408 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
2409 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
2410 if (!target_mtext
) {
2411 ret
= -TARGET_EFAULT
;
2414 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
2415 unlock_user(target_mtext
, target_mtext_addr
, ret
);
2418 target_mb
->mtype
= tswapl(host_mb
->mtype
);
2423 unlock_user_struct(target_mb
, msgp
, 1);
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(unsigned int call, int first,
                       int second, int third,
                       abi_long ptr, abi_long fifth)
    struct shmid_ds shm_info;

    version = call >> 16;

        ret = do_semop(first, ptr, second);
        ret = get_errno(semget(first, second, third));
        ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
        ret = get_errno(msgget(first, second));
        ret = do_msgsnd(first, ptr, second, third);
        ret = do_msgctl(first, second, ptr);
            struct target_ipc_kludge {
            if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                ret = -TARGET_EFAULT;
            ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);
            unlock_user_struct(tmp, ptr, 0);
            ret = do_msgrcv(first, ptr, second, fifth, third);

        /* SHM_* flags are the same on all linux platforms */
        host_addr = shmat(first, (void *)g2h(ptr), second);
        if (host_addr == (void *)-1) {
            ret = get_errno((long)host_addr);
        raddr = h2g((unsigned long)host_addr);
        /* find out the length of the shared memory segment */
        ret = get_errno(shmctl(first, IPC_STAT, &shm_info));
        if (is_error(ret)) {
            /* can't get length, bail out */
        page_set_flags(raddr, raddr + shm_info.shm_segsz,
                       PAGE_VALID | PAGE_READ |
                       ((second & SHM_RDONLY)? 0: PAGE_WRITE));
        for (i = 0; i < N_SHM_REGIONS; ++i) {
            if (shm_regions[i].start == 0) {
                shm_regions[i].start = raddr;
                shm_regions[i].size = shm_info.shm_segsz;
        if (put_user_ual(raddr, third))
            return -TARGET_EFAULT;

        for (i = 0; i < N_SHM_REGIONS; ++i) {
            if (shm_regions[i].start == ptr) {
                shm_regions[i].start = 0;
                page_set_flags(ptr, shm_regions[i].size, 0);
        ret = get_errno(shmdt((void *)g2h(ptr)));

        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));

        /* IPC_* and SHM_* command values are the same on all linux platforms */
        ret = get_errno(shmctl(first, second, NULL));

        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
/* kernel structure types definitions */
#define STRUCT(name, list...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
#include "syscall_types.h"
#undef STRUCT_SPECIAL

#define STRUCT(name, list...) static const argtype struct_ ## name ## _def[] = { list, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT_SPECIAL

typedef struct IOCTLEntry {
    unsigned int target_cmd;
    unsigned int host_cmd;
    const argtype arg_type[5];

#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

#define MAX_STRUCT_SIZE 4096

static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, types...) \
    { TARGET_ ## cmd, cmd, #cmd, access, { types } },
/* ??? Implement proper locking for ioctls.  */
/* do_ioctl() Must return target values and target errnos. */
static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
    const IOCTLEntry *ie;
    const argtype *arg_type;
    uint8_t buf_temp[MAX_STRUCT_SIZE];

        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        if (ie->target_cmd == cmd)

    arg_type = ie->arg_type;
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);

    switch(arg_type[0]) {
        ret = get_errno(ioctl(fd, ie->host_cmd));
        ret = get_errno(ioctl(fd, ie->host_cmd, arg));
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
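/* How the ioctl_entries[] table drives do_ioctl(): for a TYPE_PTR argument,
   thunk_type_size() yields the struct size and thunk_convert() copies the
   data in the direction(s) named by IOC_R/IOC_W before and/or after the
   host ioctl().  As an illustrative (hypothetical) example of how such an
   entry might look when expanded through the IOCTL() macro above:
       IOCTL(TIOCGWINSZ, IOC_R, MK_PTR(MK_STRUCT(STRUCT_winsize)))
   would run the host ioctl into buf_temp and then convert the result back
   to the target layout; the actual entry list is included separately and is
   not shown here. */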
static const bitmask_transtbl iflag_tbl[] = {
        { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
        { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
        { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
        { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
        { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
        { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
        { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
        { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
        { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
        { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
        { TARGET_IXON, TARGET_IXON, IXON, IXON },
        { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
        { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
        { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },

static const bitmask_transtbl oflag_tbl[] = {
	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },

static const bitmask_transtbl cflag_tbl[] = {
	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },

static const bitmask_transtbl lflag_tbl[] = {
	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
static void target_to_host_termios (void *dst, const void *src)
    struct host_termios *host = dst;
    const struct target_termios *target = src;

        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];

static void host_to_target_termios (void *dst, const void *src)
    struct target_termios *target = dst;
    const struct host_termios *host = src;

        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];

static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
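/* struct_termios_def supplies the sizes, alignments and the two conversion
   callbacks for both directions, so the generic struct-thunk machinery can
   treat termios like any other registered structure; it is presumably
   registered as a "special" struct in syscall_init() via
   thunk_register_struct_direct(). */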
static bitmask_transtbl mmap_flags_tbl[] = {
	{ TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
	{ TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
	{ TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
	{ TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
	{ TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
	{ TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
	{ TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
	{ TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
#if defined(TARGET_I386)

/* NOTE: there is really one LDT for all the threads */
static uint8_t *ldt_table;

static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
        return -TARGET_EFAULT;
    /* ??? Should this by byteswapped?  */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);

/* XXX: add locking support */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
    lm = (ldt_info.flags >> 7) & 1;
    if (contents == 3) {
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    /* allocate the LDT */
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
            read_exec_only == 1 &&
            limit_in_pages == 0 &&
            seg_not_present == 1 &&

    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        ((seg_not_present ^ 1) << 15) |
        (limit_in_pages << 23) |
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);

/* specific and weird i386 syscalls */
static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
                              unsigned long bytecount)
        ret = read_ldt(ptr, bytecount);
        ret = write_ldt(env, ptr, bytecount, 1);
        ret = write_ldt(env, ptr, bytecount, 0);
        ret = -TARGET_ENOSYS;
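/* Layout of the ldt_info.flags word as decoded above (and re-encoded by
   do_get_thread_area() below): bit 0 seg_32bit, bits 1-2 contents,
   bit 3 read_exec_only, bit 4 limit_in_pages, bit 5 seg_not_present,
   bit 6 useable and, where 64-bit descriptors are supported, bit 7 lm. */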
#if defined(TARGET_I386) && defined(TARGET_ABI32)
static abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapl(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
    lm = (ldt_info.flags >> 7) & 1;
    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0 &&
             read_exec_only == 1 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&

    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        ((seg_not_present ^ 1) << 15) |
        (limit_in_pages << 23) |

    /* Install the new entry ...  */
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);

static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
    lm = (entry_2 >> 21) & 1;
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapl(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
#endif /* TARGET_I386 && TARGET_ABI32 */
#ifndef TARGET_ABI32
static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
    case TARGET_ARCH_SET_GS:
    case TARGET_ARCH_SET_FS:
        if (code == TARGET_ARCH_SET_GS)
        cpu_x86_load_seg(env, idx, 0);
        env->segs[idx].base = addr;
    case TARGET_ARCH_GET_GS:
    case TARGET_ARCH_GET_FS:
        if (code == TARGET_ARCH_GET_GS)
        val = env->segs[idx].base;
        if (put_user(val, addr, abi_ulong))
            return -TARGET_EFAULT;
        ret = -TARGET_EINVAL;
#endif /* defined(TARGET_I386) */
#if defined(USE_NPTL)

#define NEW_STACK_SIZE PTHREAD_STACK_MIN

static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    abi_ulong child_tidptr;
    abi_ulong parent_tidptr;

static void *clone_func(void *arg)
    new_thread_info *info = arg;

    info->tid = gettid();
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);

/* this stack is the equivalent of the kernel stack associated with a ... */
#define NEW_STACK_SIZE 8192

static int clone_func(void *arg)
    CPUState *env = arg;

/* do_fork() must return host values and target errnos (unlike most
   do_*() functions). */
static int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
#if defined(USE_NPTL)
    unsigned int nptl_flags;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
#if defined(USE_NPTL)
        new_thread_info info;
        pthread_attr_t attr;

        ts = qemu_mallocz(sizeof(TaskState) + NEW_STACK_SIZE);
        init_task_state(ts);
        new_stack = ts->stack;
        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs(new_env, newsp);
        new_env->opaque = ts;
#if defined(USE_NPTL)
        flags &= ~CLONE_NPTL_FLAGS2;
        if (nptl_flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        if (nptl_flags & CLONE_SETTLS)
            cpu_set_tls (new_env, newtls);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        if (nptl_flags & CLONE_CHILD_SETTID)
            info.child_tidptr = child_tidptr;
        if (nptl_flags & CLONE_PARENT_SETTID)
            info.parent_tidptr = parent_tidptr;

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstack(&attr, new_stack, NEW_STACK_SIZE);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        /* Wait for the child to initialize.  */
        pthread_cond_wait(&info.cond, &info.mutex);
        if (flags & CLONE_PARENT_SETTID)
            put_user_u32(ret, parent_tidptr);

        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
        if (flags & CLONE_NPTL_FLAGS2)
        /* This is probably going to die very quickly, but do it anyway.  */
        ret = __clone2(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
        ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
        /* if no CLONE_VM, we consider it is a fork */
        if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
            /* Child Process.  */
            cpu_clone_regs(env, newsp);
#if defined(USE_NPTL)
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)env->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
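/* Summary of the two paths above: with CLONE_VM the guest "thread" becomes a
   host pthread sharing the address space, and the CLONE_NPTL_FLAGS2 bits
   (TLS setup and tid bookkeeping) are handled here by the emulator rather
   than being passed to the host clone; without CLONE_VM the request is
   treated as a fork of the whole emulator process. */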
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
    struct target_flock *target_fl;
    struct flock64 fl64;
    struct target_flock64 *target_fl64;

    case TARGET_F_GETLK:
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type = tswap16(target_fl->l_type);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapl(target_fl->l_start);
        fl.l_len = tswapl(target_fl->l_len);
        fl.l_pid = tswapl(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, cmd, &fl));
            if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
                return -TARGET_EFAULT;
            target_fl->l_type = tswap16(fl.l_type);
            target_fl->l_whence = tswap16(fl.l_whence);
            target_fl->l_start = tswapl(fl.l_start);
            target_fl->l_len = tswapl(fl.l_len);
            target_fl->l_pid = tswapl(fl.l_pid);
            unlock_user_struct(target_fl, arg, 1);

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
            return -TARGET_EFAULT;
        fl.l_type = tswap16(target_fl->l_type);
        fl.l_whence = tswap16(target_fl->l_whence);
        fl.l_start = tswapl(target_fl->l_start);
        fl.l_len = tswapl(target_fl->l_len);
        fl.l_pid = tswapl(target_fl->l_pid);
        unlock_user_struct(target_fl, arg, 0);
        ret = get_errno(fcntl(fd, cmd, &fl));

    case TARGET_F_GETLK64:
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        fl64.l_type = tswap16(target_fl64->l_type) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswapl(target_fl64->l_start);
        fl64.l_len = tswapl(target_fl64->l_len);
        fl64.l_pid = tswap16(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, cmd >> 1, &fl64));
            if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
                return -TARGET_EFAULT;
            target_fl64->l_type = tswap16(fl64.l_type) >> 1;
            target_fl64->l_whence = tswap16(fl64.l_whence);
            target_fl64->l_start = tswapl(fl64.l_start);
            target_fl64->l_len = tswapl(fl64.l_len);
            target_fl64->l_pid = tswapl(fl64.l_pid);
            unlock_user_struct(target_fl64, arg, 1);

    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
            return -TARGET_EFAULT;
        fl64.l_type = tswap16(target_fl64->l_type) >> 1;
        fl64.l_whence = tswap16(target_fl64->l_whence);
        fl64.l_start = tswapl(target_fl64->l_start);
        fl64.l_len = tswapl(target_fl64->l_len);
        fl64.l_pid = tswap16(target_fl64->l_pid);
        unlock_user_struct(target_fl64, arg, 0);
        ret = get_errno(fcntl(fd, cmd >> 1, &fl64));

        ret = get_errno(fcntl(fd, cmd, arg));
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);

        ret = get_errno(fcntl(fd, cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));

        ret = get_errno(fcntl(fd, cmd, arg));
static inline int high2lowuid(int uid)

static inline int high2lowgid(int gid)

static inline int low2highuid(int uid)
    if ((int16_t)uid == -1)

static inline int low2highgid(int gid)
    if ((int16_t)gid == -1)

#endif /* USE_UID16 */
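/* These helpers back the 16-bit uid/gid syscall variants: high2low*()
   narrows a host id to 16 bits for the guest, while low2high*() widens a
   guest id, taking care that the 16-bit value -1 (used by the set*id calls
   to mean "leave unchanged") is sign-extended back to -1 instead of
   becoming 65535. */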
void syscall_init(void)
    const argtype *arg_type;

#define STRUCT(name, list...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT_SPECIAL

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                             (size << TARGET_IOC_SIZESHIFT);

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i=0; i < ERRNO_TABLE_SIZE; i++)
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
#if TARGET_ABI_BITS == 32
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
    return ((uint64_t)word1 << 32) | word0;
#else /* TARGET_ABI_BITS == 32 */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
#endif /* TARGET_ABI_BITS != 32 */
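/* Example: a 32-bit ABI guest passing the 64-bit offset 0x100000000 to
   truncate64 splits it across two registers; on a little-endian target
   word0 = 0x00000000 (low half) and word1 = 0x00000001 (high half), so
   target_offset64(word0, word1) = ((uint64_t)word1 << 32) | word0
   reassembles 0x100000000.  Big-endian targets pass the halves in the
   opposite order, hence the TARGET_WORDS_BIGENDIAN branch above. */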
#ifdef TARGET_NR_truncate64
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
    if (((CPUARMState *)cpu_env)->eabi)
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));

#ifdef TARGET_NR_ftruncate64
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
    if (((CPUARMState *)cpu_env)->eabi)
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
        return -TARGET_EFAULT;
    host_ts->tv_sec = tswapl(target_ts->tv_sec);
    host_ts->tv_nsec = tswapl(target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);

static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
        return -TARGET_EFAULT;
    target_ts->tv_sec = tswapl(host_ts->tv_sec);
    target_ts->tv_nsec = tswapl(host_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
#if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);

#if TARGET_LONG_BITS == 64
        struct target_stat *target_st;
        struct target_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
#if defined(USE_NPTL)
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  However they're probably useless because guest atomic
   operations won't work either. */
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
    struct timespec ts, *pts;

    /* ??? We assume FUTEX_* constants are the same on both host ... */
        target_to_host_timespec(pts, timeout);
        return get_errno(sys_futex(g2h(uaddr), FUTEX_WAIT, tswap32(val),
        return get_errno(sys_futex(g2h(uaddr), FUTEX_WAKE, val, NULL, NULL, 0));
        return get_errno(sys_futex(g2h(uaddr), FUTEX_FD, val, NULL, NULL, 0));
        return get_errno(sys_futex(g2h(uaddr), FUTEX_REQUEUE, val,
                                   NULL, g2h(uaddr2), 0));
    case FUTEX_CMP_REQUEUE:
        return get_errno(sys_futex(g2h(uaddr), FUTEX_CMP_REQUEUE, val,
                                   NULL, g2h(uaddr2), tswap32(val3)));
        return -TARGET_ENOSYS;
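/* Note on byte order: the futex word itself lives in guest memory, so the
   value compared by FUTEX_WAIT (and val3 for FUTEX_CMP_REQUEUE) is passed
   through tswap32() to match what the guest actually stored, while plain
   counts such as the number of waiters to wake are used as-is. */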
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
static int host_to_target_waitstatus(int status)
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)

int get_osversion(void)
    static int osversion;
    struct new_utsname buf;

    if (qemu_uname_release && *qemu_uname_release) {
        s = qemu_uname_release;
    if (sys_uname(&buf))
    for (i = 0; i < 3; i++) {
        while (*s >= '0' && *s <= '9') {
        tmp = (tmp << 8) + n;
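/* The loop above packs a "major.minor.patch" release string into a single
   integer, one byte per component: for example "2.6.17" becomes
   (2 << 16) + (6 << 8) + 17 = 0x020611.  qemu_uname_release, when set by
   the user, overrides the real host release reported by sys_uname(). */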
/* do_syscall() should always have a single exit point at the end so
   that actions, such as logging of syscall results, can be performed.
   All errnos that do_syscall() returns must be -TARGET_<errcode>. */
abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
                    abi_long arg2, abi_long arg3, abi_long arg4,
                    abi_long arg5, abi_long arg6)
        gemu_log("syscall %d", num);
        print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
    case TARGET_NR_exit:
        /* In old applications this may be used to implement _exit(2).
           However in threaded applications it is used for thread termination,
           and _exit_group is used for application termination.
           Do thread termination if we have more than one thread.  */
        /* FIXME: This probably breaks if a signal arrives.  We should probably
           be disabling signals.  */
        if (first_cpu->next_cpu) {
            while (p && p != (CPUState *)cpu_env) {
                lastp = &p->next_cpu;
            /* If we didn't find the CPU for this thread then something is ... */
            /* Remove the CPU from the list.  */
            *lastp = p->next_cpu;
                TaskState *ts = ((CPUState *)cpu_env)->opaque;
                if (ts->child_tidptr) {
                    put_user_u32(0, ts->child_tidptr);
                    sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
            /* TODO: Free CPU state.  */
        gdb_exit(cpu_env, arg1);
        ret = 0; /* avoid warning */
3829 case TARGET_NR_read
:
3833 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
3835 ret
= get_errno(read(arg1
, p
, arg3
));
3836 unlock_user(p
, arg2
, ret
);
3839 case TARGET_NR_write
:
3840 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
3842 ret
= get_errno(write(arg1
, p
, arg3
));
3843 unlock_user(p
, arg2
, 0);
3845 case TARGET_NR_open
:
3846 if (!(p
= lock_user_string(arg1
)))
3848 ret
= get_errno(open(path(p
),
3849 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
3851 unlock_user(p
, arg1
, 0);
3853 #if defined(TARGET_NR_openat) && defined(__NR_openat)
3854 case TARGET_NR_openat
:
3855 if (!(p
= lock_user_string(arg2
)))
3857 ret
= get_errno(sys_openat(arg1
,
3859 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
3861 unlock_user(p
, arg2
, 0);
3864 case TARGET_NR_close
:
3865 ret
= get_errno(close(arg1
));
3870 case TARGET_NR_fork
:
3871 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
3873 #ifdef TARGET_NR_waitpid
3874 case TARGET_NR_waitpid
:
3877 ret
= get_errno(waitpid(arg1
, &status
, arg3
));
3878 if (!is_error(ret
) && arg2
3879 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
3884 #ifdef TARGET_NR_waitid
3885 case TARGET_NR_waitid
:
3889 ret
= get_errno(waitid(arg1
, arg2
, &info
, arg4
));
3890 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
3891 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
3893 host_to_target_siginfo(p
, &info
);
3894 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
3899 #ifdef TARGET_NR_creat /* not on alpha */
3900 case TARGET_NR_creat
:
3901 if (!(p
= lock_user_string(arg1
)))
3903 ret
= get_errno(creat(p
, arg2
));
3904 unlock_user(p
, arg1
, 0);
3907 case TARGET_NR_link
:
3910 p
= lock_user_string(arg1
);
3911 p2
= lock_user_string(arg2
);
3913 ret
= -TARGET_EFAULT
;
3915 ret
= get_errno(link(p
, p2
));
3916 unlock_user(p2
, arg2
, 0);
3917 unlock_user(p
, arg1
, 0);
3920 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
3921 case TARGET_NR_linkat
:
3926 p
= lock_user_string(arg2
);
3927 p2
= lock_user_string(arg4
);
3929 ret
= -TARGET_EFAULT
;
3931 ret
= get_errno(sys_linkat(arg1
, p
, arg3
, p2
, arg5
));
3932 unlock_user(p
, arg2
, 0);
3933 unlock_user(p2
, arg4
, 0);
3937 case TARGET_NR_unlink
:
3938 if (!(p
= lock_user_string(arg1
)))
3940 ret
= get_errno(unlink(p
));
3941 unlock_user(p
, arg1
, 0);
3943 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
3944 case TARGET_NR_unlinkat
:
3945 if (!(p
= lock_user_string(arg2
)))
3947 ret
= get_errno(sys_unlinkat(arg1
, p
, arg3
));
3948 unlock_user(p
, arg2
, 0);
3951 case TARGET_NR_execve
:
3953 char **argp
, **envp
;
3956 abi_ulong guest_argp
;
3957 abi_ulong guest_envp
;
3963 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
3964 if (get_user_ual(addr
, gp
))
3972 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
3973 if (get_user_ual(addr
, gp
))
3980 argp
= alloca((argc
+ 1) * sizeof(void *));
3981 envp
= alloca((envc
+ 1) * sizeof(void *));
3983 for (gp
= guest_argp
, q
= argp
; gp
;
3984 gp
+= sizeof(abi_ulong
), q
++) {
3985 if (get_user_ual(addr
, gp
))
3989 if (!(*q
= lock_user_string(addr
)))
3994 for (gp
= guest_envp
, q
= envp
; gp
;
3995 gp
+= sizeof(abi_ulong
), q
++) {
3996 if (get_user_ual(addr
, gp
))
4000 if (!(*q
= lock_user_string(addr
)))
4005 if (!(p
= lock_user_string(arg1
)))
4007 ret
= get_errno(execve(p
, argp
, envp
));
4008 unlock_user(p
, arg1
, 0);
4013 ret
= -TARGET_EFAULT
;
4016 for (gp
= guest_argp
, q
= argp
; *q
;
4017 gp
+= sizeof(abi_ulong
), q
++) {
4018 if (get_user_ual(addr
, gp
)
4021 unlock_user(*q
, addr
, 0);
4023 for (gp
= guest_envp
, q
= envp
; *q
;
4024 gp
+= sizeof(abi_ulong
), q
++) {
4025 if (get_user_ual(addr
, gp
)
4028 unlock_user(*q
, addr
, 0);
4032 case TARGET_NR_chdir
:
4033 if (!(p
= lock_user_string(arg1
)))
4035 ret
= get_errno(chdir(p
));
4036 unlock_user(p
, arg1
, 0);
4038 #ifdef TARGET_NR_time
4039 case TARGET_NR_time
:
4042 ret
= get_errno(time(&host_time
));
4045 && put_user_sal(host_time
, arg1
))
4050 case TARGET_NR_mknod
:
4051 if (!(p
= lock_user_string(arg1
)))
4053 ret
= get_errno(mknod(p
, arg2
, arg3
));
4054 unlock_user(p
, arg1
, 0);
4056 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
4057 case TARGET_NR_mknodat
:
4058 if (!(p
= lock_user_string(arg2
)))
4060 ret
= get_errno(sys_mknodat(arg1
, p
, arg3
, arg4
));
4061 unlock_user(p
, arg2
, 0);
4064 case TARGET_NR_chmod
:
4065 if (!(p
= lock_user_string(arg1
)))
4067 ret
= get_errno(chmod(p
, arg2
));
4068 unlock_user(p
, arg1
, 0);
4070 #ifdef TARGET_NR_break
4071 case TARGET_NR_break
:
4074 #ifdef TARGET_NR_oldstat
4075 case TARGET_NR_oldstat
:
4078 case TARGET_NR_lseek
:
4079 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
4081 #ifdef TARGET_NR_getxpid
4082 case TARGET_NR_getxpid
:
4084 case TARGET_NR_getpid
:
4086 ret
= get_errno(getpid());
4088 case TARGET_NR_mount
:
4090 /* need to look at the data field */
4092 p
= lock_user_string(arg1
);
4093 p2
= lock_user_string(arg2
);
4094 p3
= lock_user_string(arg3
);
4095 if (!p
|| !p2
|| !p3
)
4096 ret
= -TARGET_EFAULT
;
4098 /* FIXME - arg5 should be locked, but it isn't clear how to
4099 * do that since it's not guaranteed to be a NULL-terminated
4102 ret
= get_errno(mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
)));
4103 unlock_user(p
, arg1
, 0);
4104 unlock_user(p2
, arg2
, 0);
4105 unlock_user(p3
, arg3
, 0);
4108 #ifdef TARGET_NR_umount
4109 case TARGET_NR_umount
:
4110 if (!(p
= lock_user_string(arg1
)))
4112 ret
= get_errno(umount(p
));
4113 unlock_user(p
, arg1
, 0);
4116 #ifdef TARGET_NR_stime /* not on alpha */
4117 case TARGET_NR_stime
:
4120 if (get_user_sal(host_time
, arg1
))
4122 ret
= get_errno(stime(&host_time
));
4126 case TARGET_NR_ptrace
:
4128 #ifdef TARGET_NR_alarm /* not on alpha */
4129 case TARGET_NR_alarm
:
4133 #ifdef TARGET_NR_oldfstat
4134 case TARGET_NR_oldfstat
:
4137 #ifdef TARGET_NR_pause /* not on alpha */
4138 case TARGET_NR_pause
:
4139 ret
= get_errno(pause());
4142 #ifdef TARGET_NR_utime
4143 case TARGET_NR_utime
:
4145 struct utimbuf tbuf
, *host_tbuf
;
4146 struct target_utimbuf
*target_tbuf
;
4148 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
4150 tbuf
.actime
= tswapl(target_tbuf
->actime
);
4151 tbuf
.modtime
= tswapl(target_tbuf
->modtime
);
4152 unlock_user_struct(target_tbuf
, arg2
, 0);
4157 if (!(p
= lock_user_string(arg1
)))
4159 ret
= get_errno(utime(p
, host_tbuf
));
4160 unlock_user(p
, arg1
, 0);
4164 case TARGET_NR_utimes
:
4166 struct timeval
*tvp
, tv
[2];
4168 if (copy_from_user_timeval(&tv
[0], arg2
)
4169 || copy_from_user_timeval(&tv
[1],
4170 arg2
+ sizeof(struct target_timeval
)))
4176 if (!(p
= lock_user_string(arg1
)))
4178 ret
= get_errno(utimes(p
, tvp
));
4179 unlock_user(p
, arg1
, 0);
4182 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
4183 case TARGET_NR_futimesat
:
4185 struct timeval
*tvp
, tv
[2];
4187 if (copy_from_user_timeval(&tv
[0], arg3
)
4188 || copy_from_user_timeval(&tv
[1],
4189 arg3
+ sizeof(struct target_timeval
)))
4195 if (!(p
= lock_user_string(arg2
)))
4197 ret
= get_errno(sys_futimesat(arg1
, path(p
), tvp
));
4198 unlock_user(p
, arg2
, 0);
4202 #ifdef TARGET_NR_stty
4203 case TARGET_NR_stty
:
4206 #ifdef TARGET_NR_gtty
4207 case TARGET_NR_gtty
:
4210 case TARGET_NR_access
:
4211 if (!(p
= lock_user_string(arg1
)))
4213 ret
= get_errno(access(p
, arg2
));
4214 unlock_user(p
, arg1
, 0);
4216 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
4217 case TARGET_NR_faccessat
:
4218 if (!(p
= lock_user_string(arg2
)))
4220 ret
= get_errno(sys_faccessat(arg1
, p
, arg3
));
4221 unlock_user(p
, arg2
, 0);
4224 #ifdef TARGET_NR_nice /* not on alpha */
4225 case TARGET_NR_nice
:
4226 ret
= get_errno(nice(arg1
));
4229 #ifdef TARGET_NR_ftime
4230 case TARGET_NR_ftime
:
4233 case TARGET_NR_sync
:
4237 case TARGET_NR_kill
:
4238 ret
= get_errno(kill(arg1
, target_to_host_signal(arg2
)));
4240 case TARGET_NR_rename
:
4243 p
= lock_user_string(arg1
);
4244 p2
= lock_user_string(arg2
);
4246 ret
= -TARGET_EFAULT
;
4248 ret
= get_errno(rename(p
, p2
));
4249 unlock_user(p2
, arg2
, 0);
4250 unlock_user(p
, arg1
, 0);
4253 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
4254 case TARGET_NR_renameat
:
4257 p
= lock_user_string(arg2
);
4258 p2
= lock_user_string(arg4
);
4260 ret
= -TARGET_EFAULT
;
4262 ret
= get_errno(sys_renameat(arg1
, p
, arg3
, p2
));
4263 unlock_user(p2
, arg4
, 0);
4264 unlock_user(p
, arg2
, 0);
4268 case TARGET_NR_mkdir
:
4269 if (!(p
= lock_user_string(arg1
)))
4271 ret
= get_errno(mkdir(p
, arg2
));
4272 unlock_user(p
, arg1
, 0);
4274 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
4275 case TARGET_NR_mkdirat
:
4276 if (!(p
= lock_user_string(arg2
)))
4278 ret
= get_errno(sys_mkdirat(arg1
, p
, arg3
));
4279 unlock_user(p
, arg2
, 0);
4282 case TARGET_NR_rmdir
:
4283 if (!(p
= lock_user_string(arg1
)))
4285 ret
= get_errno(rmdir(p
));
4286 unlock_user(p
, arg1
, 0);
4289 ret
= get_errno(dup(arg1
));
4291 case TARGET_NR_pipe
:
4294 ret
= get_errno(pipe(host_pipe
));
4295 if (!is_error(ret
)) {
4296 #if defined(TARGET_MIPS)
4297 CPUMIPSState
*env
= (CPUMIPSState
*)cpu_env
;
4298 env
->active_tc
.gpr
[3] = host_pipe
[1];
4300 #elif defined(TARGET_SH4)
4301 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
4304 if (put_user_s32(host_pipe
[0], arg1
)
4305 || put_user_s32(host_pipe
[1], arg1
+ sizeof(host_pipe
[0])))
4311 case TARGET_NR_times
:
4313 struct target_tms
*tmsp
;
4315 ret
= get_errno(times(&tms
));
4317 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
4320 tmsp
->tms_utime
= tswapl(host_to_target_clock_t(tms
.tms_utime
));
4321 tmsp
->tms_stime
= tswapl(host_to_target_clock_t(tms
.tms_stime
));
4322 tmsp
->tms_cutime
= tswapl(host_to_target_clock_t(tms
.tms_cutime
));
4323 tmsp
->tms_cstime
= tswapl(host_to_target_clock_t(tms
.tms_cstime
));
4326 ret
= host_to_target_clock_t(ret
);
4329 #ifdef TARGET_NR_prof
4330 case TARGET_NR_prof
:
4333 #ifdef TARGET_NR_signal
4334 case TARGET_NR_signal
:
4337 case TARGET_NR_acct
:
4339 ret
= get_errno(acct(NULL
));
4341 if (!(p
= lock_user_string(arg1
)))
4343 ret
= get_errno(acct(path(p
)));
4344 unlock_user(p
, arg1
, 0);
4347 #ifdef TARGET_NR_umount2 /* not on alpha */
4348 case TARGET_NR_umount2
:
4349 if (!(p
= lock_user_string(arg1
)))
4351 ret
= get_errno(umount2(p
, arg2
));
4352 unlock_user(p
, arg1
, 0);
4355 #ifdef TARGET_NR_lock
4356 case TARGET_NR_lock
:
4359 case TARGET_NR_ioctl
:
4360 ret
= do_ioctl(arg1
, arg2
, arg3
);
4362 case TARGET_NR_fcntl
:
4363 ret
= do_fcntl(arg1
, arg2
, arg3
);
4365 #ifdef TARGET_NR_mpx
4369 case TARGET_NR_setpgid
:
4370 ret
= get_errno(setpgid(arg1
, arg2
));
4372 #ifdef TARGET_NR_ulimit
4373 case TARGET_NR_ulimit
:
4376 #ifdef TARGET_NR_oldolduname
4377 case TARGET_NR_oldolduname
:
4380 case TARGET_NR_umask
:
4381 ret
= get_errno(umask(arg1
));
4383 case TARGET_NR_chroot
:
4384 if (!(p
= lock_user_string(arg1
)))
4386 ret
= get_errno(chroot(p
));
4387 unlock_user(p
, arg1
, 0);
4389 case TARGET_NR_ustat
:
4391 case TARGET_NR_dup2
:
4392 ret
= get_errno(dup2(arg1
, arg2
));
4394 #ifdef TARGET_NR_getppid /* not on alpha */
4395 case TARGET_NR_getppid
:
4396 ret
= get_errno(getppid());
4399 case TARGET_NR_getpgrp
:
4400 ret
= get_errno(getpgrp());
4402 case TARGET_NR_setsid
:
4403 ret
= get_errno(setsid());
4405 #ifdef TARGET_NR_sigaction
4406 case TARGET_NR_sigaction
:
4408 #if !defined(TARGET_MIPS)
4409 struct target_old_sigaction
*old_act
;
4410 struct target_sigaction act
, oact
, *pact
;
4412 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
4414 act
._sa_handler
= old_act
->_sa_handler
;
4415 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
4416 act
.sa_flags
= old_act
->sa_flags
;
4417 act
.sa_restorer
= old_act
->sa_restorer
;
4418 unlock_user_struct(old_act
, arg2
, 0);
4423 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
4424 if (!is_error(ret
) && arg3
) {
4425 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
4427 old_act
->_sa_handler
= oact
._sa_handler
;
4428 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
4429 old_act
->sa_flags
= oact
.sa_flags
;
4430 old_act
->sa_restorer
= oact
.sa_restorer
;
4431 unlock_user_struct(old_act
, arg3
, 1);
4434 struct target_sigaction act
, oact
, *pact
, *old_act
;
4437 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
4439 act
._sa_handler
= old_act
->_sa_handler
;
4440 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
4441 act
.sa_flags
= old_act
->sa_flags
;
4442 unlock_user_struct(old_act
, arg2
, 0);
4448 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
4450 if (!is_error(ret
) && arg3
) {
4451 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
4453 old_act
->_sa_handler
= oact
._sa_handler
;
4454 old_act
->sa_flags
= oact
.sa_flags
;
4455 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
4456 old_act
->sa_mask
.sig
[1] = 0;
4457 old_act
->sa_mask
.sig
[2] = 0;
4458 old_act
->sa_mask
.sig
[3] = 0;
4459 unlock_user_struct(old_act
, arg3
, 1);
4465 case TARGET_NR_rt_sigaction
:
4467 struct target_sigaction
*act
;
4468 struct target_sigaction
*oact
;
4471 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
4476 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
4477 ret
= -TARGET_EFAULT
;
4478 goto rt_sigaction_fail
;
4482 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
4485 unlock_user_struct(act
, arg2
, 0);
4487 unlock_user_struct(oact
, arg3
, 1);
4490 #ifdef TARGET_NR_sgetmask /* not on alpha */
4491 case TARGET_NR_sgetmask
:
4494 abi_ulong target_set
;
4495 sigprocmask(0, NULL
, &cur_set
);
4496 host_to_target_old_sigset(&target_set
, &cur_set
);
4501 #ifdef TARGET_NR_ssetmask /* not on alpha */
4502 case TARGET_NR_ssetmask
:
4504 sigset_t set
, oset
, cur_set
;
4505 abi_ulong target_set
= arg1
;
4506 sigprocmask(0, NULL
, &cur_set
);
4507 target_to_host_old_sigset(&set
, &target_set
);
4508 sigorset(&set
, &set
, &cur_set
);
4509 sigprocmask(SIG_SETMASK
, &set
, &oset
);
4510 host_to_target_old_sigset(&target_set
, &oset
);
4515 #ifdef TARGET_NR_sigprocmask
4516 case TARGET_NR_sigprocmask
:
4519 sigset_t set
, oldset
, *set_ptr
;
4523 case TARGET_SIG_BLOCK
:
4526 case TARGET_SIG_UNBLOCK
:
4529 case TARGET_SIG_SETMASK
:
4533 ret
= -TARGET_EINVAL
;
4536 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
4538 target_to_host_old_sigset(&set
, p
);
4539 unlock_user(p
, arg2
, 0);
4545 ret
= get_errno(sigprocmask(arg1
, set_ptr
, &oldset
));
4546 if (!is_error(ret
) && arg3
) {
4547 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
4549 host_to_target_old_sigset(p
, &oldset
);
4550 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
    case TARGET_NR_rt_sigprocmask:
        sigset_t set, oldset, *set_ptr;
        case TARGET_SIG_BLOCK:
        case TARGET_SIG_UNBLOCK:
        case TARGET_SIG_SETMASK:
            ret = -TARGET_EINVAL;
        if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
        target_to_host_sigset(&set, p);
        unlock_user(p, arg2, 0);
        ret = get_errno(sigprocmask(how, set_ptr, &oldset));
        if (!is_error(ret) && arg3) {
            if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
            host_to_target_sigset(p, &oldset);
            unlock_user(p, arg3, sizeof(target_sigset_t));
#ifdef TARGET_NR_sigpending
    case TARGET_NR_sigpending:
        ret = get_errno(sigpending(&set));
        if (!is_error(ret)) {
            if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
            host_to_target_old_sigset(p, &set);
            unlock_user(p, arg1, sizeof(target_sigset_t));
    case TARGET_NR_rt_sigpending:
        ret = get_errno(sigpending(&set));
        if (!is_error(ret)) {
            if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
            host_to_target_sigset(p, &set);
            unlock_user(p, arg1, sizeof(target_sigset_t));
#ifdef TARGET_NR_sigsuspend
    case TARGET_NR_sigsuspend:
        if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
        target_to_host_old_sigset(&set, p);
        unlock_user(p, arg1, 0);
        ret = get_errno(sigsuspend(&set));
    case TARGET_NR_rt_sigsuspend:
        if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
        target_to_host_sigset(&set, p);
        unlock_user(p, arg1, 0);
        ret = get_errno(sigsuspend(&set));
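    /* rt_sigtimedwait: convert the guest sigset (and optional timeout) to
     * host format, wait on the host, then copy the resulting siginfo back
     * to guest memory if a buffer was supplied. */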
    case TARGET_NR_rt_sigtimedwait:
        struct timespec uts, *puts;
        if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
        target_to_host_sigset(&set, p);
        unlock_user(p, arg1, 0);
        target_to_host_timespec(puts, arg3);
        ret = get_errno(sigtimedwait(&set, &uinfo, puts));
        if (!is_error(ret) && arg2) {
            if (!(p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t), 0)))
            host_to_target_siginfo(p, &uinfo);
            unlock_user(p, arg2, sizeof(target_siginfo_t));
    case TARGET_NR_rt_sigqueueinfo:
        if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_sigset_t), 1)))
        target_to_host_siginfo(&uinfo, p);
        unlock_user(p, arg3, 0);
        ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        /* NOTE: ret is eax, so no transcoding needs to be done */
        ret = do_sigreturn(cpu_env);
    case TARGET_NR_rt_sigreturn:
        /* NOTE: ret is eax, so no transcoding needs to be done */
        ret = do_rt_sigreturn(cpu_env);
    case TARGET_NR_sethostname:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(sethostname(p, arg2));
        unlock_user(p, arg1, 0);
    case TARGET_NR_setrlimit:
        /* XXX: convert resource ? */
        int resource = arg1;
        struct target_rlimit *target_rlim;
        if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
        rlim.rlim_cur = tswapl(target_rlim->rlim_cur);
        rlim.rlim_max = tswapl(target_rlim->rlim_max);
        unlock_user_struct(target_rlim, arg2, 0);
        ret = get_errno(setrlimit(resource, &rlim));
    case TARGET_NR_getrlimit:
        /* XXX: convert resource ? */
        int resource = arg1;
        struct target_rlimit *target_rlim;
        ret = get_errno(getrlimit(resource, &rlim));
        if (!is_error(ret)) {
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
            target_rlim->rlim_cur = tswapl(rlim.rlim_cur);
            target_rlim->rlim_max = tswapl(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
    case TARGET_NR_getrusage:
        struct rusage rusage;
        ret = get_errno(getrusage(arg1, &rusage));
        if (!is_error(ret)) {
            host_to_target_rusage(arg2, &rusage);
    case TARGET_NR_gettimeofday:
        ret = get_errno(gettimeofday(&tv, NULL));
        if (!is_error(ret)) {
            if (copy_to_user_timeval(arg1, &tv))
    case TARGET_NR_settimeofday:
        if (copy_from_user_timeval(&tv, arg1))
        ret = get_errno(settimeofday(&tv, NULL));
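    /* Old-style select passes a single argument block in guest memory;
     * unpack its five fields with tswapl() before calling do_select(). */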
#ifdef TARGET_NR_select
    case TARGET_NR_select:
        struct target_sel_arg_struct *sel;
        abi_ulong inp, outp, exp, tvp;
        if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
        nsel = tswapl(sel->n);
        inp = tswapl(sel->inp);
        outp = tswapl(sel->outp);
        exp = tswapl(sel->exp);
        tvp = tswapl(sel->tvp);
        unlock_user_struct(sel, arg1, 0);
        ret = do_select(nsel, inp, outp, exp, tvp);
    case TARGET_NR_symlink:
        p = lock_user_string(arg1);
        p2 = lock_user_string(arg2);
            ret = -TARGET_EFAULT;
        ret = get_errno(symlink(p, p2));
        unlock_user(p2, arg2, 0);
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
    case TARGET_NR_symlinkat:
        p = lock_user_string(arg1);
        p2 = lock_user_string(arg3);
            ret = -TARGET_EFAULT;
        ret = get_errno(sys_symlinkat(p, arg2, p2));
        unlock_user(p2, arg3, 0);
        unlock_user(p, arg1, 0);
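    /* readlink: a read of /proc/self/exe is answered with the path of the
     * emulated binary (exec_path) rather than the qemu executable itself. */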
#ifdef TARGET_NR_oldlstat
    case TARGET_NR_oldlstat:
    case TARGET_NR_readlink:
        p = lock_user_string(arg1);
        p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            ret = -TARGET_EFAULT;
        if (strncmp((const char *)p, "/proc/self/exe", 14) == 0) {
            char real[PATH_MAX];
            temp = realpath(exec_path, real);
            ret = (temp == NULL) ? get_errno(-1) : strlen(real);
            snprintf((char *)p2, arg3, "%s", real);
        ret = get_errno(readlink(path(p), p2, arg3));
        unlock_user(p2, arg2, ret);
        unlock_user(p, arg1, 0);
#if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
    case TARGET_NR_readlinkat:
        p = lock_user_string(arg2);
        p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            ret = -TARGET_EFAULT;
        ret = get_errno(sys_readlinkat(arg1, path(p), p2, arg4));
        unlock_user(p2, arg3, ret);
        unlock_user(p, arg2, 0);
#ifdef TARGET_NR_uselib
    case TARGET_NR_uselib:
#ifdef TARGET_NR_swapon
    case TARGET_NR_swapon:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(swapon(p, arg2));
        unlock_user(p, arg1, 0);
    case TARGET_NR_reboot:
#ifdef TARGET_NR_readdir
    case TARGET_NR_readdir:
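    /* mmap: some 32-bit targets (i386 ABI32, ARM, M68K, CRIS) pass the six
     * arguments in a block in guest memory, which is read first; otherwise
     * the register arguments go straight to target_mmap(). */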
#ifdef TARGET_NR_mmap
    case TARGET_NR_mmap:
#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || defined(TARGET_M68K) || defined(TARGET_CRIS)
        abi_ulong v1, v2, v3, v4, v5, v6;
        if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
        unlock_user(v, arg1, 0);
        ret = get_errno(target_mmap(v1, v2, v3,
                                    target_to_host_bitmask(v4, mmap_flags_tbl),
                                    v5, v6));
        ret = get_errno(target_mmap(arg1, arg2, arg3,
                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
                                    arg5, arg6));
#ifdef TARGET_NR_mmap2
    case TARGET_NR_mmap2:
#define MMAP_SHIFT 12
        ret = get_errno(target_mmap(arg1, arg2, arg3,
                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
                                    arg5, arg6 << MMAP_SHIFT));
    case TARGET_NR_munmap:
        ret = get_errno(target_munmap(arg1, arg2));
    case TARGET_NR_mprotect:
        ret = get_errno(target_mprotect(arg1, arg2, arg3));
#ifdef TARGET_NR_mremap
    case TARGET_NR_mremap:
        ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
    /* ??? msync/mlock/munlock are broken for softmmu. */
#ifdef TARGET_NR_msync
    case TARGET_NR_msync:
        ret = get_errno(msync(g2h(arg1), arg2, arg3));
#ifdef TARGET_NR_mlock
    case TARGET_NR_mlock:
        ret = get_errno(mlock(g2h(arg1), arg2));
#ifdef TARGET_NR_munlock
    case TARGET_NR_munlock:
        ret = get_errno(munlock(g2h(arg1), arg2));
#ifdef TARGET_NR_mlockall
    case TARGET_NR_mlockall:
        ret = get_errno(mlockall(arg1));
#ifdef TARGET_NR_munlockall
    case TARGET_NR_munlockall:
        ret = get_errno(munlockall());
    case TARGET_NR_truncate:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(truncate(p, arg2));
        unlock_user(p, arg1, 0);
    case TARGET_NR_ftruncate:
        ret = get_errno(ftruncate(arg1, arg2));
    case TARGET_NR_fchmod:
        ret = get_errno(fchmod(arg1, arg2));
#if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
    case TARGET_NR_fchmodat:
        if (!(p = lock_user_string(arg2)))
        ret = get_errno(sys_fchmodat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
    case TARGET_NR_getpriority:
        /* libc does special remapping of the return value of
         * sys_getpriority() so it's just easiest to call
         * sys_getpriority() directly rather than through libc. */
        ret = sys_getpriority(arg1, arg2);
    case TARGET_NR_setpriority:
        ret = get_errno(setpriority(arg1, arg2, arg3));
#ifdef TARGET_NR_profil
    case TARGET_NR_profil:
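    /* statfs family: run the host call, then copy each field into the
     * target_statfs/target_statfs64 layout with __put_user(). */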
    case TARGET_NR_statfs:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
        if (!is_error(ret)) {
            struct target_statfs *target_stfs;
            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            unlock_user_struct(target_stfs, arg2, 1);
    case TARGET_NR_fstatfs:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs;
#ifdef TARGET_NR_statfs64
    case TARGET_NR_statfs64:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
        if (!is_error(ret)) {
            struct target_statfs64 *target_stfs;
            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            unlock_user_struct(target_stfs, arg3, 1);
    case TARGET_NR_fstatfs64:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs64;
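    /* Socket syscalls are forwarded to the do_socketcall/do_accept/do_sendto
     * etc. helpers, which handle sockaddr and buffer conversion. */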
#ifdef TARGET_NR_ioperm
    case TARGET_NR_ioperm:
#ifdef TARGET_NR_socketcall
    case TARGET_NR_socketcall:
        ret = do_socketcall(arg1, arg2);
#ifdef TARGET_NR_accept
    case TARGET_NR_accept:
        ret = do_accept(arg1, arg2, arg3);
#ifdef TARGET_NR_bind
    case TARGET_NR_bind:
        ret = do_bind(arg1, arg2, arg3);
#ifdef TARGET_NR_connect
    case TARGET_NR_connect:
        ret = do_connect(arg1, arg2, arg3);
#ifdef TARGET_NR_getpeername
    case TARGET_NR_getpeername:
        ret = do_getpeername(arg1, arg2, arg3);
#ifdef TARGET_NR_getsockname
    case TARGET_NR_getsockname:
        ret = do_getsockname(arg1, arg2, arg3);
#ifdef TARGET_NR_getsockopt
    case TARGET_NR_getsockopt:
        ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
#ifdef TARGET_NR_listen
    case TARGET_NR_listen:
        ret = get_errno(listen(arg1, arg2));
#ifdef TARGET_NR_recv
    case TARGET_NR_recv:
        ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
#ifdef TARGET_NR_recvfrom
    case TARGET_NR_recvfrom:
        ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
#ifdef TARGET_NR_recvmsg
    case TARGET_NR_recvmsg:
        ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
#ifdef TARGET_NR_send
    case TARGET_NR_send:
        ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
#ifdef TARGET_NR_sendmsg
    case TARGET_NR_sendmsg:
        ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
#ifdef TARGET_NR_sendto
    case TARGET_NR_sendto:
        ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
#ifdef TARGET_NR_shutdown
    case TARGET_NR_shutdown:
        ret = get_errno(shutdown(arg1, arg2));
#ifdef TARGET_NR_socket
    case TARGET_NR_socket:
        ret = do_socket(arg1, arg2, arg3);
#ifdef TARGET_NR_socketpair
    case TARGET_NR_socketpair:
        ret = do_socketpair(arg1, arg2, arg3, arg4);
#ifdef TARGET_NR_setsockopt
    case TARGET_NR_setsockopt:
        ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
    case TARGET_NR_syslog:
        if (!(p = lock_user_string(arg2)))
        ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
        unlock_user(p, arg2, 0);
    case TARGET_NR_setitimer:
        struct itimerval value, ovalue, *pvalue;
        if (copy_from_user_timeval(&pvalue->it_interval, arg2)
            || copy_from_user_timeval(&pvalue->it_value,
                                      arg2 + sizeof(struct target_timeval)))
        ret = get_errno(setitimer(arg1, pvalue, &ovalue));
        if (!is_error(ret) && arg3) {
            if (copy_to_user_timeval(arg3,
                                     &ovalue.it_interval)
                || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
                                        &ovalue.it_value))
    case TARGET_NR_getitimer:
        struct itimerval value;
        ret = get_errno(getitimer(arg1, &value));
        if (!is_error(ret) && arg2) {
            if (copy_to_user_timeval(arg2,
                                     &value.it_interval)
                || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
                                        &value.it_value))
    case TARGET_NR_stat:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
    case TARGET_NR_lstat:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
    case TARGET_NR_fstat:
        ret = get_errno(fstat(arg1, &st));
        if (!is_error(ret)) {
            struct target_stat *target_st;
            if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
            __put_user(st.st_dev, &target_st->st_dev);
            __put_user(st.st_ino, &target_st->st_ino);
            __put_user(st.st_mode, &target_st->st_mode);
            __put_user(st.st_uid, &target_st->st_uid);
            __put_user(st.st_gid, &target_st->st_gid);
            __put_user(st.st_nlink, &target_st->st_nlink);
            __put_user(st.st_rdev, &target_st->st_rdev);
            __put_user(st.st_size, &target_st->st_size);
            __put_user(st.st_blksize, &target_st->st_blksize);
            __put_user(st.st_blocks, &target_st->st_blocks);
            __put_user(st.st_atime, &target_st->target_st_atime);
            __put_user(st.st_mtime, &target_st->target_st_mtime);
            __put_user(st.st_ctime, &target_st->target_st_ctime);
            unlock_user_struct(target_st, arg2, 1);
#ifdef TARGET_NR_olduname
    case TARGET_NR_olduname:
#ifdef TARGET_NR_iopl
    case TARGET_NR_iopl:
    case TARGET_NR_vhangup:
        ret = get_errno(vhangup());
#ifdef TARGET_NR_idle
    case TARGET_NR_idle:
#ifdef TARGET_NR_syscall
    case TARGET_NR_syscall:
        ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5, arg6, 0);
    case TARGET_NR_wait4:
        abi_long status_ptr = arg2;
        struct rusage rusage, *rusage_ptr;
        abi_ulong target_rusage = arg4;
        rusage_ptr = &rusage;
        ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
        if (!is_error(ret)) {
            status = host_to_target_waitstatus(status);
            if (put_user_s32(status, status_ptr))
            host_to_target_rusage(target_rusage, &rusage);
#ifdef TARGET_NR_swapoff
    case TARGET_NR_swapoff:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(swapoff(p));
        unlock_user(p, arg1, 0);
    case TARGET_NR_sysinfo:
        struct target_sysinfo *target_value;
        struct sysinfo value;
        ret = get_errno(sysinfo(&value));
        if (!is_error(ret) && arg1)
            if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
            __put_user(value.uptime, &target_value->uptime);
            __put_user(value.loads[0], &target_value->loads[0]);
            __put_user(value.loads[1], &target_value->loads[1]);
            __put_user(value.loads[2], &target_value->loads[2]);
            __put_user(value.totalram, &target_value->totalram);
            __put_user(value.freeram, &target_value->freeram);
            __put_user(value.sharedram, &target_value->sharedram);
            __put_user(value.bufferram, &target_value->bufferram);
            __put_user(value.totalswap, &target_value->totalswap);
            __put_user(value.freeswap, &target_value->freeswap);
            __put_user(value.procs, &target_value->procs);
            __put_user(value.totalhigh, &target_value->totalhigh);
            __put_user(value.freehigh, &target_value->freehigh);
            __put_user(value.mem_unit, &target_value->mem_unit);
            unlock_user_struct(target_value, arg1, 1);
#ifdef TARGET_NR_ipc
        ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
#ifdef TARGET_NR_semget
    case TARGET_NR_semget:
        ret = get_errno(semget(arg1, arg2, arg3));
#ifdef TARGET_NR_semop
    case TARGET_NR_semop:
        ret = get_errno(do_semop(arg1, arg2, arg3));
#ifdef TARGET_NR_semctl
    case TARGET_NR_semctl:
        ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
#ifdef TARGET_NR_msgctl
    case TARGET_NR_msgctl:
        ret = do_msgctl(arg1, arg2, arg3);
#ifdef TARGET_NR_msgget
    case TARGET_NR_msgget:
        ret = get_errno(msgget(arg1, arg2));
#ifdef TARGET_NR_msgrcv
    case TARGET_NR_msgrcv:
        ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
#ifdef TARGET_NR_msgsnd
    case TARGET_NR_msgsnd:
        ret = do_msgsnd(arg1, arg2, arg3, arg4);
    case TARGET_NR_fsync:
        ret = get_errno(fsync(arg1));
    case TARGET_NR_clone:
#if defined(TARGET_SH4)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
#elif defined(TARGET_CRIS)
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg4, arg5));
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
#ifdef __NR_exit_group
        /* new thread calls */
    case TARGET_NR_exit_group:
        gdb_exit(cpu_env, arg1);
        ret = get_errno(exit_group(arg1));
    case TARGET_NR_setdomainname:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(setdomainname(p, arg2));
        unlock_user(p, arg1, 0);
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
        struct new_utsname *buf;
        if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
        ret = get_errno(sys_uname(buf));
        if (!is_error(ret)) {
            /* Overwrite the native machine name with whatever is being
               emulated. */
            strcpy(buf->machine, UNAME_MACHINE);
            /* Allow the user to override the reported release. */
            if (qemu_uname_release && *qemu_uname_release)
                strcpy(buf->release, qemu_uname_release);
            unlock_user_struct(buf, arg1, 1);
    case TARGET_NR_modify_ldt:
        ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
#if !defined(TARGET_X86_64)
    case TARGET_NR_vm86old:
    case TARGET_NR_vm86:
        ret = do_vm86(cpu_env, arg1, arg2);
    case TARGET_NR_adjtimex:
#ifdef TARGET_NR_create_module
    case TARGET_NR_create_module:
    case TARGET_NR_init_module:
    case TARGET_NR_delete_module:
#ifdef TARGET_NR_get_kernel_syms
    case TARGET_NR_get_kernel_syms:
    case TARGET_NR_quotactl:
    case TARGET_NR_getpgid:
        ret = get_errno(getpgid(arg1));
    case TARGET_NR_fchdir:
        ret = get_errno(fchdir(arg1));
#ifdef TARGET_NR_bdflush /* not on x86_64 */
    case TARGET_NR_bdflush:
#ifdef TARGET_NR_sysfs
    case TARGET_NR_sysfs:
    case TARGET_NR_personality:
        ret = get_errno(personality(arg1));
#ifdef TARGET_NR_afs_syscall
    case TARGET_NR_afs_syscall:
#ifdef TARGET_NR__llseek /* Not on alpha */
    case TARGET_NR__llseek:
#if defined (__x86_64__)
        ret = get_errno(lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5));
        if (put_user_s64(ret, arg4))
        ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
        if (put_user_s64(res, arg4))
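    /* getdents: with a 32-bit target on a 64-bit host each host linux_dirent
     * is repacked into the smaller target_dirent layout; otherwise the
     * records are only byte-swapped in place. */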
    case TARGET_NR_getdents:
#if TARGET_ABI_BITS != 32
#elif TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
        struct target_dirent *target_dirp;
        struct linux_dirent *dirp;
        abi_long count = arg3;
        dirp = malloc(count);
            ret = -TARGET_ENOMEM;
        ret = get_errno(sys_getdents(arg1, dirp, count));
        if (!is_error(ret)) {
            struct linux_dirent *de;
            struct target_dirent *tde;
            int reclen, treclen;
            int count1, tnamelen;
            if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                reclen = de->d_reclen;
                treclen = reclen - (2 * (sizeof(long) - sizeof(abi_long)));
                tde->d_reclen = tswap16(treclen);
                tde->d_ino = tswapl(de->d_ino);
                tde->d_off = tswapl(de->d_off);
                tnamelen = treclen - (2 * sizeof(abi_long) + 2);
                /* XXX: may not be correct */
                pstrcpy(tde->d_name, tnamelen, de->d_name);
                de = (struct linux_dirent *)((char *)de + reclen);
                tde = (struct target_dirent *)((char *)tde + treclen);
            unlock_user(target_dirp, arg2, ret);
        struct linux_dirent *dirp;
        abi_long count = arg3;
        if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
        ret = get_errno(sys_getdents(arg1, dirp, count));
        if (!is_error(ret)) {
            struct linux_dirent *de;
                reclen = de->d_reclen;
                de->d_reclen = tswap16(reclen);
                tswapls(&de->d_ino);
                tswapls(&de->d_off);
                de = (struct linux_dirent *)((char *)de + reclen);
            unlock_user(dirp, arg2, ret);
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
    case TARGET_NR_getdents64:
        struct linux_dirent64 *dirp;
        abi_long count = arg3;
        if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
        ret = get_errno(sys_getdents64(arg1, dirp, count));
        if (!is_error(ret)) {
            struct linux_dirent64 *de;
                reclen = de->d_reclen;
                de->d_reclen = tswap16(reclen);
                tswap64s((uint64_t *)&de->d_ino);
                tswap64s((uint64_t *)&de->d_off);
                de = (struct linux_dirent64 *)((char *)de + reclen);
            unlock_user(dirp, arg2, ret);
#endif /* TARGET_NR_getdents64 */
#ifdef TARGET_NR__newselect
    case TARGET_NR__newselect:
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
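    /* poll: convert the target_pollfd array into host struct pollfd entries,
     * call poll(), then copy revents back to the guest. */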
#ifdef TARGET_NR_poll
    case TARGET_NR_poll:
        struct target_pollfd *target_pfd;
        unsigned int nfds = arg2;
        target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
        pfd = alloca(sizeof(struct pollfd) * nfds);
        for (i = 0; i < nfds; i++) {
            pfd[i].fd = tswap32(target_pfd[i].fd);
            pfd[i].events = tswap16(target_pfd[i].events);
        ret = get_errno(poll(pfd, nfds, timeout));
        if (!is_error(ret)) {
            for (i = 0; i < nfds; i++) {
                target_pfd[i].revents = tswap16(pfd[i].revents);
            ret += nfds * (sizeof(struct target_pollfd)
                           - sizeof(struct pollfd));
        unlock_user(target_pfd, arg1, ret);
    case TARGET_NR_flock:
        /* NOTE: the flock constant seems to be the same for every
           Linux platform */
        ret = get_errno(flock(arg1, arg2));
    case TARGET_NR_readv:
        vec = alloca(count * sizeof(struct iovec));
        if (lock_iovec(VERIFY_WRITE, vec, arg2, count, 0) < 0)
        ret = get_errno(readv(arg1, vec, count));
        unlock_iovec(vec, arg2, count, 1);
    case TARGET_NR_writev:
        vec = alloca(count * sizeof(struct iovec));
        if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
        ret = get_errno(writev(arg1, vec, count));
        unlock_iovec(vec, arg2, count, 0);
    case TARGET_NR_getsid:
        ret = get_errno(getsid(arg1));
#if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
    case TARGET_NR_fdatasync:
        ret = get_errno(fdatasync(arg1));
    case TARGET_NR__sysctl:
        /* We don't implement this, but ENOTDIR is always a safe
           return value. */
        ret = -TARGET_ENOTDIR;
    case TARGET_NR_sched_setparam:
        struct sched_param *target_schp;
        struct sched_param schp;
        if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
        schp.sched_priority = tswap32(target_schp->sched_priority);
        unlock_user_struct(target_schp, arg2, 0);
        ret = get_errno(sched_setparam(arg1, &schp));
    case TARGET_NR_sched_getparam:
        struct sched_param *target_schp;
        struct sched_param schp;
        ret = get_errno(sched_getparam(arg1, &schp));
        if (!is_error(ret)) {
            if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
            target_schp->sched_priority = tswap32(schp.sched_priority);
            unlock_user_struct(target_schp, arg2, 1);
    case TARGET_NR_sched_setscheduler:
        struct sched_param *target_schp;
        struct sched_param schp;
        if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
        schp.sched_priority = tswap32(target_schp->sched_priority);
        unlock_user_struct(target_schp, arg3, 0);
        ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
    case TARGET_NR_sched_getscheduler:
        ret = get_errno(sched_getscheduler(arg1));
    case TARGET_NR_sched_yield:
        ret = get_errno(sched_yield());
    case TARGET_NR_sched_get_priority_max:
        ret = get_errno(sched_get_priority_max(arg1));
    case TARGET_NR_sched_get_priority_min:
        ret = get_errno(sched_get_priority_min(arg1));
    case TARGET_NR_sched_rr_get_interval:
        ret = get_errno(sched_rr_get_interval(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
    case TARGET_NR_nanosleep:
        struct timespec req, rem;
        target_to_host_timespec(&req, arg1);
        ret = get_errno(nanosleep(&req, &rem));
        if (is_error(ret) && arg2) {
            host_to_target_timespec(arg2, &rem);
#ifdef TARGET_NR_query_module
    case TARGET_NR_query_module:
#ifdef TARGET_NR_nfsservctl
    case TARGET_NR_nfsservctl:
    case TARGET_NR_prctl:
        case PR_GET_PDEATHSIG:
            ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
            if (!is_error(ret) && arg2
                && put_user_ual(deathsig, arg2))
            ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
#ifdef TARGET_NR_arch_prctl
    case TARGET_NR_arch_prctl:
#if defined(TARGET_I386) && !defined(TARGET_ABI32)
        ret = do_arch_prctl(cpu_env, arg1, arg2);
#ifdef TARGET_NR_pread
    case TARGET_NR_pread:
        if (((CPUARMState *)cpu_env)->eabi)
        if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
        ret = get_errno(pread(arg1, p, arg3, arg4));
        unlock_user(p, arg2, ret);
    case TARGET_NR_pwrite:
        if (((CPUARMState *)cpu_env)->eabi)
        if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
        ret = get_errno(pwrite(arg1, p, arg3, arg4));
        unlock_user(p, arg2, 0);
#ifdef TARGET_NR_pread64
    case TARGET_NR_pread64:
        if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
        ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, ret);
    case TARGET_NR_pwrite64:
        if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
        ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, 0);
    case TARGET_NR_getcwd:
        if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
        ret = get_errno(sys_getcwd1(p, arg2));
        unlock_user(p, arg1, ret);
    case TARGET_NR_capget:
    case TARGET_NR_capset:
    case TARGET_NR_sigaltstack:
#if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
    defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA)
        ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUState *)cpu_env));
    case TARGET_NR_sendfile:
#ifdef TARGET_NR_getpmsg
    case TARGET_NR_getpmsg:
#ifdef TARGET_NR_putpmsg
    case TARGET_NR_putpmsg:
#ifdef TARGET_NR_vfork
    case TARGET_NR_vfork:
        ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
                                0, 0, 0, 0));
#ifdef TARGET_NR_ugetrlimit
    case TARGET_NR_ugetrlimit:
        ret = get_errno(getrlimit(arg1, &rlim));
        if (!is_error(ret)) {
            struct target_rlimit *target_rlim;
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
            target_rlim->rlim_cur = tswapl(rlim.rlim_cur);
            target_rlim->rlim_max = tswapl(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
#ifdef TARGET_NR_truncate64
    case TARGET_NR_truncate64:
        if (!(p = lock_user_string(arg1)))
        ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_ftruncate64
    case TARGET_NR_ftruncate64:
        ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
#ifdef TARGET_NR_stat64
    case TARGET_NR_stat64:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        ret = host_to_target_stat64(cpu_env, arg2, &st);
#ifdef TARGET_NR_lstat64
    case TARGET_NR_lstat64:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        ret = host_to_target_stat64(cpu_env, arg2, &st);
#ifdef TARGET_NR_fstat64
    case TARGET_NR_fstat64:
        ret = get_errno(fstat(arg1, &st));
        ret = host_to_target_stat64(cpu_env, arg2, &st);
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
    (defined(__NR_fstatat64) || defined(__NR_newfstatat))
#ifdef TARGET_NR_fstatat64
    case TARGET_NR_fstatat64:
#ifdef TARGET_NR_newfstatat
    case TARGET_NR_newfstatat:
        if (!(p = lock_user_string(arg2)))
#ifdef __NR_fstatat64
        ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
        ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
        ret = host_to_target_stat64(cpu_env, arg3, &st);
    case TARGET_NR_lchown:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
    case TARGET_NR_getuid:
        ret = get_errno(high2lowuid(getuid()));
    case TARGET_NR_getgid:
        ret = get_errno(high2lowgid(getgid()));
    case TARGET_NR_geteuid:
        ret = get_errno(high2lowuid(geteuid()));
    case TARGET_NR_getegid:
        ret = get_errno(high2lowgid(getegid()));
    case TARGET_NR_setreuid:
        ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
    case TARGET_NR_setregid:
        ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
    case TARGET_NR_getgroups:
        int gidsetsize = arg1;
        uint16_t *target_grouplist;
        grouplist = alloca(gidsetsize * sizeof(gid_t));
        ret = get_errno(getgroups(gidsetsize, grouplist));
        if (gidsetsize == 0)
        if (!is_error(ret)) {
            target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
            if (!target_grouplist)
            for (i = 0; i < ret; i++)
                target_grouplist[i] = tswap16(grouplist[i]);
            unlock_user(target_grouplist, arg2, gidsetsize * 2);
    case TARGET_NR_setgroups:
        int gidsetsize = arg1;
        uint16_t *target_grouplist;
        grouplist = alloca(gidsetsize * sizeof(gid_t));
        target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
        if (!target_grouplist) {
            ret = -TARGET_EFAULT;
        for (i = 0; i < gidsetsize; i++)
            grouplist[i] = tswap16(target_grouplist[i]);
        unlock_user(target_grouplist, arg2, 0);
        ret = get_errno(setgroups(gidsetsize, grouplist));
    case TARGET_NR_fchown:
        ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
#if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
    case TARGET_NR_fchownat:
        if (!(p = lock_user_string(arg2)))
        ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3), low2highgid(arg4), arg5));
        unlock_user(p, arg2, 0);
#ifdef TARGET_NR_setresuid
    case TARGET_NR_setresuid:
        ret = get_errno(setresuid(low2highuid(arg1),
                                  low2highuid(arg2),
                                  low2highuid(arg3)));
#ifdef TARGET_NR_getresuid
    case TARGET_NR_getresuid:
        uid_t ruid, euid, suid;
        ret = get_errno(getresuid(&ruid, &euid, &suid));
        if (!is_error(ret)) {
            if (put_user_u16(high2lowuid(ruid), arg1)
                || put_user_u16(high2lowuid(euid), arg2)
                || put_user_u16(high2lowuid(suid), arg3))
#ifdef TARGET_NR_getresgid
    case TARGET_NR_setresgid:
        ret = get_errno(setresgid(low2highgid(arg1),
                                  low2highgid(arg2),
                                  low2highgid(arg3)));
#ifdef TARGET_NR_getresgid
    case TARGET_NR_getresgid:
        gid_t rgid, egid, sgid;
        ret = get_errno(getresgid(&rgid, &egid, &sgid));
        if (!is_error(ret)) {
            if (put_user_u16(high2lowgid(rgid), arg1)
                || put_user_u16(high2lowgid(egid), arg2)
                || put_user_u16(high2lowgid(sgid), arg3))
    case TARGET_NR_chown:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
    case TARGET_NR_setuid:
        ret = get_errno(setuid(low2highuid(arg1)));
    case TARGET_NR_setgid:
        ret = get_errno(setgid(low2highgid(arg1)));
    case TARGET_NR_setfsuid:
        ret = get_errno(setfsuid(arg1));
    case TARGET_NR_setfsgid:
        ret = get_errno(setfsgid(arg1));
#endif /* USE_UID16 */
#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        ret = get_errno(getuid());
#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
        /* Alpha specific */
    case TARGET_NR_getxuid:
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
        ret = get_errno(getuid());
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
        /* Alpha specific */
    case TARGET_NR_getxgid:
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
        ret = get_errno(getgid());
#ifdef TARGET_NR_getgid32
    case TARGET_NR_getgid32:
        ret = get_errno(getgid());
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        ret = get_errno(geteuid());
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        ret = get_errno(getegid());
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        ret = get_errno(setreuid(arg1, arg2));
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        ret = get_errno(setregid(arg1, arg2));
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        int gidsetsize = arg1;
        uint32_t *target_grouplist;
        grouplist = alloca(gidsetsize * sizeof(gid_t));
        ret = get_errno(getgroups(gidsetsize, grouplist));
        if (gidsetsize == 0)
        if (!is_error(ret)) {
            target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
            if (!target_grouplist) {
                ret = -TARGET_EFAULT;
            for (i = 0; i < ret; i++)
                target_grouplist[i] = tswap32(grouplist[i]);
            unlock_user(target_grouplist, arg2, gidsetsize * 4);
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        int gidsetsize = arg1;
        uint32_t *target_grouplist;
        grouplist = alloca(gidsetsize * sizeof(gid_t));
        target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
        if (!target_grouplist) {
            ret = -TARGET_EFAULT;
        for (i = 0; i < gidsetsize; i++)
            grouplist[i] = tswap32(target_grouplist[i]);
        unlock_user(target_grouplist, arg2, 0);
        ret = get_errno(setgroups(gidsetsize, grouplist));
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        ret = get_errno(fchown(arg1, arg2, arg3));
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        ret = get_errno(setresuid(arg1, arg2, arg3));
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
        uid_t ruid, euid, suid;
        ret = get_errno(getresuid(&ruid, &euid, &suid));
        if (!is_error(ret)) {
            if (put_user_u32(ruid, arg1)
                || put_user_u32(euid, arg2)
                || put_user_u32(suid, arg3))
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        ret = get_errno(setresgid(arg1, arg2, arg3));
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
        gid_t rgid, egid, sgid;
        ret = get_errno(getresgid(&rgid, &egid, &sgid));
        if (!is_error(ret)) {
            if (put_user_u32(rgid, arg1)
                || put_user_u32(egid, arg2)
                || put_user_u32(sgid, arg3))
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1)))
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        ret = get_errno(setuid(arg1));
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        ret = get_errno(setgid(arg1));
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        ret = get_errno(setfsuid(arg1));
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        ret = get_errno(setfsgid(arg1));
    case TARGET_NR_pivot_root:
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        ret = -TARGET_EFAULT;
        if (!(a = lock_user(VERIFY_READ, arg1, arg2, 0)))
        if (!(p = lock_user_string(arg3)))
        ret = get_errno(mincore(a, arg2, p));
        unlock_user(p, arg3, ret);
        unlock_user(a, arg1, 0);
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        /* arm_fadvise64_64 looks like fadvise64_64 but
         * with different argument order */
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
        /* This is a hint, so ignoring and returning success is ok. */
#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
           This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok. */
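    /* fcntl64 (32-bit ABIs): F_GETLK64/F_SETLK64/F_SETLKW64 need a flock64
     * conversion, with a separate layout for ARM EABI guests; all other
     * commands are handled by do_fcntl(). */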
#if TARGET_ABI_BITS == 32
    case TARGET_NR_fcntl64:
        struct target_flock64 *target_fl;
        struct target_eabi_flock64 *target_efl;
        case TARGET_F_GETLK64:
        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
        case TARGET_F_GETLK64:
            if (((CPUARMState *)cpu_env)->eabi) {
                if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
                fl.l_type = tswap16(target_efl->l_type);
                fl.l_whence = tswap16(target_efl->l_whence);
                fl.l_start = tswap64(target_efl->l_start);
                fl.l_len = tswap64(target_efl->l_len);
                fl.l_pid = tswapl(target_efl->l_pid);
                unlock_user_struct(target_efl, arg3, 0);
                if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
                fl.l_type = tswap16(target_fl->l_type);
                fl.l_whence = tswap16(target_fl->l_whence);
                fl.l_start = tswap64(target_fl->l_start);
                fl.l_len = tswap64(target_fl->l_len);
                fl.l_pid = tswapl(target_fl->l_pid);
                unlock_user_struct(target_fl, arg3, 0);
            ret = get_errno(fcntl(arg1, cmd, &fl));
            if (((CPUARMState *)cpu_env)->eabi) {
                if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
                target_efl->l_type = tswap16(fl.l_type);
                target_efl->l_whence = tswap16(fl.l_whence);
                target_efl->l_start = tswap64(fl.l_start);
                target_efl->l_len = tswap64(fl.l_len);
                target_efl->l_pid = tswapl(fl.l_pid);
                unlock_user_struct(target_efl, arg3, 1);
                if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
                target_fl->l_type = tswap16(fl.l_type);
                target_fl->l_whence = tswap16(fl.l_whence);
                target_fl->l_start = tswap64(fl.l_start);
                target_fl->l_len = tswap64(fl.l_len);
                target_fl->l_pid = tswapl(fl.l_pid);
                unlock_user_struct(target_fl, arg3, 1);
        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
            if (((CPUARMState *)cpu_env)->eabi) {
                if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
                fl.l_type = tswap16(target_efl->l_type);
                fl.l_whence = tswap16(target_efl->l_whence);
                fl.l_start = tswap64(target_efl->l_start);
                fl.l_len = tswap64(target_efl->l_len);
                fl.l_pid = tswapl(target_efl->l_pid);
                unlock_user_struct(target_efl, arg3, 0);
                if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
                fl.l_type = tswap16(target_fl->l_type);
                fl.l_whence = tswap16(target_fl->l_whence);
                fl.l_start = tswap64(target_fl->l_start);
                fl.l_len = tswap64(target_fl->l_len);
                fl.l_pid = tswapl(target_fl->l_pid);
                unlock_user_struct(target_fl, arg3, 0);
            ret = get_errno(fcntl(arg1, cmd, &fl));
        ret = do_fcntl(arg1, cmd, arg3);
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
#ifdef TARGET_NR_security
    case TARGET_NR_security:
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        ret = TARGET_PAGE_SIZE;
    case TARGET_NR_gettid:
        ret = get_errno(gettid());
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32
        if (((CPUARMState *)cpu_env)->eabi)
        ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
        ret = get_errno(readahead(arg1, arg2, arg3));
#ifdef TARGET_NR_setxattr
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
    case TARGET_NR_fsetxattr:
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
    case TARGET_NR_fgetxattr:
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    case TARGET_NR_flistxattr:
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
    case TARGET_NR_fremovexattr:
        goto unimplemented_nowarn;
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
        ((CPUMIPSState *)cpu_env)->tls_value = arg1;
#elif defined(TARGET_CRIS)
            ret = -TARGET_EINVAL;
            ((CPUCRISState *)cpu_env)->pregs[PR_PID] = arg1;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
        ret = do_set_thread_area(cpu_env, arg1);
        goto unimplemented_nowarn;
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        ret = do_get_thread_area(cpu_env, arg1);
        goto unimplemented_nowarn;
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        goto unimplemented_nowarn;
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
        target_to_host_timespec(&ts, arg3);
        ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
        host_to_target_timespec(arg4, &ts);
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        ret = get_errno(set_tid_address((int *)g2h(arg1)));
#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
    case TARGET_NR_tkill:
        ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
    case TARGET_NR_tgkill:
        ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
                        target_to_host_signal(arg3)));
#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
        goto unimplemented_nowarn;
#if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
    case TARGET_NR_utimensat:
        struct timespec ts[2];
        target_to_host_timespec(ts, arg3);
        target_to_host_timespec(ts + 1, arg3 + sizeof(struct target_timespec));
        ret = get_errno(sys_utimensat(arg1, NULL, ts, arg4));
        if (!(p = lock_user_string(arg2))) {
            ret = -TARGET_EFAULT;
        ret = get_errno(sys_utimensat(arg1, path(p), ts, arg4));
        unlock_user(p, arg2, 0);
#if defined(USE_NPTL)
    case TARGET_NR_futex:
        ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(sys_inotify_init());
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
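    /* POSIX message queues: mq_attr and timespec arguments are converted
     * between guest and host layouts around the host mq_* calls. */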
#ifdef TARGET_NR_mq_open
    case TARGET_NR_mq_open:
        struct mq_attr posix_mq_attr;
        p = lock_user_string(arg1 - 1);
        copy_from_user_mq_attr(&posix_mq_attr, arg4);
        ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
        unlock_user(p, arg1, 0);
    case TARGET_NR_mq_unlink:
        p = lock_user_string(arg1 - 1);
        ret = get_errno(mq_unlink(p));
        unlock_user(p, arg1, 0);
    case TARGET_NR_mq_timedsend:
        p = lock_user(VERIFY_READ, arg2, arg3, 1);
        target_to_host_timespec(&ts, arg5);
        ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
        host_to_target_timespec(arg5, &ts);
        ret = get_errno(mq_send(arg1, p, arg3, arg4));
        unlock_user(p, arg2, arg3);
    case TARGET_NR_mq_timedreceive:
        p = lock_user(VERIFY_READ, arg2, arg3, 1);
        target_to_host_timespec(&ts, arg5);
        ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
        host_to_target_timespec(arg5, &ts);
        ret = get_errno(mq_receive(arg1, p, arg3, &prio));
        unlock_user(p, arg2, arg3);
        put_user_u32(prio, arg4);
        /* Not implemented for now... */
        /* case TARGET_NR_mq_notify: */
    case TARGET_NR_mq_getsetattr:
        struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
        ret = mq_getattr(arg1, &posix_mq_attr_out);
        copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
        copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
        ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
        gemu_log("qemu: Unsupported syscall: %d\n", num);
#if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
    unimplemented_nowarn:
        ret = -TARGET_ENOSYS;
    gemu_log(" = %ld\n", ret);
    print_syscall_ret(num, ret);
    ret = -TARGET_EFAULT;