/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include <sys/types.h>
#include <sys/mount.h>
#include <sys/prctl.h>
#include <sys/resource.h>
int __clone2(int (*fn)(void *), void *child_stack_base,
             size_t stack_size, int flags, void *arg, ...);
#include <sys/socket.h>
#include <sys/times.h>
#include <sys/statfs.h>
#include <sys/sysinfo.h>
#include <sys/utsname.h>
//#include <sys/user.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <linux/wireless.h>
#include "qemu-common.h"
#include <sys/eventfd.h>
#include <sys/epoll.h>
#include "qemu-xattr.h"
#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/utsname.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/mtio.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include "linux_loop.h"
#include "cpu-uname.h"
#if defined(CONFIG_USE_NPTL)
#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
    CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
#else
/* XXX: Hardcode the above values.  */
#define CLONE_NPTL_FLAGS2 0
#endif

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
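/* The _syscallN() macros above expand into small static wrappers that invoke
 * the host kernel directly through syscall(2), so the emulator does not depend
 * on the host C library exposing a wrapper for every syscall it needs. */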
#define __NR_sys_uname __NR_uname
#define __NR_sys_faccessat __NR_faccessat
#define __NR_sys_fchmodat __NR_fchmodat
#define __NR_sys_fchownat __NR_fchownat
#define __NR_sys_fstatat64 __NR_fstatat64
#define __NR_sys_futimesat __NR_futimesat
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_linkat __NR_linkat
#define __NR_sys_mkdirat __NR_mkdirat
#define __NR_sys_mknodat __NR_mknodat
#define __NR_sys_newfstatat __NR_newfstatat
#define __NR_sys_openat __NR_openat
#define __NR_sys_readlinkat __NR_readlinkat
#define __NR_sys_renameat __NR_renameat
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_symlinkat __NR_symlinkat
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_tgkill __NR_tgkill
#define __NR_sys_tkill __NR_tkill
#define __NR_sys_unlinkat __NR_unlinkat
#define __NR_sys_utimensat __NR_utimensat
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
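/* Aliasing each number as __NR_sys_foo lets the wrappers be generated under
 * the name sys_foo, avoiding clashes with prototypes that the host libc
 * headers may already declare for the plain name. */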
#if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
    defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

_syscall0(int, gettid)
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    return -ENOSYS;
}

_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
_syscall2(int, sys_getpriority, int, which, int, who);
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
_syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
#endif
#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
_syscall2(int,sys_tkill,int,tid,int,sig)
#endif
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(CONFIG_USE_NPTL)
#if defined(TARGET_NR_futex) && defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg)
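/* Translation table for open(2)/fcntl(2) flag bits: each entry pairs a target
 * mask and value with the corresponding host mask and value, so flag words can
 * be converted in either direction. */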
static bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
};
#define COPY_UTSNAME_FIELD(dest, src) \
  do { \
      /* __NEW_UTS_LEN doesn't include terminating null */ \
      (void) strncpy((dest), (src), __NEW_UTS_LEN); \
      (dest)[__NEW_UTS_LEN] = '\0'; \
  } while (0)

static int sys_uname(struct new_utsname *buf)
{
  struct utsname uts_buf;

  if (uname(&uts_buf) < 0)
      return (-1);

  /*
   * Just in case these have some differences, we
   * translate utsname to new_utsname (which is the
   * struct linux kernel uses).
   */

  memset(buf, 0, sizeof(*buf));
  COPY_UTSNAME_FIELD(buf->sysname, uts_buf.sysname);
  COPY_UTSNAME_FIELD(buf->nodename, uts_buf.nodename);
  COPY_UTSNAME_FIELD(buf->release, uts_buf.release);
  COPY_UTSNAME_FIELD(buf->version, uts_buf.version);
  COPY_UTSNAME_FIELD(buf->machine, uts_buf.machine);
  COPY_UTSNAME_FIELD(buf->domainname, uts_buf.domainname);
  return (0);
}
#undef COPY_UTSNAME_FIELD
static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}
/*
 * Host system seems to have atfile syscall stubs available.  We
 * now enable them one by one as specified by target syscall_nr.h.
 */

#ifdef TARGET_NR_faccessat
static int sys_faccessat(int dirfd, const char *pathname, int mode)
{
  return (faccessat(dirfd, pathname, mode, 0));
}
#endif
#ifdef TARGET_NR_fchmodat
static int sys_fchmodat(int dirfd, const char *pathname, mode_t mode)
{
  return (fchmodat(dirfd, pathname, mode, 0));
}
#endif
#if defined(TARGET_NR_fchownat)
static int sys_fchownat(int dirfd, const char *pathname, uid_t owner,
                        gid_t group, int flags)
{
  return (fchownat(dirfd, pathname, owner, group, flags));
}
#endif
#ifdef __NR_fstatat64
static int sys_fstatat64(int dirfd, const char *pathname, struct stat *buf,
                         int flags)
{
  return (fstatat(dirfd, pathname, buf, flags));
}
#endif
#ifdef __NR_newfstatat
static int sys_newfstatat(int dirfd, const char *pathname, struct stat *buf,
                          int flags)
{
  return (fstatat(dirfd, pathname, buf, flags));
}
#endif
#ifdef TARGET_NR_futimesat
static int sys_futimesat(int dirfd, const char *pathname,
                         const struct timeval times[2])
{
  return (futimesat(dirfd, pathname, times));
}
#endif
#ifdef TARGET_NR_linkat
static int sys_linkat(int olddirfd, const char *oldpath,
                      int newdirfd, const char *newpath, int flags)
{
  return (linkat(olddirfd, oldpath, newdirfd, newpath, flags));
}
#endif
#ifdef TARGET_NR_mkdirat
static int sys_mkdirat(int dirfd, const char *pathname, mode_t mode)
{
  return (mkdirat(dirfd, pathname, mode));
}
#endif
#ifdef TARGET_NR_mknodat
static int sys_mknodat(int dirfd, const char *pathname, mode_t mode,
                       dev_t dev)
{
  return (mknodat(dirfd, pathname, mode, dev));
}
#endif
#ifdef TARGET_NR_openat
static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
{
  /*
   * open(2) has extra parameter 'mode' when called with
   * the O_CREAT flag.
   */
  if ((flags & O_CREAT) != 0) {
      return (openat(dirfd, pathname, flags, mode));
  }
  return (openat(dirfd, pathname, flags));
}
#endif
#ifdef TARGET_NR_readlinkat
static int sys_readlinkat(int dirfd, const char *pathname, char *buf, size_t bufsiz)
{
  return (readlinkat(dirfd, pathname, buf, bufsiz));
}
#endif
#ifdef TARGET_NR_renameat
static int sys_renameat(int olddirfd, const char *oldpath,
                        int newdirfd, const char *newpath)
{
  return (renameat(olddirfd, oldpath, newdirfd, newpath));
}
#endif
#ifdef TARGET_NR_symlinkat
static int sys_symlinkat(const char *oldpath, int newdirfd, const char *newpath)
{
  return (symlinkat(oldpath, newdirfd, newpath));
}
#endif
#ifdef TARGET_NR_unlinkat
static int sys_unlinkat(int dirfd, const char *pathname, int flags)
{
  return (unlinkat(dirfd, pathname, flags));
}
#endif
#else /* !CONFIG_ATFILE */

/*
 * Try direct syscalls instead
 */
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
_syscall3(int,sys_faccessat,int,dirfd,const char *,pathname,int,mode)
#endif
#if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
_syscall3(int,sys_fchmodat,int,dirfd,const char *,pathname, mode_t,mode)
#endif
#if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
_syscall5(int,sys_fchownat,int,dirfd,const char *,pathname,
          uid_t,owner,gid_t,group,int,flags)
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
        defined(__NR_fstatat64)
_syscall4(int,sys_fstatat64,int,dirfd,const char *,pathname,
          struct stat *,buf,int,flags)
#endif
#if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
_syscall3(int,sys_futimesat,int,dirfd,const char *,pathname,
          const struct timeval *,times)
#endif
#if (defined(TARGET_NR_newfstatat) || defined(TARGET_NR_fstatat64) ) && \
        defined(__NR_newfstatat)
_syscall4(int,sys_newfstatat,int,dirfd,const char *,pathname,
          struct stat *,buf,int,flags)
#endif
#if defined(TARGET_NR_linkat) && defined(__NR_linkat)
_syscall5(int,sys_linkat,int,olddirfd,const char *,oldpath,
          int,newdirfd,const char *,newpath,int,flags)
#endif
#if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
_syscall3(int,sys_mkdirat,int,dirfd,const char *,pathname,mode_t,mode)
#endif
#if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
_syscall4(int,sys_mknodat,int,dirfd,const char *,pathname,
          mode_t,mode,dev_t,dev)
#endif
#if defined(TARGET_NR_openat) && defined(__NR_openat)
_syscall4(int,sys_openat,int,dirfd,const char *,pathname,int,flags,mode_t,mode)
#endif
#if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
_syscall4(int,sys_readlinkat,int,dirfd,const char *,pathname,
          char *,buf,size_t,bufsize)
#endif
#if defined(TARGET_NR_renameat) && defined(__NR_renameat)
_syscall4(int,sys_renameat,int,olddirfd,const char *,oldpath,
          int,newdirfd,const char *,newpath)
#endif
#if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
_syscall3(int,sys_symlinkat,const char *,oldpath,
          int,newdirfd,const char *,newpath)
#endif
#if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
_syscall3(int,sys_unlinkat,int,dirfd,const char *,pathname,int,flags)
#endif

#endif /* CONFIG_ATFILE */
#ifdef CONFIG_UTIMENSAT
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
#else
#if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#endif
#endif /* CONFIG_UTIMENSAT */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
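/* For the optional syscalls below, a missing host definition of the syscall
 * number is replaced with -1; the wrapper then still compiles and the call
 * simply fails at run time instead of breaking the build. */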
#if defined(TARGET_NR_ppoll)
#ifndef __NR_ppoll
# define __NR_ppoll -1
#endif
#define __NR_sys_ppoll __NR_ppoll
_syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
          struct timespec *, timeout, const __sigset_t *, sigmask,
          size_t, sigsetsize)
#endif

#if defined(TARGET_NR_pselect6)
#ifndef __NR_pselect6
# define __NR_pselect6 -1
#endif
#define __NR_sys_pselect6 __NR_pselect6
_syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
          fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
#endif
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};

_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif

extern int personality(int);
extern int flock(int, int);
extern int setfsuid(int);
extern int setfsgid(int);
extern int setgroups(int, gid_t *);
/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EIDRM]            = TARGET_EIDRM,
    [ECHRNG]           = TARGET_ECHRNG,
    [EL2NSYNC]         = TARGET_EL2NSYNC,
    [EL3HLT]           = TARGET_EL3HLT,
    [EL3RST]           = TARGET_EL3RST,
    [ELNRNG]           = TARGET_ELNRNG,
    [EUNATCH]          = TARGET_EUNATCH,
    [ENOCSI]           = TARGET_ENOCSI,
    [EL2HLT]           = TARGET_EL2HLT,
    [EDEADLK]          = TARGET_EDEADLK,
    [ENOLCK]           = TARGET_ENOLCK,
    [EBADE]            = TARGET_EBADE,
    [EBADR]            = TARGET_EBADR,
    [EXFULL]           = TARGET_EXFULL,
    [ENOANO]           = TARGET_ENOANO,
    [EBADRQC]          = TARGET_EBADRQC,
    [EBADSLT]          = TARGET_EBADSLT,
    [EBFONT]           = TARGET_EBFONT,
    [ENOSTR]           = TARGET_ENOSTR,
    [ENODATA]          = TARGET_ENODATA,
    [ETIME]            = TARGET_ETIME,
    [ENOSR]            = TARGET_ENOSR,
    [ENONET]           = TARGET_ENONET,
    [ENOPKG]           = TARGET_ENOPKG,
    [EREMOTE]          = TARGET_EREMOTE,
    [ENOLINK]          = TARGET_ENOLINK,
    [EADV]             = TARGET_EADV,
    [ESRMNT]           = TARGET_ESRMNT,
    [ECOMM]            = TARGET_ECOMM,
    [EPROTO]           = TARGET_EPROTO,
    [EDOTDOT]          = TARGET_EDOTDOT,
    [EMULTIHOP]        = TARGET_EMULTIHOP,
    [EBADMSG]          = TARGET_EBADMSG,
    [ENAMETOOLONG]     = TARGET_ENAMETOOLONG,
    [EOVERFLOW]        = TARGET_EOVERFLOW,
    [ENOTUNIQ]         = TARGET_ENOTUNIQ,
    [EBADFD]           = TARGET_EBADFD,
    [EREMCHG]          = TARGET_EREMCHG,
    [ELIBACC]          = TARGET_ELIBACC,
    [ELIBBAD]          = TARGET_ELIBBAD,
    [ELIBSCN]          = TARGET_ELIBSCN,
    [ELIBMAX]          = TARGET_ELIBMAX,
    [ELIBEXEC]         = TARGET_ELIBEXEC,
    [EILSEQ]           = TARGET_EILSEQ,
    [ENOSYS]           = TARGET_ENOSYS,
    [ELOOP]            = TARGET_ELOOP,
    [ERESTART]         = TARGET_ERESTART,
    [ESTRPIPE]         = TARGET_ESTRPIPE,
    [ENOTEMPTY]        = TARGET_ENOTEMPTY,
    [EUSERS]           = TARGET_EUSERS,
    [ENOTSOCK]         = TARGET_ENOTSOCK,
    [EDESTADDRREQ]     = TARGET_EDESTADDRREQ,
    [EMSGSIZE]         = TARGET_EMSGSIZE,
    [EPROTOTYPE]       = TARGET_EPROTOTYPE,
    [ENOPROTOOPT]      = TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT]  = TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT]  = TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP]       = TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT]     = TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT]     = TARGET_EAFNOSUPPORT,
    [EADDRINUSE]       = TARGET_EADDRINUSE,
    [EADDRNOTAVAIL]    = TARGET_EADDRNOTAVAIL,
    [ENETDOWN]         = TARGET_ENETDOWN,
    [ENETUNREACH]      = TARGET_ENETUNREACH,
    [ENETRESET]        = TARGET_ENETRESET,
    [ECONNABORTED]     = TARGET_ECONNABORTED,
    [ECONNRESET]       = TARGET_ECONNRESET,
    [ENOBUFS]          = TARGET_ENOBUFS,
    [EISCONN]          = TARGET_EISCONN,
    [ENOTCONN]         = TARGET_ENOTCONN,
    [EUCLEAN]          = TARGET_EUCLEAN,
    [ENOTNAM]          = TARGET_ENOTNAM,
    [ENAVAIL]          = TARGET_ENAVAIL,
    [EISNAM]           = TARGET_EISNAM,
    [EREMOTEIO]        = TARGET_EREMOTEIO,
    [ESHUTDOWN]        = TARGET_ESHUTDOWN,
    [ETOOMANYREFS]     = TARGET_ETOOMANYREFS,
    [ETIMEDOUT]        = TARGET_ETIMEDOUT,
    [ECONNREFUSED]     = TARGET_ECONNREFUSED,
    [EHOSTDOWN]        = TARGET_EHOSTDOWN,
    [EHOSTUNREACH]     = TARGET_EHOSTUNREACH,
    [EALREADY]         = TARGET_EALREADY,
    [EINPROGRESS]      = TARGET_EINPROGRESS,
    [ESTALE]           = TARGET_ESTALE,
    [ECANCELED]        = TARGET_ECANCELED,
    [ENOMEDIUM]        = TARGET_ENOMEDIUM,
    [EMEDIUMTYPE]      = TARGET_EMEDIUMTYPE,
    [ENOKEY]           = TARGET_ENOKEY,
    [EKEYEXPIRED]      = TARGET_EKEYEXPIRED,
    [EKEYREVOKED]      = TARGET_EKEYREVOKED,
    [EKEYREJECTED]     = TARGET_EKEYREJECTED,
    [EOWNERDEAD]       = TARGET_EOWNERDEAD,
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE]  = TARGET_ENOTRECOVERABLE,
#endif
};
static inline int host_to_target_errno(int err)
{
    if(host_to_target_errno_table[err])
        return host_to_target_errno_table[err];
    return err;
}

static inline int target_to_host_errno(int err)
{
    if (target_to_host_errno_table[err])
        return target_to_host_errno_table[err];
    return err;
}
static inline abi_long get_errno(abi_long ret)
{
    if (ret == -1)
        return -host_to_target_errno(errno);
    else
        return ret;
}

static inline int is_error(abi_long ret)
{
    return (abi_ulong)ret >= (abi_ulong)(-4096);
}
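/* Linux reports syscall failures as small negative numbers in the range
 * -1..-4095; is_error() applies the same convention to target return values. */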
char *target_strerror(int err)
{
    if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
        return NULL;
    }
    return strerror(target_to_host_errno(err));
}
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;

void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)
/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;

    DEBUGF_BRK("do_brk(%#010x) -> ", new_brk);

    if (!new_brk) {
        DEBUGF_BRK("%#010x (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        DEBUGF_BRK("%#010x (new_brk < target_original_brk)\n", target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK("%#010x (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK("%#010x (mapped_addr == brk_page)\n", target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        DEBUGF_BRK("%#010x (mapped_addr != -1)\n", target_brk);
    } else {
        DEBUGF_BRK("%#010x (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
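/* select() support: the layout of the target fd_set depends on
 * TARGET_ABI_BITS, so the helpers below repack the bit arrays word by word
 * when copying between guest and host memory. */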
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    abi_ulong b, *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);
}
static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    abi_ulong *target_fds;

    nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    for (i = 0; i < nw; i++) {
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((FD_ISSET(k, fds) != 0) << j);
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
}
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);
}
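/* rlim_t and RLIM_INFINITY may be wider on one side than the other, so the
 * rlimit helpers below map out-of-range values to the respective infinity
 * constant rather than silently truncating them. */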
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}

static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);
}

static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);
}
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)

static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
}

static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
}
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv, *tv_ptr;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
    }

    ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
            return -TARGET_EFAULT;
    }
}
static abi_long do_pipe2(int host_pipe[], int flags)
{
    return pipe2(host_pipe, flags);
}

static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
#if defined(TARGET_ALPHA)
    ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
    return host_pipe[0];
#elif defined(TARGET_MIPS)
    ((CPUMIPSState *)cpu_env)->active_tc.gpr[3] = host_pipe[1];
    return host_pipe[0];
#elif defined(TARGET_SH4)
    ((CPUSH4State *)cpu_env)->gregs[1] = host_pipe[1];
    return host_pipe[0];
#endif

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);
}
static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    unlock_user(target_saddr, target_addr, 0);
}

static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    target_saddr->sa_family = tswap16(addr->sa_family);
    unlock_user(target_saddr, target_addr, len);
}
/* ??? Should this also swap msgh->name? */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
                  - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            gemu_log("Host cmsg overflow\n");
            break;
        }

        cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
            gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        } else {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                fd[i] = tswap32(target_fd[i]);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
}
/* ??? Should this also swap msgh->name? */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));

        space += TARGET_CMSG_SPACE(len);
        if (space > msg_controllen) {
            space -= TARGET_CMSG_SPACE(len);
            gemu_log("Target cmsg overflow\n");
            break;
        }

        target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));

        if (cmsg->cmsg_level != TARGET_SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
            gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, len);
        } else {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++)
                target_fd[i] = tswap32(fd[i]);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
}
/* do_setsockopt() Must return target values and target errnos. */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

        /* TCP options all take an 'int' value.  */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;
        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));

    case IP_ROUTER_ALERT:
    case IP_MTU_DISCOVER:
    case IP_MULTICAST_TTL:
    case IP_MULTICAST_LOOP:
        if (optlen >= sizeof(uint32_t)) {
            if (get_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else if (optlen >= 1) {
            if (get_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));

    case IP_ADD_MEMBERSHIP:
    case IP_DROP_MEMBERSHIP:
        if (optlen < sizeof (struct target_ip_mreq) ||
            optlen > sizeof (struct target_ip_mreqn))
            return -TARGET_EINVAL;
        ip_mreq = (struct ip_mreqn *) alloca(optlen);
        target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
        ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));

    case IP_BLOCK_SOURCE:
    case IP_UNBLOCK_SOURCE:
    case IP_ADD_SOURCE_MEMBERSHIP:
    case IP_DROP_SOURCE_MEMBERSHIP:
        if (optlen != sizeof (struct target_ip_mreq_source))
            return -TARGET_EINVAL;
        ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
        ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
        unlock_user (ip_mreq_source, optval_addr, 0);

    case TARGET_SOL_SOCKET:
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
        case TARGET_SO_TYPE:
        case TARGET_SO_ERROR:
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
        case TARGET_SO_RCVTIMEO:
            optname = SO_RCVTIMEO;
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;

        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;
        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));

    gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
    ret = -TARGET_ENOPROTOOPT;
}
/* do_getsockopt() Must return target values and target errnos. */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    case TARGET_SOL_SOCKET:
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
        case TARGET_SO_PEERCRED: {
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &lv));
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
        case TARGET_SO_TYPE:
        case TARGET_SO_ERROR:
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;

        /* TCP options all take an 'int' value.  */
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (put_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        if (put_user_u8(val, optval_addr))
            return -TARGET_EFAULT;
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;

    case IP_ROUTER_ALERT:
    case IP_MTU_DISCOVER:
    case IP_MULTICAST_TTL:
    case IP_MULTICAST_LOOP:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
            if (put_user_u32(len, optlen)
                || put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (len > sizeof(int))
                len = sizeof(int);
            if (put_user_u32(len, optlen)
                || put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        }
        ret = -TARGET_ENOPROTOOPT;

    gemu_log("getsockopt level=%d optname=%d not yet supported\n",
             level, optname);
    ret = -TARGET_EOPNOTSUPP;
}
/*
 * lock_iovec()/unlock_iovec() have a return code of 0 for success where
 * other lock functions have a return code of 0 for failure.
 */
static abi_long lock_iovec(int type, struct iovec *vec, abi_ulong target_addr,
                           int count, int copy)
{
    struct target_iovec *target_vec;

    target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
    if (!target_vec)
        return -TARGET_EFAULT;
    for (i = 0; i < count; i++) {
        base = tswapal(target_vec[i].iov_base);
        vec[i].iov_len = tswapal(target_vec[i].iov_len);
        if (vec[i].iov_len != 0) {
            vec[i].iov_base = lock_user(type, base, vec[i].iov_len, copy);
            /* Don't check lock_user return value. We must call writev even
               if an element has an invalid base address. */
        } else {
            /* zero length pointer is ignored */
            vec[i].iov_base = NULL;
        }
    }
    unlock_user(target_vec, target_addr, 0);
    return 0;
}
static abi_long unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                             int count, int copy)
{
    struct target_iovec *target_vec;

    target_vec = lock_user(VERIFY_READ, target_addr, count * sizeof(struct target_iovec), 1);
    if (!target_vec)
        return -TARGET_EFAULT;
    for (i = 0; i < count; i++) {
        if (target_vec[i].iov_base) {
            base = tswapal(target_vec[i].iov_base);
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
}
1754 static abi_long
do_socket(int domain
, int type
, int protocol
)
1756 #if defined(TARGET_MIPS)
1758 case TARGET_SOCK_DGRAM
:
1761 case TARGET_SOCK_STREAM
:
1764 case TARGET_SOCK_RAW
:
1767 case TARGET_SOCK_RDM
:
1770 case TARGET_SOCK_SEQPACKET
:
1771 type
= SOCK_SEQPACKET
;
1773 case TARGET_SOCK_PACKET
:
1778 if (domain
== PF_NETLINK
)
1779 return -EAFNOSUPPORT
; /* do not NETLINK socket connections possible */
1780 return get_errno(socket(domain
, type
, protocol
));
/* do_bind() Must return target values and target errnos. */
static abi_long do_bind(int sockfd, abi_ulong target_addr,
                        socklen_t addrlen)
{
    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen+1);

    ret = target_to_host_sockaddr(addr, target_addr, addrlen);

    return get_errno(bind(sockfd, addr, addrlen));
}

/* do_connect() Must return target values and target errnos. */
static abi_long do_connect(int sockfd, abi_ulong target_addr,
                           socklen_t addrlen)
{
    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    addr = alloca(addrlen);

    ret = target_to_host_sockaddr(addr, target_addr, addrlen);

    return get_errno(connect(sockfd, addr, addrlen));
}
1824 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
1825 int flags
, int send
)
1828 struct target_msghdr
*msgp
;
1832 abi_ulong target_vec
;
1835 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
1839 return -TARGET_EFAULT
;
1840 if (msgp
->msg_name
) {
1841 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
1842 msg
.msg_name
= alloca(msg
.msg_namelen
);
1843 ret
= target_to_host_sockaddr(msg
.msg_name
, tswapal(msgp
->msg_name
),
1846 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
1850 msg
.msg_name
= NULL
;
1851 msg
.msg_namelen
= 0;
1853 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
1854 msg
.msg_control
= alloca(msg
.msg_controllen
);
1855 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
1857 count
= tswapal(msgp
->msg_iovlen
);
1858 vec
= alloca(count
* sizeof(struct iovec
));
1859 target_vec
= tswapal(msgp
->msg_iov
);
1860 lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
, vec
, target_vec
, count
, send
);
1861 msg
.msg_iovlen
= count
;
1865 ret
= target_to_host_cmsg(&msg
, msgp
);
1867 ret
= get_errno(sendmsg(fd
, &msg
, flags
));
1869 ret
= get_errno(recvmsg(fd
, &msg
, flags
));
1870 if (!is_error(ret
)) {
1872 ret
= host_to_target_cmsg(msgp
, &msg
);
1877 unlock_iovec(vec
, target_vec
, count
, !send
);
1878 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
/* do_accept() Must return target values and target errnos. */
static abi_long do_accept(int fd, abi_ulong target_addr,
                          abi_ulong target_addrlen_addr)
{
    if (target_addr == 0)
        return get_errno(accept(fd, NULL, NULL));

    /* linux returns EINVAL if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EINVAL;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EINVAL;

    addr = alloca(addrlen);

    ret = get_errno(accept(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
}
/* do_getpeername() Must return target values and target errnos. */
static abi_long do_getpeername(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret = get_errno(getpeername(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
}
/* do_getsockname() Must return target values and target errnos. */
static abi_long do_getsockname(int fd, abi_ulong target_addr,
                               abi_ulong target_addrlen_addr)
{
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
        return -TARGET_EFAULT;

    addr = alloca(addrlen);

    ret = get_errno(getsockname(fd, addr, &addrlen));
    if (!is_error(ret)) {
        host_to_target_sockaddr(target_addr, addr, addrlen);
        if (put_user_u32(addrlen, target_addrlen_addr))
            ret = -TARGET_EFAULT;
    }
}
/* do_socketpair() Must return target values and target errnos. */
static abi_long do_socketpair(int domain, int type, int protocol,
                              abi_ulong target_tab_addr)
{
    ret = get_errno(socketpair(domain, type, protocol, tab));
    if (!is_error(ret)) {
        if (put_user_s32(tab[0], target_tab_addr)
            || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
            ret = -TARGET_EFAULT;
    }
}
/* do_sendto() Must return target values and target errnos. */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        addr = alloca(addrlen);
        ret = target_to_host_sockaddr(addr, target_addr, addrlen);
        if (ret) {
            unlock_user(host_msg, msg, 0);
            return ret;
        }
        ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(send(fd, host_msg, len, flags));
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
/* do_recvfrom() Must return target values and target errnos. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        ret = get_errno(qemu_recv(fd, host_msg, len, flags));
    }
    if (!is_error(ret)) {
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr, addrlen);
            if (put_user_u32(addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        unlock_user(host_msg, msg, len);
    } else {
fail:
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
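/* do_socketcall() demultiplexes the socketcall number into the individual
 * socket operations above; the arguments arrive packed as an array of
 * abi_ulong values at vptr and are fetched with get_user_ual(). */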
2063 #ifdef TARGET_NR_socketcall
2064 /* do_socketcall() Must return target values and target errnos. */
2065 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
2068 const int n
= sizeof(abi_ulong
);
2073 abi_ulong domain
, type
, protocol
;
2075 if (get_user_ual(domain
, vptr
)
2076 || get_user_ual(type
, vptr
+ n
)
2077 || get_user_ual(protocol
, vptr
+ 2 * n
))
2078 return -TARGET_EFAULT
;
2080 ret
= do_socket(domain
, type
, protocol
);
2086 abi_ulong target_addr
;
2089 if (get_user_ual(sockfd
, vptr
)
2090 || get_user_ual(target_addr
, vptr
+ n
)
2091 || get_user_ual(addrlen
, vptr
+ 2 * n
))
2092 return -TARGET_EFAULT
;
2094 ret
= do_bind(sockfd
, target_addr
, addrlen
);
2097 case SOCKOP_connect
:
2100 abi_ulong target_addr
;
2103 if (get_user_ual(sockfd
, vptr)
            || get_user_ual(target_addr, vptr + n)
            || get_user_ual(addrlen, vptr + 2 * n))
            return -TARGET_EFAULT;

        ret = do_connect(sockfd, target_addr, addrlen);

        abi_ulong sockfd, backlog;

        if (get_user_ual(sockfd, vptr)
            || get_user_ual(backlog, vptr + n))
            return -TARGET_EFAULT;

        ret = get_errno(listen(sockfd, backlog));

        abi_ulong target_addr, target_addrlen;

        if (get_user_ual(sockfd, vptr)
            || get_user_ual(target_addr, vptr + n)
            || get_user_ual(target_addrlen, vptr + 2 * n))
            return -TARGET_EFAULT;

        ret = do_accept(sockfd, target_addr, target_addrlen);

    case SOCKOP_getsockname:

        abi_ulong target_addr, target_addrlen;

        if (get_user_ual(sockfd, vptr)
            || get_user_ual(target_addr, vptr + n)
            || get_user_ual(target_addrlen, vptr + 2 * n))
            return -TARGET_EFAULT;

        ret = do_getsockname(sockfd, target_addr, target_addrlen);

    case SOCKOP_getpeername:

        abi_ulong target_addr, target_addrlen;

        if (get_user_ual(sockfd, vptr)
            || get_user_ual(target_addr, vptr + n)
            || get_user_ual(target_addrlen, vptr + 2 * n))
            return -TARGET_EFAULT;

        ret = do_getpeername(sockfd, target_addr, target_addrlen);

    case SOCKOP_socketpair:

        abi_ulong domain, type, protocol;

        if (get_user_ual(domain, vptr)
            || get_user_ual(type, vptr + n)
            || get_user_ual(protocol, vptr + 2 * n)
            || get_user_ual(tab, vptr + 3 * n))
            return -TARGET_EFAULT;

        ret = do_socketpair(domain, type, protocol, tab);

        if (get_user_ual(sockfd, vptr)
            || get_user_ual(msg, vptr + n)
            || get_user_ual(len, vptr + 2 * n)
            || get_user_ual(flags, vptr + 3 * n))
            return -TARGET_EFAULT;

        ret = do_sendto(sockfd, msg, len, flags, 0, 0);

        if (get_user_ual(sockfd, vptr)
            || get_user_ual(msg, vptr + n)
            || get_user_ual(len, vptr + 2 * n)
            || get_user_ual(flags, vptr + 3 * n))
            return -TARGET_EFAULT;

        ret = do_recvfrom(sockfd, msg, len, flags, 0, 0);

        if (get_user_ual(sockfd, vptr)
            || get_user_ual(msg, vptr + n)
            || get_user_ual(len, vptr + 2 * n)
            || get_user_ual(flags, vptr + 3 * n)
            || get_user_ual(addr, vptr + 4 * n)
            || get_user_ual(addrlen, vptr + 5 * n))
            return -TARGET_EFAULT;

        ret = do_sendto(sockfd, msg, len, flags, addr, addrlen);

    case SOCKOP_recvfrom:

        if (get_user_ual(sockfd, vptr)
            || get_user_ual(msg, vptr + n)
            || get_user_ual(len, vptr + 2 * n)
            || get_user_ual(flags, vptr + 3 * n)
            || get_user_ual(addr, vptr + 4 * n)
            || get_user_ual(addrlen, vptr + 5 * n))
            return -TARGET_EFAULT;

        ret = do_recvfrom(sockfd, msg, len, flags, addr, addrlen);

    case SOCKOP_shutdown:

        abi_ulong sockfd, how;

        if (get_user_ual(sockfd, vptr)
            || get_user_ual(how, vptr + n))
            return -TARGET_EFAULT;

        ret = get_errno(shutdown(sockfd, how));

    case SOCKOP_sendmsg:
    case SOCKOP_recvmsg:

        abi_ulong target_msg;

        if (get_user_ual(fd, vptr)
            || get_user_ual(target_msg, vptr + n)
            || get_user_ual(flags, vptr + 2 * n))
            return -TARGET_EFAULT;

        ret = do_sendrecvmsg(fd, target_msg, flags,
                             (num == SOCKOP_sendmsg));

    case SOCKOP_setsockopt:

        if (get_user_ual(sockfd, vptr)
            || get_user_ual(level, vptr + n)
            || get_user_ual(optname, vptr + 2 * n)
            || get_user_ual(optval, vptr + 3 * n)
            || get_user_ual(optlen, vptr + 4 * n))
            return -TARGET_EFAULT;

        ret = do_setsockopt(sockfd, level, optname, optval, optlen);

    case SOCKOP_getsockopt:

        if (get_user_ual(sockfd, vptr)
            || get_user_ual(level, vptr + n)
            || get_user_ual(optname, vptr + 2 * n)
            || get_user_ual(optval, vptr + 3 * n)
            || get_user_ual(optlen, vptr + 4 * n))
            return -TARGET_EFAULT;

        ret = do_getsockopt(sockfd, level, optname, optval, optlen);

        gemu_log("Unsupported socketcall: %d\n", num);
        ret = -TARGET_ENOSYS;
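
/* Keep track of guest shared memory segments attached via do_shmat(): the
 * start address and size are recorded so that do_shmdt() can later reset
 * the page flags for the corresponding guest range. */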
#define N_SHM_REGIONS	32

static struct shm_region {
    abi_ulong start;
    abi_ulong size;
} shm_regions[N_SHM_REGIONS];
struct target_ipc_perm
{
    unsigned short int mode;
    unsigned short int __pad1;
    unsigned short int __seq;
    unsigned short int __pad2;
    abi_ulong __unused1;
    abi_ulong __unused2;
};

struct target_semid_ds
{
    struct target_ipc_perm sem_perm;
    abi_ulong sem_otime;
    abi_ulong __unused1;
    abi_ulong sem_ctime;
    abi_ulong __unused2;
    abi_ulong sem_nsems;
    abi_ulong __unused3;
    abi_ulong __unused4;
};
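
/* The helpers below convert SysV IPC control structures between the guest
 * ("target_*") layout and the host's native structs, byte-swapping each
 * field with tswapal()/tswap16() or __get_user()/__put_user() as needed. */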
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswapal(target_ip->__key);
    host_ip->uid = tswapal(target_ip->uid);
    host_ip->gid = tswapal(target_ip->gid);
    host_ip->cuid = tswapal(target_ip->cuid);
    host_ip->cgid = tswapal(target_ip->cgid);
    host_ip->mode = tswap16(target_ip->mode);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswapal(host_ip->__key);
    target_ip->uid = tswapal(host_ip->uid);
    target_ip->gid = tswapal(host_ip->gid);
    target_ip->cuid = tswapal(host_ip->cuid);
    target_ip->cgid = tswapal(host_ip->cgid);
    target_ip->mode = tswap16(host_ip->mode);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
    host_sd->sem_otime = tswapal(target_sd->sem_otime);
    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
    target_sd->sem_otime = tswapal(host_sd->sem_otime);
    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};

static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}
union semun {
    int val;
    struct semid_ds *buf;
    unsigned short *array;
    struct seminfo *__buf;
};

union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};
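
/* GETALL/SETALL operate on an array of unsigned shorts, one per semaphore.
 * These helpers first query the semaphore count with IPC_STAT and then copy
 * the whole array between guest and host; note that *host_array is
 * allocated here with malloc(). */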
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems, i;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    abi_long ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (is_error(ret))
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = malloc(nsems*sizeof(unsigned short));
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array)
        return -TARGET_EFAULT;

    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}
static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
                                               unsigned short **host_array)
{
    int nsems, i;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    abi_long ret;

    semun.buf = &semid_ds;

    ret = semctl(semid, 0, IPC_STAT, semun);
    if (is_error(ret))
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    array = lock_user(VERIFY_WRITE, target_addr,
                      nsems*sizeof(unsigned short), 0);
    if (!array)
        return -TARGET_EFAULT;

    for(i=0; i<nsems; i++) {
        __put_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 1);

    return 0;
}
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 union target_semun target_su)
{
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        arg.val = tswap32(target_su.val);
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        target_su.val = tswap32(arg.val);
        break;
    case GETALL:
    case SETALL:
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    default:
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }

    return ret;
}
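
/* semop() takes an array of struct sembuf; each element's sem_num/sem_op/
 * sem_flg is copied with __get_user(), which also handles byte swapping. */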
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};

static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
                                             unsigned nsops)
{
    struct target_sembuf *target_sembuf;
    int i;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops*sizeof(struct target_sembuf), 1);
    if (!target_sembuf)
        return -TARGET_EFAULT;

    for(i=0; i<nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    }

    unlock_user(target_sembuf, target_addr, 0);
    return 0;
}
static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
{
    struct sembuf sops[nsops];

    if (target_to_host_sembuf(sops, ptr, nsops))
        return -TARGET_EFAULT;

    return semop(semid, sops, nsops);
}
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};
static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapal(target_md->msg_stime);
    host_md->msg_rtime = tswapal(target_md->msg_rtime);
    host_md->msg_ctime = tswapal(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapal(target_md->msg_qnum);
    host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
    host_md->msg_lspid = tswapal(target_md->msg_lspid);
    host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
    return 0;
}
static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapal(host_md->msg_stime);
    target_md->msg_rtime = tswapal(host_md->msg_rtime);
    target_md->msg_ctime = tswapal(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapal(host_md->msg_qnum);
    target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
    target_md->msg_lspid = tswapal(host_md->msg_lspid);
    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};

static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
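
/* msgsnd()/msgrcv() bounce the message through a temporary host msgbuf:
 * mtype is converted with tswapal() and mtext is copied verbatim. */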
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};

static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 unsigned int msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    host_mb = malloc(msgsz+sizeof(long));
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
    free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 unsigned int msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = malloc(msgsz+sizeof(long));
    ret = get_errno(msgrcv(msqid, host_mb, msgsz, tswapal(msgtyp), msgflg));

    if (ret > 0) {
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    unlock_user_struct(target_mb, msgp, 1);

    return ret;
}
struct target_shmid_ds
{
    struct target_ipc_perm shm_perm;
    abi_ulong shm_segsz;
    abi_ulong shm_atime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong shm_dtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong shm_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong shm_cpid;
    abi_ulong shm_lpid;
    abi_ulong shm_nattch;
    unsigned long int __unused4;
    unsigned long int __unused5;
};
static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};

static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};

static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}
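
/* do_shmctl() converts the argument according to the command: shmid_ds for
 * IPC_STAT/IPC_SET, shminfo for IPC_INFO, shm_info for SHM_INFO, and no
 * conversion for the remaining commands. */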
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    default:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
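
/* do_shmat() queries the segment size with IPC_STAT so that the guest page
 * flags can be updated, then attaches the segment either at the
 * guest-supplied address or at a free spot found with mmap_find_vma(). */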
static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_ulong raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i, ret;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    if (shmaddr) {
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    } else {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            host_raddr = (void *)-1;
        } else {
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
        }
    }

    if (host_raddr == (void *)-1) {
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (shm_regions[i].start == 0) {
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    return raddr;
}
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;

    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].start == shmaddr) {
            shm_regions[i].start = 0;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
            break;
        }
    }

    return get_errno(shmdt(g2h(shmaddr)));
}
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings. */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(unsigned int call, int first,
                       int second, int third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl:
        ret = do_semctl(first, second, third, (union target_semun)(abi_ulong) ptr);
        break;

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tmp->msgp, second, tmp->msgtyp, third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, third);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
3136 /* kernel structure types definitions */
3138 #define STRUCT(name, ...) STRUCT_ ## name,
3139 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
3141 #include "syscall_types.h"
3144 #undef STRUCT_SPECIAL
3146 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
3147 #define STRUCT_SPECIAL(name)
3148 #include "syscall_types.h"
3150 #undef STRUCT_SPECIAL
3152 typedef struct IOCTLEntry IOCTLEntry
;
3154 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3155 int fd
, abi_long cmd
, abi_long arg
);
3158 unsigned int target_cmd
;
3159 unsigned int host_cmd
;
3162 do_ioctl_fn
*do_ioctl
;
3163 const argtype arg_type
[5];
3166 #define IOC_R 0x0001
3167 #define IOC_W 0x0002
3168 #define IOC_RW (IOC_R | IOC_W)
3170 #define MAX_STRUCT_SIZE 4096
3172 #ifdef CONFIG_FIEMAP
3173 /* So fiemap access checks don't overflow on 32 bit systems.
3174 * This is very slightly smaller than the limit imposed by
3175 * the underlying kernel.
3177 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
3178 / sizeof(struct fiemap_extent))
3180 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3181 int fd
, abi_long cmd
, abi_long arg
)
3183 /* The parameter for this ioctl is a struct fiemap followed
3184 * by an array of struct fiemap_extent whose size is set
3185 * in fiemap->fm_extent_count. The array is filled in by the
3188 int target_size_in
, target_size_out
;
3190 const argtype
*arg_type
= ie
->arg_type
;
3191 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
3194 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
3198 assert(arg_type
[0] == TYPE_PTR
);
3199 assert(ie
->access
== IOC_RW
);
3201 target_size_in
= thunk_type_size(arg_type
, 0);
3202 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
3204 return -TARGET_EFAULT
;
3206 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3207 unlock_user(argptr
, arg
, 0);
3208 fm
= (struct fiemap
*)buf_temp
;
3209 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
3210 return -TARGET_EINVAL
;
3213 outbufsz
= sizeof (*fm
) +
3214 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
3216 if (outbufsz
> MAX_STRUCT_SIZE
) {
3217 /* We can't fit all the extents into the fixed size buffer.
3218 * Allocate one that is large enough and use it instead.
3220 fm
= malloc(outbufsz
);
3222 return -TARGET_ENOMEM
;
3224 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
3227 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, fm
));
3228 if (!is_error(ret
)) {
3229 target_size_out
= target_size_in
;
3230 /* An extent_count of 0 means we were only counting the extents
3231 * so there are no structs to copy
3233 if (fm
->fm_extent_count
!= 0) {
3234 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
3236 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
3238 ret
= -TARGET_EFAULT
;
3240 /* Convert the struct fiemap */
3241 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
3242 if (fm
->fm_extent_count
!= 0) {
3243 p
= argptr
+ target_size_in
;
3244 /* ...and then all the struct fiemap_extents */
3245 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
3246 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
3251 unlock_user(argptr
, arg
, target_size_out
);
3261 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
3262 int fd
, abi_long cmd
, abi_long arg
)
3264 const argtype
*arg_type
= ie
->arg_type
;
3268 struct ifconf
*host_ifconf
;
3270 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
3271 int target_ifreq_size
;
3276 abi_long target_ifc_buf
;
3280 assert(arg_type
[0] == TYPE_PTR
);
3281 assert(ie
->access
== IOC_RW
);
3284 target_size
= thunk_type_size(arg_type
, 0);
3286 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3288 return -TARGET_EFAULT
;
3289 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3290 unlock_user(argptr
, arg
, 0);
3292 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
3293 target_ifc_len
= host_ifconf
->ifc_len
;
3294 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
3296 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
3297 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
3298 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
3300 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
3301 if (outbufsz
> MAX_STRUCT_SIZE
) {
3302 /* We can't fit all the extents into the fixed size buffer.
3303 * Allocate one that is large enough and use it instead.
3305 host_ifconf
= malloc(outbufsz
);
3307 return -TARGET_ENOMEM
;
3309 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
3312 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
3314 host_ifconf
->ifc_len
= host_ifc_len
;
3315 host_ifconf
->ifc_buf
= host_ifc_buf
;
3317 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, host_ifconf
));
3318 if (!is_error(ret
)) {
3319 /* convert host ifc_len to target ifc_len */
3321 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
3322 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
3323 host_ifconf
->ifc_len
= target_ifc_len
;
3325 /* restore target ifc_buf */
3327 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
3329 /* copy struct ifconf to target user */
3331 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3333 return -TARGET_EFAULT
;
3334 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
3335 unlock_user(argptr
, arg
, target_size
);
3337 /* copy ifreq[] to target user */
3339 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
3340 for (i
= 0; i
< nb_ifreq
; i
++) {
3341 thunk_convert(argptr
+ i
* target_ifreq_size
,
3342 host_ifc_buf
+ i
* sizeof(struct ifreq
),
3343 ifreq_arg_type
, THUNK_TARGET
);
3345 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
3355 static IOCTLEntry ioctl_entries
[] = {
3356 #define IOCTL(cmd, access, ...) \
3357 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
3358 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
3359 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
3364 /* ??? Implement proper locking for ioctls. */
3365 /* do_ioctl() Must return target values and target errnos. */
3366 static abi_long
do_ioctl(int fd
, abi_long cmd
, abi_long arg
)
3368 const IOCTLEntry
*ie
;
3369 const argtype
*arg_type
;
3371 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
3377 if (ie
->target_cmd
== 0) {
3378 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
3379 return -TARGET_ENOSYS
;
3381 if (ie
->target_cmd
== cmd
)
3385 arg_type
= ie
->arg_type
;
3387 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
3390 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
3393 switch(arg_type
[0]) {
3396 ret
= get_errno(ioctl(fd
, ie
->host_cmd
));
3401 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, arg
));
3405 target_size
= thunk_type_size(arg_type
, 0);
3406 switch(ie
->access
) {
3408 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3409 if (!is_error(ret
)) {
3410 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3412 return -TARGET_EFAULT
;
3413 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3414 unlock_user(argptr
, arg
, target_size
);
3418 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3420 return -TARGET_EFAULT
;
3421 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3422 unlock_user(argptr
, arg
, 0);
3423 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3427 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
3429 return -TARGET_EFAULT
;
3430 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
3431 unlock_user(argptr
, arg
, 0);
3432 ret
= get_errno(ioctl(fd
, ie
->host_cmd
, buf_temp
));
3433 if (!is_error(ret
)) {
3434 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
3436 return -TARGET_EFAULT
;
3437 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
3438 unlock_user(argptr
, arg
, target_size
);
3444 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
3445 (long)cmd
, arg_type
[0]);
3446 ret
= -TARGET_ENOSYS
;
static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
};
static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
};
static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
};
static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
};
3552 static void target_to_host_termios (void *dst
, const void *src
)
3554 struct host_termios
*host
= dst
;
3555 const struct target_termios
*target
= src
;
3558 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
3560 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
3562 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
3564 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
3565 host
->c_line
= target
->c_line
;
3567 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
3568 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
3569 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
3570 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
3571 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
3572 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
3573 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
3574 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
3575 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
3576 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
3577 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
3578 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
3579 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
3580 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
3581 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
3582 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
3583 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
3584 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
3587 static void host_to_target_termios (void *dst
, const void *src
)
3589 struct target_termios
*target
= dst
;
3590 const struct host_termios
*host
= src
;
3593 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
3595 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
3597 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
3599 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
3600 target
->c_line
= host
->c_line
;
3602 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
3603 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
3604 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
3605 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
3606 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
3607 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
3608 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
3609 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
3610 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
3611 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
3612 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
3613 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
3614 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
3615 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
3616 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
3617 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
3618 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
3619 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
3622 static const StructEntry struct_termios_def
= {
3623 .convert
= { host_to_target_termios
, target_to_host_termios
},
3624 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
3625 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
static bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
};
3640 #if defined(TARGET_I386)
3642 /* NOTE: there is really one LDT for all the threads */
3643 static uint8_t *ldt_table
;
3645 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
3652 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
3653 if (size
> bytecount
)
3655 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
3657 return -TARGET_EFAULT
;
3658 /* ??? Should this by byteswapped? */
3659 memcpy(p
, ldt_table
, size
);
3660 unlock_user(p
, ptr
, size
);
3664 /* XXX: add locking support */
3665 static abi_long
write_ldt(CPUX86State
*env
,
3666 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
3668 struct target_modify_ldt_ldt_s ldt_info
;
3669 struct target_modify_ldt_ldt_s
*target_ldt_info
;
3670 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
3671 int seg_not_present
, useable
, lm
;
3672 uint32_t *lp
, entry_1
, entry_2
;
3674 if (bytecount
!= sizeof(ldt_info
))
3675 return -TARGET_EINVAL
;
3676 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
3677 return -TARGET_EFAULT
;
3678 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
3679 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
3680 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
3681 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
3682 unlock_user_struct(target_ldt_info
, ptr
, 0);
3684 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
3685 return -TARGET_EINVAL
;
3686 seg_32bit
= ldt_info
.flags
& 1;
3687 contents
= (ldt_info
.flags
>> 1) & 3;
3688 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
3689 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
3690 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
3691 useable
= (ldt_info
.flags
>> 6) & 1;
3695 lm
= (ldt_info
.flags
>> 7) & 1;
3697 if (contents
== 3) {
3699 return -TARGET_EINVAL
;
3700 if (seg_not_present
== 0)
3701 return -TARGET_EINVAL
;
3703 /* allocate the LDT */
3705 env
->ldt
.base
= target_mmap(0,
3706 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
3707 PROT_READ
|PROT_WRITE
,
3708 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
3709 if (env
->ldt
.base
== -1)
3710 return -TARGET_ENOMEM
;
3711 memset(g2h(env
->ldt
.base
), 0,
3712 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
3713 env
->ldt
.limit
= 0xffff;
3714 ldt_table
= g2h(env
->ldt
.base
);
3717 /* NOTE: same code as Linux kernel */
3718 /* Allow LDTs to be cleared by the user. */
3719 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
3722 read_exec_only
== 1 &&
3724 limit_in_pages
== 0 &&
3725 seg_not_present
== 1 &&
3733 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
3734 (ldt_info
.limit
& 0x0ffff);
3735 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
3736 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
3737 (ldt_info
.limit
& 0xf0000) |
3738 ((read_exec_only
^ 1) << 9) |
3740 ((seg_not_present
^ 1) << 15) |
3742 (limit_in_pages
<< 23) |
3746 entry_2
|= (useable
<< 20);
3748 /* Install the new entry ... */
3750 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
3751 lp
[0] = tswap32(entry_1
);
3752 lp
[1] = tswap32(entry_2
);
3756 /* specific and weird i386 syscalls */
3757 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
3758 unsigned long bytecount
)
3764 ret
= read_ldt(ptr
, bytecount
);
3767 ret
= write_ldt(env
, ptr
, bytecount
, 1);
3770 ret
= write_ldt(env
, ptr
, bytecount
, 0);
3773 ret
= -TARGET_ENOSYS
;
3779 #if defined(TARGET_I386) && defined(TARGET_ABI32)
3780 static abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
3782 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
3783 struct target_modify_ldt_ldt_s ldt_info
;
3784 struct target_modify_ldt_ldt_s
*target_ldt_info
;
3785 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
3786 int seg_not_present
, useable
, lm
;
3787 uint32_t *lp
, entry_1
, entry_2
;
3790 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
3791 if (!target_ldt_info
)
3792 return -TARGET_EFAULT
;
3793 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
3794 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
3795 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
3796 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
3797 if (ldt_info
.entry_number
== -1) {
3798 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
3799 if (gdt_table
[i
] == 0) {
3800 ldt_info
.entry_number
= i
;
3801 target_ldt_info
->entry_number
= tswap32(i
);
3806 unlock_user_struct(target_ldt_info
, ptr
, 1);
3808 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
3809 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
3810 return -TARGET_EINVAL
;
3811 seg_32bit
= ldt_info
.flags
& 1;
3812 contents
= (ldt_info
.flags
>> 1) & 3;
3813 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
3814 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
3815 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
3816 useable
= (ldt_info
.flags
>> 6) & 1;
3820 lm
= (ldt_info
.flags
>> 7) & 1;
3823 if (contents
== 3) {
3824 if (seg_not_present
== 0)
3825 return -TARGET_EINVAL
;
3828 /* NOTE: same code as Linux kernel */
3829 /* Allow LDTs to be cleared by the user. */
3830 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
3831 if ((contents
== 0 &&
3832 read_exec_only
== 1 &&
3834 limit_in_pages
== 0 &&
3835 seg_not_present
== 1 &&
3843 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
3844 (ldt_info
.limit
& 0x0ffff);
3845 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
3846 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
3847 (ldt_info
.limit
& 0xf0000) |
3848 ((read_exec_only
^ 1) << 9) |
3850 ((seg_not_present
^ 1) << 15) |
3852 (limit_in_pages
<< 23) |
3857 /* Install the new entry ... */
3859 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
3860 lp
[0] = tswap32(entry_1
);
3861 lp
[1] = tswap32(entry_2
);
3865 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
3867 struct target_modify_ldt_ldt_s
*target_ldt_info
;
3868 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
3869 uint32_t base_addr
, limit
, flags
;
3870 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
3871 int seg_not_present
, useable
, lm
;
3872 uint32_t *lp
, entry_1
, entry_2
;
3874 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
3875 if (!target_ldt_info
)
3876 return -TARGET_EFAULT
;
3877 idx
= tswap32(target_ldt_info
->entry_number
);
3878 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
3879 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
3880 unlock_user_struct(target_ldt_info
, ptr
, 1);
3881 return -TARGET_EINVAL
;
3883 lp
= (uint32_t *)(gdt_table
+ idx
);
3884 entry_1
= tswap32(lp
[0]);
3885 entry_2
= tswap32(lp
[1]);
3887 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
3888 contents
= (entry_2
>> 10) & 3;
3889 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
3890 seg_32bit
= (entry_2
>> 22) & 1;
3891 limit_in_pages
= (entry_2
>> 23) & 1;
3892 useable
= (entry_2
>> 20) & 1;
3896 lm
= (entry_2
>> 21) & 1;
3898 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
3899 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
3900 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
3901 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
3902 base_addr
= (entry_1
>> 16) |
3903 (entry_2
& 0xff000000) |
3904 ((entry_2
& 0xff) << 16);
3905 target_ldt_info
->base_addr
= tswapal(base_addr
);
3906 target_ldt_info
->limit
= tswap32(limit
);
3907 target_ldt_info
->flags
= tswap32(flags
);
3908 unlock_user_struct(target_ldt_info
, ptr
, 1);
3911 #endif /* TARGET_I386 && TARGET_ABI32 */
3913 #ifndef TARGET_ABI32
3914 static abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
3921 case TARGET_ARCH_SET_GS
:
3922 case TARGET_ARCH_SET_FS
:
3923 if (code
== TARGET_ARCH_SET_GS
)
3927 cpu_x86_load_seg(env
, idx
, 0);
3928 env
->segs
[idx
].base
= addr
;
3930 case TARGET_ARCH_GET_GS
:
3931 case TARGET_ARCH_GET_FS
:
3932 if (code
== TARGET_ARCH_GET_GS
)
3936 val
= env
->segs
[idx
].base
;
3937 if (put_user(val
, addr
, abi_ulong
))
3938 ret
= -TARGET_EFAULT
;
3941 ret
= -TARGET_EINVAL
;
3948 #endif /* defined(TARGET_I386) */
3950 #define NEW_STACK_SIZE 0x40000
3952 #if defined(CONFIG_USE_NPTL)
3954 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
3957 pthread_mutex_t mutex
;
3958 pthread_cond_t cond
;
3961 abi_ulong child_tidptr
;
3962 abi_ulong parent_tidptr
;
3966 static void *clone_func(void *arg
)
3968 new_thread_info
*info
= arg
;
3974 ts
= (TaskState
*)thread_env
->opaque
;
3975 info
->tid
= gettid();
3976 env
->host_tid
= info
->tid
;
3978 if (info
->child_tidptr
)
3979 put_user_u32(info
->tid
, info
->child_tidptr
);
3980 if (info
->parent_tidptr
)
3981 put_user_u32(info
->tid
, info
->parent_tidptr
);
3982 /* Enable signals. */
3983 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
3984 /* Signal to the parent that we're ready. */
3985 pthread_mutex_lock(&info
->mutex
);
3986 pthread_cond_broadcast(&info
->cond
);
3987 pthread_mutex_unlock(&info
->mutex
);
3988 /* Wait until the parent has finshed initializing the tls state. */
3989 pthread_mutex_lock(&clone_lock
);
3990 pthread_mutex_unlock(&clone_lock
);
3997 static int clone_func(void *arg
)
3999 CPUState
*env
= arg
;
4006 /* do_fork() Must return host values and target errnos (unlike most
4007 do_*() functions). */
4008 static int do_fork(CPUState
*env
, unsigned int flags
, abi_ulong newsp
,
4009 abi_ulong parent_tidptr
, target_ulong newtls
,
4010 abi_ulong child_tidptr
)
4015 #if defined(CONFIG_USE_NPTL)
4016 unsigned int nptl_flags
;
4022 /* Emulate vfork() with fork() */
4023 if (flags
& CLONE_VFORK
)
4024 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
4026 if (flags
& CLONE_VM
) {
4027 TaskState
*parent_ts
= (TaskState
*)env
->opaque
;
4028 #if defined(CONFIG_USE_NPTL)
4029 new_thread_info info
;
4030 pthread_attr_t attr
;
4032 ts
= g_malloc0(sizeof(TaskState
));
4033 init_task_state(ts
);
4034 /* we create a new CPU instance. */
4035 new_env
= cpu_copy(env
);
4036 #if defined(TARGET_I386) || defined(TARGET_SPARC) || defined(TARGET_PPC)
4039 /* Init regs that differ from the parent. */
4040 cpu_clone_regs(new_env
, newsp
);
4041 new_env
->opaque
= ts
;
4042 ts
->bprm
= parent_ts
->bprm
;
4043 ts
->info
= parent_ts
->info
;
4044 #if defined(CONFIG_USE_NPTL)
4046 flags
&= ~CLONE_NPTL_FLAGS2
;
4048 if (nptl_flags
& CLONE_CHILD_CLEARTID
) {
4049 ts
->child_tidptr
= child_tidptr
;
4052 if (nptl_flags
& CLONE_SETTLS
)
4053 cpu_set_tls (new_env
, newtls
);
4055 /* Grab a mutex so that thread setup appears atomic. */
4056 pthread_mutex_lock(&clone_lock
);
4058 memset(&info
, 0, sizeof(info
));
4059 pthread_mutex_init(&info
.mutex
, NULL
);
4060 pthread_mutex_lock(&info
.mutex
);
4061 pthread_cond_init(&info
.cond
, NULL
);
4063 if (nptl_flags
& CLONE_CHILD_SETTID
)
4064 info
.child_tidptr
= child_tidptr
;
4065 if (nptl_flags
& CLONE_PARENT_SETTID
)
4066 info
.parent_tidptr
= parent_tidptr
;
4068 ret
= pthread_attr_init(&attr
);
4069 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
4070 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
4071 /* It is not safe to deliver signals until the child has finished
4072 initializing, so temporarily block all signals. */
4073 sigfillset(&sigmask
);
4074 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
4076 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
4077 /* TODO: Free new CPU state if thread creation failed. */
4079 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
4080 pthread_attr_destroy(&attr
);
4082 /* Wait for the child to initialize. */
4083 pthread_cond_wait(&info
.cond
, &info
.mutex
);
4085 if (flags
& CLONE_PARENT_SETTID
)
4086 put_user_u32(ret
, parent_tidptr
);
4090 pthread_mutex_unlock(&info
.mutex
);
4091 pthread_cond_destroy(&info
.cond
);
4092 pthread_mutex_destroy(&info
.mutex
);
4093 pthread_mutex_unlock(&clone_lock
);
4095 if (flags
& CLONE_NPTL_FLAGS2
)
4097 /* This is probably going to die very quickly, but do it anyway. */
4098 new_stack
= g_malloc0 (NEW_STACK_SIZE
);
4100 ret
= __clone2(clone_func
, new_stack
, NEW_STACK_SIZE
, flags
, new_env
);
4102 ret
= clone(clone_func
, new_stack
+ NEW_STACK_SIZE
, flags
, new_env
);
4106 /* if no CLONE_VM, we consider it is a fork */
4107 if ((flags
& ~(CSIGNAL
| CLONE_NPTL_FLAGS2
)) != 0)
4112 /* Child Process. */
4113 cpu_clone_regs(env
, newsp
);
4115 #if defined(CONFIG_USE_NPTL)
4116 /* There is a race condition here. The parent process could
4117 theoretically read the TID in the child process before the child
4118 tid is set. This would require using either ptrace
4119 (not implemented) or having *_tidptr to point at a shared memory
4120 mapping. We can't repeat the spinlock hack used above because
4121 the child process gets its own copy of the lock. */
4122 if (flags
& CLONE_CHILD_SETTID
)
4123 put_user_u32(gettid(), child_tidptr
);
4124 if (flags
& CLONE_PARENT_SETTID
)
4125 put_user_u32(gettid(), parent_tidptr
);
4126 ts
= (TaskState
*)env
->opaque
;
4127 if (flags
& CLONE_SETTLS
)
4128 cpu_set_tls (env
, newtls
);
4129 if (flags
& CLONE_CHILD_CLEARTID
)
4130 ts
->child_tidptr
= child_tidptr
;
4139 /* warning : doesn't handle linux specific flags... */
4140 static int target_to_host_fcntl_cmd(int cmd
)
4143 case TARGET_F_DUPFD
:
4144 case TARGET_F_GETFD
:
4145 case TARGET_F_SETFD
:
4146 case TARGET_F_GETFL
:
4147 case TARGET_F_SETFL
:
4149 case TARGET_F_GETLK
:
4151 case TARGET_F_SETLK
:
4153 case TARGET_F_SETLKW
:
4155 case TARGET_F_GETOWN
:
4157 case TARGET_F_SETOWN
:
4159 case TARGET_F_GETSIG
:
4161 case TARGET_F_SETSIG
:
4163 #if TARGET_ABI_BITS == 32
4164 case TARGET_F_GETLK64
:
4166 case TARGET_F_SETLK64
:
4168 case TARGET_F_SETLKW64
:
4171 case TARGET_F_SETLEASE
:
4173 case TARGET_F_GETLEASE
:
4175 #ifdef F_DUPFD_CLOEXEC
4176 case TARGET_F_DUPFD_CLOEXEC
:
4177 return F_DUPFD_CLOEXEC
;
4179 case TARGET_F_NOTIFY
:
4182 return -TARGET_EINVAL
;
4184 return -TARGET_EINVAL
;
4187 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
4190 struct target_flock
*target_fl
;
4191 struct flock64 fl64
;
4192 struct target_flock64
*target_fl64
;
4194 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
4196 if (host_cmd
== -TARGET_EINVAL
)
4200 case TARGET_F_GETLK
:
4201 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
4202 return -TARGET_EFAULT
;
4203 fl
.l_type
= tswap16(target_fl
->l_type
);
4204 fl
.l_whence
= tswap16(target_fl
->l_whence
);
4205 fl
.l_start
= tswapal(target_fl
->l_start
);
4206 fl
.l_len
= tswapal(target_fl
->l_len
);
4207 fl
.l_pid
= tswap32(target_fl
->l_pid
);
4208 unlock_user_struct(target_fl
, arg
, 0);
4209 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
4211 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, arg
, 0))
4212 return -TARGET_EFAULT
;
4213 target_fl
->l_type
= tswap16(fl
.l_type
);
4214 target_fl
->l_whence
= tswap16(fl
.l_whence
);
4215 target_fl
->l_start
= tswapal(fl
.l_start
);
4216 target_fl
->l_len
= tswapal(fl
.l_len
);
4217 target_fl
->l_pid
= tswap32(fl
.l_pid
);
4218 unlock_user_struct(target_fl
, arg
, 1);
4222 case TARGET_F_SETLK
:
4223 case TARGET_F_SETLKW
:
4224 if (!lock_user_struct(VERIFY_READ
, target_fl
, arg
, 1))
4225 return -TARGET_EFAULT
;
4226 fl
.l_type
= tswap16(target_fl
->l_type
);
4227 fl
.l_whence
= tswap16(target_fl
->l_whence
);
4228 fl
.l_start
= tswapal(target_fl
->l_start
);
4229 fl
.l_len
= tswapal(target_fl
->l_len
);
4230 fl
.l_pid
= tswap32(target_fl
->l_pid
);
4231 unlock_user_struct(target_fl
, arg
, 0);
4232 ret
= get_errno(fcntl(fd
, host_cmd
, &fl
));
4235 case TARGET_F_GETLK64
:
4236 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4237 return -TARGET_EFAULT
;
4238 fl64
.l_type
= tswap16(target_fl64
->l_type
) >> 1;
4239 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4240 fl64
.l_start
= tswap64(target_fl64
->l_start
);
4241 fl64
.l_len
= tswap64(target_fl64
->l_len
);
4242 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4243 unlock_user_struct(target_fl64
, arg
, 0);
4244 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4246 if (!lock_user_struct(VERIFY_WRITE
, target_fl64
, arg
, 0))
4247 return -TARGET_EFAULT
;
4248 target_fl64
->l_type
= tswap16(fl64
.l_type
) >> 1;
4249 target_fl64
->l_whence
= tswap16(fl64
.l_whence
);
4250 target_fl64
->l_start
= tswap64(fl64
.l_start
);
4251 target_fl64
->l_len
= tswap64(fl64
.l_len
);
4252 target_fl64
->l_pid
= tswap32(fl64
.l_pid
);
4253 unlock_user_struct(target_fl64
, arg
, 1);
4256 case TARGET_F_SETLK64
:
4257 case TARGET_F_SETLKW64
:
4258 if (!lock_user_struct(VERIFY_READ
, target_fl64
, arg
, 1))
4259 return -TARGET_EFAULT
;
4260 fl64
.l_type
= tswap16(target_fl64
->l_type
) >> 1;
4261 fl64
.l_whence
= tswap16(target_fl64
->l_whence
);
4262 fl64
.l_start
= tswap64(target_fl64
->l_start
);
4263 fl64
.l_len
= tswap64(target_fl64
->l_len
);
4264 fl64
.l_pid
= tswap32(target_fl64
->l_pid
);
4265 unlock_user_struct(target_fl64
, arg
, 0);
4266 ret
= get_errno(fcntl(fd
, host_cmd
, &fl64
));
4269 case TARGET_F_GETFL
:
4270 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4272 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
4276 case TARGET_F_SETFL
:
4277 ret
= get_errno(fcntl(fd
, host_cmd
, target_to_host_bitmask(arg
, fcntl_flags_tbl
)));
4280 case TARGET_F_SETOWN
:
4281 case TARGET_F_GETOWN
:
4282 case TARGET_F_SETSIG
:
4283 case TARGET_F_GETSIG
:
4284 case TARGET_F_SETLEASE
:
4285 case TARGET_F_GETLEASE
:
4286 ret
= get_errno(fcntl(fd
, host_cmd
, arg
));
4290 ret
= get_errno(fcntl(fd
, cmd
, arg
));
4298 static inline int high2lowuid(int uid
)
4306 static inline int high2lowgid(int gid
)
4314 static inline int low2highuid(int uid
)
4316 if ((int16_t)uid
== -1)
4322 static inline int low2highgid(int gid
)
4324 if ((int16_t)gid
== -1)
4329 static inline int tswapid(int id
)
4333 #else /* !USE_UID16 */
4334 static inline int high2lowuid(int uid
)
4338 static inline int high2lowgid(int gid
)
4342 static inline int low2highuid(int uid
)
4346 static inline int low2highgid(int gid
)
4350 static inline int tswapid(int id
)
4354 #endif /* USE_UID16 */
4356 void syscall_init(void)
4359 const argtype
*arg_type
;
4363 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
4364 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
4365 #include "syscall_types.h"
4367 #undef STRUCT_SPECIAL
4369 /* we patch the ioctl size if necessary. We rely on the fact that
4370 no ioctl has all the bits at '1' in the size field */
4372 while (ie
->target_cmd
!= 0) {
4373 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
4374 TARGET_IOC_SIZEMASK
) {
4375 arg_type
= ie
->arg_type
;
4376 if (arg_type
[0] != TYPE_PTR
) {
4377 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
4382 size
= thunk_type_size(arg_type
, 0);
4383 ie
->target_cmd
= (ie
->target_cmd
&
4384 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
4385 (size
<< TARGET_IOC_SIZESHIFT
);
4388 /* Build target_to_host_errno_table[] table from
4389 * host_to_target_errno_table[]. */
4390 for (i
=0; i
< ERRNO_TABLE_SIZE
; i
++)
4391 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
4393 /* automatic consistency check if same arch */
4394 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
4395 (defined(__x86_64__) && defined(TARGET_X86_64))
4396 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
4397 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
4398 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#if TARGET_ABI_BITS == 32
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */

#ifdef TARGET_NR_truncate64
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif

#ifdef TARGET_NR_ftruncate64
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
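/*
 * Example of what the regpairs_aligned() shuffle is for (values are
 * illustrative): on 32-bit ABIs a 64-bit length such as 0x100000000
 * arrives split across two syscall argument registers.  Some ABIs
 * require such pairs to start on an even register, inserting a padding
 * argument, so the low/high words may sit in arg3/arg4 instead of
 * arg2/arg3; after the shift, target_offset64() glues the two 32-bit
 * halves back together in guest word order.
 */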
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
        return -TARGET_EFAULT;
    host_ts->tv_sec = tswapal(target_ts->tv_sec);
    host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}

static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
        return -TARGET_EFAULT;
    target_ts->tv_sec = tswapal(host_ts->tv_sec);
    target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
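/*
 * Usage sketch (hypothetical caller, assuming a syscall whose first
 * argument is a guest timespec pointer and whose second receives the
 * remaining time):
 *
 *     struct timespec ts;
 *     target_to_host_timespec(&ts, arg1);
 *     ret = get_errno(nanosleep(&ts, &ts));
 *     if (is_error(ret) && arg2)
 *         host_to_target_timespec(arg2, &ts);
 *
 * The real TARGET_NR_nanosleep case later in do_syscall() follows this
 * pattern; the helpers exist because the guest's struct layout and
 * byte order need not match the host's.
 */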
#if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#ifdef TARGET_ARM
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if TARGET_ABI_BITS == 64 && !defined(TARGET_ALPHA)
        struct target_stat *target_st;
#else
        struct target_stat64 *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
#if defined(CONFIG_USE_NPTL)
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  However they're probably useless because guest atomic
   operations won't work either.  */
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
        if (timeout) {
            pts = &ts;
            target_to_host_timespec(pts, timeout);
        } else {
            pts = NULL;
        }
        return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
                         pts, NULL, 0));
    case FUTEX_WAKE:
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_FD:
        return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return get_errno(sys_futex(g2h(uaddr), op, val, pts,
                                   g2h(uaddr2),
                                   (base_op == FUTEX_CMP_REQUEUE
                                    ? tswap32(val3)
                                    : val3)));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif
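/*
 * A note on the conversions above: g2h() turns a guest virtual address
 * into the corresponding host pointer, so the host kernel really does
 * wait on and wake the same word the guest threads are using.  Only
 * FUTEX_WAIT byte swaps the expected value, because that is the one
 * operand the kernel compares against guest memory, which is stored in
 * guest byte order.
 */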
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
static int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
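/*
 * Example of the encoding being preserved (illustrative numbers): a
 * child stopped by host SIGTSTP (20) reports status 0x147f, i.e. the
 * signal number in bits 8..15 and 0x7f in the low byte marking
 * "stopped".  If the target numbers SIGTSTP differently, only the
 * signal byte is rewritten; the remaining status bits pass through
 * unchanged.
 */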
int get_osversion(void)
{
    static int osversion;
    struct new_utsname buf;
    const char *s;
    int i, n, tmp;

    if (osversion)
        return osversion;
    if (qemu_uname_release && *qemu_uname_release) {
        s = qemu_uname_release;
    } else {
        if (sys_uname(&buf))
            return 0;
        s = buf.release;
    }
    tmp = 0;
    for (i = 0; i < 3; i++) {
        n = 0;
        while (*s >= '0' && *s <= '9') {
            n *= 10;
            n += *s - '0';
            s++;
        }
        tmp = (tmp << 8) + n;
        if (*s == '.')
            s++;
    }
    osversion = tmp;
    return osversion;
}
static int open_self_maps(void *cpu_env, int fd)
{
    TaskState *ts = ((CPUState *)cpu_env)->opaque;

    dprintf(fd, "%08llx-%08llx rw-p %08llx 00:00 0 [stack]\n",
            (unsigned long long)ts->info->stack_limit,
            (unsigned long long)(ts->stack_base + (TARGET_PAGE_SIZE - 1))
                                 & TARGET_PAGE_MASK,
            (unsigned long long)ts->stack_base);

    return 0;
}

static int open_self_stat(void *cpu_env, int fd)
{
    TaskState *ts = ((CPUState *)cpu_env)->opaque;
    abi_ulong start_stack = ts->info->start_stack;
    int i;

    for (i = 0; i < 44; i++) {
        char buf[128];
        int len;
        uint64_t val = 0;

        if (i == 27) {
            /* stack bottom */
            val = start_stack;
        }
        snprintf(buf, sizeof(buf), "%"PRId64 "%c", val, i == 43 ? '\n' : ' ');
        len = strlen(buf);
        if (write(fd, buf, len) != len) {
            return -1;
        }
    }

    return 0;
}

static int open_self_auxv(void *cpu_env, int fd)
{
    TaskState *ts = ((CPUState *)cpu_env)->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
static int do_open(void *cpu_env, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;
        int (*fill)(void *cpu_env, int fd);
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "/proc/self/maps", open_self_maps },
        { "/proc/self/stat", open_self_stat },
        { "/proc/self/auxv", open_self_auxv },
        { NULL, NULL }
    };

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (!strncmp(pathname, fake_open->filename,
                     strlen(fake_open->filename))) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            close(fd);
            return r;
        }
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return get_errno(open(path(pathname), flags, mode));
}
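/*
 * The fake_open table above is how the emulator satisfies guest reads
 * of /proc/self/*: the host kernel's answers would describe the QEMU
 * process rather than the guest, so a matching open() is redirected to
 * an anonymous temporary file pre-filled by the corresponding
 * open_self_* generator and handed to the guest as an ordinary file
 * descriptor.
 */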
/* do_syscall() should always have a single exit point at the end so
   that actions, such as logging of syscall results, can be performed.
   All errnos that do_syscall() returns must be -TARGET_<errcode>. */
abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
                    abi_long arg2, abi_long arg3, abi_long arg4,
                    abi_long arg5, abi_long arg6, abi_long arg7,
                    abi_long arg8)
{
    abi_long ret;
    struct stat st;
    struct statfs stfs;
    void *p;

#ifdef DEBUG
    gemu_log("syscall %d", num);
#endif
    if (do_strace)
        print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);

    switch(num) {
    case TARGET_NR_exit:
#ifdef CONFIG_USE_NPTL
        /* In old applications this may be used to implement _exit(2).
           However in threaded applications it is used for thread termination,
           and _exit_group is used for application termination.
           Do thread termination if we have more than one thread.  */
        /* FIXME: This probably breaks if a signal arrives.  We should probably
           be disabling signals.  */
        if (first_cpu->next_cpu) {
            TaskState *ts;
            CPUState **lastp;
            CPUState *p;

            cpu_list_lock();
            lastp = &first_cpu;
            p = first_cpu;
            while (p && p != (CPUState *)cpu_env) {
                lastp = &p->next_cpu;
                p = p->next_cpu;
            }
            /* If we didn't find the CPU for this thread then something is
               horribly wrong.  */
            if (!p)
                abort();
            /* Remove the CPU from the list.  */
            *lastp = p->next_cpu;
            cpu_list_unlock();
            ts = ((CPUState *)cpu_env)->opaque;
            if (ts->child_tidptr) {
                put_user_u32(0, ts->child_tidptr);
                sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
                          NULL, NULL, 0);
            }
            thread_env = NULL;
            /* TODO: Free CPU state.  */
            pthread_exit(NULL);
        }
#endif
#ifdef TARGET_GPROF
        _mcleanup();
#endif
        gdb_exit(cpu_env, arg1);
        _exit(arg1);
        ret = 0; /* avoid warning */
        break;
    case TARGET_NR_read:
        if (arg3 == 0)
            ret = 0;
        else {
            if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
                goto efault;
            ret = get_errno(read(arg1, p, arg3));
            unlock_user(p, arg2, ret);
        }
        break;
    case TARGET_NR_write:
        if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
            goto efault;
        ret = get_errno(write(arg1, p, arg3));
        unlock_user(p, arg2, 0);
        break;
    case TARGET_NR_open:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(do_open(cpu_env, p,
                                target_to_host_bitmask(arg2, fcntl_flags_tbl),
                                arg3));
        unlock_user(p, arg1, 0);
        break;
#if defined(TARGET_NR_openat) && defined(__NR_openat)
    case TARGET_NR_openat:
        if (!(p = lock_user_string(arg2)))
            goto efault;
        ret = get_errno(sys_openat(arg1,
                                   path(p),
                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
                                   arg4));
        unlock_user(p, arg2, 0);
        break;
#endif
    case TARGET_NR_close:
        ret = get_errno(close(arg1));
        break;
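/*
 * The lock_user()/unlock_user() pairing used by the cases above is the
 * general pattern for any syscall that reads or writes guest memory:
 * lock_user() validates the guest range and returns a host pointer
 * (possibly to a bounce buffer), VERIFY_READ/VERIFY_WRITE states the
 * direction of the access, and the final length argument of
 * unlock_user() says how many bytes were actually produced and must be
 * copied back to the guest.
 */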
4862 case TARGET_NR_fork
:
4863 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
4865 #ifdef TARGET_NR_waitpid
4866 case TARGET_NR_waitpid
:
4869 ret
= get_errno(waitpid(arg1
, &status
, arg3
));
4870 if (!is_error(ret
) && arg2
&& ret
4871 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
4876 #ifdef TARGET_NR_waitid
4877 case TARGET_NR_waitid
:
4881 ret
= get_errno(waitid(arg1
, arg2
, &info
, arg4
));
4882 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
4883 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
4885 host_to_target_siginfo(p
, &info
);
4886 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
4891 #ifdef TARGET_NR_creat /* not on alpha */
4892 case TARGET_NR_creat
:
4893 if (!(p
= lock_user_string(arg1
)))
4895 ret
= get_errno(creat(p
, arg2
));
4896 unlock_user(p
, arg1
, 0);
4899 case TARGET_NR_link
:
4902 p
= lock_user_string(arg1
);
4903 p2
= lock_user_string(arg2
);
4905 ret
= -TARGET_EFAULT
;
4907 ret
= get_errno(link(p
, p2
));
4908 unlock_user(p2
, arg2
, 0);
4909 unlock_user(p
, arg1
, 0);
4912 #if defined(TARGET_NR_linkat) && defined(__NR_linkat)
4913 case TARGET_NR_linkat
:
4918 p
= lock_user_string(arg2
);
4919 p2
= lock_user_string(arg4
);
4921 ret
= -TARGET_EFAULT
;
4923 ret
= get_errno(sys_linkat(arg1
, p
, arg3
, p2
, arg5
));
4924 unlock_user(p
, arg2
, 0);
4925 unlock_user(p2
, arg4
, 0);
4929 case TARGET_NR_unlink
:
4930 if (!(p
= lock_user_string(arg1
)))
4932 ret
= get_errno(unlink(p
));
4933 unlock_user(p
, arg1
, 0);
4935 #if defined(TARGET_NR_unlinkat) && defined(__NR_unlinkat)
4936 case TARGET_NR_unlinkat
:
4937 if (!(p
= lock_user_string(arg2
)))
4939 ret
= get_errno(sys_unlinkat(arg1
, p
, arg3
));
4940 unlock_user(p
, arg2
, 0);
4943 case TARGET_NR_execve
:
4945 char **argp
, **envp
;
4948 abi_ulong guest_argp
;
4949 abi_ulong guest_envp
;
4955 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
4956 if (get_user_ual(addr
, gp
))
4964 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
4965 if (get_user_ual(addr
, gp
))
4972 argp
= alloca((argc
+ 1) * sizeof(void *));
4973 envp
= alloca((envc
+ 1) * sizeof(void *));
4975 for (gp
= guest_argp
, q
= argp
; gp
;
4976 gp
+= sizeof(abi_ulong
), q
++) {
4977 if (get_user_ual(addr
, gp
))
4981 if (!(*q
= lock_user_string(addr
)))
4986 for (gp
= guest_envp
, q
= envp
; gp
;
4987 gp
+= sizeof(abi_ulong
), q
++) {
4988 if (get_user_ual(addr
, gp
))
4992 if (!(*q
= lock_user_string(addr
)))
4997 if (!(p
= lock_user_string(arg1
)))
4999 ret
= get_errno(execve(p
, argp
, envp
));
5000 unlock_user(p
, arg1
, 0);
5005 ret
= -TARGET_EFAULT
;
5008 for (gp
= guest_argp
, q
= argp
; *q
;
5009 gp
+= sizeof(abi_ulong
), q
++) {
5010 if (get_user_ual(addr
, gp
)
5013 unlock_user(*q
, addr
, 0);
5015 for (gp
= guest_envp
, q
= envp
; *q
;
5016 gp
+= sizeof(abi_ulong
), q
++) {
5017 if (get_user_ual(addr
, gp
)
5020 unlock_user(*q
, addr
, 0);
5024 case TARGET_NR_chdir
:
5025 if (!(p
= lock_user_string(arg1
)))
5027 ret
= get_errno(chdir(p
));
5028 unlock_user(p
, arg1
, 0);
5030 #ifdef TARGET_NR_time
5031 case TARGET_NR_time
:
5034 ret
= get_errno(time(&host_time
));
5037 && put_user_sal(host_time
, arg1
))
5042 case TARGET_NR_mknod
:
5043 if (!(p
= lock_user_string(arg1
)))
5045 ret
= get_errno(mknod(p
, arg2
, arg3
));
5046 unlock_user(p
, arg1
, 0);
5048 #if defined(TARGET_NR_mknodat) && defined(__NR_mknodat)
5049 case TARGET_NR_mknodat
:
5050 if (!(p
= lock_user_string(arg2
)))
5052 ret
= get_errno(sys_mknodat(arg1
, p
, arg3
, arg4
));
5053 unlock_user(p
, arg2
, 0);
5056 case TARGET_NR_chmod
:
5057 if (!(p
= lock_user_string(arg1
)))
5059 ret
= get_errno(chmod(p
, arg2
));
5060 unlock_user(p
, arg1
, 0);
5062 #ifdef TARGET_NR_break
5063 case TARGET_NR_break
:
5066 #ifdef TARGET_NR_oldstat
5067 case TARGET_NR_oldstat
:
5070 case TARGET_NR_lseek
:
5071 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
5073 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
5074 /* Alpha specific */
5075 case TARGET_NR_getxpid
:
5076 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
5077 ret
= get_errno(getpid());
5080 #ifdef TARGET_NR_getpid
5081 case TARGET_NR_getpid
:
5082 ret
= get_errno(getpid());
5085 case TARGET_NR_mount
:
5087 /* need to look at the data field */
5089 p
= lock_user_string(arg1
);
5090 p2
= lock_user_string(arg2
);
5091 p3
= lock_user_string(arg3
);
5092 if (!p
|| !p2
|| !p3
)
5093 ret
= -TARGET_EFAULT
;
5095 /* FIXME - arg5 should be locked, but it isn't clear how to
5096 * do that since it's not guaranteed to be a NULL-terminated
5100 ret
= get_errno(mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
));
5102 ret
= get_errno(mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
)));
5104 unlock_user(p
, arg1
, 0);
5105 unlock_user(p2
, arg2
, 0);
5106 unlock_user(p3
, arg3
, 0);
5109 #ifdef TARGET_NR_umount
5110 case TARGET_NR_umount
:
5111 if (!(p
= lock_user_string(arg1
)))
5113 ret
= get_errno(umount(p
));
5114 unlock_user(p
, arg1
, 0);
5117 #ifdef TARGET_NR_stime /* not on alpha */
5118 case TARGET_NR_stime
:
5121 if (get_user_sal(host_time
, arg1
))
5123 ret
= get_errno(stime(&host_time
));
5127 case TARGET_NR_ptrace
:
5129 #ifdef TARGET_NR_alarm /* not on alpha */
5130 case TARGET_NR_alarm
:
5134 #ifdef TARGET_NR_oldfstat
5135 case TARGET_NR_oldfstat
:
5138 #ifdef TARGET_NR_pause /* not on alpha */
5139 case TARGET_NR_pause
:
5140 ret
= get_errno(pause());
5143 #ifdef TARGET_NR_utime
5144 case TARGET_NR_utime
:
5146 struct utimbuf tbuf
, *host_tbuf
;
5147 struct target_utimbuf
*target_tbuf
;
5149 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
5151 tbuf
.actime
= tswapal(target_tbuf
->actime
);
5152 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
5153 unlock_user_struct(target_tbuf
, arg2
, 0);
5158 if (!(p
= lock_user_string(arg1
)))
5160 ret
= get_errno(utime(p
, host_tbuf
));
5161 unlock_user(p
, arg1
, 0);
5165 case TARGET_NR_utimes
:
5167 struct timeval
*tvp
, tv
[2];
5169 if (copy_from_user_timeval(&tv
[0], arg2
)
5170 || copy_from_user_timeval(&tv
[1],
5171 arg2
+ sizeof(struct target_timeval
)))
5177 if (!(p
= lock_user_string(arg1
)))
5179 ret
= get_errno(utimes(p
, tvp
));
5180 unlock_user(p
, arg1
, 0);
5183 #if defined(TARGET_NR_futimesat) && defined(__NR_futimesat)
5184 case TARGET_NR_futimesat
:
5186 struct timeval
*tvp
, tv
[2];
5188 if (copy_from_user_timeval(&tv
[0], arg3
)
5189 || copy_from_user_timeval(&tv
[1],
5190 arg3
+ sizeof(struct target_timeval
)))
5196 if (!(p
= lock_user_string(arg2
)))
5198 ret
= get_errno(sys_futimesat(arg1
, path(p
), tvp
));
5199 unlock_user(p
, arg2
, 0);
5203 #ifdef TARGET_NR_stty
5204 case TARGET_NR_stty
:
5207 #ifdef TARGET_NR_gtty
5208 case TARGET_NR_gtty
:
5211 case TARGET_NR_access
:
5212 if (!(p
= lock_user_string(arg1
)))
5214 ret
= get_errno(access(path(p
), arg2
));
5215 unlock_user(p
, arg1
, 0);
5217 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
5218 case TARGET_NR_faccessat
:
5219 if (!(p
= lock_user_string(arg2
)))
5221 ret
= get_errno(sys_faccessat(arg1
, p
, arg3
));
5222 unlock_user(p
, arg2
, 0);
5225 #ifdef TARGET_NR_nice /* not on alpha */
5226 case TARGET_NR_nice
:
5227 ret
= get_errno(nice(arg1
));
5230 #ifdef TARGET_NR_ftime
5231 case TARGET_NR_ftime
:
5234 case TARGET_NR_sync
:
5238 case TARGET_NR_kill
:
5239 ret
= get_errno(kill(arg1
, target_to_host_signal(arg2
)));
5241 case TARGET_NR_rename
:
5244 p
= lock_user_string(arg1
);
5245 p2
= lock_user_string(arg2
);
5247 ret
= -TARGET_EFAULT
;
5249 ret
= get_errno(rename(p
, p2
));
5250 unlock_user(p2
, arg2
, 0);
5251 unlock_user(p
, arg1
, 0);
5254 #if defined(TARGET_NR_renameat) && defined(__NR_renameat)
5255 case TARGET_NR_renameat
:
5258 p
= lock_user_string(arg2
);
5259 p2
= lock_user_string(arg4
);
5261 ret
= -TARGET_EFAULT
;
5263 ret
= get_errno(sys_renameat(arg1
, p
, arg3
, p2
));
5264 unlock_user(p2
, arg4
, 0);
5265 unlock_user(p
, arg2
, 0);
5269 case TARGET_NR_mkdir
:
5270 if (!(p
= lock_user_string(arg1
)))
5272 ret
= get_errno(mkdir(p
, arg2
));
5273 unlock_user(p
, arg1
, 0);
5275 #if defined(TARGET_NR_mkdirat) && defined(__NR_mkdirat)
5276 case TARGET_NR_mkdirat
:
5277 if (!(p
= lock_user_string(arg2
)))
5279 ret
= get_errno(sys_mkdirat(arg1
, p
, arg3
));
5280 unlock_user(p
, arg2
, 0);
5283 case TARGET_NR_rmdir
:
5284 if (!(p
= lock_user_string(arg1
)))
5286 ret
= get_errno(rmdir(p
));
5287 unlock_user(p
, arg1
, 0);
5290 ret
= get_errno(dup(arg1
));
5292 case TARGET_NR_pipe
:
5293 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
5295 #ifdef TARGET_NR_pipe2
5296 case TARGET_NR_pipe2
:
5297 ret
= do_pipe(cpu_env
, arg1
, arg2
, 1);
5300 case TARGET_NR_times
:
5302 struct target_tms
*tmsp
;
5304 ret
= get_errno(times(&tms
));
5306 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
5309 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
5310 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
5311 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
5312 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
5315 ret
= host_to_target_clock_t(ret
);
5318 #ifdef TARGET_NR_prof
5319 case TARGET_NR_prof
:
5322 #ifdef TARGET_NR_signal
5323 case TARGET_NR_signal
:
5326 case TARGET_NR_acct
:
5328 ret
= get_errno(acct(NULL
));
5330 if (!(p
= lock_user_string(arg1
)))
5332 ret
= get_errno(acct(path(p
)));
5333 unlock_user(p
, arg1
, 0);
5336 #ifdef TARGET_NR_umount2 /* not on alpha */
5337 case TARGET_NR_umount2
:
5338 if (!(p
= lock_user_string(arg1
)))
5340 ret
= get_errno(umount2(p
, arg2
));
5341 unlock_user(p
, arg1
, 0);
5344 #ifdef TARGET_NR_lock
5345 case TARGET_NR_lock
:
5348 case TARGET_NR_ioctl
:
5349 ret
= do_ioctl(arg1
, arg2
, arg3
);
5351 case TARGET_NR_fcntl
:
5352 ret
= do_fcntl(arg1
, arg2
, arg3
);
5354 #ifdef TARGET_NR_mpx
5358 case TARGET_NR_setpgid
:
5359 ret
= get_errno(setpgid(arg1
, arg2
));
5361 #ifdef TARGET_NR_ulimit
5362 case TARGET_NR_ulimit
:
5365 #ifdef TARGET_NR_oldolduname
5366 case TARGET_NR_oldolduname
:
5369 case TARGET_NR_umask
:
5370 ret
= get_errno(umask(arg1
));
5372 case TARGET_NR_chroot
:
5373 if (!(p
= lock_user_string(arg1
)))
5375 ret
= get_errno(chroot(p
));
5376 unlock_user(p
, arg1
, 0);
5378 case TARGET_NR_ustat
:
5380 case TARGET_NR_dup2
:
5381 ret
= get_errno(dup2(arg1
, arg2
));
5383 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
5384 case TARGET_NR_dup3
:
5385 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
5388 #ifdef TARGET_NR_getppid /* not on alpha */
5389 case TARGET_NR_getppid
:
5390 ret
= get_errno(getppid());
5393 case TARGET_NR_getpgrp
:
5394 ret
= get_errno(getpgrp());
5396 case TARGET_NR_setsid
:
5397 ret
= get_errno(setsid());
5399 #ifdef TARGET_NR_sigaction
5400 case TARGET_NR_sigaction
:
5402 #if defined(TARGET_ALPHA)
5403 struct target_sigaction act
, oact
, *pact
= 0;
5404 struct target_old_sigaction
*old_act
;
5406 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5408 act
._sa_handler
= old_act
->_sa_handler
;
5409 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
5410 act
.sa_flags
= old_act
->sa_flags
;
5411 act
.sa_restorer
= 0;
5412 unlock_user_struct(old_act
, arg2
, 0);
5415 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5416 if (!is_error(ret
) && arg3
) {
5417 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5419 old_act
->_sa_handler
= oact
._sa_handler
;
5420 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
5421 old_act
->sa_flags
= oact
.sa_flags
;
5422 unlock_user_struct(old_act
, arg3
, 1);
5424 #elif defined(TARGET_MIPS)
5425 struct target_sigaction act
, oact
, *pact
, *old_act
;
5428 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5430 act
._sa_handler
= old_act
->_sa_handler
;
5431 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
5432 act
.sa_flags
= old_act
->sa_flags
;
5433 unlock_user_struct(old_act
, arg2
, 0);
5439 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5441 if (!is_error(ret
) && arg3
) {
5442 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5444 old_act
->_sa_handler
= oact
._sa_handler
;
5445 old_act
->sa_flags
= oact
.sa_flags
;
5446 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
5447 old_act
->sa_mask
.sig
[1] = 0;
5448 old_act
->sa_mask
.sig
[2] = 0;
5449 old_act
->sa_mask
.sig
[3] = 0;
5450 unlock_user_struct(old_act
, arg3
, 1);
5453 struct target_old_sigaction
*old_act
;
5454 struct target_sigaction act
, oact
, *pact
;
5456 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
5458 act
._sa_handler
= old_act
->_sa_handler
;
5459 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
5460 act
.sa_flags
= old_act
->sa_flags
;
5461 act
.sa_restorer
= old_act
->sa_restorer
;
5462 unlock_user_struct(old_act
, arg2
, 0);
5467 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5468 if (!is_error(ret
) && arg3
) {
5469 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
5471 old_act
->_sa_handler
= oact
._sa_handler
;
5472 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
5473 old_act
->sa_flags
= oact
.sa_flags
;
5474 old_act
->sa_restorer
= oact
.sa_restorer
;
5475 unlock_user_struct(old_act
, arg3
, 1);
5481 case TARGET_NR_rt_sigaction
:
5483 #if defined(TARGET_ALPHA)
5484 struct target_sigaction act
, oact
, *pact
= 0;
5485 struct target_rt_sigaction
*rt_act
;
5486 /* ??? arg4 == sizeof(sigset_t). */
5488 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
5490 act
._sa_handler
= rt_act
->_sa_handler
;
5491 act
.sa_mask
= rt_act
->sa_mask
;
5492 act
.sa_flags
= rt_act
->sa_flags
;
5493 act
.sa_restorer
= arg5
;
5494 unlock_user_struct(rt_act
, arg2
, 0);
5497 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
5498 if (!is_error(ret
) && arg3
) {
5499 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
5501 rt_act
->_sa_handler
= oact
._sa_handler
;
5502 rt_act
->sa_mask
= oact
.sa_mask
;
5503 rt_act
->sa_flags
= oact
.sa_flags
;
5504 unlock_user_struct(rt_act
, arg3
, 1);
5507 struct target_sigaction
*act
;
5508 struct target_sigaction
*oact
;
5511 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
5516 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
5517 ret
= -TARGET_EFAULT
;
5518 goto rt_sigaction_fail
;
5522 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
5525 unlock_user_struct(act
, arg2
, 0);
5527 unlock_user_struct(oact
, arg3
, 1);
5531 #ifdef TARGET_NR_sgetmask /* not on alpha */
5532 case TARGET_NR_sgetmask
:
5535 abi_ulong target_set
;
5536 sigprocmask(0, NULL
, &cur_set
);
5537 host_to_target_old_sigset(&target_set
, &cur_set
);
5542 #ifdef TARGET_NR_ssetmask /* not on alpha */
5543 case TARGET_NR_ssetmask
:
5545 sigset_t set
, oset
, cur_set
;
5546 abi_ulong target_set
= arg1
;
5547 sigprocmask(0, NULL
, &cur_set
);
5548 target_to_host_old_sigset(&set
, &target_set
);
5549 sigorset(&set
, &set
, &cur_set
);
5550 sigprocmask(SIG_SETMASK
, &set
, &oset
);
5551 host_to_target_old_sigset(&target_set
, &oset
);
5556 #ifdef TARGET_NR_sigprocmask
5557 case TARGET_NR_sigprocmask
:
5559 #if defined(TARGET_ALPHA)
5560 sigset_t set
, oldset
;
5565 case TARGET_SIG_BLOCK
:
5568 case TARGET_SIG_UNBLOCK
:
5571 case TARGET_SIG_SETMASK
:
5575 ret
= -TARGET_EINVAL
;
5579 target_to_host_old_sigset(&set
, &mask
);
5581 ret
= get_errno(sigprocmask(how
, &set
, &oldset
));
5583 if (!is_error(ret
)) {
5584 host_to_target_old_sigset(&mask
, &oldset
);
5586 ((CPUAlphaState
*)cpu_env
)->[IR_V0
] = 0; /* force no error */
5589 sigset_t set
, oldset
, *set_ptr
;
5594 case TARGET_SIG_BLOCK
:
5597 case TARGET_SIG_UNBLOCK
:
5600 case TARGET_SIG_SETMASK
:
5604 ret
= -TARGET_EINVAL
;
5607 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
5609 target_to_host_old_sigset(&set
, p
);
5610 unlock_user(p
, arg2
, 0);
5616 ret
= get_errno(sigprocmask(how
, set_ptr
, &oldset
));
5617 if (!is_error(ret
) && arg3
) {
5618 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
5620 host_to_target_old_sigset(p
, &oldset
);
5621 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
5627 case TARGET_NR_rt_sigprocmask
:
5630 sigset_t set
, oldset
, *set_ptr
;
5634 case TARGET_SIG_BLOCK
:
5637 case TARGET_SIG_UNBLOCK
:
5640 case TARGET_SIG_SETMASK
:
5644 ret
= -TARGET_EINVAL
;
5647 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
5649 target_to_host_sigset(&set
, p
);
5650 unlock_user(p
, arg2
, 0);
5656 ret
= get_errno(sigprocmask(how
, set_ptr
, &oldset
));
5657 if (!is_error(ret
) && arg3
) {
5658 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
5660 host_to_target_sigset(p
, &oldset
);
5661 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
5665 #ifdef TARGET_NR_sigpending
5666 case TARGET_NR_sigpending
:
5669 ret
= get_errno(sigpending(&set
));
5670 if (!is_error(ret
)) {
5671 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
5673 host_to_target_old_sigset(p
, &set
);
5674 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
5679 case TARGET_NR_rt_sigpending
:
5682 ret
= get_errno(sigpending(&set
));
5683 if (!is_error(ret
)) {
5684 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
5686 host_to_target_sigset(p
, &set
);
5687 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
5691 #ifdef TARGET_NR_sigsuspend
5692 case TARGET_NR_sigsuspend
:
5695 #if defined(TARGET_ALPHA)
5696 abi_ulong mask
= arg1
;
5697 target_to_host_old_sigset(&set
, &mask
);
5699 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
5701 target_to_host_old_sigset(&set
, p
);
5702 unlock_user(p
, arg1
, 0);
5704 ret
= get_errno(sigsuspend(&set
));
5708 case TARGET_NR_rt_sigsuspend
:
5711 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
5713 target_to_host_sigset(&set
, p
);
5714 unlock_user(p
, arg1
, 0);
5715 ret
= get_errno(sigsuspend(&set
));
5718 case TARGET_NR_rt_sigtimedwait
:
5721 struct timespec uts
, *puts
;
5724 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
5726 target_to_host_sigset(&set
, p
);
5727 unlock_user(p
, arg1
, 0);
5730 target_to_host_timespec(puts
, arg3
);
5734 ret
= get_errno(sigtimedwait(&set
, &uinfo
, puts
));
5735 if (!is_error(ret
) && arg2
) {
5736 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
), 0)))
5738 host_to_target_siginfo(p
, &uinfo
);
5739 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
5743 case TARGET_NR_rt_sigqueueinfo
:
5746 if (!(p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_sigset_t
), 1)))
5748 target_to_host_siginfo(&uinfo
, p
);
5749 unlock_user(p
, arg1
, 0);
5750 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
5753 #ifdef TARGET_NR_sigreturn
5754 case TARGET_NR_sigreturn
:
/* NOTE: ret is eax, so no transcoding needs to be done */
5756 ret
= do_sigreturn(cpu_env
);
5759 case TARGET_NR_rt_sigreturn
:
/* NOTE: ret is eax, so no transcoding needs to be done */
5761 ret
= do_rt_sigreturn(cpu_env
);
5763 case TARGET_NR_sethostname
:
5764 if (!(p
= lock_user_string(arg1
)))
5766 ret
= get_errno(sethostname(p
, arg2
));
5767 unlock_user(p
, arg1
, 0);
5769 case TARGET_NR_setrlimit
:
5771 int resource
= target_to_host_resource(arg1
);
5772 struct target_rlimit
*target_rlim
;
5774 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
5776 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
5777 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
5778 unlock_user_struct(target_rlim
, arg2
, 0);
5779 ret
= get_errno(setrlimit(resource
, &rlim
));
5782 case TARGET_NR_getrlimit
:
5784 int resource
= target_to_host_resource(arg1
);
5785 struct target_rlimit
*target_rlim
;
5788 ret
= get_errno(getrlimit(resource
, &rlim
));
5789 if (!is_error(ret
)) {
5790 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
5792 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
5793 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
5794 unlock_user_struct(target_rlim
, arg2
, 1);
5798 case TARGET_NR_getrusage
:
5800 struct rusage rusage
;
5801 ret
= get_errno(getrusage(arg1
, &rusage
));
5802 if (!is_error(ret
)) {
5803 host_to_target_rusage(arg2
, &rusage
);
5807 case TARGET_NR_gettimeofday
:
5810 ret
= get_errno(gettimeofday(&tv
, NULL
));
5811 if (!is_error(ret
)) {
5812 if (copy_to_user_timeval(arg1
, &tv
))
5817 case TARGET_NR_settimeofday
:
5820 if (copy_from_user_timeval(&tv
, arg1
))
5822 ret
= get_errno(settimeofday(&tv
, NULL
));
5825 #if defined(TARGET_NR_select) && !defined(TARGET_S390X) && !defined(TARGET_S390)
5826 case TARGET_NR_select
:
5828 struct target_sel_arg_struct
*sel
;
5829 abi_ulong inp
, outp
, exp
, tvp
;
5832 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1))
5834 nsel
= tswapal(sel
->n
);
5835 inp
= tswapal(sel
->inp
);
5836 outp
= tswapal(sel
->outp
);
5837 exp
= tswapal(sel
->exp
);
5838 tvp
= tswapal(sel
->tvp
);
5839 unlock_user_struct(sel
, arg1
, 0);
5840 ret
= do_select(nsel
, inp
, outp
, exp
, tvp
);
5844 #ifdef TARGET_NR_pselect6
5845 case TARGET_NR_pselect6
:
5847 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
5848 fd_set rfds
, wfds
, efds
;
5849 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
5850 struct timespec ts
, *ts_ptr
;
5853 * The 6th arg is actually two args smashed together,
5854 * so we cannot use the C library.
5862 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
5863 target_sigset_t
*target_sigset
;
5871 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
5875 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
5879 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
5885 * This takes a timespec, and not a timeval, so we cannot
5886 * use the do_select() helper ...
5889 if (target_to_host_timespec(&ts
, ts_addr
)) {
5897 /* Extract the two packed args for the sigset */
5900 sig
.size
= _NSIG
/ 8;
5902 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
5906 arg_sigset
= tswapal(arg7
[0]);
5907 arg_sigsize
= tswapal(arg7
[1]);
5908 unlock_user(arg7
, arg6
, 0);
5912 if (arg_sigsize
!= sizeof(*target_sigset
)) {
5913 /* Like the kernel, we enforce correct size sigsets */
5914 ret
= -TARGET_EINVAL
;
5917 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
5918 sizeof(*target_sigset
), 1);
5919 if (!target_sigset
) {
5922 target_to_host_sigset(&set
, target_sigset
);
5923 unlock_user(target_sigset
, arg_sigset
, 0);
5931 ret
= get_errno(sys_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
5934 if (!is_error(ret
)) {
5935 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
5937 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
5939 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
5942 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
5948 case TARGET_NR_symlink
:
5951 p
= lock_user_string(arg1
);
5952 p2
= lock_user_string(arg2
);
5954 ret
= -TARGET_EFAULT
;
5956 ret
= get_errno(symlink(p
, p2
));
5957 unlock_user(p2
, arg2
, 0);
5958 unlock_user(p
, arg1
, 0);
5961 #if defined(TARGET_NR_symlinkat) && defined(__NR_symlinkat)
5962 case TARGET_NR_symlinkat
:
5965 p
= lock_user_string(arg1
);
5966 p2
= lock_user_string(arg3
);
5968 ret
= -TARGET_EFAULT
;
5970 ret
= get_errno(sys_symlinkat(p
, arg2
, p2
));
5971 unlock_user(p2
, arg3
, 0);
5972 unlock_user(p
, arg1
, 0);
5976 #ifdef TARGET_NR_oldlstat
5977 case TARGET_NR_oldlstat
:
5980 case TARGET_NR_readlink
:
5983 p
= lock_user_string(arg1
);
5984 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
5986 ret
= -TARGET_EFAULT
;
5988 if (strncmp((const char *)p
, "/proc/self/exe", 14) == 0) {
5989 char real
[PATH_MAX
];
5990 temp
= realpath(exec_path
,real
);
5991 ret
= (temp
==NULL
) ? get_errno(-1) : strlen(real
) ;
5992 snprintf((char *)p2
, arg3
, "%s", real
);
5995 ret
= get_errno(readlink(path(p
), p2
, arg3
));
5997 unlock_user(p2
, arg2
, ret
);
5998 unlock_user(p
, arg1
, 0);
6001 #if defined(TARGET_NR_readlinkat) && defined(__NR_readlinkat)
6002 case TARGET_NR_readlinkat
:
6005 p
= lock_user_string(arg2
);
6006 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
6008 ret
= -TARGET_EFAULT
;
6010 ret
= get_errno(sys_readlinkat(arg1
, path(p
), p2
, arg4
));
6011 unlock_user(p2
, arg3
, ret
);
6012 unlock_user(p
, arg2
, 0);
6016 #ifdef TARGET_NR_uselib
6017 case TARGET_NR_uselib
:
6020 #ifdef TARGET_NR_swapon
6021 case TARGET_NR_swapon
:
6022 if (!(p
= lock_user_string(arg1
)))
6024 ret
= get_errno(swapon(p
, arg2
));
6025 unlock_user(p
, arg1
, 0);
6028 case TARGET_NR_reboot
:
6029 if (!(p
= lock_user_string(arg4
)))
6031 ret
= reboot(arg1
, arg2
, arg3
, p
);
6032 unlock_user(p
, arg4
, 0);
6034 #ifdef TARGET_NR_readdir
6035 case TARGET_NR_readdir
:
6038 #ifdef TARGET_NR_mmap
6039 case TARGET_NR_mmap
:
6040 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) || \
6041 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
6042 || defined(TARGET_S390X)
6045 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
6046 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
6054 unlock_user(v
, arg1
, 0);
6055 ret
= get_errno(target_mmap(v1
, v2
, v3
,
6056 target_to_host_bitmask(v4
, mmap_flags_tbl
),
6060 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
6061 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
6067 #ifdef TARGET_NR_mmap2
6068 case TARGET_NR_mmap2
:
6070 #define MMAP_SHIFT 12
6072 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
6073 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
6075 arg6
<< MMAP_SHIFT
));
6078 case TARGET_NR_munmap
:
6079 ret
= get_errno(target_munmap(arg1
, arg2
));
6081 case TARGET_NR_mprotect
:
6083 TaskState
*ts
= ((CPUState
*)cpu_env
)->opaque
;
6084 /* Special hack to detect libc making the stack executable. */
6085 if ((arg3
& PROT_GROWSDOWN
)
6086 && arg1
>= ts
->info
->stack_limit
6087 && arg1
<= ts
->info
->start_stack
) {
6088 arg3
&= ~PROT_GROWSDOWN
;
6089 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
6090 arg1
= ts
->info
->stack_limit
;
6093 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
6095 #ifdef TARGET_NR_mremap
6096 case TARGET_NR_mremap
:
6097 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
6100 /* ??? msync/mlock/munlock are broken for softmmu. */
6101 #ifdef TARGET_NR_msync
6102 case TARGET_NR_msync
:
6103 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
6106 #ifdef TARGET_NR_mlock
6107 case TARGET_NR_mlock
:
6108 ret
= get_errno(mlock(g2h(arg1
), arg2
));
6111 #ifdef TARGET_NR_munlock
6112 case TARGET_NR_munlock
:
6113 ret
= get_errno(munlock(g2h(arg1
), arg2
));
6116 #ifdef TARGET_NR_mlockall
6117 case TARGET_NR_mlockall
:
6118 ret
= get_errno(mlockall(arg1
));
6121 #ifdef TARGET_NR_munlockall
6122 case TARGET_NR_munlockall
:
6123 ret
= get_errno(munlockall());
6126 case TARGET_NR_truncate
:
6127 if (!(p
= lock_user_string(arg1
)))
6129 ret
= get_errno(truncate(p
, arg2
));
6130 unlock_user(p
, arg1
, 0);
6132 case TARGET_NR_ftruncate
:
6133 ret
= get_errno(ftruncate(arg1
, arg2
));
6135 case TARGET_NR_fchmod
:
6136 ret
= get_errno(fchmod(arg1
, arg2
));
6138 #if defined(TARGET_NR_fchmodat) && defined(__NR_fchmodat)
6139 case TARGET_NR_fchmodat
:
6140 if (!(p
= lock_user_string(arg2
)))
6142 ret
= get_errno(sys_fchmodat(arg1
, p
, arg3
));
6143 unlock_user(p
, arg2
, 0);
6146 case TARGET_NR_getpriority
:
6147 /* libc does special remapping of the return value of
6148 * sys_getpriority() so it's just easiest to call
6149 * sys_getpriority() directly rather than through libc. */
6150 ret
= get_errno(sys_getpriority(arg1
, arg2
));
6152 case TARGET_NR_setpriority
:
6153 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
6155 #ifdef TARGET_NR_profil
6156 case TARGET_NR_profil
:
6159 case TARGET_NR_statfs
:
6160 if (!(p
= lock_user_string(arg1
)))
6162 ret
= get_errno(statfs(path(p
), &stfs
));
6163 unlock_user(p
, arg1
, 0);
6165 if (!is_error(ret
)) {
6166 struct target_statfs
*target_stfs
;
6168 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
6170 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
6171 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
6172 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
6173 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
6174 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
6175 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
6176 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
6177 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
6178 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
6179 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
6180 unlock_user_struct(target_stfs
, arg2
, 1);
6183 case TARGET_NR_fstatfs
:
6184 ret
= get_errno(fstatfs(arg1
, &stfs
));
6185 goto convert_statfs
;
6186 #ifdef TARGET_NR_statfs64
6187 case TARGET_NR_statfs64
:
6188 if (!(p
= lock_user_string(arg1
)))
6190 ret
= get_errno(statfs(path(p
), &stfs
));
6191 unlock_user(p
, arg1
, 0);
6193 if (!is_error(ret
)) {
6194 struct target_statfs64
*target_stfs
;
6196 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
6198 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
6199 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
6200 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
6201 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
6202 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
6203 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
6204 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
6205 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
6206 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
6207 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
6208 unlock_user_struct(target_stfs
, arg3
, 1);
6211 case TARGET_NR_fstatfs64
:
6212 ret
= get_errno(fstatfs(arg1
, &stfs
));
6213 goto convert_statfs64
;
6215 #ifdef TARGET_NR_ioperm
6216 case TARGET_NR_ioperm
:
6219 #ifdef TARGET_NR_socketcall
6220 case TARGET_NR_socketcall
:
6221 ret
= do_socketcall(arg1
, arg2
);
6224 #ifdef TARGET_NR_accept
6225 case TARGET_NR_accept
:
6226 ret
= do_accept(arg1
, arg2
, arg3
);
6229 #ifdef TARGET_NR_bind
6230 case TARGET_NR_bind
:
6231 ret
= do_bind(arg1
, arg2
, arg3
);
6234 #ifdef TARGET_NR_connect
6235 case TARGET_NR_connect
:
6236 ret
= do_connect(arg1
, arg2
, arg3
);
6239 #ifdef TARGET_NR_getpeername
6240 case TARGET_NR_getpeername
:
6241 ret
= do_getpeername(arg1
, arg2
, arg3
);
6244 #ifdef TARGET_NR_getsockname
6245 case TARGET_NR_getsockname
:
6246 ret
= do_getsockname(arg1
, arg2
, arg3
);
6249 #ifdef TARGET_NR_getsockopt
6250 case TARGET_NR_getsockopt
:
6251 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
6254 #ifdef TARGET_NR_listen
6255 case TARGET_NR_listen
:
6256 ret
= get_errno(listen(arg1
, arg2
));
6259 #ifdef TARGET_NR_recv
6260 case TARGET_NR_recv
:
6261 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
6264 #ifdef TARGET_NR_recvfrom
6265 case TARGET_NR_recvfrom
:
6266 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6269 #ifdef TARGET_NR_recvmsg
6270 case TARGET_NR_recvmsg
:
6271 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
6274 #ifdef TARGET_NR_send
6275 case TARGET_NR_send
:
6276 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
6279 #ifdef TARGET_NR_sendmsg
6280 case TARGET_NR_sendmsg
:
6281 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
6284 #ifdef TARGET_NR_sendto
6285 case TARGET_NR_sendto
:
6286 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6289 #ifdef TARGET_NR_shutdown
6290 case TARGET_NR_shutdown
:
6291 ret
= get_errno(shutdown(arg1
, arg2
));
6294 #ifdef TARGET_NR_socket
6295 case TARGET_NR_socket
:
6296 ret
= do_socket(arg1
, arg2
, arg3
);
6299 #ifdef TARGET_NR_socketpair
6300 case TARGET_NR_socketpair
:
6301 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
6304 #ifdef TARGET_NR_setsockopt
6305 case TARGET_NR_setsockopt
:
6306 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
6310 case TARGET_NR_syslog
:
6311 if (!(p
= lock_user_string(arg2
)))
6313 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
6314 unlock_user(p
, arg2
, 0);
6317 case TARGET_NR_setitimer
:
6319 struct itimerval value
, ovalue
, *pvalue
;
6323 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
6324 || copy_from_user_timeval(&pvalue
->it_value
,
6325 arg2
+ sizeof(struct target_timeval
)))
6330 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
6331 if (!is_error(ret
) && arg3
) {
6332 if (copy_to_user_timeval(arg3
,
6333 &ovalue
.it_interval
)
6334 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
6340 case TARGET_NR_getitimer
:
6342 struct itimerval value
;
6344 ret
= get_errno(getitimer(arg1
, &value
));
6345 if (!is_error(ret
) && arg2
) {
6346 if (copy_to_user_timeval(arg2
,
6348 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
6354 case TARGET_NR_stat
:
6355 if (!(p
= lock_user_string(arg1
)))
6357 ret
= get_errno(stat(path(p
), &st
));
6358 unlock_user(p
, arg1
, 0);
6360 case TARGET_NR_lstat
:
6361 if (!(p
= lock_user_string(arg1
)))
6363 ret
= get_errno(lstat(path(p
), &st
));
6364 unlock_user(p
, arg1
, 0);
6366 case TARGET_NR_fstat
:
6368 ret
= get_errno(fstat(arg1
, &st
));
6370 if (!is_error(ret
)) {
6371 struct target_stat
*target_st
;
6373 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
6375 memset(target_st
, 0, sizeof(*target_st
));
6376 __put_user(st
.st_dev
, &target_st
->st_dev
);
6377 __put_user(st
.st_ino
, &target_st
->st_ino
);
6378 __put_user(st
.st_mode
, &target_st
->st_mode
);
6379 __put_user(st
.st_uid
, &target_st
->st_uid
);
6380 __put_user(st
.st_gid
, &target_st
->st_gid
);
6381 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
6382 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
6383 __put_user(st
.st_size
, &target_st
->st_size
);
6384 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
6385 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
6386 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
6387 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
6388 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
6389 unlock_user_struct(target_st
, arg2
, 1);
6393 #ifdef TARGET_NR_olduname
6394 case TARGET_NR_olduname
:
6397 #ifdef TARGET_NR_iopl
6398 case TARGET_NR_iopl
:
6401 case TARGET_NR_vhangup
:
6402 ret
= get_errno(vhangup());
6404 #ifdef TARGET_NR_idle
6405 case TARGET_NR_idle
:
6408 #ifdef TARGET_NR_syscall
6409 case TARGET_NR_syscall
:
6410 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
6411 arg6
, arg7
, arg8
, 0);
6414 case TARGET_NR_wait4
:
6417 abi_long status_ptr
= arg2
;
6418 struct rusage rusage
, *rusage_ptr
;
6419 abi_ulong target_rusage
= arg4
;
6421 rusage_ptr
= &rusage
;
6424 ret
= get_errno(wait4(arg1
, &status
, arg3
, rusage_ptr
));
6425 if (!is_error(ret
)) {
6426 if (status_ptr
&& ret
) {
6427 status
= host_to_target_waitstatus(status
);
6428 if (put_user_s32(status
, status_ptr
))
6432 host_to_target_rusage(target_rusage
, &rusage
);
6436 #ifdef TARGET_NR_swapoff
6437 case TARGET_NR_swapoff
:
6438 if (!(p
= lock_user_string(arg1
)))
6440 ret
= get_errno(swapoff(p
));
6441 unlock_user(p
, arg1
, 0);
6444 case TARGET_NR_sysinfo
:
6446 struct target_sysinfo
*target_value
;
6447 struct sysinfo value
;
6448 ret
= get_errno(sysinfo(&value
));
6449 if (!is_error(ret
) && arg1
)
6451 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
6453 __put_user(value
.uptime
, &target_value
->uptime
);
6454 __put_user(value
.loads
[0], &target_value
->loads
[0]);
6455 __put_user(value
.loads
[1], &target_value
->loads
[1]);
6456 __put_user(value
.loads
[2], &target_value
->loads
[2]);
6457 __put_user(value
.totalram
, &target_value
->totalram
);
6458 __put_user(value
.freeram
, &target_value
->freeram
);
6459 __put_user(value
.sharedram
, &target_value
->sharedram
);
6460 __put_user(value
.bufferram
, &target_value
->bufferram
);
6461 __put_user(value
.totalswap
, &target_value
->totalswap
);
6462 __put_user(value
.freeswap
, &target_value
->freeswap
);
6463 __put_user(value
.procs
, &target_value
->procs
);
6464 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
6465 __put_user(value
.freehigh
, &target_value
->freehigh
);
6466 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
6467 unlock_user_struct(target_value
, arg1
, 1);
6471 #ifdef TARGET_NR_ipc
6473 ret
= do_ipc(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
6476 #ifdef TARGET_NR_semget
6477 case TARGET_NR_semget
:
6478 ret
= get_errno(semget(arg1
, arg2
, arg3
));
6481 #ifdef TARGET_NR_semop
6482 case TARGET_NR_semop
:
6483 ret
= get_errno(do_semop(arg1
, arg2
, arg3
));
6486 #ifdef TARGET_NR_semctl
6487 case TARGET_NR_semctl
:
6488 ret
= do_semctl(arg1
, arg2
, arg3
, (union target_semun
)(abi_ulong
)arg4
);
6491 #ifdef TARGET_NR_msgctl
6492 case TARGET_NR_msgctl
:
6493 ret
= do_msgctl(arg1
, arg2
, arg3
);
6496 #ifdef TARGET_NR_msgget
6497 case TARGET_NR_msgget
:
6498 ret
= get_errno(msgget(arg1
, arg2
));
6501 #ifdef TARGET_NR_msgrcv
6502 case TARGET_NR_msgrcv
:
6503 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
6506 #ifdef TARGET_NR_msgsnd
6507 case TARGET_NR_msgsnd
:
6508 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
6511 #ifdef TARGET_NR_shmget
6512 case TARGET_NR_shmget
:
6513 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
6516 #ifdef TARGET_NR_shmctl
6517 case TARGET_NR_shmctl
:
6518 ret
= do_shmctl(arg1
, arg2
, arg3
);
6521 #ifdef TARGET_NR_shmat
6522 case TARGET_NR_shmat
:
6523 ret
= do_shmat(arg1
, arg2
, arg3
);
6526 #ifdef TARGET_NR_shmdt
6527 case TARGET_NR_shmdt
:
6528 ret
= do_shmdt(arg1
);
6531 case TARGET_NR_fsync
:
6532 ret
= get_errno(fsync(arg1
));
6534 case TARGET_NR_clone
:
6535 #if defined(TARGET_SH4) || defined(TARGET_ALPHA)
6536 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
6537 #elif defined(TARGET_CRIS)
6538 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg4
, arg5
));
6539 #elif defined(TARGET_S390X)
6540 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
6542 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
6545 #ifdef __NR_exit_group
6546 /* new thread calls */
6547 case TARGET_NR_exit_group
:
6551 gdb_exit(cpu_env
, arg1
);
6552 ret
= get_errno(exit_group(arg1
));
6555 case TARGET_NR_setdomainname
:
6556 if (!(p
= lock_user_string(arg1
)))
6558 ret
= get_errno(setdomainname(p
, arg2
));
6559 unlock_user(p
, arg1
, 0);
6561 case TARGET_NR_uname
:
6562 /* no need to transcode because we use the linux syscall */
6564 struct new_utsname
* buf
;
6566 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
6568 ret
= get_errno(sys_uname(buf
));
6569 if (!is_error(ret
)) {
/* Overwrite the native machine name with whatever is being
6572 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
6573 /* Allow the user to override the reported release. */
6574 if (qemu_uname_release
&& *qemu_uname_release
)
6575 strcpy (buf
->release
, qemu_uname_release
);
6577 unlock_user_struct(buf
, arg1
, 1);
6581 case TARGET_NR_modify_ldt
:
6582 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
6584 #if !defined(TARGET_X86_64)
6585 case TARGET_NR_vm86old
:
6587 case TARGET_NR_vm86
:
6588 ret
= do_vm86(cpu_env
, arg1
, arg2
);
6592 case TARGET_NR_adjtimex
:
6594 #ifdef TARGET_NR_create_module
6595 case TARGET_NR_create_module
:
6597 case TARGET_NR_init_module
:
6598 case TARGET_NR_delete_module
:
6599 #ifdef TARGET_NR_get_kernel_syms
6600 case TARGET_NR_get_kernel_syms
:
6603 case TARGET_NR_quotactl
:
6605 case TARGET_NR_getpgid
:
6606 ret
= get_errno(getpgid(arg1
));
6608 case TARGET_NR_fchdir
:
6609 ret
= get_errno(fchdir(arg1
));
6611 #ifdef TARGET_NR_bdflush /* not on x86_64 */
6612 case TARGET_NR_bdflush
:
6615 #ifdef TARGET_NR_sysfs
6616 case TARGET_NR_sysfs
:
6619 case TARGET_NR_personality
:
6620 ret
= get_errno(personality(arg1
));
6622 #ifdef TARGET_NR_afs_syscall
6623 case TARGET_NR_afs_syscall
:
6626 #ifdef TARGET_NR__llseek /* Not on alpha */
6627 case TARGET_NR__llseek
:
6630 #if !defined(__NR_llseek)
6631 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | arg3
, arg5
);
6633 ret
= get_errno(res
);
6638 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
6640 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
6646 case TARGET_NR_getdents
:
6647 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
6649 struct target_dirent
*target_dirp
;
6650 struct linux_dirent
*dirp
;
6651 abi_long count
= arg3
;
6653 dirp
= malloc(count
);
6655 ret
= -TARGET_ENOMEM
;
6659 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
6660 if (!is_error(ret
)) {
6661 struct linux_dirent
*de
;
6662 struct target_dirent
*tde
;
6664 int reclen
, treclen
;
6665 int count1
, tnamelen
;
6669 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
6673 reclen
= de
->d_reclen
;
6674 treclen
= reclen
- (2 * (sizeof(long) - sizeof(abi_long
)));
6675 tde
->d_reclen
= tswap16(treclen
);
6676 tde
->d_ino
= tswapal(de
->d_ino
);
6677 tde
->d_off
= tswapal(de
->d_off
);
6678 tnamelen
= treclen
- (2 * sizeof(abi_long
) + 2);
6681 /* XXX: may not be correct */
6682 pstrcpy(tde
->d_name
, tnamelen
, de
->d_name
);
6683 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
6685 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
6689 unlock_user(target_dirp
, arg2
, ret
);
6695 struct linux_dirent
*dirp
;
6696 abi_long count
= arg3
;
6698 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
6700 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
6701 if (!is_error(ret
)) {
6702 struct linux_dirent
*de
;
6707 reclen
= de
->d_reclen
;
6710 de
->d_reclen
= tswap16(reclen
);
6711 tswapls(&de
->d_ino
);
6712 tswapls(&de
->d_off
);
6713 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
6717 unlock_user(dirp
, arg2
, ret
);
6721 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
6722 case TARGET_NR_getdents64
:
6724 struct linux_dirent64
*dirp
;
6725 abi_long count
= arg3
;
6726 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
6728 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
6729 if (!is_error(ret
)) {
6730 struct linux_dirent64
*de
;
6735 reclen
= de
->d_reclen
;
6738 de
->d_reclen
= tswap16(reclen
);
6739 tswap64s((uint64_t *)&de
->d_ino
);
6740 tswap64s((uint64_t *)&de
->d_off
);
6741 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
6745 unlock_user(dirp
, arg2
, ret
);
6748 #endif /* TARGET_NR_getdents64 */
6749 #if defined(TARGET_NR__newselect) || defined(TARGET_S390X)
6751 case TARGET_NR_select
:
6753 case TARGET_NR__newselect
:
6755 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
6758 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
6759 # ifdef TARGET_NR_poll
6760 case TARGET_NR_poll
:
6762 # ifdef TARGET_NR_ppoll
6763 case TARGET_NR_ppoll
:
6766 struct target_pollfd
*target_pfd
;
6767 unsigned int nfds
= arg2
;
6772 target_pfd
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_pollfd
) * nfds
, 1);
6776 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
6777 for(i
= 0; i
< nfds
; i
++) {
6778 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
6779 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
6782 # ifdef TARGET_NR_ppoll
6783 if (num
== TARGET_NR_ppoll
) {
6784 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
6785 target_sigset_t
*target_set
;
6786 sigset_t _set
, *set
= &_set
;
6789 if (target_to_host_timespec(timeout_ts
, arg3
)) {
6790 unlock_user(target_pfd
, arg1
, 0);
6798 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
6800 unlock_user(target_pfd
, arg1
, 0);
6803 target_to_host_sigset(set
, target_set
);
6808 ret
= get_errno(sys_ppoll(pfd
, nfds
, timeout_ts
, set
, _NSIG
/8));
6810 if (!is_error(ret
) && arg3
) {
6811 host_to_target_timespec(arg3
, timeout_ts
);
6814 unlock_user(target_set
, arg4
, 0);
6818 ret
= get_errno(poll(pfd
, nfds
, timeout
));
6820 if (!is_error(ret
)) {
6821 for(i
= 0; i
< nfds
; i
++) {
6822 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
6825 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
6829 case TARGET_NR_flock
:
6830 /* NOTE: the flock constant seems to be the same for every
6832 ret
= get_errno(flock(arg1
, arg2
));
6834 case TARGET_NR_readv
:
6839 vec
= alloca(count
* sizeof(struct iovec
));
6840 if (lock_iovec(VERIFY_WRITE
, vec
, arg2
, count
, 0) < 0)
6842 ret
= get_errno(readv(arg1
, vec
, count
));
6843 unlock_iovec(vec
, arg2
, count
, 1);
6846 case TARGET_NR_writev
:
6851 vec
= alloca(count
* sizeof(struct iovec
));
6852 if (lock_iovec(VERIFY_READ
, vec
, arg2
, count
, 1) < 0)
6854 ret
= get_errno(writev(arg1
, vec
, count
));
6855 unlock_iovec(vec
, arg2
, count
, 0);
6858 case TARGET_NR_getsid
:
6859 ret
= get_errno(getsid(arg1
));
6861 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
6862 case TARGET_NR_fdatasync
:
6863 ret
= get_errno(fdatasync(arg1
));
6866 case TARGET_NR__sysctl
:
6867 /* We don't implement this, but ENOTDIR is always a safe
6869 ret
= -TARGET_ENOTDIR
;
6871 case TARGET_NR_sched_getaffinity
:
6873 unsigned int mask_size
;
6874 unsigned long *mask
;
6877 * sched_getaffinity needs multiples of ulong, so need to take
6878 * care of mismatches between target ulong and host ulong sizes.
6880 if (arg2
& (sizeof(abi_ulong
) - 1)) {
6881 ret
= -TARGET_EINVAL
;
6884 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
6886 mask
= alloca(mask_size
);
6887 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
6889 if (!is_error(ret
)) {
6890 if (copy_to_user(arg3
, mask
, ret
)) {
6896 case TARGET_NR_sched_setaffinity
:
6898 unsigned int mask_size
;
6899 unsigned long *mask
;
6902 * sched_setaffinity needs multiples of ulong, so need to take
6903 * care of mismatches between target ulong and host ulong sizes.
6905 if (arg2
& (sizeof(abi_ulong
) - 1)) {
6906 ret
= -TARGET_EINVAL
;
6909 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
6911 mask
= alloca(mask_size
);
6912 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
6915 memcpy(mask
, p
, arg2
);
6916 unlock_user_struct(p
, arg2
, 0);
6918 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
    case TARGET_NR_sched_setparam:
        {
            struct sched_param *target_schp;
            struct sched_param schp;

            if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
                goto efault;
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg2, 0);
            ret = get_errno(sched_setparam(arg1, &schp));
        }
        break;
    case TARGET_NR_sched_getparam:
        {
            struct sched_param *target_schp;
            struct sched_param schp;
            ret = get_errno(sched_getparam(arg1, &schp));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
                    goto efault;
                target_schp->sched_priority = tswap32(schp.sched_priority);
                unlock_user_struct(target_schp, arg2, 1);
            }
        }
        break;
    case TARGET_NR_sched_setscheduler:
        {
            struct sched_param *target_schp;
            struct sched_param schp;
            if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
                goto efault;
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg3, 0);
            ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
        }
        break;
    case TARGET_NR_sched_getscheduler:
        ret = get_errno(sched_getscheduler(arg1));
        break;
    case TARGET_NR_sched_yield:
        ret = get_errno(sched_yield());
        break;
    case TARGET_NR_sched_get_priority_max:
        ret = get_errno(sched_get_priority_max(arg1));
        break;
    case TARGET_NR_sched_get_priority_min:
        ret = get_errno(sched_get_priority_min(arg1));
        break;
    case TARGET_NR_sched_rr_get_interval:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                host_to_target_timespec(arg2, &ts);
            }
        }
        break;
    case TARGET_NR_nanosleep:
        {
            struct timespec req, rem;
            target_to_host_timespec(&req, arg1);
            ret = get_errno(nanosleep(&req, &rem));
            if (is_error(ret) && arg2) {
                host_to_target_timespec(arg2, &rem);
            }
        }
        break;
#ifdef TARGET_NR_query_module
    case TARGET_NR_query_module:
        goto unimplemented;
#endif
#ifdef TARGET_NR_nfsservctl
    case TARGET_NR_nfsservctl:
        goto unimplemented;
#endif
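    /* prctl options that only pass integer arguments can be forwarded to the
       host directly; PR_GET_PDEATHSIG is special-cased because it returns its
       result through a pointer that must be written back to guest memory. */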
    case TARGET_NR_prctl:
        switch (arg1) {
        case PR_GET_PDEATHSIG:
            {
                int deathsig;
                ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
                if (!is_error(ret) && arg2
                    && put_user_ual(deathsig, arg2)) {
                    goto efault;
                }
                break;
            }
        default:
            ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
            break;
        }
        break;
#ifdef TARGET_NR_arch_prctl
    case TARGET_NR_arch_prctl:
#if defined(TARGET_I386) && !defined(TARGET_ABI32)
        ret = do_arch_prctl(cpu_env, arg1, arg2);
        break;
#else
        goto unimplemented;
#endif
#endif
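    /* On 32-bit ABIs that require 64-bit syscall arguments to start in an
       even register pair (regpairs_aligned), a padding argument is inserted
       before the offset, so the real offset for pread/pwrite is taken from
       arg5 instead of arg4. */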
#ifdef TARGET_NR_pread
    case TARGET_NR_pread:
        if (regpairs_aligned(cpu_env))
            arg4 = arg5;
        if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
            goto efault;
        ret = get_errno(pread(arg1, p, arg3, arg4));
        unlock_user(p, arg2, ret);
        break;
    case TARGET_NR_pwrite:
        if (regpairs_aligned(cpu_env))
            arg4 = arg5;
        if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
            goto efault;
        ret = get_errno(pwrite(arg1, p, arg3, arg4));
        unlock_user(p, arg2, 0);
        break;
#endif
#ifdef TARGET_NR_pread64
    case TARGET_NR_pread64:
        if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
            goto efault;
        ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, ret);
        break;
    case TARGET_NR_pwrite64:
        if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
            goto efault;
        ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, 0);
        break;
#endif
    case TARGET_NR_getcwd:
        if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
            goto efault;
        ret = get_errno(sys_getcwd1(p, arg2));
        unlock_user(p, arg1, ret);
        break;
    case TARGET_NR_capget:
        goto unimplemented;
    case TARGET_NR_capset:
        goto unimplemented;
    case TARGET_NR_sigaltstack:
#if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
    defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
    defined(TARGET_M68K) || defined(TARGET_S390X)
        ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUState *)cpu_env));
        break;
#else
        goto unimplemented;
#endif
    case TARGET_NR_sendfile:
        goto unimplemented;
#ifdef TARGET_NR_getpmsg
    case TARGET_NR_getpmsg:
        goto unimplemented;
#endif
#ifdef TARGET_NR_putpmsg
    case TARGET_NR_putpmsg:
        goto unimplemented;
#endif
#ifdef TARGET_NR_vfork
    case TARGET_NR_vfork:
        ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
                        0, 0, 0, 0));
        break;
#endif
#ifdef TARGET_NR_ugetrlimit
    case TARGET_NR_ugetrlimit:
    {
        struct rlimit rlim;
        int resource = target_to_host_resource(arg1);
        ret = get_errno(getrlimit(resource, &rlim));
        if (!is_error(ret)) {
            struct target_rlimit *target_rlim;
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                goto efault;
            target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
            target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
        }
        break;
    }
#endif
#ifdef TARGET_NR_truncate64
    case TARGET_NR_truncate64:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
        unlock_user(p, arg1, 0);
        break;
#endif
#ifdef TARGET_NR_ftruncate64
    case TARGET_NR_ftruncate64:
        ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
        break;
#endif
#ifdef TARGET_NR_stat64
    case TARGET_NR_stat64:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        break;
#endif
#ifdef TARGET_NR_lstat64
    case TARGET_NR_lstat64:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        break;
#endif
#ifdef TARGET_NR_fstat64
    case TARGET_NR_fstat64:
        ret = get_errno(fstat(arg1, &st));
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        break;
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat)) && \
        (defined(__NR_fstatat64) || defined(__NR_newfstatat))
#ifdef TARGET_NR_fstatat64
    case TARGET_NR_fstatat64:
#endif
#ifdef TARGET_NR_newfstatat
    case TARGET_NR_newfstatat:
#endif
        if (!(p = lock_user_string(arg2)))
            goto efault;
#ifdef __NR_fstatat64
        ret = get_errno(sys_fstatat64(arg1, path(p), &st, arg4));
#else
        ret = get_errno(sys_newfstatat(arg1, path(p), &st, arg4));
#endif
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg3, &st);
        break;
#endif
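    /* The id syscalls below are the legacy 16-bit uid/gid variants; on
       targets that use 16-bit ids the low2high and high2low helpers convert
       between the 16-bit target values and the host's full-size uid_t/gid_t
       (elsewhere they are no-ops). */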
    case TARGET_NR_lchown:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        break;
#ifdef TARGET_NR_getuid
    case TARGET_NR_getuid:
        ret = get_errno(high2lowuid(getuid()));
        break;
#endif
#ifdef TARGET_NR_getgid
    case TARGET_NR_getgid:
        ret = get_errno(high2lowgid(getgid()));
        break;
#endif
#ifdef TARGET_NR_geteuid
    case TARGET_NR_geteuid:
        ret = get_errno(high2lowuid(geteuid()));
        break;
#endif
#ifdef TARGET_NR_getegid
    case TARGET_NR_getegid:
        ret = get_errno(high2lowgid(getegid()));
        break;
#endif
    case TARGET_NR_setreuid:
        ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
        break;
    case TARGET_NR_setregid:
        ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
        break;
    case TARGET_NR_getgroups:
        {
            int gidsetsize = arg1;
            target_id *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0)
                break;
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 2, 0);
                if (!target_grouplist)
                    goto efault;
                for(i = 0;i < ret; i++)
                    target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
                unlock_user(target_grouplist, arg2, gidsetsize * 2);
            }
        }
        break;
    case TARGET_NR_setgroups:
        {
            int gidsetsize = arg1;
            target_id *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 2, 1);
            if (!target_grouplist) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
            for(i = 0;i < gidsetsize; i++)
                grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
            unlock_user(target_grouplist, arg2, 0);
            ret = get_errno(setgroups(gidsetsize, grouplist));
        }
        break;
    case TARGET_NR_fchown:
        ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
        break;
#if defined(TARGET_NR_fchownat) && defined(__NR_fchownat)
    case TARGET_NR_fchownat:
        if (!(p = lock_user_string(arg2)))
            goto efault;
        ret = get_errno(sys_fchownat(arg1, p, low2highuid(arg3),
                                     low2highgid(arg4), arg5));
        unlock_user(p, arg2, 0);
        break;
#endif
#ifdef TARGET_NR_setresuid
    case TARGET_NR_setresuid:
        ret = get_errno(setresuid(low2highuid(arg1),
                                  low2highuid(arg2),
                                  low2highuid(arg3)));
        break;
#endif
#ifdef TARGET_NR_getresuid
    case TARGET_NR_getresuid:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_u16(high2lowuid(ruid), arg1)
                    || put_user_u16(high2lowuid(euid), arg2)
                    || put_user_u16(high2lowuid(suid), arg3))
                    goto efault;
            }
        }
        break;
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_setresgid:
        ret = get_errno(setresgid(low2highgid(arg1),
                                  low2highgid(arg2),
                                  low2highgid(arg3)));
        break;
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_getresgid:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u16(high2lowgid(rgid), arg1)
                    || put_user_u16(high2lowgid(egid), arg2)
                    || put_user_u16(high2lowgid(sgid), arg3))
                    goto efault;
            }
        }
        break;
#endif
    case TARGET_NR_chown:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        break;
    case TARGET_NR_setuid:
        ret = get_errno(setuid(low2highuid(arg1)));
        break;
    case TARGET_NR_setgid:
        ret = get_errno(setgid(low2highgid(arg1)));
        break;
    case TARGET_NR_setfsuid:
        ret = get_errno(setfsuid(arg1));
        break;
    case TARGET_NR_setfsgid:
        ret = get_errno(setfsgid(arg1));
        break;
#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        break;
#endif
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        ret = get_errno(getuid());
        break;
#endif

#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxuid:
        {
            uid_t euid;
            euid = geteuid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
        }
        ret = get_errno(getuid());
        break;
#endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxgid:
        {
            uid_t egid;
            egid = getegid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
        }
        ret = get_errno(getgid());
        break;
#endif
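    /* The next two cases emulate Alpha's osf_getsysinfo/osf_setsysinfo IEEE
       FP control interface by translating between the hardware FPCR format
       and the software completion (SWCR) format used by the OSF ABI. */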
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_getsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_GSI_IEEE_FP_CONTROL:
            {
                uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);

                /* Copied from linux ieee_fpcr_to_swcr.  */
                swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
                swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
                swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
                                        | SWCR_TRAP_ENABLE_DZE
                                        | SWCR_TRAP_ENABLE_OVF);
                swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
                                        | SWCR_TRAP_ENABLE_INE);
                swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
                swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;

                if (put_user_u64 (swcr, arg2))
                    goto efault;
                ret = 0;
            }
            break;

          /* case GSI_IEEE_STATE_AT_SIGNAL:
               -- Not implemented in linux kernel.
             case GSI_UACPROC:
               -- Retrieves current unaligned access state; not much used.
             case GSI_PROC_TYPE:
               -- Retrieves implver information; surely not used.
             case GSI_GET_HWRPB:
               -- Grabs a copy of the HWRPB; surely not used.
          */
        }
        break;
#endif
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_setsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_SSI_IEEE_FP_CONTROL:
          case TARGET_SSI_IEEE_RAISE_EXCEPTION:
            {
                uint64_t swcr, fpcr, orig_fpcr;

                if (get_user_u64 (swcr, arg2))
                    goto efault;
                orig_fpcr = cpu_alpha_load_fpcr (cpu_env);
                fpcr = orig_fpcr & FPCR_DYN_MASK;

                /* Copied from linux ieee_swcr_to_fpcr.  */
                fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
                fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
                fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
                                  | SWCR_TRAP_ENABLE_DZE
                                  | SWCR_TRAP_ENABLE_OVF)) << 48;
                fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
                                  | SWCR_TRAP_ENABLE_INE)) << 57;
                fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
                fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;

                cpu_alpha_store_fpcr (cpu_env, fpcr);
                ret = 0;

                if (arg1 == TARGET_SSI_IEEE_RAISE_EXCEPTION) {
                    /* Old exceptions are not signaled.  */
                    fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);

                    /* If any exceptions set by this call, and are unmasked,
                       send a signal.  */
                }
            }
            break;

          /* case SSI_NVPAIRS:
               -- Used with SSIN_UACPROC to enable unaligned accesses.
             case SSI_IEEE_STATE_AT_SIGNAL:
             case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
               -- Not implemented in linux kernel
          */
        }
        break;
#endif
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific.  */
    case TARGET_NR_osf_sigprocmask:
        {
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            switch(arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                ret = -TARGET_EINVAL;
                goto fail;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            sigprocmask(how, &set, &oldset);
            host_to_target_old_sigset(&mask, &oldset);
            ret = mask;
        }
        break;
#endif
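    /* The "32" variants of the id syscalls below take full-width uids/gids,
       so no 16-bit conversion is applied here. */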
#ifdef TARGET_NR_getgid32
    case TARGET_NR_getgid32:
        ret = get_errno(getgid());
        break;
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        ret = get_errno(geteuid());
        break;
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        ret = get_errno(getegid());
        break;
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        ret = get_errno(setreuid(arg1, arg2));
        break;
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        ret = get_errno(setregid(arg1, arg2));
        break;
#endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0)
                break;
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
                if (!target_grouplist) {
                    ret = -TARGET_EFAULT;
                    goto fail;
                }
                for(i = 0;i < ret; i++)
                    target_grouplist[i] = tswap32(grouplist[i]);
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
        }
        break;
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            grouplist = alloca(gidsetsize * sizeof(gid_t));
            target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
            if (!target_grouplist) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
            for(i = 0;i < gidsetsize; i++)
                grouplist[i] = tswap32(target_grouplist[i]);
            unlock_user(target_grouplist, arg2, 0);
            ret = get_errno(setgroups(gidsetsize, grouplist));
        }
        break;
#endif
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        ret = get_errno(fchown(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        ret = get_errno(setresuid(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_u32(ruid, arg1)
                    || put_user_u32(euid, arg2)
                    || put_user_u32(suid, arg3))
                    goto efault;
            }
        }
        break;
#endif
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        ret = get_errno(setresgid(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u32(rgid, arg1)
                    || put_user_u32(egid, arg2)
                    || put_user_u32(sgid, arg3))
                    goto efault;
            }
        }
        break;
#endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        break;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        ret = get_errno(setuid(arg1));
        break;
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        ret = get_errno(setgid(arg1));
        break;
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        ret = get_errno(setfsuid(arg1));
        break;
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        ret = get_errno(setfsgid(arg1));
        break;
#endif
    case TARGET_NR_pivot_root:
        goto unimplemented;
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            void *a;
            ret = -TARGET_EFAULT;
            if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
                goto efault;
            if (!(p = lock_user_string(arg3)))
                goto mincore_fail;
            ret = get_errno(mincore(a, arg2, p));
            unlock_user(p, arg3, ret);
            mincore_fail:
            unlock_user(a, arg1, 0);
        }
        break;
#endif
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        {
            /*
             * arm_fadvise64_64 looks like fadvise64_64 but
             * with different argument order
             */
            abi_long temp;
            temp = arg3;
            arg3 = arg4;
            arg4 = temp;
        }
#endif
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        default: break;
        }
#endif
        ret = -posix_fadvise(arg1, arg2, arg3, arg4);
        break;
#endif
#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
           This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok.  */
        ret = get_errno(0);
        break;
#endif
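    /* fcntl64 only exists on 32-bit ABIs: F_GETLK64/F_SETLK64 carry a
       struct flock64 with 64-bit offsets, and on ARM the EABI layout of that
       structure differs from the old ABI, hence the two unpacking paths
       below. */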
#if TARGET_ABI_BITS == 32
    case TARGET_NR_fcntl64:
    {
        int cmd;
        struct flock64 fl;
        struct target_flock64 *target_fl;
#ifdef TARGET_ARM
        struct target_eabi_flock64 *target_efl;
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            ret = cmd;
            break;
        }

        switch(arg2) {
        case TARGET_F_GETLK64:
#ifdef TARGET_ARM
            if (((CPUARMState *)cpu_env)->eabi) {
                if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_efl->l_type);
                fl.l_whence = tswap16(target_efl->l_whence);
                fl.l_start = tswap64(target_efl->l_start);
                fl.l_len = tswap64(target_efl->l_len);
                fl.l_pid = tswap32(target_efl->l_pid);
                unlock_user_struct(target_efl, arg3, 0);
            } else
#endif
            {
                if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_fl->l_type);
                fl.l_whence = tswap16(target_fl->l_whence);
                fl.l_start = tswap64(target_fl->l_start);
                fl.l_len = tswap64(target_fl->l_len);
                fl.l_pid = tswap32(target_fl->l_pid);
                unlock_user_struct(target_fl, arg3, 0);
            }
            ret = get_errno(fcntl(arg1, cmd, &fl));
            if (ret == 0) {
#ifdef TARGET_ARM
                if (((CPUARMState *)cpu_env)->eabi) {
                    if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
                        goto efault;
                    target_efl->l_type = tswap16(fl.l_type);
                    target_efl->l_whence = tswap16(fl.l_whence);
                    target_efl->l_start = tswap64(fl.l_start);
                    target_efl->l_len = tswap64(fl.l_len);
                    target_efl->l_pid = tswap32(fl.l_pid);
                    unlock_user_struct(target_efl, arg3, 1);
                } else
#endif
                {
                    if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
                        goto efault;
                    target_fl->l_type = tswap16(fl.l_type);
                    target_fl->l_whence = tswap16(fl.l_whence);
                    target_fl->l_start = tswap64(fl.l_start);
                    target_fl->l_len = tswap64(fl.l_len);
                    target_fl->l_pid = tswap32(fl.l_pid);
                    unlock_user_struct(target_fl, arg3, 1);
                }
            }
            break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
#ifdef TARGET_ARM
            if (((CPUARMState *)cpu_env)->eabi) {
                if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_efl->l_type);
                fl.l_whence = tswap16(target_efl->l_whence);
                fl.l_start = tswap64(target_efl->l_start);
                fl.l_len = tswap64(target_efl->l_len);
                fl.l_pid = tswap32(target_efl->l_pid);
                unlock_user_struct(target_efl, arg3, 0);
            } else
#endif
            {
                if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
                    goto efault;
                fl.l_type = tswap16(target_fl->l_type);
                fl.l_whence = tswap16(target_fl->l_whence);
                fl.l_start = tswap64(target_fl->l_start);
                fl.l_len = tswap64(target_fl->l_len);
                fl.l_pid = tswap32(target_fl->l_pid);
                unlock_user_struct(target_fl, arg3, 0);
            }
            ret = get_errno(fcntl(arg1, cmd, &fl));
            break;
        default:
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        break;
    }
#endif
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        ret = 0;
        break;
#endif
#ifdef TARGET_NR_security
    case TARGET_NR_security:
        goto unimplemented;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        ret = TARGET_PAGE_SIZE;
        break;
#endif
    case TARGET_NR_gettid:
        ret = get_errno(gettid());
        break;
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32
        if (regpairs_aligned(cpu_env)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        break;
#endif
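    /* For the *xattr calls a NULL value/list pointer is legitimate (it asks
       the kernel for the required buffer size), so a zero guest address is
       passed through rather than treated as an access fault. */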
#ifdef TARGET_NR_setxattr
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    {
        void *p, *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                ret = -TARGET_EFAULT;
                break;
            }
        }
        p = lock_user_string(arg1);
        if (p) {
            if (num == TARGET_NR_listxattr) {
                ret = get_errno(listxattr(p, b, arg3));
            } else {
                ret = get_errno(llistxattr(p, b, arg3));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(b, arg2, arg3);
        break;
    }
    case TARGET_NR_flistxattr:
    {
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                ret = -TARGET_EFAULT;
                break;
            }
        }
        ret = get_errno(flistxattr(arg1, b, arg3));
        unlock_user(b, arg2, arg3);
        break;
    }
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
    {
        void *p, *n, *v = 0;
        if (arg3) {
            v = lock_user(VERIFY_READ, arg3, arg4, 1);
            if (!v) {
                ret = -TARGET_EFAULT;
                break;
            }
        }
        p = lock_user_string(arg1);
        n = lock_user_string(arg2);
        if (p && n) {
            if (num == TARGET_NR_setxattr) {
                ret = get_errno(setxattr(p, n, v, arg4, arg5));
            } else {
                ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(n, arg2, 0);
        unlock_user(v, arg3, 0);
        break;
    }
    case TARGET_NR_fsetxattr:
    {
        void *n, *v = 0;
        if (arg3) {
            v = lock_user(VERIFY_READ, arg3, arg4, 1);
            if (!v) {
                ret = -TARGET_EFAULT;
                break;
            }
        }
        n = lock_user_string(arg2);
        if (n) {
            ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(n, arg2, 0);
        unlock_user(v, arg3, 0);
        break;
    }
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
    {
        void *p, *n, *v = 0;
        if (arg3) {
            v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!v) {
                ret = -TARGET_EFAULT;
                break;
            }
        }
        p = lock_user_string(arg1);
        n = lock_user_string(arg2);
        if (p && n) {
            if (num == TARGET_NR_getxattr) {
                ret = get_errno(getxattr(p, n, v, arg4));
            } else {
                ret = get_errno(lgetxattr(p, n, v, arg4));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(n, arg2, 0);
        unlock_user(v, arg3, arg4);
        break;
    }
    case TARGET_NR_fgetxattr:
    {
        void *n, *v = 0;
        if (arg3) {
            v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!v) {
                ret = -TARGET_EFAULT;
                break;
            }
        }
        n = lock_user_string(arg2);
        if (n) {
            ret = get_errno(fgetxattr(arg1, n, v, arg4));
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(n, arg2, 0);
        unlock_user(v, arg3, arg4);
        break;
    }
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
    {
        void *p, *n;
        p = lock_user_string(arg1);
        n = lock_user_string(arg2);
        if (p && n) {
            if (num == TARGET_NR_removexattr) {
                ret = get_errno(removexattr(p, n));
            } else {
                ret = get_errno(lremovexattr(p, n));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(n, arg2, 0);
        break;
    }
    case TARGET_NR_fremovexattr:
    {
        void *n;
        n = lock_user_string(arg2);
        if (n) {
            ret = get_errno(fremovexattr(arg1, n));
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(n, arg2, 0);
        break;
    }
#endif /* CONFIG_ATTR */
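    /* set_thread_area stores the TLS pointer in whatever per-architecture
       location the guest expects: a dedicated field for MIPS, the PID
       special register for CRIS, or a GDT entry via do_set_thread_area() for
       32-bit x86. */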
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
        ((CPUMIPSState *) cpu_env)->tls_value = arg1;
        ret = 0;
        break;
#elif defined(TARGET_CRIS)
        if (arg1 & 0xff)
            ret = -TARGET_EINVAL;
        else {
            ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
            ret = 0;
        }
        break;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
        ret = do_set_thread_area(cpu_env, arg1);
        break;
#else
        goto unimplemented_nowarn;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        ret = do_get_thread_area(cpu_env, arg1);
        break;
#else
        goto unimplemented_nowarn;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        goto unimplemented_nowarn;
#endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        break;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec(arg2, &ts);
        }
        break;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        target_to_host_timespec(&ts, arg3);
        ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
        if (arg4)
            host_to_target_timespec(arg4, &ts);
        break;
    }
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        ret = get_errno(set_tid_address((int *)g2h(arg1)));
        break;
#endif

#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
    case TARGET_NR_tkill:
        ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
        break;
#endif

#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
    case TARGET_NR_tgkill:
        ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
                        target_to_host_signal(arg3)));
        break;
#endif

#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
        goto unimplemented_nowarn;
#endif
#if defined(TARGET_NR_utimensat) && defined(__NR_utimensat)
    case TARGET_NR_utimensat:
        {
            struct timespec *tsp, ts[2];
            if (!arg3)
                tsp = NULL;
            else {
                target_to_host_timespec(ts, arg3);
                target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    ret = -TARGET_EFAULT;
                    goto fail;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        break;
#endif
#if defined(CONFIG_USE_NPTL)
    case TARGET_NR_futex:
        ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
        break;
#endif
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(sys_inotify_init());
        break;
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
    case TARGET_NR_inotify_init1:
        ret = get_errno(sys_inotify_init1(arg1));
        break;
#endif
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        break;
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
        break;
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr;

            p = lock_user_string(arg1 - 1);
            if (arg4 != 0)
                copy_from_user_mq_attr (&posix_mq_attr, arg4);
            ret = get_errno(mq_open(p, arg2, arg3, &posix_mq_attr));
            unlock_user (p, arg1, 0);
        }
        break;

    case TARGET_NR_mq_unlink:
        p = lock_user_string(arg1 - 1);
        ret = get_errno(mq_unlink(p));
        unlock_user (p, arg1, 0);
        break;
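    /* For mq_timedsend/mq_timedreceive a null timeout pointer (arg5 == 0)
       falls back to the untimed mq_send/mq_receive host calls. */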
    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
                host_to_target_timespec(arg5, &ts);
            }
            else
                ret = get_errno(mq_send(arg1, p, arg3, arg4));
            unlock_user (p, arg2, arg3);
        }
        break;

    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
                host_to_target_timespec(arg5, &ts);
            }
            else
                ret = get_errno(mq_receive(arg1, p, arg3, &prio));
            unlock_user (p, arg2, arg3);
            if (arg4 != 0)
                put_user_u32(prio, arg4);
        }
        break;
    /* Not implemented for now... */
/*     case TARGET_NR_mq_notify: */
/*         break; */

    case TARGET_NR_mq_getsetattr:
        {
            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
            ret = 0;
            if (arg3 != 0) {
                ret = mq_getattr(arg1, &posix_mq_attr_out);
                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
            }
            if (arg2 != 0) {
                copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
                ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
            }
        }
        break;
#endif
#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            ret = get_errno(tee(arg1,arg2,arg3,arg4));
        }
        break;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                get_user_u64(loff_in, arg2);
                ploff_in = &loff_in;
            }
            if (arg4) {
                get_user_u64(loff_out, arg4);
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
        }
        break;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            int count = arg3;
            struct iovec *vec;

            vec = alloca(count * sizeof(struct iovec));
            if (lock_iovec(VERIFY_READ, vec, arg2, count, 1) < 0)
                goto efault;
            ret = get_errno(vmsplice(arg1, vec, count, arg4));
            unlock_iovec(vec, arg2, count, 0);
        }
        break;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        break;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
        ret = get_errno(eventfd(arg1, arg2));
        break;
#endif
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
        break;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        break;
#endif
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        ret = get_errno(epoll_create(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        ret = get_errno(epoll_create1(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            struct target_epoll_event *target_ep;
            if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                goto efault;
            }
            ep.events = tswap32(target_ep->events);
            /* The epoll_data_t union is just opaque data to the kernel,
             * so we transfer all 64 bits across and need not worry what
             * actual data type it is.
             */
            ep.data.u64 = tswap64(target_ep->data.u64);
            unlock_user_struct(target_ep, arg4, 0);
            epp = &ep;
        }
        ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
        break;
    }
#endif
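    /* epoll_wait and epoll_pwait share the same event-array conversion
       below; the only difference is that epoll_pwait may also carry a
       signal mask that has to be converted to a host sigset first. */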
#if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
#define IMPLEMENT_EPOLL_PWAIT
#endif
#if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(IMPLEMENT_EPOLL_PWAIT)
    case TARGET_NR_epoll_pwait:
#endif
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            goto efault;
        }

        ep = alloca(maxevents * sizeof(struct epoll_event));

        switch (num) {
#if defined(IMPLEMENT_EPOLL_PWAIT)
        case TARGET_NR_epoll_pwait:
        {
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    unlock_user(target_ep, arg2, 0);
                    goto efault;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }

            ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
        }
        unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
        break;
    }
#endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        if (arg3) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                goto efault;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                goto efault;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        break;
    }
#endif
    default:
    unimplemented:
        gemu_log("qemu: Unsupported syscall: %d\n", num);
#if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
    unimplemented_nowarn:
#endif
        ret = -TARGET_ENOSYS;
        break;
    }
fail:
#ifdef DEBUG
    gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
#endif
    if (do_strace)
        print_syscall_ret(num, ret);
    return ret;
efault:
    ret = -TARGET_EFAULT;