4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
36 #include <linux/capability.h>
39 int __clone2(int (*fn
)(void *), void *child_stack_base
,
40 size_t stack_size
, int flags
, void *arg
, ...);
42 #include <sys/socket.h>
46 #include <sys/times.h>
49 #include <sys/statfs.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include "qemu-common.h"
60 #include <sys/timerfd.h>
66 #include <sys/eventfd.h>
69 #include <sys/epoll.h>
72 #include "qemu/xattr.h"
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
91 #include <linux/mtio.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
107 #include <linux/if_bridge.h>
109 #include <linux/audit.h>
110 #include "linux_loop.h"
115 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
116 CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
119 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
120 * once. This exercises the codepaths for restart.
122 //#define DEBUG_ERESTARTSYS
124 //#include <linux/msdos_fs.h>
125 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
126 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
136 #define _syscall0(type,name) \
137 static type name (void) \
139 return syscall(__NR_##name); \
142 #define _syscall1(type,name,type1,arg1) \
143 static type name (type1 arg1) \
145 return syscall(__NR_##name, arg1); \
148 #define _syscall2(type,name,type1,arg1,type2,arg2) \
149 static type name (type1 arg1,type2 arg2) \
151 return syscall(__NR_##name, arg1, arg2); \
154 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
155 static type name (type1 arg1,type2 arg2,type3 arg3) \
157 return syscall(__NR_##name, arg1, arg2, arg3); \
160 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
161 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
163 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
166 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
168 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
170 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
174 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
175 type5,arg5,type6,arg6) \
176 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
179 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
183 #define __NR_sys_uname __NR_uname
184 #define __NR_sys_getcwd1 __NR_getcwd
185 #define __NR_sys_getdents __NR_getdents
186 #define __NR_sys_getdents64 __NR_getdents64
187 #define __NR_sys_getpriority __NR_getpriority
188 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
189 #define __NR_sys_syslog __NR_syslog
190 #define __NR_sys_futex __NR_futex
191 #define __NR_sys_inotify_init __NR_inotify_init
192 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
193 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
195 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
197 #define __NR__llseek __NR_lseek
200 /* Newer kernel ports have llseek() instead of _llseek() */
201 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
202 #define TARGET_NR__llseek TARGET_NR_llseek
206 _syscall0(int, gettid
)
208 /* This is a replacement for the host gettid() and must return a host
210 static int gettid(void) {
214 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
215 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
217 #if !defined(__NR_getdents) || \
218 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
219 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
221 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
222 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
223 loff_t
*, res
, uint
, wh
);
225 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
226 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
227 #ifdef __NR_exit_group
228 _syscall1(int,exit_group
,int,error_code
)
230 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
231 _syscall1(int,set_tid_address
,int *,tidptr
)
233 #if defined(TARGET_NR_futex) && defined(__NR_futex)
234 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
235 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
237 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
238 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
239 unsigned long *, user_mask_ptr
);
240 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
241 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
242 unsigned long *, user_mask_ptr
);
243 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
245 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
246 struct __user_cap_data_struct
*, data
);
247 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
248 struct __user_cap_data_struct
*, data
);
249 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
250 _syscall2(int, ioprio_get
, int, which
, int, who
)
252 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
253 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
255 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
256 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
259 static bitmask_transtbl fcntl_flags_tbl
[] = {
260 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
261 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
262 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
263 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
264 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
265 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
266 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
267 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
268 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
269 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
270 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
271 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
272 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
273 #if defined(O_DIRECT)
274 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
276 #if defined(O_NOATIME)
277 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
279 #if defined(O_CLOEXEC)
280 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
283 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
285 /* Don't terminate the list prematurely on 64-bit host+guest. */
286 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
287 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
294 QEMU_IFLA_BR_FORWARD_DELAY
,
295 QEMU_IFLA_BR_HELLO_TIME
,
296 QEMU_IFLA_BR_MAX_AGE
,
297 QEMU_IFLA_BR_AGEING_TIME
,
298 QEMU_IFLA_BR_STP_STATE
,
299 QEMU_IFLA_BR_PRIORITY
,
300 QEMU_IFLA_BR_VLAN_FILTERING
,
301 QEMU_IFLA_BR_VLAN_PROTOCOL
,
302 QEMU_IFLA_BR_GROUP_FWD_MASK
,
303 QEMU_IFLA_BR_ROOT_ID
,
304 QEMU_IFLA_BR_BRIDGE_ID
,
305 QEMU_IFLA_BR_ROOT_PORT
,
306 QEMU_IFLA_BR_ROOT_PATH_COST
,
307 QEMU_IFLA_BR_TOPOLOGY_CHANGE
,
308 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED
,
309 QEMU_IFLA_BR_HELLO_TIMER
,
310 QEMU_IFLA_BR_TCN_TIMER
,
311 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER
,
312 QEMU_IFLA_BR_GC_TIMER
,
313 QEMU_IFLA_BR_GROUP_ADDR
,
314 QEMU_IFLA_BR_FDB_FLUSH
,
315 QEMU_IFLA_BR_MCAST_ROUTER
,
316 QEMU_IFLA_BR_MCAST_SNOOPING
,
317 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR
,
318 QEMU_IFLA_BR_MCAST_QUERIER
,
319 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY
,
320 QEMU_IFLA_BR_MCAST_HASH_MAX
,
321 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT
,
322 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT
,
323 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL
,
324 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL
,
325 QEMU_IFLA_BR_MCAST_QUERIER_INTVL
,
326 QEMU_IFLA_BR_MCAST_QUERY_INTVL
,
327 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
,
328 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL
,
329 QEMU_IFLA_BR_NF_CALL_IPTABLES
,
330 QEMU_IFLA_BR_NF_CALL_IP6TABLES
,
331 QEMU_IFLA_BR_NF_CALL_ARPTABLES
,
332 QEMU_IFLA_BR_VLAN_DEFAULT_PVID
,
334 QEMU_IFLA_BR_VLAN_STATS_ENABLED
,
335 QEMU_IFLA_BR_MCAST_STATS_ENABLED
,
359 QEMU_IFLA_NET_NS_PID
,
362 QEMU_IFLA_VFINFO_LIST
,
370 QEMU_IFLA_PROMISCUITY
,
371 QEMU_IFLA_NUM_TX_QUEUES
,
372 QEMU_IFLA_NUM_RX_QUEUES
,
374 QEMU_IFLA_PHYS_PORT_ID
,
375 QEMU_IFLA_CARRIER_CHANGES
,
376 QEMU_IFLA_PHYS_SWITCH_ID
,
377 QEMU_IFLA_LINK_NETNSID
,
378 QEMU_IFLA_PHYS_PORT_NAME
,
379 QEMU_IFLA_PROTO_DOWN
,
380 QEMU_IFLA_GSO_MAX_SEGS
,
381 QEMU_IFLA_GSO_MAX_SIZE
,
388 QEMU_IFLA_BRPORT_UNSPEC
,
389 QEMU_IFLA_BRPORT_STATE
,
390 QEMU_IFLA_BRPORT_PRIORITY
,
391 QEMU_IFLA_BRPORT_COST
,
392 QEMU_IFLA_BRPORT_MODE
,
393 QEMU_IFLA_BRPORT_GUARD
,
394 QEMU_IFLA_BRPORT_PROTECT
,
395 QEMU_IFLA_BRPORT_FAST_LEAVE
,
396 QEMU_IFLA_BRPORT_LEARNING
,
397 QEMU_IFLA_BRPORT_UNICAST_FLOOD
,
398 QEMU_IFLA_BRPORT_PROXYARP
,
399 QEMU_IFLA_BRPORT_LEARNING_SYNC
,
400 QEMU_IFLA_BRPORT_PROXYARP_WIFI
,
401 QEMU_IFLA_BRPORT_ROOT_ID
,
402 QEMU_IFLA_BRPORT_BRIDGE_ID
,
403 QEMU_IFLA_BRPORT_DESIGNATED_PORT
,
404 QEMU_IFLA_BRPORT_DESIGNATED_COST
,
407 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
,
408 QEMU_IFLA_BRPORT_CONFIG_PENDING
,
409 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER
,
410 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER
,
411 QEMU_IFLA_BRPORT_HOLD_TIMER
,
412 QEMU_IFLA_BRPORT_FLUSH
,
413 QEMU_IFLA_BRPORT_MULTICAST_ROUTER
,
414 QEMU_IFLA_BRPORT_PAD
,
415 QEMU___IFLA_BRPORT_MAX
419 QEMU_IFLA_INFO_UNSPEC
,
422 QEMU_IFLA_INFO_XSTATS
,
423 QEMU_IFLA_INFO_SLAVE_KIND
,
424 QEMU_IFLA_INFO_SLAVE_DATA
,
425 QEMU___IFLA_INFO_MAX
,
429 QEMU_IFLA_INET_UNSPEC
,
431 QEMU___IFLA_INET_MAX
,
435 QEMU_IFLA_INET6_UNSPEC
,
436 QEMU_IFLA_INET6_FLAGS
,
437 QEMU_IFLA_INET6_CONF
,
438 QEMU_IFLA_INET6_STATS
,
439 QEMU_IFLA_INET6_MCAST
,
440 QEMU_IFLA_INET6_CACHEINFO
,
441 QEMU_IFLA_INET6_ICMP6STATS
,
442 QEMU_IFLA_INET6_TOKEN
,
443 QEMU_IFLA_INET6_ADDR_GEN_MODE
,
444 QEMU___IFLA_INET6_MAX
447 typedef abi_long (*TargetFdDataFunc
)(void *, size_t);
448 typedef abi_long (*TargetFdAddrFunc
)(void *, abi_ulong
, socklen_t
);
449 typedef struct TargetFdTrans
{
450 TargetFdDataFunc host_to_target_data
;
451 TargetFdDataFunc target_to_host_data
;
452 TargetFdAddrFunc target_to_host_addr
;
455 static TargetFdTrans
**target_fd_trans
;
457 static unsigned int target_fd_max
;
459 static TargetFdDataFunc
fd_trans_target_to_host_data(int fd
)
461 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
462 return target_fd_trans
[fd
]->target_to_host_data
;
467 static TargetFdDataFunc
fd_trans_host_to_target_data(int fd
)
469 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
470 return target_fd_trans
[fd
]->host_to_target_data
;
475 static TargetFdAddrFunc
fd_trans_target_to_host_addr(int fd
)
477 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
478 return target_fd_trans
[fd
]->target_to_host_addr
;
483 static void fd_trans_register(int fd
, TargetFdTrans
*trans
)
487 if (fd
>= target_fd_max
) {
488 oldmax
= target_fd_max
;
489 target_fd_max
= ((fd
>> 6) + 1) << 6; /* by slice of 64 entries */
490 target_fd_trans
= g_renew(TargetFdTrans
*,
491 target_fd_trans
, target_fd_max
);
492 memset((void *)(target_fd_trans
+ oldmax
), 0,
493 (target_fd_max
- oldmax
) * sizeof(TargetFdTrans
*));
495 target_fd_trans
[fd
] = trans
;
498 static void fd_trans_unregister(int fd
)
500 if (fd
>= 0 && fd
< target_fd_max
) {
501 target_fd_trans
[fd
] = NULL
;
505 static void fd_trans_dup(int oldfd
, int newfd
)
507 fd_trans_unregister(newfd
);
508 if (oldfd
< target_fd_max
&& target_fd_trans
[oldfd
]) {
509 fd_trans_register(newfd
, target_fd_trans
[oldfd
]);
513 static int sys_getcwd1(char *buf
, size_t size
)
515 if (getcwd(buf
, size
) == NULL
) {
516 /* getcwd() sets errno */
519 return strlen(buf
)+1;
522 #ifdef TARGET_NR_utimensat
523 #ifdef CONFIG_UTIMENSAT
524 static int sys_utimensat(int dirfd
, const char *pathname
,
525 const struct timespec times
[2], int flags
)
527 if (pathname
== NULL
)
528 return futimens(dirfd
, times
);
530 return utimensat(dirfd
, pathname
, times
, flags
);
532 #elif defined(__NR_utimensat)
533 #define __NR_sys_utimensat __NR_utimensat
534 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
535 const struct timespec
*,tsp
,int,flags
)
537 static int sys_utimensat(int dirfd
, const char *pathname
,
538 const struct timespec times
[2], int flags
)
544 #endif /* TARGET_NR_utimensat */
546 #ifdef CONFIG_INOTIFY
547 #include <sys/inotify.h>
549 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
550 static int sys_inotify_init(void)
552 return (inotify_init());
555 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
556 static int sys_inotify_add_watch(int fd
,const char *pathname
, int32_t mask
)
558 return (inotify_add_watch(fd
, pathname
, mask
));
561 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
562 static int sys_inotify_rm_watch(int fd
, int32_t wd
)
564 return (inotify_rm_watch(fd
, wd
));
567 #ifdef CONFIG_INOTIFY1
568 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
569 static int sys_inotify_init1(int flags
)
571 return (inotify_init1(flags
));
576 /* Userspace can usually survive runtime without inotify */
577 #undef TARGET_NR_inotify_init
578 #undef TARGET_NR_inotify_init1
579 #undef TARGET_NR_inotify_add_watch
580 #undef TARGET_NR_inotify_rm_watch
581 #endif /* CONFIG_INOTIFY */
583 #if defined(TARGET_NR_prlimit64)
584 #ifndef __NR_prlimit64
585 # define __NR_prlimit64 -1
587 #define __NR_sys_prlimit64 __NR_prlimit64
588 /* The glibc rlimit structure may not be that used by the underlying syscall */
589 struct host_rlimit64
{
593 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
594 const struct host_rlimit64
*, new_limit
,
595 struct host_rlimit64
*, old_limit
)
599 #if defined(TARGET_NR_timer_create)
600 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
601 static timer_t g_posix_timers
[32] = { 0, } ;
603 static inline int next_free_host_timer(void)
606 /* FIXME: Does finding the next free slot require a lock? */
607 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
608 if (g_posix_timers
[k
] == 0) {
609 g_posix_timers
[k
] = (timer_t
) 1;
617 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
619 static inline int regpairs_aligned(void *cpu_env
) {
620 return ((((CPUARMState
*)cpu_env
)->eabi
) == 1) ;
622 #elif defined(TARGET_MIPS)
623 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
624 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
625 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
626 * of registers which translates to the same as ARM/MIPS, because we start with
628 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
630 static inline int regpairs_aligned(void *cpu_env
) { return 0; }
633 #define ERRNO_TABLE_SIZE 1200
635 /* target_to_host_errno_table[] is initialized from
636 * host_to_target_errno_table[] in syscall_init(). */
637 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
641 * This list is the union of errno values overridden in asm-<arch>/errno.h
642 * minus the errnos that are not actually generic to all archs.
644 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
645 [EAGAIN
] = TARGET_EAGAIN
,
646 [EIDRM
] = TARGET_EIDRM
,
647 [ECHRNG
] = TARGET_ECHRNG
,
648 [EL2NSYNC
] = TARGET_EL2NSYNC
,
649 [EL3HLT
] = TARGET_EL3HLT
,
650 [EL3RST
] = TARGET_EL3RST
,
651 [ELNRNG
] = TARGET_ELNRNG
,
652 [EUNATCH
] = TARGET_EUNATCH
,
653 [ENOCSI
] = TARGET_ENOCSI
,
654 [EL2HLT
] = TARGET_EL2HLT
,
655 [EDEADLK
] = TARGET_EDEADLK
,
656 [ENOLCK
] = TARGET_ENOLCK
,
657 [EBADE
] = TARGET_EBADE
,
658 [EBADR
] = TARGET_EBADR
,
659 [EXFULL
] = TARGET_EXFULL
,
660 [ENOANO
] = TARGET_ENOANO
,
661 [EBADRQC
] = TARGET_EBADRQC
,
662 [EBADSLT
] = TARGET_EBADSLT
,
663 [EBFONT
] = TARGET_EBFONT
,
664 [ENOSTR
] = TARGET_ENOSTR
,
665 [ENODATA
] = TARGET_ENODATA
,
666 [ETIME
] = TARGET_ETIME
,
667 [ENOSR
] = TARGET_ENOSR
,
668 [ENONET
] = TARGET_ENONET
,
669 [ENOPKG
] = TARGET_ENOPKG
,
670 [EREMOTE
] = TARGET_EREMOTE
,
671 [ENOLINK
] = TARGET_ENOLINK
,
672 [EADV
] = TARGET_EADV
,
673 [ESRMNT
] = TARGET_ESRMNT
,
674 [ECOMM
] = TARGET_ECOMM
,
675 [EPROTO
] = TARGET_EPROTO
,
676 [EDOTDOT
] = TARGET_EDOTDOT
,
677 [EMULTIHOP
] = TARGET_EMULTIHOP
,
678 [EBADMSG
] = TARGET_EBADMSG
,
679 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
680 [EOVERFLOW
] = TARGET_EOVERFLOW
,
681 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
682 [EBADFD
] = TARGET_EBADFD
,
683 [EREMCHG
] = TARGET_EREMCHG
,
684 [ELIBACC
] = TARGET_ELIBACC
,
685 [ELIBBAD
] = TARGET_ELIBBAD
,
686 [ELIBSCN
] = TARGET_ELIBSCN
,
687 [ELIBMAX
] = TARGET_ELIBMAX
,
688 [ELIBEXEC
] = TARGET_ELIBEXEC
,
689 [EILSEQ
] = TARGET_EILSEQ
,
690 [ENOSYS
] = TARGET_ENOSYS
,
691 [ELOOP
] = TARGET_ELOOP
,
692 [ERESTART
] = TARGET_ERESTART
,
693 [ESTRPIPE
] = TARGET_ESTRPIPE
,
694 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
695 [EUSERS
] = TARGET_EUSERS
,
696 [ENOTSOCK
] = TARGET_ENOTSOCK
,
697 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
698 [EMSGSIZE
] = TARGET_EMSGSIZE
,
699 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
700 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
701 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
702 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
703 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
704 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
705 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
706 [EADDRINUSE
] = TARGET_EADDRINUSE
,
707 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
708 [ENETDOWN
] = TARGET_ENETDOWN
,
709 [ENETUNREACH
] = TARGET_ENETUNREACH
,
710 [ENETRESET
] = TARGET_ENETRESET
,
711 [ECONNABORTED
] = TARGET_ECONNABORTED
,
712 [ECONNRESET
] = TARGET_ECONNRESET
,
713 [ENOBUFS
] = TARGET_ENOBUFS
,
714 [EISCONN
] = TARGET_EISCONN
,
715 [ENOTCONN
] = TARGET_ENOTCONN
,
716 [EUCLEAN
] = TARGET_EUCLEAN
,
717 [ENOTNAM
] = TARGET_ENOTNAM
,
718 [ENAVAIL
] = TARGET_ENAVAIL
,
719 [EISNAM
] = TARGET_EISNAM
,
720 [EREMOTEIO
] = TARGET_EREMOTEIO
,
721 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
722 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
723 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
724 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
725 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
726 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
727 [EALREADY
] = TARGET_EALREADY
,
728 [EINPROGRESS
] = TARGET_EINPROGRESS
,
729 [ESTALE
] = TARGET_ESTALE
,
730 [ECANCELED
] = TARGET_ECANCELED
,
731 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
732 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
734 [ENOKEY
] = TARGET_ENOKEY
,
737 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
740 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
743 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
746 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
748 #ifdef ENOTRECOVERABLE
749 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
753 static inline int host_to_target_errno(int err
)
755 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
756 host_to_target_errno_table
[err
]) {
757 return host_to_target_errno_table
[err
];
762 static inline int target_to_host_errno(int err
)
764 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
765 target_to_host_errno_table
[err
]) {
766 return target_to_host_errno_table
[err
];
771 static inline abi_long
get_errno(abi_long ret
)
774 return -host_to_target_errno(errno
);
779 static inline int is_error(abi_long ret
)
781 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
784 const char *target_strerror(int err
)
786 if (err
== TARGET_ERESTARTSYS
) {
787 return "To be restarted";
789 if (err
== TARGET_QEMU_ESIGRETURN
) {
790 return "Successful exit from sigreturn";
793 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
796 return strerror(target_to_host_errno(err
));
799 #define safe_syscall0(type, name) \
800 static type safe_##name(void) \
802 return safe_syscall(__NR_##name); \
805 #define safe_syscall1(type, name, type1, arg1) \
806 static type safe_##name(type1 arg1) \
808 return safe_syscall(__NR_##name, arg1); \
811 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
812 static type safe_##name(type1 arg1, type2 arg2) \
814 return safe_syscall(__NR_##name, arg1, arg2); \
817 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
818 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
820 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
823 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
825 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
827 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
830 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
831 type4, arg4, type5, arg5) \
832 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
835 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
838 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
839 type4, arg4, type5, arg5, type6, arg6) \
840 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
841 type5 arg5, type6 arg6) \
843 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
846 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
847 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
848 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
849 int, flags
, mode_t
, mode
)
850 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
851 struct rusage
*, rusage
)
852 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
853 int, options
, struct rusage
*, rusage
)
854 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
855 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
856 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
857 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
858 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
860 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
861 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
863 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
864 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
865 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
866 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
867 safe_syscall2(int, tkill
, int, tid
, int, sig
)
868 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
869 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
870 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
871 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
873 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
874 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
875 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
876 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
877 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
878 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
879 safe_syscall2(int, flock
, int, fd
, int, operation
)
880 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
881 const struct timespec
*, uts
, size_t, sigsetsize
)
882 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
884 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
885 struct timespec
*, rem
)
886 #ifdef TARGET_NR_clock_nanosleep
887 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
888 const struct timespec
*, req
, struct timespec
*, rem
)
891 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
893 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
894 long, msgtype
, int, flags
)
895 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
896 unsigned, nsops
, const struct timespec
*, timeout
)
898 /* This host kernel architecture uses a single ipc syscall; fake up
899 * wrappers for the sub-operations to hide this implementation detail.
900 * Annoyingly we can't include linux/ipc.h to get the constant definitions
901 * for the call parameter because some structs in there conflict with the
902 * sys/ipc.h ones. So we just define them here, and rely on them being
903 * the same for all host architectures.
905 #define Q_SEMTIMEDOP 4
908 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
910 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
911 void *, ptr
, long, fifth
)
912 static int safe_msgsnd(int msgid
, const void *msgp
, size_t sz
, int flags
)
914 return safe_ipc(Q_IPCCALL(0, Q_MSGSND
), msgid
, sz
, flags
, (void *)msgp
, 0);
916 static int safe_msgrcv(int msgid
, void *msgp
, size_t sz
, long type
, int flags
)
918 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV
), msgid
, sz
, flags
, msgp
, type
);
920 static int safe_semtimedop(int semid
, struct sembuf
*tsops
, unsigned nsops
,
921 const struct timespec
*timeout
)
923 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP
), semid
, nsops
, 0, tsops
,
927 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
928 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
929 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
930 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
931 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
933 /* We do ioctl like this rather than via safe_syscall3 to preserve the
934 * "third argument might be integer or pointer or not present" behaviour of
937 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
938 /* Similarly for fcntl. Note that callers must always:
939 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
940 * use the flock64 struct rather than unsuffixed flock
941 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
944 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
946 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
949 static inline int host_to_target_sock_type(int host_type
)
953 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
955 target_type
= TARGET_SOCK_DGRAM
;
958 target_type
= TARGET_SOCK_STREAM
;
961 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
965 #if defined(SOCK_CLOEXEC)
966 if (host_type
& SOCK_CLOEXEC
) {
967 target_type
|= TARGET_SOCK_CLOEXEC
;
971 #if defined(SOCK_NONBLOCK)
972 if (host_type
& SOCK_NONBLOCK
) {
973 target_type
|= TARGET_SOCK_NONBLOCK
;
980 static abi_ulong target_brk
;
981 static abi_ulong target_original_brk
;
982 static abi_ulong brk_page
;
984 void target_set_brk(abi_ulong new_brk
)
986 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
987 brk_page
= HOST_PAGE_ALIGN(target_brk
);
990 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
991 #define DEBUGF_BRK(message, args...)
993 /* do_brk() must return target values and target errnos. */
994 abi_long
do_brk(abi_ulong new_brk
)
996 abi_long mapped_addr
;
997 abi_ulong new_alloc_size
;
999 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
1002 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
1005 if (new_brk
< target_original_brk
) {
1006 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
1011 /* If the new brk is less than the highest page reserved to the
1012 * target heap allocation, set it and we're almost done... */
1013 if (new_brk
<= brk_page
) {
1014 /* Heap contents are initialized to zero, as for anonymous
1016 if (new_brk
> target_brk
) {
1017 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
1019 target_brk
= new_brk
;
1020 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
1024 /* We need to allocate more memory after the brk... Note that
1025 * we don't use MAP_FIXED because that will map over the top of
1026 * any existing mapping (like the one with the host libc or qemu
1027 * itself); instead we treat "mapped but at wrong address" as
1028 * a failure and unmap again.
1030 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
1031 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
1032 PROT_READ
|PROT_WRITE
,
1033 MAP_ANON
|MAP_PRIVATE
, 0, 0));
1035 if (mapped_addr
== brk_page
) {
1036 /* Heap contents are initialized to zero, as for anonymous
1037 * mapped pages. Technically the new pages are already
1038 * initialized to zero since they *are* anonymous mapped
1039 * pages, however we have to take care with the contents that
1040 * come from the remaining part of the previous page: it may
1041 * contains garbage data due to a previous heap usage (grown
1042 * then shrunken). */
1043 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
1045 target_brk
= new_brk
;
1046 brk_page
= HOST_PAGE_ALIGN(target_brk
);
1047 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
1050 } else if (mapped_addr
!= -1) {
1051 /* Mapped but at wrong address, meaning there wasn't actually
1052 * enough space for this brk.
1054 target_munmap(mapped_addr
, new_alloc_size
);
1056 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
1059 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
1062 #if defined(TARGET_ALPHA)
1063 /* We (partially) emulate OSF/1 on Alpha, which requires we
1064 return a proper errno, not an unchanged brk value. */
1065 return -TARGET_ENOMEM
;
1067 /* For everything else, return the previous break. */
1071 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
1072 abi_ulong target_fds_addr
,
1076 abi_ulong b
, *target_fds
;
1078 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1079 if (!(target_fds
= lock_user(VERIFY_READ
,
1081 sizeof(abi_ulong
) * nw
,
1083 return -TARGET_EFAULT
;
1087 for (i
= 0; i
< nw
; i
++) {
1088 /* grab the abi_ulong */
1089 __get_user(b
, &target_fds
[i
]);
1090 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1091 /* check the bit inside the abi_ulong */
1098 unlock_user(target_fds
, target_fds_addr
, 0);
1103 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
1104 abi_ulong target_fds_addr
,
1107 if (target_fds_addr
) {
1108 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
1109 return -TARGET_EFAULT
;
1117 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
1123 abi_ulong
*target_fds
;
1125 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1126 if (!(target_fds
= lock_user(VERIFY_WRITE
,
1128 sizeof(abi_ulong
) * nw
,
1130 return -TARGET_EFAULT
;
1133 for (i
= 0; i
< nw
; i
++) {
1135 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1136 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
1139 __put_user(v
, &target_fds
[i
]);
1142 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
1147 #if defined(__alpha__)
1148 #define HOST_HZ 1024
1153 static inline abi_long
host_to_target_clock_t(long ticks
)
1155 #if HOST_HZ == TARGET_HZ
1158 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1162 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1163 const struct rusage
*rusage
)
1165 struct target_rusage
*target_rusage
;
1167 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1168 return -TARGET_EFAULT
;
1169 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1170 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1171 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1172 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1173 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1174 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1175 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1176 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1177 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1178 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1179 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1180 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1181 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1182 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1183 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1184 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1185 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1186 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1187 unlock_user_struct(target_rusage
, target_addr
, 1);
1192 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1194 abi_ulong target_rlim_swap
;
1197 target_rlim_swap
= tswapal(target_rlim
);
1198 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1199 return RLIM_INFINITY
;
1201 result
= target_rlim_swap
;
1202 if (target_rlim_swap
!= (rlim_t
)result
)
1203 return RLIM_INFINITY
;
1208 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1210 abi_ulong target_rlim_swap
;
1213 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1214 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1216 target_rlim_swap
= rlim
;
1217 result
= tswapal(target_rlim_swap
);
1222 static inline int target_to_host_resource(int code
)
1225 case TARGET_RLIMIT_AS
:
1227 case TARGET_RLIMIT_CORE
:
1229 case TARGET_RLIMIT_CPU
:
1231 case TARGET_RLIMIT_DATA
:
1233 case TARGET_RLIMIT_FSIZE
:
1234 return RLIMIT_FSIZE
;
1235 case TARGET_RLIMIT_LOCKS
:
1236 return RLIMIT_LOCKS
;
1237 case TARGET_RLIMIT_MEMLOCK
:
1238 return RLIMIT_MEMLOCK
;
1239 case TARGET_RLIMIT_MSGQUEUE
:
1240 return RLIMIT_MSGQUEUE
;
1241 case TARGET_RLIMIT_NICE
:
1243 case TARGET_RLIMIT_NOFILE
:
1244 return RLIMIT_NOFILE
;
1245 case TARGET_RLIMIT_NPROC
:
1246 return RLIMIT_NPROC
;
1247 case TARGET_RLIMIT_RSS
:
1249 case TARGET_RLIMIT_RTPRIO
:
1250 return RLIMIT_RTPRIO
;
1251 case TARGET_RLIMIT_SIGPENDING
:
1252 return RLIMIT_SIGPENDING
;
1253 case TARGET_RLIMIT_STACK
:
1254 return RLIMIT_STACK
;
1260 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1261 abi_ulong target_tv_addr
)
1263 struct target_timeval
*target_tv
;
1265 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
1266 return -TARGET_EFAULT
;
1268 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1269 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1271 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1276 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1277 const struct timeval
*tv
)
1279 struct target_timeval
*target_tv
;
1281 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
1282 return -TARGET_EFAULT
;
1284 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1285 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1287 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1292 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1293 abi_ulong target_tz_addr
)
1295 struct target_timezone
*target_tz
;
1297 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1298 return -TARGET_EFAULT
;
1301 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1302 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1304 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1309 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1312 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1313 abi_ulong target_mq_attr_addr
)
1315 struct target_mq_attr
*target_mq_attr
;
1317 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1318 target_mq_attr_addr
, 1))
1319 return -TARGET_EFAULT
;
1321 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1322 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1323 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1324 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1326 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1331 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1332 const struct mq_attr
*attr
)
1334 struct target_mq_attr
*target_mq_attr
;
1336 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1337 target_mq_attr_addr
, 0))
1338 return -TARGET_EFAULT
;
1340 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1341 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1342 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1343 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1345 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1351 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1352 /* do_select() must return target values and target errnos. */
1353 static abi_long
do_select(int n
,
1354 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1355 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1357 fd_set rfds
, wfds
, efds
;
1358 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1360 struct timespec ts
, *ts_ptr
;
1363 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1367 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1371 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1376 if (target_tv_addr
) {
1377 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1378 return -TARGET_EFAULT
;
1379 ts
.tv_sec
= tv
.tv_sec
;
1380 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1386 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1389 if (!is_error(ret
)) {
1390 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1391 return -TARGET_EFAULT
;
1392 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1393 return -TARGET_EFAULT
;
1394 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1395 return -TARGET_EFAULT
;
1397 if (target_tv_addr
) {
1398 tv
.tv_sec
= ts
.tv_sec
;
1399 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1400 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1401 return -TARGET_EFAULT
;
1410 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1413 return pipe2(host_pipe
, flags
);
1419 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1420 int flags
, int is_pipe2
)
1424 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1427 return get_errno(ret
);
1429 /* Several targets have special calling conventions for the original
1430 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1432 #if defined(TARGET_ALPHA)
1433 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1434 return host_pipe
[0];
1435 #elif defined(TARGET_MIPS)
1436 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1437 return host_pipe
[0];
1438 #elif defined(TARGET_SH4)
1439 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1440 return host_pipe
[0];
1441 #elif defined(TARGET_SPARC)
1442 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1443 return host_pipe
[0];
1447 if (put_user_s32(host_pipe
[0], pipedes
)
1448 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1449 return -TARGET_EFAULT
;
1450 return get_errno(ret
);
1453 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1454 abi_ulong target_addr
,
1457 struct target_ip_mreqn
*target_smreqn
;
1459 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1461 return -TARGET_EFAULT
;
1462 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1463 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1464 if (len
== sizeof(struct target_ip_mreqn
))
1465 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1466 unlock_user(target_smreqn
, target_addr
, 0);
1471 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1472 abi_ulong target_addr
,
1475 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1476 sa_family_t sa_family
;
1477 struct target_sockaddr
*target_saddr
;
1479 if (fd_trans_target_to_host_addr(fd
)) {
1480 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1483 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1485 return -TARGET_EFAULT
;
1487 sa_family
= tswap16(target_saddr
->sa_family
);
1489 /* Oops. The caller might send a incomplete sun_path; sun_path
1490 * must be terminated by \0 (see the manual page), but
1491 * unfortunately it is quite common to specify sockaddr_un
1492 * length as "strlen(x->sun_path)" while it should be
1493 * "strlen(...) + 1". We'll fix that here if needed.
1494 * Linux kernel has a similar feature.
1497 if (sa_family
== AF_UNIX
) {
1498 if (len
< unix_maxlen
&& len
> 0) {
1499 char *cp
= (char*)target_saddr
;
1501 if ( cp
[len
-1] && !cp
[len
] )
1504 if (len
> unix_maxlen
)
1508 memcpy(addr
, target_saddr
, len
);
1509 addr
->sa_family
= sa_family
;
1510 if (sa_family
== AF_NETLINK
) {
1511 struct sockaddr_nl
*nladdr
;
1513 nladdr
= (struct sockaddr_nl
*)addr
;
1514 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1515 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1516 } else if (sa_family
== AF_PACKET
) {
1517 struct target_sockaddr_ll
*lladdr
;
1519 lladdr
= (struct target_sockaddr_ll
*)addr
;
1520 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1521 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1523 unlock_user(target_saddr
, target_addr
, 0);
1528 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1529 struct sockaddr
*addr
,
1532 struct target_sockaddr
*target_saddr
;
1538 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1540 return -TARGET_EFAULT
;
1541 memcpy(target_saddr
, addr
, len
);
1542 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1543 sizeof(target_saddr
->sa_family
)) {
1544 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1546 if (addr
->sa_family
== AF_NETLINK
&& len
>= sizeof(struct sockaddr_nl
)) {
1547 struct sockaddr_nl
*target_nl
= (struct sockaddr_nl
*)target_saddr
;
1548 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1549 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1550 } else if (addr
->sa_family
== AF_PACKET
) {
1551 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1552 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1553 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1555 unlock_user(target_saddr
, target_addr
, len
);
1560 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1561 struct target_msghdr
*target_msgh
)
1563 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1564 abi_long msg_controllen
;
1565 abi_ulong target_cmsg_addr
;
1566 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1567 socklen_t space
= 0;
1569 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1570 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1572 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1573 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1574 target_cmsg_start
= target_cmsg
;
1576 return -TARGET_EFAULT
;
1578 while (cmsg
&& target_cmsg
) {
1579 void *data
= CMSG_DATA(cmsg
);
1580 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1582 int len
= tswapal(target_cmsg
->cmsg_len
)
1583 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1585 space
+= CMSG_SPACE(len
);
1586 if (space
> msgh
->msg_controllen
) {
1587 space
-= CMSG_SPACE(len
);
1588 /* This is a QEMU bug, since we allocated the payload
1589 * area ourselves (unlike overflow in host-to-target
1590 * conversion, which is just the guest giving us a buffer
1591 * that's too small). It can't happen for the payload types
1592 * we currently support; if it becomes an issue in future
1593 * we would need to improve our allocation strategy to
1594 * something more intelligent than "twice the size of the
1595 * target buffer we're reading from".
1597 gemu_log("Host cmsg overflow\n");
1601 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1602 cmsg
->cmsg_level
= SOL_SOCKET
;
1604 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1606 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1607 cmsg
->cmsg_len
= CMSG_LEN(len
);
1609 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1610 int *fd
= (int *)data
;
1611 int *target_fd
= (int *)target_data
;
1612 int i
, numfds
= len
/ sizeof(int);
1614 for (i
= 0; i
< numfds
; i
++) {
1615 __get_user(fd
[i
], target_fd
+ i
);
1617 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1618 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1619 struct ucred
*cred
= (struct ucred
*)data
;
1620 struct target_ucred
*target_cred
=
1621 (struct target_ucred
*)target_data
;
1623 __get_user(cred
->pid
, &target_cred
->pid
);
1624 __get_user(cred
->uid
, &target_cred
->uid
);
1625 __get_user(cred
->gid
, &target_cred
->gid
);
1627 gemu_log("Unsupported ancillary data: %d/%d\n",
1628 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1629 memcpy(data
, target_data
, len
);
1632 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1633 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1636 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1638 msgh
->msg_controllen
= space
;
1642 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1643 struct msghdr
*msgh
)
1645 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1646 abi_long msg_controllen
;
1647 abi_ulong target_cmsg_addr
;
1648 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1649 socklen_t space
= 0;
1651 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1652 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1654 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1655 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1656 target_cmsg_start
= target_cmsg
;
1658 return -TARGET_EFAULT
;
1660 while (cmsg
&& target_cmsg
) {
1661 void *data
= CMSG_DATA(cmsg
);
1662 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1664 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1665 int tgt_len
, tgt_space
;
1667 /* We never copy a half-header but may copy half-data;
1668 * this is Linux's behaviour in put_cmsg(). Note that
1669 * truncation here is a guest problem (which we report
1670 * to the guest via the CTRUNC bit), unlike truncation
1671 * in target_to_host_cmsg, which is a QEMU bug.
1673 if (msg_controllen
< sizeof(struct cmsghdr
)) {
1674 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1678 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1679 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1681 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1683 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1685 tgt_len
= TARGET_CMSG_LEN(len
);
1687 /* Payload types which need a different size of payload on
1688 * the target must adjust tgt_len here.
1690 switch (cmsg
->cmsg_level
) {
1692 switch (cmsg
->cmsg_type
) {
1694 tgt_len
= sizeof(struct target_timeval
);
1703 if (msg_controllen
< tgt_len
) {
1704 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1705 tgt_len
= msg_controllen
;
1708 /* We must now copy-and-convert len bytes of payload
1709 * into tgt_len bytes of destination space. Bear in mind
1710 * that in both source and destination we may be dealing
1711 * with a truncated value!
1713 switch (cmsg
->cmsg_level
) {
1715 switch (cmsg
->cmsg_type
) {
1718 int *fd
= (int *)data
;
1719 int *target_fd
= (int *)target_data
;
1720 int i
, numfds
= tgt_len
/ sizeof(int);
1722 for (i
= 0; i
< numfds
; i
++) {
1723 __put_user(fd
[i
], target_fd
+ i
);
1729 struct timeval
*tv
= (struct timeval
*)data
;
1730 struct target_timeval
*target_tv
=
1731 (struct target_timeval
*)target_data
;
1733 if (len
!= sizeof(struct timeval
) ||
1734 tgt_len
!= sizeof(struct target_timeval
)) {
1738 /* copy struct timeval to target */
1739 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1740 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1743 case SCM_CREDENTIALS
:
1745 struct ucred
*cred
= (struct ucred
*)data
;
1746 struct target_ucred
*target_cred
=
1747 (struct target_ucred
*)target_data
;
1749 __put_user(cred
->pid
, &target_cred
->pid
);
1750 __put_user(cred
->uid
, &target_cred
->uid
);
1751 __put_user(cred
->gid
, &target_cred
->gid
);
1761 gemu_log("Unsupported ancillary data: %d/%d\n",
1762 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1763 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1764 if (tgt_len
> len
) {
1765 memset(target_data
+ len
, 0, tgt_len
- len
);
1769 target_cmsg
->cmsg_len
= tswapal(tgt_len
);
1770 tgt_space
= TARGET_CMSG_SPACE(len
);
1771 if (msg_controllen
< tgt_space
) {
1772 tgt_space
= msg_controllen
;
1774 msg_controllen
-= tgt_space
;
1776 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1777 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1780 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1782 target_msgh
->msg_controllen
= tswapal(space
);
1786 static void tswap_nlmsghdr(struct nlmsghdr
*nlh
)
1788 nlh
->nlmsg_len
= tswap32(nlh
->nlmsg_len
);
1789 nlh
->nlmsg_type
= tswap16(nlh
->nlmsg_type
);
1790 nlh
->nlmsg_flags
= tswap16(nlh
->nlmsg_flags
);
1791 nlh
->nlmsg_seq
= tswap32(nlh
->nlmsg_seq
);
1792 nlh
->nlmsg_pid
= tswap32(nlh
->nlmsg_pid
);
1795 static abi_long
host_to_target_for_each_nlmsg(struct nlmsghdr
*nlh
,
1797 abi_long (*host_to_target_nlmsg
)
1798 (struct nlmsghdr
*))
1803 while (len
> sizeof(struct nlmsghdr
)) {
1805 nlmsg_len
= nlh
->nlmsg_len
;
1806 if (nlmsg_len
< sizeof(struct nlmsghdr
) ||
1811 switch (nlh
->nlmsg_type
) {
1813 tswap_nlmsghdr(nlh
);
1819 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
1820 e
->error
= tswap32(e
->error
);
1821 tswap_nlmsghdr(&e
->msg
);
1822 tswap_nlmsghdr(nlh
);
1826 ret
= host_to_target_nlmsg(nlh
);
1828 tswap_nlmsghdr(nlh
);
1833 tswap_nlmsghdr(nlh
);
1834 len
-= NLMSG_ALIGN(nlmsg_len
);
1835 nlh
= (struct nlmsghdr
*)(((char*)nlh
) + NLMSG_ALIGN(nlmsg_len
));
1840 static abi_long
target_to_host_for_each_nlmsg(struct nlmsghdr
*nlh
,
1842 abi_long (*target_to_host_nlmsg
)
1843 (struct nlmsghdr
*))
1847 while (len
> sizeof(struct nlmsghdr
)) {
1848 if (tswap32(nlh
->nlmsg_len
) < sizeof(struct nlmsghdr
) ||
1849 tswap32(nlh
->nlmsg_len
) > len
) {
1852 tswap_nlmsghdr(nlh
);
1853 switch (nlh
->nlmsg_type
) {
1860 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
1861 e
->error
= tswap32(e
->error
);
1862 tswap_nlmsghdr(&e
->msg
);
1866 ret
= target_to_host_nlmsg(nlh
);
1871 len
-= NLMSG_ALIGN(nlh
->nlmsg_len
);
1872 nlh
= (struct nlmsghdr
*)(((char *)nlh
) + NLMSG_ALIGN(nlh
->nlmsg_len
));
1877 #ifdef CONFIG_RTNETLINK
1878 static abi_long
host_to_target_for_each_nlattr(struct nlattr
*nlattr
,
1879 size_t len
, void *context
,
1880 abi_long (*host_to_target_nlattr
)
1884 unsigned short nla_len
;
1887 while (len
> sizeof(struct nlattr
)) {
1888 nla_len
= nlattr
->nla_len
;
1889 if (nla_len
< sizeof(struct nlattr
) ||
1893 ret
= host_to_target_nlattr(nlattr
, context
);
1894 nlattr
->nla_len
= tswap16(nlattr
->nla_len
);
1895 nlattr
->nla_type
= tswap16(nlattr
->nla_type
);
1899 len
-= NLA_ALIGN(nla_len
);
1900 nlattr
= (struct nlattr
*)(((char *)nlattr
) + NLA_ALIGN(nla_len
));
1905 static abi_long
host_to_target_for_each_rtattr(struct rtattr
*rtattr
,
1907 abi_long (*host_to_target_rtattr
)
1910 unsigned short rta_len
;
1913 while (len
> sizeof(struct rtattr
)) {
1914 rta_len
= rtattr
->rta_len
;
1915 if (rta_len
< sizeof(struct rtattr
) ||
1919 ret
= host_to_target_rtattr(rtattr
);
1920 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
1921 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
1925 len
-= RTA_ALIGN(rta_len
);
1926 rtattr
= (struct rtattr
*)(((char *)rtattr
) + RTA_ALIGN(rta_len
));
1931 #define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
1933 static abi_long
host_to_target_data_bridge_nlattr(struct nlattr
*nlattr
,
1940 switch (nlattr
->nla_type
) {
1942 case QEMU_IFLA_BR_FDB_FLUSH
:
1945 case QEMU_IFLA_BR_GROUP_ADDR
:
1948 case QEMU_IFLA_BR_VLAN_FILTERING
:
1949 case QEMU_IFLA_BR_TOPOLOGY_CHANGE
:
1950 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED
:
1951 case QEMU_IFLA_BR_MCAST_ROUTER
:
1952 case QEMU_IFLA_BR_MCAST_SNOOPING
:
1953 case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR
:
1954 case QEMU_IFLA_BR_MCAST_QUERIER
:
1955 case QEMU_IFLA_BR_NF_CALL_IPTABLES
:
1956 case QEMU_IFLA_BR_NF_CALL_IP6TABLES
:
1957 case QEMU_IFLA_BR_NF_CALL_ARPTABLES
:
1960 case QEMU_IFLA_BR_PRIORITY
:
1961 case QEMU_IFLA_BR_VLAN_PROTOCOL
:
1962 case QEMU_IFLA_BR_GROUP_FWD_MASK
:
1963 case QEMU_IFLA_BR_ROOT_PORT
:
1964 case QEMU_IFLA_BR_VLAN_DEFAULT_PVID
:
1965 u16
= NLA_DATA(nlattr
);
1966 *u16
= tswap16(*u16
);
1969 case QEMU_IFLA_BR_FORWARD_DELAY
:
1970 case QEMU_IFLA_BR_HELLO_TIME
:
1971 case QEMU_IFLA_BR_MAX_AGE
:
1972 case QEMU_IFLA_BR_AGEING_TIME
:
1973 case QEMU_IFLA_BR_STP_STATE
:
1974 case QEMU_IFLA_BR_ROOT_PATH_COST
:
1975 case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY
:
1976 case QEMU_IFLA_BR_MCAST_HASH_MAX
:
1977 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT
:
1978 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT
:
1979 u32
= NLA_DATA(nlattr
);
1980 *u32
= tswap32(*u32
);
1983 case QEMU_IFLA_BR_HELLO_TIMER
:
1984 case QEMU_IFLA_BR_TCN_TIMER
:
1985 case QEMU_IFLA_BR_GC_TIMER
:
1986 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER
:
1987 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL
:
1988 case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL
:
1989 case QEMU_IFLA_BR_MCAST_QUERIER_INTVL
:
1990 case QEMU_IFLA_BR_MCAST_QUERY_INTVL
:
1991 case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
:
1992 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL
:
1993 u64
= NLA_DATA(nlattr
);
1994 *u64
= tswap64(*u64
);
1996 /* ifla_bridge_id: uin8_t[] */
1997 case QEMU_IFLA_BR_ROOT_ID
:
1998 case QEMU_IFLA_BR_BRIDGE_ID
:
2001 gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr
->nla_type
);
2007 static abi_long
host_to_target_slave_data_bridge_nlattr(struct nlattr
*nlattr
,
2014 switch (nlattr
->nla_type
) {
2016 case QEMU_IFLA_BRPORT_STATE
:
2017 case QEMU_IFLA_BRPORT_MODE
:
2018 case QEMU_IFLA_BRPORT_GUARD
:
2019 case QEMU_IFLA_BRPORT_PROTECT
:
2020 case QEMU_IFLA_BRPORT_FAST_LEAVE
:
2021 case QEMU_IFLA_BRPORT_LEARNING
:
2022 case QEMU_IFLA_BRPORT_UNICAST_FLOOD
:
2023 case QEMU_IFLA_BRPORT_PROXYARP
:
2024 case QEMU_IFLA_BRPORT_LEARNING_SYNC
:
2025 case QEMU_IFLA_BRPORT_PROXYARP_WIFI
:
2026 case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
:
2027 case QEMU_IFLA_BRPORT_CONFIG_PENDING
:
2028 case QEMU_IFLA_BRPORT_MULTICAST_ROUTER
:
2031 case QEMU_IFLA_BRPORT_PRIORITY
:
2032 case QEMU_IFLA_BRPORT_DESIGNATED_PORT
:
2033 case QEMU_IFLA_BRPORT_DESIGNATED_COST
:
2034 case QEMU_IFLA_BRPORT_ID
:
2035 case QEMU_IFLA_BRPORT_NO
:
2036 u16
= NLA_DATA(nlattr
);
2037 *u16
= tswap16(*u16
);
2040 case QEMU_IFLA_BRPORT_COST
:
2041 u32
= NLA_DATA(nlattr
);
2042 *u32
= tswap32(*u32
);
2045 case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER
:
2046 case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER
:
2047 case QEMU_IFLA_BRPORT_HOLD_TIMER
:
2048 u64
= NLA_DATA(nlattr
);
2049 *u64
= tswap64(*u64
);
2051 /* ifla_bridge_id: uint8_t[] */
2052 case QEMU_IFLA_BRPORT_ROOT_ID
:
2053 case QEMU_IFLA_BRPORT_BRIDGE_ID
:
2056 gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr
->nla_type
);
/* Context carried between IFLA_LINKINFO sub-attributes: the interface
 * kind strings (not NUL-terminated, hence explicit lengths) recorded by
 * QEMU_IFLA_INFO_KIND / QEMU_IFLA_INFO_SLAVE_KIND and consumed by the
 * INFO_DATA / INFO_SLAVE_DATA cases.
 */
struct linkinfo_context {
    int len;
    char *name;
    int slave_len;
    char *slave_name;
};
2069 static abi_long
host_to_target_data_linkinfo_nlattr(struct nlattr
*nlattr
,
2072 struct linkinfo_context
*li_context
= context
;
2074 switch (nlattr
->nla_type
) {
2076 case QEMU_IFLA_INFO_KIND
:
2077 li_context
->name
= NLA_DATA(nlattr
);
2078 li_context
->len
= nlattr
->nla_len
- NLA_HDRLEN
;
2080 case QEMU_IFLA_INFO_SLAVE_KIND
:
2081 li_context
->slave_name
= NLA_DATA(nlattr
);
2082 li_context
->slave_len
= nlattr
->nla_len
- NLA_HDRLEN
;
2085 case QEMU_IFLA_INFO_XSTATS
:
2086 /* FIXME: only used by CAN */
2089 case QEMU_IFLA_INFO_DATA
:
2090 if (strncmp(li_context
->name
, "bridge",
2091 li_context
->len
) == 0) {
2092 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
2095 host_to_target_data_bridge_nlattr
);
2097 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context
->name
);
2100 case QEMU_IFLA_INFO_SLAVE_DATA
:
2101 if (strncmp(li_context
->slave_name
, "bridge",
2102 li_context
->slave_len
) == 0) {
2103 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
2106 host_to_target_slave_data_bridge_nlattr
);
2108 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2109 li_context
->slave_name
);
2113 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr
->nla_type
);
2120 static abi_long
host_to_target_data_inet_nlattr(struct nlattr
*nlattr
,
2126 switch (nlattr
->nla_type
) {
2127 case QEMU_IFLA_INET_CONF
:
2128 u32
= NLA_DATA(nlattr
);
2129 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u32
);
2131 u32
[i
] = tswap32(u32
[i
]);
2135 gemu_log("Unknown host AF_INET type: %d\n", nlattr
->nla_type
);
2140 static abi_long
host_to_target_data_inet6_nlattr(struct nlattr
*nlattr
,
2145 struct ifla_cacheinfo
*ci
;
2148 switch (nlattr
->nla_type
) {
2150 case QEMU_IFLA_INET6_TOKEN
:
2153 case QEMU_IFLA_INET6_ADDR_GEN_MODE
:
2156 case QEMU_IFLA_INET6_FLAGS
:
2157 u32
= NLA_DATA(nlattr
);
2158 *u32
= tswap32(*u32
);
2161 case QEMU_IFLA_INET6_CONF
:
2162 u32
= NLA_DATA(nlattr
);
2163 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u32
);
2165 u32
[i
] = tswap32(u32
[i
]);
2168 /* ifla_cacheinfo */
2169 case QEMU_IFLA_INET6_CACHEINFO
:
2170 ci
= NLA_DATA(nlattr
);
2171 ci
->max_reasm_len
= tswap32(ci
->max_reasm_len
);
2172 ci
->tstamp
= tswap32(ci
->tstamp
);
2173 ci
->reachable_time
= tswap32(ci
->reachable_time
);
2174 ci
->retrans_time
= tswap32(ci
->retrans_time
);
2177 case QEMU_IFLA_INET6_STATS
:
2178 case QEMU_IFLA_INET6_ICMP6STATS
:
2179 u64
= NLA_DATA(nlattr
);
2180 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u64
);
2182 u64
[i
] = tswap64(u64
[i
]);
2186 gemu_log("Unknown host AF_INET6 type: %d\n", nlattr
->nla_type
);
2191 static abi_long
host_to_target_data_spec_nlattr(struct nlattr
*nlattr
,
2194 switch (nlattr
->nla_type
) {
2196 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2198 host_to_target_data_inet_nlattr
);
2200 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2202 host_to_target_data_inet6_nlattr
);
2204 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr
->nla_type
);
2210 static abi_long
host_to_target_data_link_rtattr(struct rtattr
*rtattr
)
2213 struct rtnl_link_stats
*st
;
2214 struct rtnl_link_stats64
*st64
;
2215 struct rtnl_link_ifmap
*map
;
2216 struct linkinfo_context li_context
;
2218 switch (rtattr
->rta_type
) {
2220 case QEMU_IFLA_ADDRESS
:
2221 case QEMU_IFLA_BROADCAST
:
2223 case QEMU_IFLA_IFNAME
:
2224 case QEMU_IFLA_QDISC
:
2227 case QEMU_IFLA_OPERSTATE
:
2228 case QEMU_IFLA_LINKMODE
:
2229 case QEMU_IFLA_CARRIER
:
2230 case QEMU_IFLA_PROTO_DOWN
:
2234 case QEMU_IFLA_LINK
:
2235 case QEMU_IFLA_WEIGHT
:
2236 case QEMU_IFLA_TXQLEN
:
2237 case QEMU_IFLA_CARRIER_CHANGES
:
2238 case QEMU_IFLA_NUM_RX_QUEUES
:
2239 case QEMU_IFLA_NUM_TX_QUEUES
:
2240 case QEMU_IFLA_PROMISCUITY
:
2241 case QEMU_IFLA_EXT_MASK
:
2242 case QEMU_IFLA_LINK_NETNSID
:
2243 case QEMU_IFLA_GROUP
:
2244 case QEMU_IFLA_MASTER
:
2245 case QEMU_IFLA_NUM_VF
:
2246 u32
= RTA_DATA(rtattr
);
2247 *u32
= tswap32(*u32
);
2249 /* struct rtnl_link_stats */
2250 case QEMU_IFLA_STATS
:
2251 st
= RTA_DATA(rtattr
);
2252 st
->rx_packets
= tswap32(st
->rx_packets
);
2253 st
->tx_packets
= tswap32(st
->tx_packets
);
2254 st
->rx_bytes
= tswap32(st
->rx_bytes
);
2255 st
->tx_bytes
= tswap32(st
->tx_bytes
);
2256 st
->rx_errors
= tswap32(st
->rx_errors
);
2257 st
->tx_errors
= tswap32(st
->tx_errors
);
2258 st
->rx_dropped
= tswap32(st
->rx_dropped
);
2259 st
->tx_dropped
= tswap32(st
->tx_dropped
);
2260 st
->multicast
= tswap32(st
->multicast
);
2261 st
->collisions
= tswap32(st
->collisions
);
2263 /* detailed rx_errors: */
2264 st
->rx_length_errors
= tswap32(st
->rx_length_errors
);
2265 st
->rx_over_errors
= tswap32(st
->rx_over_errors
);
2266 st
->rx_crc_errors
= tswap32(st
->rx_crc_errors
);
2267 st
->rx_frame_errors
= tswap32(st
->rx_frame_errors
);
2268 st
->rx_fifo_errors
= tswap32(st
->rx_fifo_errors
);
2269 st
->rx_missed_errors
= tswap32(st
->rx_missed_errors
);
2271 /* detailed tx_errors */
2272 st
->tx_aborted_errors
= tswap32(st
->tx_aborted_errors
);
2273 st
->tx_carrier_errors
= tswap32(st
->tx_carrier_errors
);
2274 st
->tx_fifo_errors
= tswap32(st
->tx_fifo_errors
);
2275 st
->tx_heartbeat_errors
= tswap32(st
->tx_heartbeat_errors
);
2276 st
->tx_window_errors
= tswap32(st
->tx_window_errors
);
2279 st
->rx_compressed
= tswap32(st
->rx_compressed
);
2280 st
->tx_compressed
= tswap32(st
->tx_compressed
);
2282 /* struct rtnl_link_stats64 */
2283 case QEMU_IFLA_STATS64
:
2284 st64
= RTA_DATA(rtattr
);
2285 st64
->rx_packets
= tswap64(st64
->rx_packets
);
2286 st64
->tx_packets
= tswap64(st64
->tx_packets
);
2287 st64
->rx_bytes
= tswap64(st64
->rx_bytes
);
2288 st64
->tx_bytes
= tswap64(st64
->tx_bytes
);
2289 st64
->rx_errors
= tswap64(st64
->rx_errors
);
2290 st64
->tx_errors
= tswap64(st64
->tx_errors
);
2291 st64
->rx_dropped
= tswap64(st64
->rx_dropped
);
2292 st64
->tx_dropped
= tswap64(st64
->tx_dropped
);
2293 st64
->multicast
= tswap64(st64
->multicast
);
2294 st64
->collisions
= tswap64(st64
->collisions
);
2296 /* detailed rx_errors: */
2297 st64
->rx_length_errors
= tswap64(st64
->rx_length_errors
);
2298 st64
->rx_over_errors
= tswap64(st64
->rx_over_errors
);
2299 st64
->rx_crc_errors
= tswap64(st64
->rx_crc_errors
);
2300 st64
->rx_frame_errors
= tswap64(st64
->rx_frame_errors
);
2301 st64
->rx_fifo_errors
= tswap64(st64
->rx_fifo_errors
);
2302 st64
->rx_missed_errors
= tswap64(st64
->rx_missed_errors
);
2304 /* detailed tx_errors */
2305 st64
->tx_aborted_errors
= tswap64(st64
->tx_aborted_errors
);
2306 st64
->tx_carrier_errors
= tswap64(st64
->tx_carrier_errors
);
2307 st64
->tx_fifo_errors
= tswap64(st64
->tx_fifo_errors
);
2308 st64
->tx_heartbeat_errors
= tswap64(st64
->tx_heartbeat_errors
);
2309 st64
->tx_window_errors
= tswap64(st64
->tx_window_errors
);
2312 st64
->rx_compressed
= tswap64(st64
->rx_compressed
);
2313 st64
->tx_compressed
= tswap64(st64
->tx_compressed
);
2315 /* struct rtnl_link_ifmap */
2317 map
= RTA_DATA(rtattr
);
2318 map
->mem_start
= tswap64(map
->mem_start
);
2319 map
->mem_end
= tswap64(map
->mem_end
);
2320 map
->base_addr
= tswap64(map
->base_addr
);
2321 map
->irq
= tswap16(map
->irq
);
2324 case QEMU_IFLA_LINKINFO
:
2325 memset(&li_context
, 0, sizeof(li_context
));
2326 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2328 host_to_target_data_linkinfo_nlattr
);
2329 case QEMU_IFLA_AF_SPEC
:
2330 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2332 host_to_target_data_spec_nlattr
);
2334 gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr
->rta_type
);
2340 static abi_long
host_to_target_data_addr_rtattr(struct rtattr
*rtattr
)
2343 struct ifa_cacheinfo
*ci
;
2345 switch (rtattr
->rta_type
) {
2346 /* binary: depends on family type */
2356 u32
= RTA_DATA(rtattr
);
2357 *u32
= tswap32(*u32
);
2359 /* struct ifa_cacheinfo */
2361 ci
= RTA_DATA(rtattr
);
2362 ci
->ifa_prefered
= tswap32(ci
->ifa_prefered
);
2363 ci
->ifa_valid
= tswap32(ci
->ifa_valid
);
2364 ci
->cstamp
= tswap32(ci
->cstamp
);
2365 ci
->tstamp
= tswap32(ci
->tstamp
);
2368 gemu_log("Unknown host IFA type: %d\n", rtattr
->rta_type
);
2374 static abi_long
host_to_target_data_route_rtattr(struct rtattr
*rtattr
)
2377 switch (rtattr
->rta_type
) {
2378 /* binary: depends on family type */
2387 u32
= RTA_DATA(rtattr
);
2388 *u32
= tswap32(*u32
);
2391 gemu_log("Unknown host RTA type: %d\n", rtattr
->rta_type
);
2397 static abi_long
host_to_target_link_rtattr(struct rtattr
*rtattr
,
2398 uint32_t rtattr_len
)
2400 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2401 host_to_target_data_link_rtattr
);
2404 static abi_long
host_to_target_addr_rtattr(struct rtattr
*rtattr
,
2405 uint32_t rtattr_len
)
2407 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2408 host_to_target_data_addr_rtattr
);
2411 static abi_long
host_to_target_route_rtattr(struct rtattr
*rtattr
,
2412 uint32_t rtattr_len
)
2414 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2415 host_to_target_data_route_rtattr
);
2418 static abi_long
host_to_target_data_route(struct nlmsghdr
*nlh
)
2421 struct ifinfomsg
*ifi
;
2422 struct ifaddrmsg
*ifa
;
2425 nlmsg_len
= nlh
->nlmsg_len
;
2426 switch (nlh
->nlmsg_type
) {
2430 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2431 ifi
= NLMSG_DATA(nlh
);
2432 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2433 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2434 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2435 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2436 host_to_target_link_rtattr(IFLA_RTA(ifi
),
2437 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifi
)));
2443 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2444 ifa
= NLMSG_DATA(nlh
);
2445 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2446 host_to_target_addr_rtattr(IFA_RTA(ifa
),
2447 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifa
)));
2453 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2454 rtm
= NLMSG_DATA(nlh
);
2455 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2456 host_to_target_route_rtattr(RTM_RTA(rtm
),
2457 nlmsg_len
- NLMSG_LENGTH(sizeof(*rtm
)));
2461 return -TARGET_EINVAL
;
2466 static inline abi_long
host_to_target_nlmsg_route(struct nlmsghdr
*nlh
,
2469 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_route
);
2472 static abi_long
target_to_host_for_each_rtattr(struct rtattr
*rtattr
,
2474 abi_long (*target_to_host_rtattr
)
2479 while (len
>= sizeof(struct rtattr
)) {
2480 if (tswap16(rtattr
->rta_len
) < sizeof(struct rtattr
) ||
2481 tswap16(rtattr
->rta_len
) > len
) {
2484 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
2485 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
2486 ret
= target_to_host_rtattr(rtattr
);
2490 len
-= RTA_ALIGN(rtattr
->rta_len
);
2491 rtattr
= (struct rtattr
*)(((char *)rtattr
) +
2492 RTA_ALIGN(rtattr
->rta_len
));
2497 static abi_long
target_to_host_data_link_rtattr(struct rtattr
*rtattr
)
2499 switch (rtattr
->rta_type
) {
2501 gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr
->rta_type
);
2507 static abi_long
target_to_host_data_addr_rtattr(struct rtattr
*rtattr
)
2509 switch (rtattr
->rta_type
) {
2510 /* binary: depends on family type */
2515 gemu_log("Unknown target IFA type: %d\n", rtattr
->rta_type
);
2521 static abi_long
target_to_host_data_route_rtattr(struct rtattr
*rtattr
)
2524 switch (rtattr
->rta_type
) {
2525 /* binary: depends on family type */
2532 u32
= RTA_DATA(rtattr
);
2533 *u32
= tswap32(*u32
);
2536 gemu_log("Unknown target RTA type: %d\n", rtattr
->rta_type
);
2542 static void target_to_host_link_rtattr(struct rtattr
*rtattr
,
2543 uint32_t rtattr_len
)
2545 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2546 target_to_host_data_link_rtattr
);
2549 static void target_to_host_addr_rtattr(struct rtattr
*rtattr
,
2550 uint32_t rtattr_len
)
2552 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2553 target_to_host_data_addr_rtattr
);
2556 static void target_to_host_route_rtattr(struct rtattr
*rtattr
,
2557 uint32_t rtattr_len
)
2559 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2560 target_to_host_data_route_rtattr
);
2563 static abi_long
target_to_host_data_route(struct nlmsghdr
*nlh
)
2565 struct ifinfomsg
*ifi
;
2566 struct ifaddrmsg
*ifa
;
2569 switch (nlh
->nlmsg_type
) {
2574 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2575 ifi
= NLMSG_DATA(nlh
);
2576 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2577 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2578 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2579 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2580 target_to_host_link_rtattr(IFLA_RTA(ifi
), nlh
->nlmsg_len
-
2581 NLMSG_LENGTH(sizeof(*ifi
)));
2587 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2588 ifa
= NLMSG_DATA(nlh
);
2589 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2590 target_to_host_addr_rtattr(IFA_RTA(ifa
), nlh
->nlmsg_len
-
2591 NLMSG_LENGTH(sizeof(*ifa
)));
2598 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2599 rtm
= NLMSG_DATA(nlh
);
2600 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2601 target_to_host_route_rtattr(RTM_RTA(rtm
), nlh
->nlmsg_len
-
2602 NLMSG_LENGTH(sizeof(*rtm
)));
2606 return -TARGET_EOPNOTSUPP
;
2611 static abi_long
target_to_host_nlmsg_route(struct nlmsghdr
*nlh
, size_t len
)
2613 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_route
);
2615 #endif /* CONFIG_RTNETLINK */
2617 static abi_long
host_to_target_data_audit(struct nlmsghdr
*nlh
)
2619 switch (nlh
->nlmsg_type
) {
2621 gemu_log("Unknown host audit message type %d\n",
2623 return -TARGET_EINVAL
;
2628 static inline abi_long
host_to_target_nlmsg_audit(struct nlmsghdr
*nlh
,
2631 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_audit
);
2634 static abi_long
target_to_host_data_audit(struct nlmsghdr
*nlh
)
2636 switch (nlh
->nlmsg_type
) {
2638 case AUDIT_FIRST_USER_MSG
... AUDIT_LAST_USER_MSG
:
2639 case AUDIT_FIRST_USER_MSG2
... AUDIT_LAST_USER_MSG2
:
2642 gemu_log("Unknown target audit message type %d\n",
2644 return -TARGET_EINVAL
;
2650 static abi_long
target_to_host_nlmsg_audit(struct nlmsghdr
*nlh
, size_t len
)
2652 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_audit
);
2655 /* do_setsockopt() Must return target values and target errnos. */
2656 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
2657 abi_ulong optval_addr
, socklen_t optlen
)
2661 struct ip_mreqn
*ip_mreq
;
2662 struct ip_mreq_source
*ip_mreq_source
;
2666 /* TCP options all take an 'int' value. */
2667 if (optlen
< sizeof(uint32_t))
2668 return -TARGET_EINVAL
;
2670 if (get_user_u32(val
, optval_addr
))
2671 return -TARGET_EFAULT
;
2672 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2679 case IP_ROUTER_ALERT
:
2683 case IP_MTU_DISCOVER
:
2689 case IP_MULTICAST_TTL
:
2690 case IP_MULTICAST_LOOP
:
2692 if (optlen
>= sizeof(uint32_t)) {
2693 if (get_user_u32(val
, optval_addr
))
2694 return -TARGET_EFAULT
;
2695 } else if (optlen
>= 1) {
2696 if (get_user_u8(val
, optval_addr
))
2697 return -TARGET_EFAULT
;
2699 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2701 case IP_ADD_MEMBERSHIP
:
2702 case IP_DROP_MEMBERSHIP
:
2703 if (optlen
< sizeof (struct target_ip_mreq
) ||
2704 optlen
> sizeof (struct target_ip_mreqn
))
2705 return -TARGET_EINVAL
;
2707 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
2708 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
2709 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
2712 case IP_BLOCK_SOURCE
:
2713 case IP_UNBLOCK_SOURCE
:
2714 case IP_ADD_SOURCE_MEMBERSHIP
:
2715 case IP_DROP_SOURCE_MEMBERSHIP
:
2716 if (optlen
!= sizeof (struct target_ip_mreq_source
))
2717 return -TARGET_EINVAL
;
2719 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2720 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2721 unlock_user (ip_mreq_source
, optval_addr
, 0);
2730 case IPV6_MTU_DISCOVER
:
2733 case IPV6_RECVPKTINFO
:
2735 if (optlen
< sizeof(uint32_t)) {
2736 return -TARGET_EINVAL
;
2738 if (get_user_u32(val
, optval_addr
)) {
2739 return -TARGET_EFAULT
;
2741 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2742 &val
, sizeof(val
)));
2751 /* struct icmp_filter takes an u32 value */
2752 if (optlen
< sizeof(uint32_t)) {
2753 return -TARGET_EINVAL
;
2756 if (get_user_u32(val
, optval_addr
)) {
2757 return -TARGET_EFAULT
;
2759 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2760 &val
, sizeof(val
)));
2767 case TARGET_SOL_SOCKET
:
2769 case TARGET_SO_RCVTIMEO
:
2773 optname
= SO_RCVTIMEO
;
2776 if (optlen
!= sizeof(struct target_timeval
)) {
2777 return -TARGET_EINVAL
;
2780 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2781 return -TARGET_EFAULT
;
2784 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2788 case TARGET_SO_SNDTIMEO
:
2789 optname
= SO_SNDTIMEO
;
2791 case TARGET_SO_ATTACH_FILTER
:
2793 struct target_sock_fprog
*tfprog
;
2794 struct target_sock_filter
*tfilter
;
2795 struct sock_fprog fprog
;
2796 struct sock_filter
*filter
;
2799 if (optlen
!= sizeof(*tfprog
)) {
2800 return -TARGET_EINVAL
;
2802 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2803 return -TARGET_EFAULT
;
2805 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2806 tswapal(tfprog
->filter
), 0)) {
2807 unlock_user_struct(tfprog
, optval_addr
, 1);
2808 return -TARGET_EFAULT
;
2811 fprog
.len
= tswap16(tfprog
->len
);
2812 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2813 if (filter
== NULL
) {
2814 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2815 unlock_user_struct(tfprog
, optval_addr
, 1);
2816 return -TARGET_ENOMEM
;
2818 for (i
= 0; i
< fprog
.len
; i
++) {
2819 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2820 filter
[i
].jt
= tfilter
[i
].jt
;
2821 filter
[i
].jf
= tfilter
[i
].jf
;
2822 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2824 fprog
.filter
= filter
;
2826 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2827 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2830 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2831 unlock_user_struct(tfprog
, optval_addr
, 1);
2834 case TARGET_SO_BINDTODEVICE
:
2836 char *dev_ifname
, *addr_ifname
;
2838 if (optlen
> IFNAMSIZ
- 1) {
2839 optlen
= IFNAMSIZ
- 1;
2841 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2843 return -TARGET_EFAULT
;
2845 optname
= SO_BINDTODEVICE
;
2846 addr_ifname
= alloca(IFNAMSIZ
);
2847 memcpy(addr_ifname
, dev_ifname
, optlen
);
2848 addr_ifname
[optlen
] = 0;
2849 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2850 addr_ifname
, optlen
));
2851 unlock_user (dev_ifname
, optval_addr
, 0);
2854 /* Options with 'int' argument. */
2855 case TARGET_SO_DEBUG
:
2858 case TARGET_SO_REUSEADDR
:
2859 optname
= SO_REUSEADDR
;
2861 case TARGET_SO_TYPE
:
2864 case TARGET_SO_ERROR
:
2867 case TARGET_SO_DONTROUTE
:
2868 optname
= SO_DONTROUTE
;
2870 case TARGET_SO_BROADCAST
:
2871 optname
= SO_BROADCAST
;
2873 case TARGET_SO_SNDBUF
:
2874 optname
= SO_SNDBUF
;
2876 case TARGET_SO_SNDBUFFORCE
:
2877 optname
= SO_SNDBUFFORCE
;
2879 case TARGET_SO_RCVBUF
:
2880 optname
= SO_RCVBUF
;
2882 case TARGET_SO_RCVBUFFORCE
:
2883 optname
= SO_RCVBUFFORCE
;
2885 case TARGET_SO_KEEPALIVE
:
2886 optname
= SO_KEEPALIVE
;
2888 case TARGET_SO_OOBINLINE
:
2889 optname
= SO_OOBINLINE
;
2891 case TARGET_SO_NO_CHECK
:
2892 optname
= SO_NO_CHECK
;
2894 case TARGET_SO_PRIORITY
:
2895 optname
= SO_PRIORITY
;
2898 case TARGET_SO_BSDCOMPAT
:
2899 optname
= SO_BSDCOMPAT
;
2902 case TARGET_SO_PASSCRED
:
2903 optname
= SO_PASSCRED
;
2905 case TARGET_SO_PASSSEC
:
2906 optname
= SO_PASSSEC
;
2908 case TARGET_SO_TIMESTAMP
:
2909 optname
= SO_TIMESTAMP
;
2911 case TARGET_SO_RCVLOWAT
:
2912 optname
= SO_RCVLOWAT
;
2918 if (optlen
< sizeof(uint32_t))
2919 return -TARGET_EINVAL
;
2921 if (get_user_u32(val
, optval_addr
))
2922 return -TARGET_EFAULT
;
2923 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2927 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
2928 ret
= -TARGET_ENOPROTOOPT
;
2933 /* do_getsockopt() Must return target values and target errnos. */
2934 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2935 abi_ulong optval_addr
, abi_ulong optlen
)
2942 case TARGET_SOL_SOCKET
:
2945 /* These don't just return a single integer */
2946 case TARGET_SO_LINGER
:
2947 case TARGET_SO_RCVTIMEO
:
2948 case TARGET_SO_SNDTIMEO
:
2949 case TARGET_SO_PEERNAME
:
2951 case TARGET_SO_PEERCRED
: {
2954 struct target_ucred
*tcr
;
2956 if (get_user_u32(len
, optlen
)) {
2957 return -TARGET_EFAULT
;
2960 return -TARGET_EINVAL
;
2964 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
2972 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
2973 return -TARGET_EFAULT
;
2975 __put_user(cr
.pid
, &tcr
->pid
);
2976 __put_user(cr
.uid
, &tcr
->uid
);
2977 __put_user(cr
.gid
, &tcr
->gid
);
2978 unlock_user_struct(tcr
, optval_addr
, 1);
2979 if (put_user_u32(len
, optlen
)) {
2980 return -TARGET_EFAULT
;
2984 /* Options with 'int' argument. */
2985 case TARGET_SO_DEBUG
:
2988 case TARGET_SO_REUSEADDR
:
2989 optname
= SO_REUSEADDR
;
2991 case TARGET_SO_TYPE
:
2994 case TARGET_SO_ERROR
:
2997 case TARGET_SO_DONTROUTE
:
2998 optname
= SO_DONTROUTE
;
3000 case TARGET_SO_BROADCAST
:
3001 optname
= SO_BROADCAST
;
3003 case TARGET_SO_SNDBUF
:
3004 optname
= SO_SNDBUF
;
3006 case TARGET_SO_RCVBUF
:
3007 optname
= SO_RCVBUF
;
3009 case TARGET_SO_KEEPALIVE
:
3010 optname
= SO_KEEPALIVE
;
3012 case TARGET_SO_OOBINLINE
:
3013 optname
= SO_OOBINLINE
;
3015 case TARGET_SO_NO_CHECK
:
3016 optname
= SO_NO_CHECK
;
3018 case TARGET_SO_PRIORITY
:
3019 optname
= SO_PRIORITY
;
3022 case TARGET_SO_BSDCOMPAT
:
3023 optname
= SO_BSDCOMPAT
;
3026 case TARGET_SO_PASSCRED
:
3027 optname
= SO_PASSCRED
;
3029 case TARGET_SO_TIMESTAMP
:
3030 optname
= SO_TIMESTAMP
;
3032 case TARGET_SO_RCVLOWAT
:
3033 optname
= SO_RCVLOWAT
;
3035 case TARGET_SO_ACCEPTCONN
:
3036 optname
= SO_ACCEPTCONN
;
3043 /* TCP options all take an 'int' value. */
3045 if (get_user_u32(len
, optlen
))
3046 return -TARGET_EFAULT
;
3048 return -TARGET_EINVAL
;
3050 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3053 if (optname
== SO_TYPE
) {
3054 val
= host_to_target_sock_type(val
);
3059 if (put_user_u32(val
, optval_addr
))
3060 return -TARGET_EFAULT
;
3062 if (put_user_u8(val
, optval_addr
))
3063 return -TARGET_EFAULT
;
3065 if (put_user_u32(len
, optlen
))
3066 return -TARGET_EFAULT
;
3073 case IP_ROUTER_ALERT
:
3077 case IP_MTU_DISCOVER
:
3083 case IP_MULTICAST_TTL
:
3084 case IP_MULTICAST_LOOP
:
3085 if (get_user_u32(len
, optlen
))
3086 return -TARGET_EFAULT
;
3088 return -TARGET_EINVAL
;
3090 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3093 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
3095 if (put_user_u32(len
, optlen
)
3096 || put_user_u8(val
, optval_addr
))
3097 return -TARGET_EFAULT
;
3099 if (len
> sizeof(int))
3101 if (put_user_u32(len
, optlen
)
3102 || put_user_u32(val
, optval_addr
))
3103 return -TARGET_EFAULT
;
3107 ret
= -TARGET_ENOPROTOOPT
;
3113 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
3115 ret
= -TARGET_EOPNOTSUPP
;
3121 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
3122 int count
, int copy
)
3124 struct target_iovec
*target_vec
;
3126 abi_ulong total_len
, max_len
;
3129 bool bad_address
= false;
3135 if (count
< 0 || count
> IOV_MAX
) {
3140 vec
= g_try_new0(struct iovec
, count
);
3146 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3147 count
* sizeof(struct target_iovec
), 1);
3148 if (target_vec
== NULL
) {
3153 /* ??? If host page size > target page size, this will result in a
3154 value larger than what we can actually support. */
3155 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
3158 for (i
= 0; i
< count
; i
++) {
3159 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3160 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3165 } else if (len
== 0) {
3166 /* Zero length pointer is ignored. */
3167 vec
[i
].iov_base
= 0;
3169 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
3170 /* If the first buffer pointer is bad, this is a fault. But
3171 * subsequent bad buffers will result in a partial write; this
3172 * is realized by filling the vector with null pointers and
3174 if (!vec
[i
].iov_base
) {
3185 if (len
> max_len
- total_len
) {
3186 len
= max_len
- total_len
;
3189 vec
[i
].iov_len
= len
;
3193 unlock_user(target_vec
, target_addr
, 0);
3198 if (tswapal(target_vec
[i
].iov_len
) > 0) {
3199 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
3202 unlock_user(target_vec
, target_addr
, 0);
3209 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
3210 int count
, int copy
)
3212 struct target_iovec
*target_vec
;
3215 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3216 count
* sizeof(struct target_iovec
), 1);
3218 for (i
= 0; i
< count
; i
++) {
3219 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3220 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3224 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
3226 unlock_user(target_vec
, target_addr
, 0);
3232 static inline int target_to_host_sock_type(int *type
)
3235 int target_type
= *type
;
3237 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
3238 case TARGET_SOCK_DGRAM
:
3239 host_type
= SOCK_DGRAM
;
3241 case TARGET_SOCK_STREAM
:
3242 host_type
= SOCK_STREAM
;
3245 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
3248 if (target_type
& TARGET_SOCK_CLOEXEC
) {
3249 #if defined(SOCK_CLOEXEC)
3250 host_type
|= SOCK_CLOEXEC
;
3252 return -TARGET_EINVAL
;
3255 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3256 #if defined(SOCK_NONBLOCK)
3257 host_type
|= SOCK_NONBLOCK
;
3258 #elif !defined(O_NONBLOCK)
3259 return -TARGET_EINVAL
;
3266 /* Try to emulate socket type flags after socket creation. */
3267 static int sock_flags_fixup(int fd
, int target_type
)
3269 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3270 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3271 int flags
= fcntl(fd
, F_GETFL
);
3272 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
3274 return -TARGET_EINVAL
;
3281 static abi_long
packet_target_to_host_sockaddr(void *host_addr
,
3282 abi_ulong target_addr
,
3285 struct sockaddr
*addr
= host_addr
;
3286 struct target_sockaddr
*target_saddr
;
3288 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
3289 if (!target_saddr
) {
3290 return -TARGET_EFAULT
;
3293 memcpy(addr
, target_saddr
, len
);
3294 addr
->sa_family
= tswap16(target_saddr
->sa_family
);
3295 /* spkt_protocol is big-endian */
3297 unlock_user(target_saddr
, target_addr
, 0);
3301 static TargetFdTrans target_packet_trans
= {
3302 .target_to_host_addr
= packet_target_to_host_sockaddr
,
3305 #ifdef CONFIG_RTNETLINK
3306 static abi_long
netlink_route_target_to_host(void *buf
, size_t len
)
3310 ret
= target_to_host_nlmsg_route(buf
, len
);
3318 static abi_long
netlink_route_host_to_target(void *buf
, size_t len
)
3322 ret
= host_to_target_nlmsg_route(buf
, len
);
3330 static TargetFdTrans target_netlink_route_trans
= {
3331 .target_to_host_data
= netlink_route_target_to_host
,
3332 .host_to_target_data
= netlink_route_host_to_target
,
3334 #endif /* CONFIG_RTNETLINK */
3336 static abi_long
netlink_audit_target_to_host(void *buf
, size_t len
)
3340 ret
= target_to_host_nlmsg_audit(buf
, len
);
3348 static abi_long
netlink_audit_host_to_target(void *buf
, size_t len
)
3352 ret
= host_to_target_nlmsg_audit(buf
, len
);
3360 static TargetFdTrans target_netlink_audit_trans
= {
3361 .target_to_host_data
= netlink_audit_target_to_host
,
3362 .host_to_target_data
= netlink_audit_host_to_target
,
3365 /* do_socket() Must return target values and target errnos. */
3366 static abi_long
do_socket(int domain
, int type
, int protocol
)
3368 int target_type
= type
;
3371 ret
= target_to_host_sock_type(&type
);
3376 if (domain
== PF_NETLINK
&& !(
3377 #ifdef CONFIG_RTNETLINK
3378 protocol
== NETLINK_ROUTE
||
3380 protocol
== NETLINK_KOBJECT_UEVENT
||
3381 protocol
== NETLINK_AUDIT
)) {
3382 return -EPFNOSUPPORT
;
3385 if (domain
== AF_PACKET
||
3386 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
3387 protocol
= tswap16(protocol
);
3390 ret
= get_errno(socket(domain
, type
, protocol
));
3392 ret
= sock_flags_fixup(ret
, target_type
);
3393 if (type
== SOCK_PACKET
) {
3394 /* Manage an obsolete case :
3395 * if socket type is SOCK_PACKET, bind by name
3397 fd_trans_register(ret
, &target_packet_trans
);
3398 } else if (domain
== PF_NETLINK
) {
3400 #ifdef CONFIG_RTNETLINK
3402 fd_trans_register(ret
, &target_netlink_route_trans
);
3405 case NETLINK_KOBJECT_UEVENT
:
3406 /* nothing to do: messages are strings */
3409 fd_trans_register(ret
, &target_netlink_audit_trans
);
3412 g_assert_not_reached();
3419 /* do_bind() Must return target values and target errnos. */
3420 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
3426 if ((int)addrlen
< 0) {
3427 return -TARGET_EINVAL
;
3430 addr
= alloca(addrlen
+1);
3432 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3436 return get_errno(bind(sockfd
, addr
, addrlen
));
3439 /* do_connect() Must return target values and target errnos. */
3440 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
3446 if ((int)addrlen
< 0) {
3447 return -TARGET_EINVAL
;
3450 addr
= alloca(addrlen
+1);
3452 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3456 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
3459 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3460 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
3461 int flags
, int send
)
3467 abi_ulong target_vec
;
3469 if (msgp
->msg_name
) {
3470 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
3471 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
3472 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
3473 tswapal(msgp
->msg_name
),
3479 msg
.msg_name
= NULL
;
3480 msg
.msg_namelen
= 0;
3482 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3483 msg
.msg_control
= alloca(msg
.msg_controllen
);
3484 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3486 count
= tswapal(msgp
->msg_iovlen
);
3487 target_vec
= tswapal(msgp
->msg_iov
);
3488 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3489 target_vec
, count
, send
);
3491 ret
= -host_to_target_errno(errno
);
3494 msg
.msg_iovlen
= count
;
3498 if (fd_trans_target_to_host_data(fd
)) {
3501 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3502 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3503 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3504 msg
.msg_iov
->iov_len
);
3506 msg
.msg_iov
->iov_base
= host_msg
;
3507 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3511 ret
= target_to_host_cmsg(&msg
, msgp
);
3513 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3517 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3518 if (!is_error(ret
)) {
3520 if (fd_trans_host_to_target_data(fd
)) {
3521 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3524 ret
= host_to_target_cmsg(msgp
, &msg
);
3526 if (!is_error(ret
)) {
3527 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3528 if (msg
.msg_name
!= NULL
) {
3529 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3530 msg
.msg_name
, msg
.msg_namelen
);
3542 unlock_iovec(vec
, target_vec
, count
, !send
);
3547 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3548 int flags
, int send
)
3551 struct target_msghdr
*msgp
;
3553 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3557 return -TARGET_EFAULT
;
3559 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3560 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3564 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3565 * so it might not have this *mmsg-specific flag either.
3567 #ifndef MSG_WAITFORONE
3568 #define MSG_WAITFORONE 0x10000
3571 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3572 unsigned int vlen
, unsigned int flags
,
3575 struct target_mmsghdr
*mmsgp
;
3579 if (vlen
> UIO_MAXIOV
) {
3583 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3585 return -TARGET_EFAULT
;
3588 for (i
= 0; i
< vlen
; i
++) {
3589 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3590 if (is_error(ret
)) {
3593 mmsgp
[i
].msg_len
= tswap32(ret
);
3594 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3595 if (flags
& MSG_WAITFORONE
) {
3596 flags
|= MSG_DONTWAIT
;
3600 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3602 /* Return number of datagrams sent if we sent any at all;
3603 * otherwise return the error.
3611 /* do_accept4() Must return target values and target errnos. */
3612 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3613 abi_ulong target_addrlen_addr
, int flags
)
3620 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3622 if (target_addr
== 0) {
3623 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3626 /* linux returns EINVAL if addrlen pointer is invalid */
3627 if (get_user_u32(addrlen
, target_addrlen_addr
))
3628 return -TARGET_EINVAL
;
3630 if ((int)addrlen
< 0) {
3631 return -TARGET_EINVAL
;
3634 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3635 return -TARGET_EINVAL
;
3637 addr
= alloca(addrlen
);
3639 ret
= get_errno(safe_accept4(fd
, addr
, &addrlen
, host_flags
));
3640 if (!is_error(ret
)) {
3641 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3642 if (put_user_u32(addrlen
, target_addrlen_addr
))
3643 ret
= -TARGET_EFAULT
;
3648 /* do_getpeername() Must return target values and target errnos. */
3649 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3650 abi_ulong target_addrlen_addr
)
3656 if (get_user_u32(addrlen
, target_addrlen_addr
))
3657 return -TARGET_EFAULT
;
3659 if ((int)addrlen
< 0) {
3660 return -TARGET_EINVAL
;
3663 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3664 return -TARGET_EFAULT
;
3666 addr
= alloca(addrlen
);
3668 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
3669 if (!is_error(ret
)) {
3670 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3671 if (put_user_u32(addrlen
, target_addrlen_addr
))
3672 ret
= -TARGET_EFAULT
;
3677 /* do_getsockname() Must return target values and target errnos. */
3678 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3679 abi_ulong target_addrlen_addr
)
3685 if (get_user_u32(addrlen
, target_addrlen_addr
))
3686 return -TARGET_EFAULT
;
3688 if ((int)addrlen
< 0) {
3689 return -TARGET_EINVAL
;
3692 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3693 return -TARGET_EFAULT
;
3695 addr
= alloca(addrlen
);
3697 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
3698 if (!is_error(ret
)) {
3699 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3700 if (put_user_u32(addrlen
, target_addrlen_addr
))
3701 ret
= -TARGET_EFAULT
;
3706 /* do_socketpair() Must return target values and target errnos. */
3707 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3708 abi_ulong target_tab_addr
)
3713 target_to_host_sock_type(&type
);
3715 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3716 if (!is_error(ret
)) {
3717 if (put_user_s32(tab
[0], target_tab_addr
)
3718 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3719 ret
= -TARGET_EFAULT
;
3724 /* do_sendto() Must return target values and target errnos. */
3725 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3726 abi_ulong target_addr
, socklen_t addrlen
)
3730 void *copy_msg
= NULL
;
3733 if ((int)addrlen
< 0) {
3734 return -TARGET_EINVAL
;
3737 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3739 return -TARGET_EFAULT
;
3740 if (fd_trans_target_to_host_data(fd
)) {
3741 copy_msg
= host_msg
;
3742 host_msg
= g_malloc(len
);
3743 memcpy(host_msg
, copy_msg
, len
);
3744 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3750 addr
= alloca(addrlen
+1);
3751 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3755 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3757 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3762 host_msg
= copy_msg
;
3764 unlock_user(host_msg
, msg
, 0);
3768 /* do_recvfrom() Must return target values and target errnos. */
3769 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3770 abi_ulong target_addr
,
3771 abi_ulong target_addrlen
)
3778 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3780 return -TARGET_EFAULT
;
3782 if (get_user_u32(addrlen
, target_addrlen
)) {
3783 ret
= -TARGET_EFAULT
;
3786 if ((int)addrlen
< 0) {
3787 ret
= -TARGET_EINVAL
;
3790 addr
= alloca(addrlen
);
3791 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
3794 addr
= NULL
; /* To keep compiler quiet. */
3795 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
3797 if (!is_error(ret
)) {
3798 if (fd_trans_host_to_target_data(fd
)) {
3799 ret
= fd_trans_host_to_target_data(fd
)(host_msg
, ret
);
3802 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3803 if (put_user_u32(addrlen
, target_addrlen
)) {
3804 ret
= -TARGET_EFAULT
;
3808 unlock_user(host_msg
, msg
, len
);
3811 unlock_user(host_msg
, msg
, 0);
#ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    /* Number of guest-side arguments each socketcall sub-op takes;
     * indexed by the SOCKOP_* number. */
    static const unsigned nargs[] = {
        [SOCKOP_socket] = 3,      /* domain, type, protocol */
        [SOCKOP_bind] = 3,        /* sockfd, addr, addrlen */
        [SOCKOP_connect] = 3,     /* sockfd, addr, addrlen */
        [SOCKOP_listen] = 2,      /* sockfd, backlog */
        [SOCKOP_accept] = 3,      /* sockfd, addr, addrlen */
        [SOCKOP_accept4] = 4,     /* sockfd, addr, addrlen, flags */
        [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_socketpair] = 4,  /* domain, type, protocol, tab */
        [SOCKOP_send] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_recv] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_sendto] = 6,      /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_recvfrom] = 6,    /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_shutdown] = 2,    /* sockfd, how */
        [SOCKOP_sendmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_recvmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_sendmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_recvmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_setsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
        [SOCKOP_getsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* First, fetch the arguments from guest memory into a[],
     * as many as nargs[] says this sub-op uses. */
    if (num >= 0 && num < ARRAY_SIZE(nargs)) {
        assert(ARRAY_SIZE(a) >= nargs[num]); /* ensure we have space for args */
        for (i = 0; i < nargs[num]; ++i) {
            if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
                return -TARGET_EFAULT;
            }
        }
    }

    /* Now that we have the args, dispatch to the real implementation. */
    switch (num) {
    case SOCKOP_socket: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case SOCKOP_bind: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case SOCKOP_connect: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case SOCKOP_listen: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case SOCKOP_accept: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case SOCKOP_getsockname: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case SOCKOP_getpeername: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case SOCKOP_socketpair: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case SOCKOP_send: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_recv: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_shutdown: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case SOCKOP_sendmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case SOCKOP_recvmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case SOCKOP_sendmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    case SOCKOP_recvmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_ENOSYS;
    }
}
#endif
3904 #define N_SHM_REGIONS 32
3906 static struct shm_region
{
3910 } shm_regions
[N_SHM_REGIONS
];
3912 #ifndef TARGET_SEMID64_DS
3913 /* asm-generic version of this struct */
3914 struct target_semid64_ds
3916 struct target_ipc_perm sem_perm
;
3917 abi_ulong sem_otime
;
3918 #if TARGET_ABI_BITS == 32
3919 abi_ulong __unused1
;
3921 abi_ulong sem_ctime
;
3922 #if TARGET_ABI_BITS == 32
3923 abi_ulong __unused2
;
3925 abi_ulong sem_nsems
;
3926 abi_ulong __unused3
;
3927 abi_ulong __unused4
;
3931 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
3932 abi_ulong target_addr
)
3934 struct target_ipc_perm
*target_ip
;
3935 struct target_semid64_ds
*target_sd
;
3937 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3938 return -TARGET_EFAULT
;
3939 target_ip
= &(target_sd
->sem_perm
);
3940 host_ip
->__key
= tswap32(target_ip
->__key
);
3941 host_ip
->uid
= tswap32(target_ip
->uid
);
3942 host_ip
->gid
= tswap32(target_ip
->gid
);
3943 host_ip
->cuid
= tswap32(target_ip
->cuid
);
3944 host_ip
->cgid
= tswap32(target_ip
->cgid
);
3945 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3946 host_ip
->mode
= tswap32(target_ip
->mode
);
3948 host_ip
->mode
= tswap16(target_ip
->mode
);
3950 #if defined(TARGET_PPC)
3951 host_ip
->__seq
= tswap32(target_ip
->__seq
);
3953 host_ip
->__seq
= tswap16(target_ip
->__seq
);
3955 unlock_user_struct(target_sd
, target_addr
, 0);
3959 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
3960 struct ipc_perm
*host_ip
)
3962 struct target_ipc_perm
*target_ip
;
3963 struct target_semid64_ds
*target_sd
;
3965 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
3966 return -TARGET_EFAULT
;
3967 target_ip
= &(target_sd
->sem_perm
);
3968 target_ip
->__key
= tswap32(host_ip
->__key
);
3969 target_ip
->uid
= tswap32(host_ip
->uid
);
3970 target_ip
->gid
= tswap32(host_ip
->gid
);
3971 target_ip
->cuid
= tswap32(host_ip
->cuid
);
3972 target_ip
->cgid
= tswap32(host_ip
->cgid
);
3973 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3974 target_ip
->mode
= tswap32(host_ip
->mode
);
3976 target_ip
->mode
= tswap16(host_ip
->mode
);
3978 #if defined(TARGET_PPC)
3979 target_ip
->__seq
= tswap32(host_ip
->__seq
);
3981 target_ip
->__seq
= tswap16(host_ip
->__seq
);
3983 unlock_user_struct(target_sd
, target_addr
, 1);
3987 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
3988 abi_ulong target_addr
)
3990 struct target_semid64_ds
*target_sd
;
3992 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
3993 return -TARGET_EFAULT
;
3994 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
3995 return -TARGET_EFAULT
;
3996 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
3997 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
3998 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
3999 unlock_user_struct(target_sd
, target_addr
, 0);
4003 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
4004 struct semid_ds
*host_sd
)
4006 struct target_semid64_ds
*target_sd
;
4008 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4009 return -TARGET_EFAULT
;
4010 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
4011 return -TARGET_EFAULT
;
4012 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
4013 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
4014 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
4015 unlock_user_struct(target_sd
, target_addr
, 1);
4019 struct target_seminfo
{
4032 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
4033 struct seminfo
*host_seminfo
)
4035 struct target_seminfo
*target_seminfo
;
4036 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
4037 return -TARGET_EFAULT
;
4038 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
4039 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
4040 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
4041 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
4042 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
4043 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
4044 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
4045 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
4046 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
4047 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
4048 unlock_user_struct(target_seminfo
, target_addr
, 1);
4054 struct semid_ds
*buf
;
4055 unsigned short *array
;
4056 struct seminfo
*__buf
;
4059 union target_semun
{
4066 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
4067 abi_ulong target_addr
)
4070 unsigned short *array
;
4072 struct semid_ds semid_ds
;
4075 semun
.buf
= &semid_ds
;
4077 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4079 return get_errno(ret
);
4081 nsems
= semid_ds
.sem_nsems
;
4083 *host_array
= g_try_new(unsigned short, nsems
);
4085 return -TARGET_ENOMEM
;
4087 array
= lock_user(VERIFY_READ
, target_addr
,
4088 nsems
*sizeof(unsigned short), 1);
4090 g_free(*host_array
);
4091 return -TARGET_EFAULT
;
4094 for(i
=0; i
<nsems
; i
++) {
4095 __get_user((*host_array
)[i
], &array
[i
]);
4097 unlock_user(array
, target_addr
, 0);
4102 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
4103 unsigned short **host_array
)
4106 unsigned short *array
;
4108 struct semid_ds semid_ds
;
4111 semun
.buf
= &semid_ds
;
4113 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4115 return get_errno(ret
);
4117 nsems
= semid_ds
.sem_nsems
;
4119 array
= lock_user(VERIFY_WRITE
, target_addr
,
4120 nsems
*sizeof(unsigned short), 0);
4122 return -TARGET_EFAULT
;
4124 for(i
=0; i
<nsems
; i
++) {
4125 __put_user((*host_array
)[i
], &array
[i
]);
4127 g_free(*host_array
);
4128 unlock_user(array
, target_addr
, 1);
4133 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
4134 abi_ulong target_arg
)
4136 union target_semun target_su
= { .buf
= target_arg
};
4138 struct semid_ds dsarg
;
4139 unsigned short *array
= NULL
;
4140 struct seminfo seminfo
;
4141 abi_long ret
= -TARGET_EINVAL
;
4148 /* In 64 bit cross-endian situations, we will erroneously pick up
4149 * the wrong half of the union for the "val" element. To rectify
4150 * this, the entire 8-byte structure is byteswapped, followed by
4151 * a swap of the 4 byte val field. In other cases, the data is
4152 * already in proper host byte order. */
4153 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
4154 target_su
.buf
= tswapal(target_su
.buf
);
4155 arg
.val
= tswap32(target_su
.val
);
4157 arg
.val
= target_su
.val
;
4159 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4163 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
4167 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4168 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
4175 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
4179 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4180 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
4186 arg
.__buf
= &seminfo
;
4187 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4188 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
4196 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
4203 struct target_sembuf
{
4204 unsigned short sem_num
;
4209 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
4210 abi_ulong target_addr
,
4213 struct target_sembuf
*target_sembuf
;
4216 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
4217 nsops
*sizeof(struct target_sembuf
), 1);
4219 return -TARGET_EFAULT
;
4221 for(i
=0; i
<nsops
; i
++) {
4222 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
4223 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
4224 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
4227 unlock_user(target_sembuf
, target_addr
, 0);
4232 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
4234 struct sembuf sops
[nsops
];
4236 if (target_to_host_sembuf(sops
, ptr
, nsops
))
4237 return -TARGET_EFAULT
;
4239 return get_errno(safe_semtimedop(semid
, sops
, nsops
, NULL
));
4242 struct target_msqid_ds
4244 struct target_ipc_perm msg_perm
;
4245 abi_ulong msg_stime
;
4246 #if TARGET_ABI_BITS == 32
4247 abi_ulong __unused1
;
4249 abi_ulong msg_rtime
;
4250 #if TARGET_ABI_BITS == 32
4251 abi_ulong __unused2
;
4253 abi_ulong msg_ctime
;
4254 #if TARGET_ABI_BITS == 32
4255 abi_ulong __unused3
;
4257 abi_ulong __msg_cbytes
;
4259 abi_ulong msg_qbytes
;
4260 abi_ulong msg_lspid
;
4261 abi_ulong msg_lrpid
;
4262 abi_ulong __unused4
;
4263 abi_ulong __unused5
;
4266 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
4267 abi_ulong target_addr
)
4269 struct target_msqid_ds
*target_md
;
4271 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
4272 return -TARGET_EFAULT
;
4273 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
4274 return -TARGET_EFAULT
;
4275 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
4276 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
4277 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
4278 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
4279 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
4280 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
4281 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
4282 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
4283 unlock_user_struct(target_md
, target_addr
, 0);
4287 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
4288 struct msqid_ds
*host_md
)
4290 struct target_msqid_ds
*target_md
;
4292 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
4293 return -TARGET_EFAULT
;
4294 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
4295 return -TARGET_EFAULT
;
4296 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
4297 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
4298 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
4299 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
4300 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
4301 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
4302 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
4303 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
4304 unlock_user_struct(target_md
, target_addr
, 1);
4308 struct target_msginfo
{
4316 unsigned short int msgseg
;
4319 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
4320 struct msginfo
*host_msginfo
)
4322 struct target_msginfo
*target_msginfo
;
4323 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
4324 return -TARGET_EFAULT
;
4325 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
4326 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
4327 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
4328 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
4329 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
4330 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
4331 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
4332 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
4333 unlock_user_struct(target_msginfo
, target_addr
, 1);
4337 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
4339 struct msqid_ds dsarg
;
4340 struct msginfo msginfo
;
4341 abi_long ret
= -TARGET_EINVAL
;
4349 if (target_to_host_msqid_ds(&dsarg
,ptr
))
4350 return -TARGET_EFAULT
;
4351 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
4352 if (host_to_target_msqid_ds(ptr
,&dsarg
))
4353 return -TARGET_EFAULT
;
4356 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
4360 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
4361 if (host_to_target_msginfo(ptr
, &msginfo
))
4362 return -TARGET_EFAULT
;
4369 struct target_msgbuf
{
4374 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
4375 ssize_t msgsz
, int msgflg
)
4377 struct target_msgbuf
*target_mb
;
4378 struct msgbuf
*host_mb
;
4382 return -TARGET_EINVAL
;
4385 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
4386 return -TARGET_EFAULT
;
4387 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4389 unlock_user_struct(target_mb
, msgp
, 0);
4390 return -TARGET_ENOMEM
;
4392 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
4393 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
4394 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
4396 unlock_user_struct(target_mb
, msgp
, 0);
4401 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
4402 ssize_t msgsz
, abi_long msgtyp
,
4405 struct target_msgbuf
*target_mb
;
4407 struct msgbuf
*host_mb
;
4411 return -TARGET_EINVAL
;
4414 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
4415 return -TARGET_EFAULT
;
4417 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4419 ret
= -TARGET_ENOMEM
;
4422 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
4425 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
4426 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
4427 if (!target_mtext
) {
4428 ret
= -TARGET_EFAULT
;
4431 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
4432 unlock_user(target_mtext
, target_mtext_addr
, ret
);
4435 target_mb
->mtype
= tswapal(host_mb
->mtype
);
4439 unlock_user_struct(target_mb
, msgp
, 1);
4444 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
4445 abi_ulong target_addr
)
4447 struct target_shmid_ds
*target_sd
;
4449 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4450 return -TARGET_EFAULT
;
4451 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
4452 return -TARGET_EFAULT
;
4453 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4454 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4455 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4456 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4457 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4458 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4459 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4460 unlock_user_struct(target_sd
, target_addr
, 0);
4464 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
4465 struct shmid_ds
*host_sd
)
4467 struct target_shmid_ds
*target_sd
;
4469 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4470 return -TARGET_EFAULT
;
4471 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
4472 return -TARGET_EFAULT
;
4473 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4474 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4475 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4476 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4477 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4478 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4479 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4480 unlock_user_struct(target_sd
, target_addr
, 1);
4484 struct target_shminfo
{
4492 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
4493 struct shminfo
*host_shminfo
)
4495 struct target_shminfo
*target_shminfo
;
4496 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
4497 return -TARGET_EFAULT
;
4498 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
4499 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
4500 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
4501 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
4502 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
4503 unlock_user_struct(target_shminfo
, target_addr
, 1);
4507 struct target_shm_info
{
4512 abi_ulong swap_attempts
;
4513 abi_ulong swap_successes
;
4516 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
4517 struct shm_info
*host_shm_info
)
4519 struct target_shm_info
*target_shm_info
;
4520 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
4521 return -TARGET_EFAULT
;
4522 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
4523 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
4524 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
4525 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
4526 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
4527 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
4528 unlock_user_struct(target_shm_info
, target_addr
, 1);
4532 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
4534 struct shmid_ds dsarg
;
4535 struct shminfo shminfo
;
4536 struct shm_info shm_info
;
4537 abi_long ret
= -TARGET_EINVAL
;
4545 if (target_to_host_shmid_ds(&dsarg
, buf
))
4546 return -TARGET_EFAULT
;
4547 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
4548 if (host_to_target_shmid_ds(buf
, &dsarg
))
4549 return -TARGET_EFAULT
;
4552 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
4553 if (host_to_target_shminfo(buf
, &shminfo
))
4554 return -TARGET_EFAULT
;
4557 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
4558 if (host_to_target_shm_info(buf
, &shm_info
))
4559 return -TARGET_EFAULT
;
4564 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
4571 static inline abi_ulong
do_shmat(int shmid
, abi_ulong shmaddr
, int shmflg
)
4575 struct shmid_ds shm_info
;
4578 /* find out the length of the shared memory segment */
4579 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4580 if (is_error(ret
)) {
4581 /* can't get length, bail out */
4588 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
4590 abi_ulong mmap_start
;
4592 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
4594 if (mmap_start
== -1) {
4596 host_raddr
= (void *)-1;
4598 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
4601 if (host_raddr
== (void *)-1) {
4603 return get_errno((long)host_raddr
);
4605 raddr
=h2g((unsigned long)host_raddr
);
4607 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
4608 PAGE_VALID
| PAGE_READ
|
4609 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
4611 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4612 if (!shm_regions
[i
].in_use
) {
4613 shm_regions
[i
].in_use
= true;
4614 shm_regions
[i
].start
= raddr
;
4615 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4625 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4629 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4630 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4631 shm_regions
[i
].in_use
= false;
4632 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
4637 return get_errno(shmdt(g2h(shmaddr
)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos.  */
static abi_long do_ipc(unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old-style callers pass msgp/msgtyp indirectly. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* return the attach address through *third */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;

    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;

    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
4746 /* kernel structure types definitions */
4748 #define STRUCT(name, ...) STRUCT_ ## name,
4749 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4751 #include "syscall_types.h"
4755 #undef STRUCT_SPECIAL
4757 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4758 #define STRUCT_SPECIAL(name)
4759 #include "syscall_types.h"
4761 #undef STRUCT_SPECIAL
4763 typedef struct IOCTLEntry IOCTLEntry
;
4765 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4766 int fd
, int cmd
, abi_long arg
);
4770 unsigned int host_cmd
;
4773 do_ioctl_fn
*do_ioctl
;
4774 const argtype arg_type
[5];
4777 #define IOC_R 0x0001
4778 #define IOC_W 0x0002
4779 #define IOC_RW (IOC_R | IOC_W)
4781 #define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

/* FS_IOC_FIEMAP handler: convert the guest's fiemap request in, run the
 * host ioctl, then convert the header plus the filled-in extent array
 * back out. */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;   /* set when fm was heap-allocated below */

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof(*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
4872 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4873 int fd
, int cmd
, abi_long arg
)
4875 const argtype
*arg_type
= ie
->arg_type
;
4879 struct ifconf
*host_ifconf
;
4881 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4882 int target_ifreq_size
;
4887 abi_long target_ifc_buf
;
4891 assert(arg_type
[0] == TYPE_PTR
);
4892 assert(ie
->access
== IOC_RW
);
4895 target_size
= thunk_type_size(arg_type
, 0);
4897 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4899 return -TARGET_EFAULT
;
4900 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4901 unlock_user(argptr
, arg
, 0);
4903 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
4904 target_ifc_len
= host_ifconf
->ifc_len
;
4905 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
4907 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
4908 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
4909 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
4911 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
4912 if (outbufsz
> MAX_STRUCT_SIZE
) {
4913 /* We can't fit all the extents into the fixed size buffer.
4914 * Allocate one that is large enough and use it instead.
4916 host_ifconf
= malloc(outbufsz
);
4918 return -TARGET_ENOMEM
;
4920 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
4923 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
4925 host_ifconf
->ifc_len
= host_ifc_len
;
4926 host_ifconf
->ifc_buf
= host_ifc_buf
;
4928 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
4929 if (!is_error(ret
)) {
4930 /* convert host ifc_len to target ifc_len */
4932 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
4933 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
4934 host_ifconf
->ifc_len
= target_ifc_len
;
4936 /* restore target ifc_buf */
4938 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
4940 /* copy struct ifconf to target user */
4942 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
4944 return -TARGET_EFAULT
;
4945 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
4946 unlock_user(argptr
, arg
, target_size
);
4948 /* copy ifreq[] to target user */
4950 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
4951 for (i
= 0; i
< nb_ifreq
; i
++) {
4952 thunk_convert(argptr
+ i
* target_ifreq_size
,
4953 host_ifc_buf
+ i
* sizeof(struct ifreq
),
4954 ifreq_arg_type
, THUNK_TARGET
);
4956 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
4966 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
4967 int cmd
, abi_long arg
)
4970 struct dm_ioctl
*host_dm
;
4971 abi_long guest_data
;
4972 uint32_t guest_data_size
;
4974 const argtype
*arg_type
= ie
->arg_type
;
4976 void *big_buf
= NULL
;
4980 target_size
= thunk_type_size(arg_type
, 0);
4981 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
4983 ret
= -TARGET_EFAULT
;
4986 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4987 unlock_user(argptr
, arg
, 0);
4989 /* buf_temp is too small, so fetch things into a bigger buffer */
4990 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
4991 memcpy(big_buf
, buf_temp
, target_size
);
4995 guest_data
= arg
+ host_dm
->data_start
;
4996 if ((guest_data
- arg
) < 0) {
5000 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5001 host_data
= (char*)host_dm
+ host_dm
->data_start
;
5003 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
5004 switch (ie
->host_cmd
) {
5006 case DM_LIST_DEVICES
:
5009 case DM_DEV_SUSPEND
:
5012 case DM_TABLE_STATUS
:
5013 case DM_TABLE_CLEAR
:
5015 case DM_LIST_VERSIONS
:
5019 case DM_DEV_SET_GEOMETRY
:
5020 /* data contains only strings */
5021 memcpy(host_data
, argptr
, guest_data_size
);
5024 memcpy(host_data
, argptr
, guest_data_size
);
5025 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
5029 void *gspec
= argptr
;
5030 void *cur_data
= host_data
;
5031 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5032 int spec_size
= thunk_type_size(arg_type
, 0);
5035 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5036 struct dm_target_spec
*spec
= cur_data
;
5040 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
5041 slen
= strlen((char*)gspec
+ spec_size
) + 1;
5043 spec
->next
= sizeof(*spec
) + slen
;
5044 strcpy((char*)&spec
[1], gspec
+ spec_size
);
5046 cur_data
+= spec
->next
;
5051 ret
= -TARGET_EINVAL
;
5052 unlock_user(argptr
, guest_data
, 0);
5055 unlock_user(argptr
, guest_data
, 0);
5057 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5058 if (!is_error(ret
)) {
5059 guest_data
= arg
+ host_dm
->data_start
;
5060 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5061 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
5062 switch (ie
->host_cmd
) {
5067 case DM_DEV_SUSPEND
:
5070 case DM_TABLE_CLEAR
:
5072 case DM_DEV_SET_GEOMETRY
:
5073 /* no return data */
5075 case DM_LIST_DEVICES
:
5077 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
5078 uint32_t remaining_data
= guest_data_size
;
5079 void *cur_data
= argptr
;
5080 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
5081 int nl_size
= 12; /* can't use thunk_size due to alignment */
5084 uint32_t next
= nl
->next
;
5086 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
5088 if (remaining_data
< nl
->next
) {
5089 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5092 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
5093 strcpy(cur_data
+ nl_size
, nl
->name
);
5094 cur_data
+= nl
->next
;
5095 remaining_data
-= nl
->next
;
5099 nl
= (void*)nl
+ next
;
5104 case DM_TABLE_STATUS
:
5106 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
5107 void *cur_data
= argptr
;
5108 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5109 int spec_size
= thunk_type_size(arg_type
, 0);
5112 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5113 uint32_t next
= spec
->next
;
5114 int slen
= strlen((char*)&spec
[1]) + 1;
5115 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
5116 if (guest_data_size
< spec
->next
) {
5117 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5120 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
5121 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
5122 cur_data
= argptr
+ spec
->next
;
5123 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
5129 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
5130 int count
= *(uint32_t*)hdata
;
5131 uint64_t *hdev
= hdata
+ 8;
5132 uint64_t *gdev
= argptr
+ 8;
5135 *(uint32_t*)argptr
= tswap32(count
);
5136 for (i
= 0; i
< count
; i
++) {
5137 *gdev
= tswap64(*hdev
);
5143 case DM_LIST_VERSIONS
:
5145 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
5146 uint32_t remaining_data
= guest_data_size
;
5147 void *cur_data
= argptr
;
5148 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
5149 int vers_size
= thunk_type_size(arg_type
, 0);
5152 uint32_t next
= vers
->next
;
5154 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
5156 if (remaining_data
< vers
->next
) {
5157 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5160 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
5161 strcpy(cur_data
+ vers_size
, vers
->name
);
5162 cur_data
+= vers
->next
;
5163 remaining_data
-= vers
->next
;
5167 vers
= (void*)vers
+ next
;
5172 unlock_user(argptr
, guest_data
, 0);
5173 ret
= -TARGET_EINVAL
;
5176 unlock_user(argptr
, guest_data
, guest_data_size
);
5178 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5180 ret
= -TARGET_EFAULT
;
5183 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5184 unlock_user(argptr
, arg
, target_size
);
5191 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5192 int cmd
, abi_long arg
)
5196 const argtype
*arg_type
= ie
->arg_type
;
5197 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
5200 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
5201 struct blkpg_partition host_part
;
5203 /* Read and convert blkpg */
5205 target_size
= thunk_type_size(arg_type
, 0);
5206 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5208 ret
= -TARGET_EFAULT
;
5211 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5212 unlock_user(argptr
, arg
, 0);
5214 switch (host_blkpg
->op
) {
5215 case BLKPG_ADD_PARTITION
:
5216 case BLKPG_DEL_PARTITION
:
5217 /* payload is struct blkpg_partition */
5220 /* Unknown opcode */
5221 ret
= -TARGET_EINVAL
;
5225 /* Read and convert blkpg->data */
5226 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
5227 target_size
= thunk_type_size(part_arg_type
, 0);
5228 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5230 ret
= -TARGET_EFAULT
;
5233 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
5234 unlock_user(argptr
, arg
, 0);
5236 /* Swizzle the data pointer to our local copy and call! */
5237 host_blkpg
->data
= &host_part
;
5238 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
5244 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5245 int fd
, int cmd
, abi_long arg
)
5247 const argtype
*arg_type
= ie
->arg_type
;
5248 const StructEntry
*se
;
5249 const argtype
*field_types
;
5250 const int *dst_offsets
, *src_offsets
;
5253 abi_ulong
*target_rt_dev_ptr
;
5254 unsigned long *host_rt_dev_ptr
;
5258 assert(ie
->access
== IOC_W
);
5259 assert(*arg_type
== TYPE_PTR
);
5261 assert(*arg_type
== TYPE_STRUCT
);
5262 target_size
= thunk_type_size(arg_type
, 0);
5263 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5265 return -TARGET_EFAULT
;
5268 assert(*arg_type
== (int)STRUCT_rtentry
);
5269 se
= struct_entries
+ *arg_type
++;
5270 assert(se
->convert
[0] == NULL
);
5271 /* convert struct here to be able to catch rt_dev string */
5272 field_types
= se
->field_types
;
5273 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
5274 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
5275 for (i
= 0; i
< se
->nb_fields
; i
++) {
5276 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
5277 assert(*field_types
== TYPE_PTRVOID
);
5278 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
5279 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
5280 if (*target_rt_dev_ptr
!= 0) {
5281 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
5282 tswapal(*target_rt_dev_ptr
));
5283 if (!*host_rt_dev_ptr
) {
5284 unlock_user(argptr
, arg
, 0);
5285 return -TARGET_EFAULT
;
5288 *host_rt_dev_ptr
= 0;
5293 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
5294 argptr
+ src_offsets
[i
],
5295 field_types
, THUNK_HOST
);
5297 unlock_user(argptr
, arg
, 0);
5299 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5300 if (*host_rt_dev_ptr
!= 0) {
5301 unlock_user((void *)*host_rt_dev_ptr
,
5302 *target_rt_dev_ptr
, 0);
5307 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5308 int fd
, int cmd
, abi_long arg
)
5310 int sig
= target_to_host_signal(arg
);
5311 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
5314 static IOCTLEntry ioctl_entries
[] = {
5315 #define IOCTL(cmd, access, ...) \
5316 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5317 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5318 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5323 /* ??? Implement proper locking for ioctls. */
5324 /* do_ioctl() Must return target values and target errnos. */
5325 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
5327 const IOCTLEntry
*ie
;
5328 const argtype
*arg_type
;
5330 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
5336 if (ie
->target_cmd
== 0) {
5337 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
5338 return -TARGET_ENOSYS
;
5340 if (ie
->target_cmd
== cmd
)
5344 arg_type
= ie
->arg_type
;
5346 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
5349 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
5352 switch(arg_type
[0]) {
5355 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
5359 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
5363 target_size
= thunk_type_size(arg_type
, 0);
5364 switch(ie
->access
) {
5366 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5367 if (!is_error(ret
)) {
5368 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5370 return -TARGET_EFAULT
;
5371 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5372 unlock_user(argptr
, arg
, target_size
);
5376 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5378 return -TARGET_EFAULT
;
5379 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5380 unlock_user(argptr
, arg
, 0);
5381 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5385 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5387 return -TARGET_EFAULT
;
5388 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5389 unlock_user(argptr
, arg
, 0);
5390 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5391 if (!is_error(ret
)) {
5392 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5394 return -TARGET_EFAULT
;
5395 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5396 unlock_user(argptr
, arg
, target_size
);
5402 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5403 (long)cmd
, arg_type
[0]);
5404 ret
= -TARGET_ENOSYS
;
5410 static const bitmask_transtbl iflag_tbl
[] = {
5411 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
5412 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
5413 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
5414 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
5415 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
5416 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
5417 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
5418 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
5419 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
5420 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
5421 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
5422 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
5423 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
5424 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
5428 static const bitmask_transtbl oflag_tbl
[] = {
5429 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
5430 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
5431 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
5432 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
5433 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
5434 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
5435 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
5436 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
5437 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
5438 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
5439 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
5440 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
5441 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
5442 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
5443 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
5444 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
5445 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
5446 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
5447 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
5448 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
5449 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
5450 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
5451 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
5452 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
5456 static const bitmask_transtbl cflag_tbl
[] = {
5457 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
5458 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
5459 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
5460 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
5461 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
5462 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
5463 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
5464 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
5465 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
5466 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
5467 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
5468 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
5469 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
5470 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
5471 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
5472 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
5473 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
5474 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
5475 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
5476 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
5477 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
5478 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
5479 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
5480 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
5481 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
5482 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
5483 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
5484 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
5485 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
5486 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
5487 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
5491 static const bitmask_transtbl lflag_tbl
[] = {
5492 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
5493 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
5494 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
5495 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
5496 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
5497 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
5498 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
5499 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
5500 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
5501 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
5502 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
5503 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
5504 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
5505 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
5506 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5510 static void target_to_host_termios (void *dst
, const void *src
)
5512 struct host_termios
*host
= dst
;
5513 const struct target_termios
*target
= src
;
5516 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
5518 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
5520 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
5522 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
5523 host
->c_line
= target
->c_line
;
5525 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
5526 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
5527 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
5528 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
5529 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
5530 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
5531 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
5532 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
5533 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
5534 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
5535 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
5536 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
5537 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
5538 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
5539 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
5540 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
5541 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
5542 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
5545 static void host_to_target_termios (void *dst
, const void *src
)
5547 struct target_termios
*target
= dst
;
5548 const struct host_termios
*host
= src
;
5551 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
5553 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
5555 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
5557 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
5558 target
->c_line
= host
->c_line
;
5560 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
5561 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
5562 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
5563 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
5564 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
5565 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
5566 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
5567 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
5568 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
5569 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
5570 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
5571 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
5572 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
5573 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
5574 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
5575 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
5576 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
5577 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
5580 static const StructEntry struct_termios_def
= {
5581 .convert
= { host_to_target_termios
, target_to_host_termios
},
5582 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
5583 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
5586 static bitmask_transtbl mmap_flags_tbl
[] = {
5587 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
5588 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
5589 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
5590 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
5591 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
5592 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
5593 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
5594 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
5595 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
, MAP_NORESERVE
,
5600 #if defined(TARGET_I386)
5602 /* NOTE: there is really one LDT for all the threads */
5603 static uint8_t *ldt_table
;
5605 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
5612 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
5613 if (size
> bytecount
)
5615 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
5617 return -TARGET_EFAULT
;
5618 /* ??? Should this by byteswapped? */
5619 memcpy(p
, ldt_table
, size
);
5620 unlock_user(p
, ptr
, size
);
5624 /* XXX: add locking support */
5625 static abi_long
write_ldt(CPUX86State
*env
,
5626 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
5628 struct target_modify_ldt_ldt_s ldt_info
;
5629 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5630 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5631 int seg_not_present
, useable
, lm
;
5632 uint32_t *lp
, entry_1
, entry_2
;
5634 if (bytecount
!= sizeof(ldt_info
))
5635 return -TARGET_EINVAL
;
5636 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
5637 return -TARGET_EFAULT
;
5638 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5639 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5640 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5641 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5642 unlock_user_struct(target_ldt_info
, ptr
, 0);
5644 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
5645 return -TARGET_EINVAL
;
5646 seg_32bit
= ldt_info
.flags
& 1;
5647 contents
= (ldt_info
.flags
>> 1) & 3;
5648 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5649 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5650 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5651 useable
= (ldt_info
.flags
>> 6) & 1;
5655 lm
= (ldt_info
.flags
>> 7) & 1;
5657 if (contents
== 3) {
5659 return -TARGET_EINVAL
;
5660 if (seg_not_present
== 0)
5661 return -TARGET_EINVAL
;
5663 /* allocate the LDT */
5665 env
->ldt
.base
= target_mmap(0,
5666 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
5667 PROT_READ
|PROT_WRITE
,
5668 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
5669 if (env
->ldt
.base
== -1)
5670 return -TARGET_ENOMEM
;
5671 memset(g2h(env
->ldt
.base
), 0,
5672 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
5673 env
->ldt
.limit
= 0xffff;
5674 ldt_table
= g2h(env
->ldt
.base
);
5677 /* NOTE: same code as Linux kernel */
5678 /* Allow LDTs to be cleared by the user. */
5679 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5682 read_exec_only
== 1 &&
5684 limit_in_pages
== 0 &&
5685 seg_not_present
== 1 &&
5693 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5694 (ldt_info
.limit
& 0x0ffff);
5695 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5696 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5697 (ldt_info
.limit
& 0xf0000) |
5698 ((read_exec_only
^ 1) << 9) |
5700 ((seg_not_present
^ 1) << 15) |
5702 (limit_in_pages
<< 23) |
5706 entry_2
|= (useable
<< 20);
5708 /* Install the new entry ... */
5710 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
5711 lp
[0] = tswap32(entry_1
);
5712 lp
[1] = tswap32(entry_2
);
5716 /* specific and weird i386 syscalls */
5717 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
5718 unsigned long bytecount
)
5724 ret
= read_ldt(ptr
, bytecount
);
5727 ret
= write_ldt(env
, ptr
, bytecount
, 1);
5730 ret
= write_ldt(env
, ptr
, bytecount
, 0);
5733 ret
= -TARGET_ENOSYS
;
5739 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5740 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5742 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5743 struct target_modify_ldt_ldt_s ldt_info
;
5744 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5745 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5746 int seg_not_present
, useable
, lm
;
5747 uint32_t *lp
, entry_1
, entry_2
;
5750 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5751 if (!target_ldt_info
)
5752 return -TARGET_EFAULT
;
5753 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5754 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5755 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5756 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5757 if (ldt_info
.entry_number
== -1) {
5758 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
5759 if (gdt_table
[i
] == 0) {
5760 ldt_info
.entry_number
= i
;
5761 target_ldt_info
->entry_number
= tswap32(i
);
5766 unlock_user_struct(target_ldt_info
, ptr
, 1);
5768 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
5769 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
5770 return -TARGET_EINVAL
;
5771 seg_32bit
= ldt_info
.flags
& 1;
5772 contents
= (ldt_info
.flags
>> 1) & 3;
5773 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5774 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5775 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5776 useable
= (ldt_info
.flags
>> 6) & 1;
5780 lm
= (ldt_info
.flags
>> 7) & 1;
5783 if (contents
== 3) {
5784 if (seg_not_present
== 0)
5785 return -TARGET_EINVAL
;
5788 /* NOTE: same code as Linux kernel */
5789 /* Allow LDTs to be cleared by the user. */
5790 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5791 if ((contents
== 0 &&
5792 read_exec_only
== 1 &&
5794 limit_in_pages
== 0 &&
5795 seg_not_present
== 1 &&
5803 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5804 (ldt_info
.limit
& 0x0ffff);
5805 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5806 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5807 (ldt_info
.limit
& 0xf0000) |
5808 ((read_exec_only
^ 1) << 9) |
5810 ((seg_not_present
^ 1) << 15) |
5812 (limit_in_pages
<< 23) |
5817 /* Install the new entry ... */
5819 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
5820 lp
[0] = tswap32(entry_1
);
5821 lp
[1] = tswap32(entry_2
);
5825 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5827 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5828 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5829 uint32_t base_addr
, limit
, flags
;
5830 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
5831 int seg_not_present
, useable
, lm
;
5832 uint32_t *lp
, entry_1
, entry_2
;
5834 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5835 if (!target_ldt_info
)
5836 return -TARGET_EFAULT
;
5837 idx
= tswap32(target_ldt_info
->entry_number
);
5838 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
5839 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
5840 unlock_user_struct(target_ldt_info
, ptr
, 1);
5841 return -TARGET_EINVAL
;
5843 lp
= (uint32_t *)(gdt_table
+ idx
);
5844 entry_1
= tswap32(lp
[0]);
5845 entry_2
= tswap32(lp
[1]);
5847 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
5848 contents
= (entry_2
>> 10) & 3;
5849 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
5850 seg_32bit
= (entry_2
>> 22) & 1;
5851 limit_in_pages
= (entry_2
>> 23) & 1;
5852 useable
= (entry_2
>> 20) & 1;
5856 lm
= (entry_2
>> 21) & 1;
5858 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
5859 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
5860 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
5861 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
5862 base_addr
= (entry_1
>> 16) |
5863 (entry_2
& 0xff000000) |
5864 ((entry_2
& 0xff) << 16);
5865 target_ldt_info
->base_addr
= tswapal(base_addr
);
5866 target_ldt_info
->limit
= tswap32(limit
);
5867 target_ldt_info
->flags
= tswap32(flags
);
5868 unlock_user_struct(target_ldt_info
, ptr
, 1);
5871 #endif /* TARGET_I386 && TARGET_ABI32 */
5873 #ifndef TARGET_ABI32
5874 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
5881 case TARGET_ARCH_SET_GS
:
5882 case TARGET_ARCH_SET_FS
:
5883 if (code
== TARGET_ARCH_SET_GS
)
5887 cpu_x86_load_seg(env
, idx
, 0);
5888 env
->segs
[idx
].base
= addr
;
5890 case TARGET_ARCH_GET_GS
:
5891 case TARGET_ARCH_GET_FS
:
5892 if (code
== TARGET_ARCH_GET_GS
)
5896 val
= env
->segs
[idx
].base
;
5897 if (put_user(val
, addr
, abi_ulong
))
5898 ret
= -TARGET_EFAULT
;
5901 ret
= -TARGET_EINVAL
;
5908 #endif /* defined(TARGET_I386) */
5910 #define NEW_STACK_SIZE 0x40000
5913 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
5916 pthread_mutex_t mutex
;
5917 pthread_cond_t cond
;
5920 abi_ulong child_tidptr
;
5921 abi_ulong parent_tidptr
;
5925 static void *clone_func(void *arg
)
5927 new_thread_info
*info
= arg
;
5932 rcu_register_thread();
5934 cpu
= ENV_GET_CPU(env
);
5936 ts
= (TaskState
*)cpu
->opaque
;
5937 info
->tid
= gettid();
5938 cpu
->host_tid
= info
->tid
;
5940 if (info
->child_tidptr
)
5941 put_user_u32(info
->tid
, info
->child_tidptr
);
5942 if (info
->parent_tidptr
)
5943 put_user_u32(info
->tid
, info
->parent_tidptr
);
5944 /* Enable signals. */
5945 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
5946 /* Signal to the parent that we're ready. */
5947 pthread_mutex_lock(&info
->mutex
);
5948 pthread_cond_broadcast(&info
->cond
);
5949 pthread_mutex_unlock(&info
->mutex
);
5950 /* Wait until the parent has finshed initializing the tls state. */
5951 pthread_mutex_lock(&clone_lock
);
5952 pthread_mutex_unlock(&clone_lock
);
5958 /* do_fork() Must return host values and target errnos (unlike most
5959 do_*() functions). */
5960 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
5961 abi_ulong parent_tidptr
, target_ulong newtls
,
5962 abi_ulong child_tidptr
)
5964 CPUState
*cpu
= ENV_GET_CPU(env
);
5968 CPUArchState
*new_env
;
5969 unsigned int nptl_flags
;
5972 /* Emulate vfork() with fork() */
5973 if (flags
& CLONE_VFORK
)
5974 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
5976 if (flags
& CLONE_VM
) {
5977 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
5978 new_thread_info info
;
5979 pthread_attr_t attr
;
5981 ts
= g_new0(TaskState
, 1);
5982 init_task_state(ts
);
5983 /* we create a new CPU instance. */
5984 new_env
= cpu_copy(env
);
5985 /* Init regs that differ from the parent. */
5986 cpu_clone_regs(new_env
, newsp
);
5987 new_cpu
= ENV_GET_CPU(new_env
);
5988 new_cpu
->opaque
= ts
;
5989 ts
->bprm
= parent_ts
->bprm
;
5990 ts
->info
= parent_ts
->info
;
5991 ts
->signal_mask
= parent_ts
->signal_mask
;
5993 flags
&= ~CLONE_NPTL_FLAGS2
;
5995 if (nptl_flags
& CLONE_CHILD_CLEARTID
) {
5996 ts
->child_tidptr
= child_tidptr
;
5999 if (nptl_flags
& CLONE_SETTLS
)
6000 cpu_set_tls (new_env
, newtls
);
6002 /* Grab a mutex so that thread setup appears atomic. */
6003 pthread_mutex_lock(&clone_lock
);
6005 memset(&info
, 0, sizeof(info
));
6006 pthread_mutex_init(&info
.mutex
, NULL
);
6007 pthread_mutex_lock(&info
.mutex
);
6008 pthread_cond_init(&info
.cond
, NULL
);
6010 if (nptl_flags
& CLONE_CHILD_SETTID
)
6011 info
.child_tidptr
= child_tidptr
;
6012 if (nptl_flags
& CLONE_PARENT_SETTID
)
6013 info
.parent_tidptr
= parent_tidptr
;
6015 ret
= pthread_attr_init(&attr
);
6016 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
6017 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
6018 /* It is not safe to deliver signals until the child has finished
6019 initializing, so temporarily block all signals. */
6020 sigfillset(&sigmask
);
6021 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
6023 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
6024 /* TODO: Free new CPU state if thread creation failed. */
6026 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
6027 pthread_attr_destroy(&attr
);
6029 /* Wait for the child to initialize. */
6030 pthread_cond_wait(&info
.cond
, &info
.mutex
);
6032 if (flags
& CLONE_PARENT_SETTID
)
6033 put_user_u32(ret
, parent_tidptr
);
6037 pthread_mutex_unlock(&info
.mutex
);
6038 pthread_cond_destroy(&info
.cond
);
6039 pthread_mutex_destroy(&info
.mutex
);
6040 pthread_mutex_unlock(&clone_lock
);
6042 /* if no CLONE_VM, we consider it is a fork */
6043 if ((flags
& ~(CSIGNAL
| CLONE_NPTL_FLAGS2
)) != 0) {
6044 return -TARGET_EINVAL
;
6047 if (block_signals()) {
6048 return -TARGET_ERESTARTSYS
;
6054 /* Child Process. */
6056 cpu_clone_regs(env
, newsp
);
6058 /* There is a race condition here. The parent process could
6059 theoretically read the TID in the child process before the child
6060 tid is set. This would require using either ptrace
6061 (not implemented) or having *_tidptr to point at a shared memory
6062 mapping. We can't repeat the spinlock hack used above because
6063 the child process gets its own copy of the lock. */
6064 if (flags
& CLONE_CHILD_SETTID
)
6065 put_user_u32(gettid(), child_tidptr
);
6066 if (flags
& CLONE_PARENT_SETTID
)
6067 put_user_u32(gettid(), parent_tidptr
);
6068 ts
= (TaskState
*)cpu
->opaque
;
6069 if (flags
& CLONE_SETTLS
)
6070 cpu_set_tls (env
, newtls
);
6071 if (flags
& CLONE_CHILD_CLEARTID
)
6072 ts
->child_tidptr
= child_tidptr
;
6080 /* warning : doesn't handle linux specific flags... */
6081 static int target_to_host_fcntl_cmd(int cmd
)
6084 case TARGET_F_DUPFD
:
6085 case TARGET_F_GETFD
:
6086 case TARGET_F_SETFD
:
6087 case TARGET_F_GETFL
:
6088 case TARGET_F_SETFL
:
6090 case TARGET_F_GETLK
:
6092 case TARGET_F_SETLK
:
6094 case TARGET_F_SETLKW
:
6096 case TARGET_F_GETOWN
:
6098 case TARGET_F_SETOWN
:
6100 case TARGET_F_GETSIG
:
6102 case TARGET_F_SETSIG
:
6104 #if TARGET_ABI_BITS == 32
6105 case TARGET_F_GETLK64
:
6107 case TARGET_F_SETLK64
:
6109 case TARGET_F_SETLKW64
:
6112 case TARGET_F_SETLEASE
:
6114 case TARGET_F_GETLEASE
:
6116 #ifdef F_DUPFD_CLOEXEC
6117 case TARGET_F_DUPFD_CLOEXEC
:
6118 return F_DUPFD_CLOEXEC
;
6120 case TARGET_F_NOTIFY
:
6123 case TARGET_F_GETOWN_EX
:
6127 case TARGET_F_SETOWN_EX
:
6131 case TARGET_F_SETPIPE_SZ
:
6132 return F_SETPIPE_SZ
;
6133 case TARGET_F_GETPIPE_SZ
:
6134 return F_GETPIPE_SZ
;
6137 return -TARGET_EINVAL
;
6139 return -TARGET_EINVAL
;
6142 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
6143 static const bitmask_transtbl flock_tbl
[] = {
6144 TRANSTBL_CONVERT(F_RDLCK
),
6145 TRANSTBL_CONVERT(F_WRLCK
),
6146 TRANSTBL_CONVERT(F_UNLCK
),
6147 TRANSTBL_CONVERT(F_EXLCK
),
6148 TRANSTBL_CONVERT(F_SHLCK
),
6152 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
6153 abi_ulong target_flock_addr
)
6155 struct target_flock
*target_fl
;
6158 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6159 return -TARGET_EFAULT
;
6162 __get_user(l_type
, &target_fl
->l_type
);
6163 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6164 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6165 __get_user(fl
->l_start
, &target_fl
->l_start
);
6166 __get_user(fl
->l_len
, &target_fl
->l_len
);
6167 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6168 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6172 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
6173 const struct flock64
*fl
)
6175 struct target_flock
*target_fl
;
6178 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6179 return -TARGET_EFAULT
;
6182 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6183 __put_user(l_type
, &target_fl
->l_type
);
6184 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6185 __put_user(fl
->l_start
, &target_fl
->l_start
);
6186 __put_user(fl
->l_len
, &target_fl
->l_len
);
6187 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6188 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6192 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
6193 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
#if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/* ARM EABI lays out struct flock64 with different alignment/padding,
 * so it gets its own pair of copy helpers using target_eabi_flock64. */

/* Copy a guest EABI flock64 into a host flock64.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address. */
static inline abi_long copy_from_user_eabi_flock64(struct flock64 *fl,
                                                   abi_ulong target_flock_addr)
{
    struct target_eabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

/* Copy a host flock64 out to a guest EABI flock64.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address. */
static inline abi_long copy_to_user_eabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_eabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
#endif
6237 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
6238 abi_ulong target_flock_addr
)
6240 struct target_flock64
*target_fl
;
6243 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6244 return -TARGET_EFAULT
;
6247 __get_user(l_type
, &target_fl
->l_type
);
6248 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6249 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6250 __get_user(fl
->l_start
, &target_fl
->l_start
);
6251 __get_user(fl
->l_len
, &target_fl
->l_len
);
6252 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6253 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6257 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
6258 const struct flock64
*fl
)
6260 struct target_flock64
*target_fl
;
6263 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6264 return -TARGET_EFAULT
;
6267 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6268 __put_user(l_type
, &target_fl
->l_type
);
6269 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6270 __put_user(fl
->l_start
, &target_fl
->l_start
);
6271 __put_user(fl
->l_len
, &target_fl
->l_len
);
6272 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6273 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6277 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
6279 struct flock64 fl64
;
6281 struct f_owner_ex fox
;
6282 struct target_f_owner_ex
*target_fox
;
6285 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
6287 if (host_cmd
== -TARGET_EINVAL
)
6291 case TARGET_F_GETLK
:
6292 ret
= copy_from_user_flock(&fl64
, arg
);
6296 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6298 ret
= copy_to_user_flock(arg
, &fl64
);
6302 case TARGET_F_SETLK
:
6303 case TARGET_F_SETLKW
:
6304 ret
= copy_from_user_flock(&fl64
, arg
);
6308 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6311 case TARGET_F_GETLK64
:
6312 ret
= copy_from_user_flock64(&fl64
, arg
);
6316 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6318 ret
= copy_to_user_flock64(arg
, &fl64
);
6321 case TARGET_F_SETLK64
:
6322 case TARGET_F_SETLKW64
:
6323 ret
= copy_from_user_flock64(&fl64
, arg
);
6327 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6330 case TARGET_F_GETFL
:
6331 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6333 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
6337 case TARGET_F_SETFL
:
6338 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
6339 target_to_host_bitmask(arg
,
6344 case TARGET_F_GETOWN_EX
:
6345 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6347 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
6348 return -TARGET_EFAULT
;
6349 target_fox
->type
= tswap32(fox
.type
);
6350 target_fox
->pid
= tswap32(fox
.pid
);
6351 unlock_user_struct(target_fox
, arg
, 1);
6357 case TARGET_F_SETOWN_EX
:
6358 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
6359 return -TARGET_EFAULT
;
6360 fox
.type
= tswap32(target_fox
->type
);
6361 fox
.pid
= tswap32(target_fox
->pid
);
6362 unlock_user_struct(target_fox
, arg
, 0);
6363 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6367 case TARGET_F_SETOWN
:
6368 case TARGET_F_GETOWN
:
6369 case TARGET_F_SETSIG
:
6370 case TARGET_F_GETSIG
:
6371 case TARGET_F_SETLEASE
:
6372 case TARGET_F_GETLEASE
:
6373 case TARGET_F_SETPIPE_SZ
:
6374 case TARGET_F_GETPIPE_SZ
:
6375 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6379 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
#ifdef USE_UID16
/* 16-bit UID targets: clamp 32-bit host IDs into the 16-bit range and
 * widen guest 16-bit IDs, preserving the -1 "no change" sentinel. */
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}
static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}
static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}
static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */
/* 32-bit UID targets: IDs pass through unchanged. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
6451 /* We must do direct syscalls for setting UID/GID, because we want to
6452 * implement the Linux system call semantics of "change only for this thread",
6453 * not the libc/POSIX semantics of "change for all threads in process".
6454 * (See http://ewontfix.com/17/ for more details.)
6455 * We use the 32-bit version of the syscalls if present; if it is not
6456 * then either the host architecture supports 32-bit UIDs natively with
6457 * the standard syscall, or the 16-bit UID is the best we can do.
/* Pick the 32-bit variant of each set*id syscall when the host kernel
 * provides one; otherwise fall back to the base syscall number. */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif
6480 _syscall1(int, sys_setuid
, uid_t
, uid
)
6481 _syscall1(int, sys_setgid
, gid_t
, gid
)
6482 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
6483 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
6485 void syscall_init(void)
6488 const argtype
*arg_type
;
6492 thunk_init(STRUCT_MAX
);
6494 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6495 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6496 #include "syscall_types.h"
6498 #undef STRUCT_SPECIAL
6500 /* Build target_to_host_errno_table[] table from
6501 * host_to_target_errno_table[]. */
6502 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
6503 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
6506 /* we patch the ioctl size if necessary. We rely on the fact that
6507 no ioctl has all the bits at '1' in the size field */
6509 while (ie
->target_cmd
!= 0) {
6510 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
6511 TARGET_IOC_SIZEMASK
) {
6512 arg_type
= ie
->arg_type
;
6513 if (arg_type
[0] != TYPE_PTR
) {
6514 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
6519 size
= thunk_type_size(arg_type
, 0);
6520 ie
->target_cmd
= (ie
->target_cmd
&
6521 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
6522 (size
<< TARGET_IOC_SIZESHIFT
);
6525 /* automatic consistency check if same arch */
6526 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6527 (defined(__x86_64__) && defined(TARGET_X86_64))
6528 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
6529 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6530 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#if TARGET_ABI_BITS == 32
/* Combine a 64-bit value passed in two 32-bit guest registers,
 * honouring the target's word order. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABI: the value already fits in one register. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* Handle truncate64: some ABIs align 64-bit register pairs, inserting a
 * pad register, so shift the argument window before combining halves. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* Handle ftruncate64 with the same register-pair alignment fixup as
 * target_truncate64 above. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
6581 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
6582 abi_ulong target_addr
)
6584 struct target_timespec
*target_ts
;
6586 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
6587 return -TARGET_EFAULT
;
6588 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6589 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6590 unlock_user_struct(target_ts
, target_addr
, 0);
6594 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
6595 struct timespec
*host_ts
)
6597 struct target_timespec
*target_ts
;
6599 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
6600 return -TARGET_EFAULT
;
6601 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6602 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6603 unlock_user_struct(target_ts
, target_addr
, 1);
6607 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
6608 abi_ulong target_addr
)
6610 struct target_itimerspec
*target_itspec
;
6612 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
6613 return -TARGET_EFAULT
;
6616 host_itspec
->it_interval
.tv_sec
=
6617 tswapal(target_itspec
->it_interval
.tv_sec
);
6618 host_itspec
->it_interval
.tv_nsec
=
6619 tswapal(target_itspec
->it_interval
.tv_nsec
);
6620 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
6621 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
6623 unlock_user_struct(target_itspec
, target_addr
, 1);
6627 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
6628 struct itimerspec
*host_its
)
6630 struct target_itimerspec
*target_itspec
;
6632 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
6633 return -TARGET_EFAULT
;
6636 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
6637 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
6639 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
6640 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
6642 unlock_user_struct(target_itspec
, target_addr
, 0);
6646 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
6647 abi_ulong target_addr
)
6649 struct target_sigevent
*target_sevp
;
6651 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
6652 return -TARGET_EFAULT
;
6655 /* This union is awkward on 64 bit systems because it has a 32 bit
6656 * integer and a pointer in it; we follow the conversion approach
6657 * used for handling sigval types in signal.c so the guest should get
6658 * the correct value back even if we did a 64 bit byteswap and it's
6659 * using the 32 bit integer.
6661 host_sevp
->sigev_value
.sival_ptr
=
6662 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
6663 host_sevp
->sigev_signo
=
6664 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
6665 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
6666 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
6668 unlock_user_struct(target_sevp
, target_addr
, 1);
#if defined(TARGET_NR_mlockall)
/* Translate guest mlockall() flag bits to the host MCL_* encoding. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }

    return result;
}
#endif
6687 static inline abi_long
host_to_target_stat64(void *cpu_env
,
6688 abi_ulong target_addr
,
6689 struct stat
*host_st
)
6691 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6692 if (((CPUARMState
*)cpu_env
)->eabi
) {
6693 struct target_eabi_stat64
*target_st
;
6695 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
6696 return -TARGET_EFAULT
;
6697 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
6698 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
6699 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
6700 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6701 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
6703 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
6704 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
6705 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
6706 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
6707 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
6708 __put_user(host_st
->st_size
, &target_st
->st_size
);
6709 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
6710 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
6711 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
6712 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
6713 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
6714 unlock_user_struct(target_st
, target_addr
, 1);
6718 #if defined(TARGET_HAS_STRUCT_STAT64)
6719 struct target_stat64
*target_st
;
6721 struct target_stat
*target_st
;
6724 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
6725 return -TARGET_EFAULT
;
6726 memset(target_st
, 0, sizeof(*target_st
));
6727 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
6728 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
6729 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6730 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
6732 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
6733 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
6734 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
6735 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
6736 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
6737 /* XXX: better use of kernel struct */
6738 __put_user(host_st
->st_size
, &target_st
->st_size
);
6739 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
6740 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
6741 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
6742 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
6743 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
6744 unlock_user_struct(target_st
, target_addr
, 1);
6750 /* ??? Using host futex calls even when target atomic operations
6751 are not really atomic probably breaks things. However implementing
6752 futexes locally would make futexes shared between multiple processes
6753 tricky. However they're probably useless because guest atomic
6754 operations won't work either. */
6755 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
6756 target_ulong uaddr2
, int val3
)
6758 struct timespec ts
, *pts
;
6761 /* ??? We assume FUTEX_* constants are the same on both host
6763 #ifdef FUTEX_CMD_MASK
6764 base_op
= op
& FUTEX_CMD_MASK
;
6770 case FUTEX_WAIT_BITSET
:
6773 target_to_host_timespec(pts
, timeout
);
6777 return get_errno(safe_futex(g2h(uaddr
), op
, tswap32(val
),
6780 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6782 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6784 case FUTEX_CMP_REQUEUE
:
6786 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6787 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6788 But the prototype takes a `struct timespec *'; insert casts
6789 to satisfy the compiler. We do not need to tswap TIMEOUT
6790 since it's not compared to guest memory. */
6791 pts
= (struct timespec
*)(uintptr_t) timeout
;
6792 return get_errno(safe_futex(g2h(uaddr
), op
, val
, pts
,
6794 (base_op
== FUTEX_CMP_REQUEUE
6798 return -TARGET_ENOSYS
;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate name_to_handle_at(2): read the guest's handle_bytes size,
 * call the host syscall into a scratch handle, then copy the (opaque)
 * handle back to the guest with its header fields byteswapped and
 * store the mount id. Returns the syscall result or -TARGET_EFAULT. */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate open_by_handle_at(2): duplicate the guest handle into host
 * memory with its header fields byteswapped, translate the open flags,
 * and issue the host syscall. Returns the new fd or a -TARGET_* errno. */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
6889 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
/* signalfd siginfo conversion */

/* Convert a host signalfd_siginfo to target byte order in place
 * (tinfo and info may alias — see host_to_target_data_signalfd). */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    if (tinfo->ssi_signo == SIGBUS &&
        (tinfo->ssi_code == BUS_MCEERR_AR ||
         tinfo->ssi_code == BUS_MCEERR_AO)) {
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    tinfo->ssi_errno = tswap32(tinfo->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
6931 static abi_long
host_to_target_data_signalfd(void *buf
, size_t len
)
6935 for (i
= 0; i
< len
; i
+= sizeof(struct signalfd_siginfo
)) {
6936 host_to_target_signalfd_siginfo(buf
+ i
, buf
+ i
);
6942 static TargetFdTrans target_signalfd_trans
= {
6943 .host_to_target_data
= host_to_target_data_signalfd
,
6946 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
6949 target_sigset_t
*target_mask
;
6953 if (flags
& ~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
)) {
6954 return -TARGET_EINVAL
;
6956 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
6957 return -TARGET_EFAULT
;
6960 target_to_host_sigset(&host_mask
, target_mask
);
6962 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
6964 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
6966 fd_trans_register(ret
, &target_signalfd_trans
);
6969 unlock_user_struct(target_mask
, mask
, 0);
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* low 7 bits hold the terminating signal */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* stop signal lives in bits 8..15 */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
/* Fake /proc/self/cmdline for the guest: stream the host file but drop
 * the first NUL-terminated string (the qemu binary path) so the guest
 * sees its own argv. Returns 0 on success, -1 on I/O error. */
static int open_self_cmdline(void *cpu_env, int fd)
{
    int fd_orig = -1;
    bool word_skipped = false;

    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;
    }

    while (true) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            int e = errno;
            fd_orig = close(fd_orig);
            errno = e;
            return -1;
        } else if (nb_read == 0) {
            break;
        }

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command. */
            cp_buf = memchr(buf, 0, nb_read);
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;
            }
        }

        if (word_skipped) {
            if (write(fd, cp_buf, nb_read) != nb_read) {
                int e = errno;
                close(fd_orig);
                errno = e;
                return -1;
            }
        }
    }

    return close(fd_orig);
}
7039 static int open_self_maps(void *cpu_env
, int fd
)
7041 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7042 TaskState
*ts
= cpu
->opaque
;
7048 fp
= fopen("/proc/self/maps", "r");
7053 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7054 int fields
, dev_maj
, dev_min
, inode
;
7055 uint64_t min
, max
, offset
;
7056 char flag_r
, flag_w
, flag_x
, flag_p
;
7057 char path
[512] = "";
7058 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
7059 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
7060 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
7062 if ((fields
< 10) || (fields
> 11)) {
7065 if (h2g_valid(min
)) {
7066 int flags
= page_get_flags(h2g(min
));
7067 max
= h2g_valid(max
- 1) ? max
: (uintptr_t)g2h(GUEST_ADDR_MAX
);
7068 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
7071 if (h2g(min
) == ts
->info
->stack_limit
) {
7072 pstrcpy(path
, sizeof(path
), " [stack]");
7074 dprintf(fd
, TARGET_ABI_FMT_lx
"-" TARGET_ABI_FMT_lx
7075 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
7076 h2g(min
), h2g(max
- 1) + 1, flag_r
, flag_w
,
7077 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
7078 path
[0] ? " " : "", path
);
7088 static int open_self_stat(void *cpu_env
, int fd
)
7090 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7091 TaskState
*ts
= cpu
->opaque
;
7092 abi_ulong start_stack
= ts
->info
->start_stack
;
7095 for (i
= 0; i
< 44; i
++) {
7103 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
7104 } else if (i
== 1) {
7106 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
7107 } else if (i
== 27) {
7110 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
7112 /* for the rest, there is MasterCard */
7113 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
7117 if (write(fd
, buf
, len
) != len
) {
7125 static int open_self_auxv(void *cpu_env
, int fd
)
7127 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7128 TaskState
*ts
= cpu
->opaque
;
7129 abi_ulong auxv
= ts
->info
->saved_auxv
;
7130 abi_ulong len
= ts
->info
->auxv_len
;
7134 * Auxiliary vector is stored in target process stack.
7135 * read in whole auxv vector and copy it to file
7137 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
7141 r
= write(fd
, ptr
, len
);
7148 lseek(fd
, 0, SEEK_SET
);
7149 unlock_user(ptr
, auxv
, len
);
/* Return 1 if filename names the given entry of this process's /proc
 * directory — "/proc/self/<entry>" or "/proc/<ownpid>/<entry>" —
 * else 0. */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}
7179 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-path comparator used by the fake_open table for absolute
 * /proc entries (counterpart of is_proc_myself). */
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}
7185 static int open_net_route(void *cpu_env
, int fd
)
7192 fp
= fopen("/proc/net/route", "r");
7199 read
= getline(&line
, &len
, fp
);
7200 dprintf(fd
, "%s", line
);
7204 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7206 uint32_t dest
, gw
, mask
;
7207 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
7208 sscanf(line
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7209 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
7210 &mask
, &mtu
, &window
, &irtt
);
7211 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7212 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
7213 metric
, tswap32(mask
), mtu
, window
, irtt
);
7223 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
7226 const char *filename
;
7227 int (*fill
)(void *cpu_env
, int fd
);
7228 int (*cmp
)(const char *s1
, const char *s2
);
7230 const struct fake_open
*fake_open
;
7231 static const struct fake_open fakes
[] = {
7232 { "maps", open_self_maps
, is_proc_myself
},
7233 { "stat", open_self_stat
, is_proc_myself
},
7234 { "auxv", open_self_auxv
, is_proc_myself
},
7235 { "cmdline", open_self_cmdline
, is_proc_myself
},
7236 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7237 { "/proc/net/route", open_net_route
, is_proc
},
7239 { NULL
, NULL
, NULL
}
7242 if (is_proc_myself(pathname
, "exe")) {
7243 int execfd
= qemu_getauxval(AT_EXECFD
);
7244 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
7247 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
7248 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
7253 if (fake_open
->filename
) {
7255 char filename
[PATH_MAX
];
7258 /* create temporary file to map stat to */
7259 tmpdir
= getenv("TMPDIR");
7262 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
7263 fd
= mkstemp(filename
);
7269 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
7275 lseek(fd
, 0, SEEK_SET
);
7280 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
7283 #define TIMER_MAGIC 0x0caf0000
7284 #define TIMER_MAGIC_MASK 0xffff0000
7286 /* Convert QEMU provided timer ID back to internal 16bit index format */
7287 static target_timer_t
get_timer_id(abi_long arg
)
7289 target_timer_t timerid
= arg
;
7291 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
7292 return -TARGET_EINVAL
;
7297 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
7298 return -TARGET_EINVAL
;
7304 /* do_syscall() should always have a single exit point at the end so
7305 that actions, such as logging of syscall results, can be performed.
7306 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7307 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
7308 abi_long arg2
, abi_long arg3
, abi_long arg4
,
7309 abi_long arg5
, abi_long arg6
, abi_long arg7
,
7312 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
7318 #if defined(DEBUG_ERESTARTSYS)
7319 /* Debug-only code for exercising the syscall-restart code paths
7320 * in the per-architecture cpu main loops: restart every syscall
7321 * the guest makes once before letting it through.
7328 return -TARGET_ERESTARTSYS
;
7334 gemu_log("syscall %d", num
);
7336 trace_guest_user_syscall(cpu
, num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
7338 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7341 case TARGET_NR_exit
:
7342 /* In old applications this may be used to implement _exit(2).
7343 However in threaded applictions it is used for thread termination,
7344 and _exit_group is used for application termination.
7345 Do thread termination if we have more then one thread. */
7347 if (block_signals()) {
7348 ret
= -TARGET_ERESTARTSYS
;
7352 if (CPU_NEXT(first_cpu
)) {
7356 /* Remove the CPU from the list. */
7357 QTAILQ_REMOVE(&cpus
, cpu
, node
);
7360 if (ts
->child_tidptr
) {
7361 put_user_u32(0, ts
->child_tidptr
);
7362 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
7366 object_unref(OBJECT(cpu
));
7368 rcu_unregister_thread();
7374 gdb_exit(cpu_env
, arg1
);
7376 ret
= 0; /* avoid warning */
7378 case TARGET_NR_read
:
7382 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7384 ret
= get_errno(safe_read(arg1
, p
, arg3
));
7386 fd_trans_host_to_target_data(arg1
)) {
7387 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
7389 unlock_user(p
, arg2
, ret
);
7392 case TARGET_NR_write
:
7393 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
7395 ret
= get_errno(safe_write(arg1
, p
, arg3
));
7396 unlock_user(p
, arg2
, 0);
7398 #ifdef TARGET_NR_open
7399 case TARGET_NR_open
:
7400 if (!(p
= lock_user_string(arg1
)))
7402 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
7403 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
7405 fd_trans_unregister(ret
);
7406 unlock_user(p
, arg1
, 0);
7409 case TARGET_NR_openat
:
7410 if (!(p
= lock_user_string(arg2
)))
7412 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
7413 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
7415 fd_trans_unregister(ret
);
7416 unlock_user(p
, arg2
, 0);
7418 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7419 case TARGET_NR_name_to_handle_at
:
7420 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
7423 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7424 case TARGET_NR_open_by_handle_at
:
7425 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
7426 fd_trans_unregister(ret
);
7429 case TARGET_NR_close
:
7430 fd_trans_unregister(arg1
);
7431 ret
= get_errno(close(arg1
));
7436 #ifdef TARGET_NR_fork
7437 case TARGET_NR_fork
:
7438 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
7441 #ifdef TARGET_NR_waitpid
7442 case TARGET_NR_waitpid
:
7445 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
7446 if (!is_error(ret
) && arg2
&& ret
7447 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
7452 #ifdef TARGET_NR_waitid
7453 case TARGET_NR_waitid
:
7457 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
7458 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
7459 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
7461 host_to_target_siginfo(p
, &info
);
7462 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
7467 #ifdef TARGET_NR_creat /* not on alpha */
7468 case TARGET_NR_creat
:
7469 if (!(p
= lock_user_string(arg1
)))
7471 ret
= get_errno(creat(p
, arg2
));
7472 fd_trans_unregister(ret
);
7473 unlock_user(p
, arg1
, 0);
7476 #ifdef TARGET_NR_link
7477 case TARGET_NR_link
:
7480 p
= lock_user_string(arg1
);
7481 p2
= lock_user_string(arg2
);
7483 ret
= -TARGET_EFAULT
;
7485 ret
= get_errno(link(p
, p2
));
7486 unlock_user(p2
, arg2
, 0);
7487 unlock_user(p
, arg1
, 0);
7491 #if defined(TARGET_NR_linkat)
7492 case TARGET_NR_linkat
:
7497 p
= lock_user_string(arg2
);
7498 p2
= lock_user_string(arg4
);
7500 ret
= -TARGET_EFAULT
;
7502 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
7503 unlock_user(p
, arg2
, 0);
7504 unlock_user(p2
, arg4
, 0);
7508 #ifdef TARGET_NR_unlink
7509 case TARGET_NR_unlink
:
7510 if (!(p
= lock_user_string(arg1
)))
7512 ret
= get_errno(unlink(p
));
7513 unlock_user(p
, arg1
, 0);
7516 #if defined(TARGET_NR_unlinkat)
7517 case TARGET_NR_unlinkat
:
7518 if (!(p
= lock_user_string(arg2
)))
7520 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
7521 unlock_user(p
, arg2
, 0);
7524 case TARGET_NR_execve
:
7526 char **argp
, **envp
;
7529 abi_ulong guest_argp
;
7530 abi_ulong guest_envp
;
7537 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
7538 if (get_user_ual(addr
, gp
))
7546 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
7547 if (get_user_ual(addr
, gp
))
7554 argp
= alloca((argc
+ 1) * sizeof(void *));
7555 envp
= alloca((envc
+ 1) * sizeof(void *));
7557 for (gp
= guest_argp
, q
= argp
; gp
;
7558 gp
+= sizeof(abi_ulong
), q
++) {
7559 if (get_user_ual(addr
, gp
))
7563 if (!(*q
= lock_user_string(addr
)))
7565 total_size
+= strlen(*q
) + 1;
7569 for (gp
= guest_envp
, q
= envp
; gp
;
7570 gp
+= sizeof(abi_ulong
), q
++) {
7571 if (get_user_ual(addr
, gp
))
7575 if (!(*q
= lock_user_string(addr
)))
7577 total_size
+= strlen(*q
) + 1;
7581 if (!(p
= lock_user_string(arg1
)))
7583 /* Although execve() is not an interruptible syscall it is
7584 * a special case where we must use the safe_syscall wrapper:
7585 * if we allow a signal to happen before we make the host
7586 * syscall then we will 'lose' it, because at the point of
7587 * execve the process leaves QEMU's control. So we use the
7588 * safe syscall wrapper to ensure that we either take the
7589 * signal as a guest signal, or else it does not happen
7590 * before the execve completes and makes it the other
7591 * program's problem.
7593 ret
= get_errno(safe_execve(p
, argp
, envp
));
7594 unlock_user(p
, arg1
, 0);
7599 ret
= -TARGET_EFAULT
;
7602 for (gp
= guest_argp
, q
= argp
; *q
;
7603 gp
+= sizeof(abi_ulong
), q
++) {
7604 if (get_user_ual(addr
, gp
)
7607 unlock_user(*q
, addr
, 0);
7609 for (gp
= guest_envp
, q
= envp
; *q
;
7610 gp
+= sizeof(abi_ulong
), q
++) {
7611 if (get_user_ual(addr
, gp
)
7614 unlock_user(*q
, addr
, 0);
7618 case TARGET_NR_chdir
:
7619 if (!(p
= lock_user_string(arg1
)))
7621 ret
= get_errno(chdir(p
));
7622 unlock_user(p
, arg1
, 0);
7624 #ifdef TARGET_NR_time
7625 case TARGET_NR_time
:
7628 ret
= get_errno(time(&host_time
));
7631 && put_user_sal(host_time
, arg1
))
7636 #ifdef TARGET_NR_mknod
7637 case TARGET_NR_mknod
:
7638 if (!(p
= lock_user_string(arg1
)))
7640 ret
= get_errno(mknod(p
, arg2
, arg3
));
7641 unlock_user(p
, arg1
, 0);
7644 #if defined(TARGET_NR_mknodat)
7645 case TARGET_NR_mknodat
:
7646 if (!(p
= lock_user_string(arg2
)))
7648 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
7649 unlock_user(p
, arg2
, 0);
7652 #ifdef TARGET_NR_chmod
7653 case TARGET_NR_chmod
:
7654 if (!(p
= lock_user_string(arg1
)))
7656 ret
= get_errno(chmod(p
, arg2
));
7657 unlock_user(p
, arg1
, 0);
7660 #ifdef TARGET_NR_break
7661 case TARGET_NR_break
:
7664 #ifdef TARGET_NR_oldstat
7665 case TARGET_NR_oldstat
:
7668 case TARGET_NR_lseek
:
7669 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
7671 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7672 /* Alpha specific */
7673 case TARGET_NR_getxpid
:
7674 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
7675 ret
= get_errno(getpid());
7678 #ifdef TARGET_NR_getpid
7679 case TARGET_NR_getpid
:
7680 ret
= get_errno(getpid());
7683 case TARGET_NR_mount
:
7685 /* need to look at the data field */
7689 p
= lock_user_string(arg1
);
7697 p2
= lock_user_string(arg2
);
7700 unlock_user(p
, arg1
, 0);
7706 p3
= lock_user_string(arg3
);
7709 unlock_user(p
, arg1
, 0);
7711 unlock_user(p2
, arg2
, 0);
7718 /* FIXME - arg5 should be locked, but it isn't clear how to
7719 * do that since it's not guaranteed to be a NULL-terminated
7723 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
7725 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
7727 ret
= get_errno(ret
);
7730 unlock_user(p
, arg1
, 0);
7732 unlock_user(p2
, arg2
, 0);
7734 unlock_user(p3
, arg3
, 0);
7738 #ifdef TARGET_NR_umount
7739 case TARGET_NR_umount
:
7740 if (!(p
= lock_user_string(arg1
)))
7742 ret
= get_errno(umount(p
));
7743 unlock_user(p
, arg1
, 0);
7746 #ifdef TARGET_NR_stime /* not on alpha */
7747 case TARGET_NR_stime
:
7750 if (get_user_sal(host_time
, arg1
))
7752 ret
= get_errno(stime(&host_time
));
7756 case TARGET_NR_ptrace
:
7758 #ifdef TARGET_NR_alarm /* not on alpha */
7759 case TARGET_NR_alarm
:
7763 #ifdef TARGET_NR_oldfstat
7764 case TARGET_NR_oldfstat
:
7767 #ifdef TARGET_NR_pause /* not on alpha */
7768 case TARGET_NR_pause
:
7769 if (!block_signals()) {
7770 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
7772 ret
= -TARGET_EINTR
;
7775 #ifdef TARGET_NR_utime
7776 case TARGET_NR_utime
:
7778 struct utimbuf tbuf
, *host_tbuf
;
7779 struct target_utimbuf
*target_tbuf
;
7781 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
7783 tbuf
.actime
= tswapal(target_tbuf
->actime
);
7784 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
7785 unlock_user_struct(target_tbuf
, arg2
, 0);
7790 if (!(p
= lock_user_string(arg1
)))
7792 ret
= get_errno(utime(p
, host_tbuf
));
7793 unlock_user(p
, arg1
, 0);
7797 #ifdef TARGET_NR_utimes
7798 case TARGET_NR_utimes
:
7800 struct timeval
*tvp
, tv
[2];
7802 if (copy_from_user_timeval(&tv
[0], arg2
)
7803 || copy_from_user_timeval(&tv
[1],
7804 arg2
+ sizeof(struct target_timeval
)))
7810 if (!(p
= lock_user_string(arg1
)))
7812 ret
= get_errno(utimes(p
, tvp
));
7813 unlock_user(p
, arg1
, 0);
7817 #if defined(TARGET_NR_futimesat)
7818 case TARGET_NR_futimesat
:
7820 struct timeval
*tvp
, tv
[2];
7822 if (copy_from_user_timeval(&tv
[0], arg3
)
7823 || copy_from_user_timeval(&tv
[1],
7824 arg3
+ sizeof(struct target_timeval
)))
7830 if (!(p
= lock_user_string(arg2
)))
7832 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
7833 unlock_user(p
, arg2
, 0);
7837 #ifdef TARGET_NR_stty
7838 case TARGET_NR_stty
:
7841 #ifdef TARGET_NR_gtty
7842 case TARGET_NR_gtty
:
7845 #ifdef TARGET_NR_access
7846 case TARGET_NR_access
:
7847 if (!(p
= lock_user_string(arg1
)))
7849 ret
= get_errno(access(path(p
), arg2
));
7850 unlock_user(p
, arg1
, 0);
7853 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7854 case TARGET_NR_faccessat
:
7855 if (!(p
= lock_user_string(arg2
)))
7857 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
7858 unlock_user(p
, arg2
, 0);
7861 #ifdef TARGET_NR_nice /* not on alpha */
7862 case TARGET_NR_nice
:
7863 ret
= get_errno(nice(arg1
));
7866 #ifdef TARGET_NR_ftime
7867 case TARGET_NR_ftime
:
7870 case TARGET_NR_sync
:
7874 case TARGET_NR_kill
:
7875 ret
= get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
7877 #ifdef TARGET_NR_rename
7878 case TARGET_NR_rename
:
7881 p
= lock_user_string(arg1
);
7882 p2
= lock_user_string(arg2
);
7884 ret
= -TARGET_EFAULT
;
7886 ret
= get_errno(rename(p
, p2
));
7887 unlock_user(p2
, arg2
, 0);
7888 unlock_user(p
, arg1
, 0);
7892 #if defined(TARGET_NR_renameat)
7893 case TARGET_NR_renameat
:
7896 p
= lock_user_string(arg2
);
7897 p2
= lock_user_string(arg4
);
7899 ret
= -TARGET_EFAULT
;
7901 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
7902 unlock_user(p2
, arg4
, 0);
7903 unlock_user(p
, arg2
, 0);
7907 #ifdef TARGET_NR_mkdir
7908 case TARGET_NR_mkdir
:
7909 if (!(p
= lock_user_string(arg1
)))
7911 ret
= get_errno(mkdir(p
, arg2
));
7912 unlock_user(p
, arg1
, 0);
7915 #if defined(TARGET_NR_mkdirat)
7916 case TARGET_NR_mkdirat
:
7917 if (!(p
= lock_user_string(arg2
)))
7919 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
7920 unlock_user(p
, arg2
, 0);
7923 #ifdef TARGET_NR_rmdir
7924 case TARGET_NR_rmdir
:
7925 if (!(p
= lock_user_string(arg1
)))
7927 ret
= get_errno(rmdir(p
));
7928 unlock_user(p
, arg1
, 0);
7932 ret
= get_errno(dup(arg1
));
7934 fd_trans_dup(arg1
, ret
);
7937 #ifdef TARGET_NR_pipe
7938 case TARGET_NR_pipe
:
7939 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
7942 #ifdef TARGET_NR_pipe2
7943 case TARGET_NR_pipe2
:
7944 ret
= do_pipe(cpu_env
, arg1
,
7945 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
7948 case TARGET_NR_times
:
7950 struct target_tms
*tmsp
;
7952 ret
= get_errno(times(&tms
));
7954 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
7957 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
7958 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
7959 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
7960 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
7963 ret
= host_to_target_clock_t(ret
);
7966 #ifdef TARGET_NR_prof
7967 case TARGET_NR_prof
:
7970 #ifdef TARGET_NR_signal
7971 case TARGET_NR_signal
:
7974 case TARGET_NR_acct
:
7976 ret
= get_errno(acct(NULL
));
7978 if (!(p
= lock_user_string(arg1
)))
7980 ret
= get_errno(acct(path(p
)));
7981 unlock_user(p
, arg1
, 0);
7984 #ifdef TARGET_NR_umount2
7985 case TARGET_NR_umount2
:
7986 if (!(p
= lock_user_string(arg1
)))
7988 ret
= get_errno(umount2(p
, arg2
));
7989 unlock_user(p
, arg1
, 0);
7992 #ifdef TARGET_NR_lock
7993 case TARGET_NR_lock
:
7996 case TARGET_NR_ioctl
:
7997 ret
= do_ioctl(arg1
, arg2
, arg3
);
7999 case TARGET_NR_fcntl
:
8000 ret
= do_fcntl(arg1
, arg2
, arg3
);
8002 #ifdef TARGET_NR_mpx
8006 case TARGET_NR_setpgid
:
8007 ret
= get_errno(setpgid(arg1
, arg2
));
8009 #ifdef TARGET_NR_ulimit
8010 case TARGET_NR_ulimit
:
8013 #ifdef TARGET_NR_oldolduname
8014 case TARGET_NR_oldolduname
:
8017 case TARGET_NR_umask
:
8018 ret
= get_errno(umask(arg1
));
8020 case TARGET_NR_chroot
:
8021 if (!(p
= lock_user_string(arg1
)))
8023 ret
= get_errno(chroot(p
));
8024 unlock_user(p
, arg1
, 0);
8026 #ifdef TARGET_NR_ustat
8027 case TARGET_NR_ustat
:
8030 #ifdef TARGET_NR_dup2
8031 case TARGET_NR_dup2
:
8032 ret
= get_errno(dup2(arg1
, arg2
));
8034 fd_trans_dup(arg1
, arg2
);
8038 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8039 case TARGET_NR_dup3
:
8040 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
8042 fd_trans_dup(arg1
, arg2
);
8046 #ifdef TARGET_NR_getppid /* not on alpha */
8047 case TARGET_NR_getppid
:
8048 ret
= get_errno(getppid());
8051 #ifdef TARGET_NR_getpgrp
8052 case TARGET_NR_getpgrp
:
8053 ret
= get_errno(getpgrp());
8056 case TARGET_NR_setsid
:
8057 ret
= get_errno(setsid());
8059 #ifdef TARGET_NR_sigaction
8060 case TARGET_NR_sigaction
:
8062 #if defined(TARGET_ALPHA)
8063 struct target_sigaction act
, oact
, *pact
= 0;
8064 struct target_old_sigaction
*old_act
;
8066 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8068 act
._sa_handler
= old_act
->_sa_handler
;
8069 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8070 act
.sa_flags
= old_act
->sa_flags
;
8071 act
.sa_restorer
= 0;
8072 unlock_user_struct(old_act
, arg2
, 0);
8075 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8076 if (!is_error(ret
) && arg3
) {
8077 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8079 old_act
->_sa_handler
= oact
._sa_handler
;
8080 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8081 old_act
->sa_flags
= oact
.sa_flags
;
8082 unlock_user_struct(old_act
, arg3
, 1);
8084 #elif defined(TARGET_MIPS)
8085 struct target_sigaction act
, oact
, *pact
, *old_act
;
8088 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8090 act
._sa_handler
= old_act
->_sa_handler
;
8091 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
8092 act
.sa_flags
= old_act
->sa_flags
;
8093 unlock_user_struct(old_act
, arg2
, 0);
8099 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8101 if (!is_error(ret
) && arg3
) {
8102 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8104 old_act
->_sa_handler
= oact
._sa_handler
;
8105 old_act
->sa_flags
= oact
.sa_flags
;
8106 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
8107 old_act
->sa_mask
.sig
[1] = 0;
8108 old_act
->sa_mask
.sig
[2] = 0;
8109 old_act
->sa_mask
.sig
[3] = 0;
8110 unlock_user_struct(old_act
, arg3
, 1);
8113 struct target_old_sigaction
*old_act
;
8114 struct target_sigaction act
, oact
, *pact
;
8116 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8118 act
._sa_handler
= old_act
->_sa_handler
;
8119 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8120 act
.sa_flags
= old_act
->sa_flags
;
8121 act
.sa_restorer
= old_act
->sa_restorer
;
8122 unlock_user_struct(old_act
, arg2
, 0);
8127 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8128 if (!is_error(ret
) && arg3
) {
8129 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8131 old_act
->_sa_handler
= oact
._sa_handler
;
8132 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8133 old_act
->sa_flags
= oact
.sa_flags
;
8134 old_act
->sa_restorer
= oact
.sa_restorer
;
8135 unlock_user_struct(old_act
, arg3
, 1);
8141 case TARGET_NR_rt_sigaction
:
8143 #if defined(TARGET_ALPHA)
8144 struct target_sigaction act
, oact
, *pact
= 0;
8145 struct target_rt_sigaction
*rt_act
;
8147 if (arg4
!= sizeof(target_sigset_t
)) {
8148 ret
= -TARGET_EINVAL
;
8152 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
8154 act
._sa_handler
= rt_act
->_sa_handler
;
8155 act
.sa_mask
= rt_act
->sa_mask
;
8156 act
.sa_flags
= rt_act
->sa_flags
;
8157 act
.sa_restorer
= arg5
;
8158 unlock_user_struct(rt_act
, arg2
, 0);
8161 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8162 if (!is_error(ret
) && arg3
) {
8163 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
8165 rt_act
->_sa_handler
= oact
._sa_handler
;
8166 rt_act
->sa_mask
= oact
.sa_mask
;
8167 rt_act
->sa_flags
= oact
.sa_flags
;
8168 unlock_user_struct(rt_act
, arg3
, 1);
8171 struct target_sigaction
*act
;
8172 struct target_sigaction
*oact
;
8174 if (arg4
!= sizeof(target_sigset_t
)) {
8175 ret
= -TARGET_EINVAL
;
8179 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
8184 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
8185 ret
= -TARGET_EFAULT
;
8186 goto rt_sigaction_fail
;
8190 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
8193 unlock_user_struct(act
, arg2
, 0);
8195 unlock_user_struct(oact
, arg3
, 1);
8199 #ifdef TARGET_NR_sgetmask /* not on alpha */
8200 case TARGET_NR_sgetmask
:
8203 abi_ulong target_set
;
8204 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8206 host_to_target_old_sigset(&target_set
, &cur_set
);
8212 #ifdef TARGET_NR_ssetmask /* not on alpha */
8213 case TARGET_NR_ssetmask
:
8215 sigset_t set
, oset
, cur_set
;
8216 abi_ulong target_set
= arg1
;
8217 /* We only have one word of the new mask so we must read
8218 * the rest of it with do_sigprocmask() and OR in this word.
8219 * We are guaranteed that a do_sigprocmask() that only queries
8220 * the signal mask will not fail.
8222 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8224 target_to_host_old_sigset(&set
, &target_set
);
8225 sigorset(&set
, &set
, &cur_set
);
8226 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
8228 host_to_target_old_sigset(&target_set
, &oset
);
8234 #ifdef TARGET_NR_sigprocmask
8235 case TARGET_NR_sigprocmask
:
8237 #if defined(TARGET_ALPHA)
8238 sigset_t set
, oldset
;
8243 case TARGET_SIG_BLOCK
:
8246 case TARGET_SIG_UNBLOCK
:
8249 case TARGET_SIG_SETMASK
:
8253 ret
= -TARGET_EINVAL
;
8257 target_to_host_old_sigset(&set
, &mask
);
8259 ret
= do_sigprocmask(how
, &set
, &oldset
);
8260 if (!is_error(ret
)) {
8261 host_to_target_old_sigset(&mask
, &oldset
);
8263 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
8266 sigset_t set
, oldset
, *set_ptr
;
8271 case TARGET_SIG_BLOCK
:
8274 case TARGET_SIG_UNBLOCK
:
8277 case TARGET_SIG_SETMASK
:
8281 ret
= -TARGET_EINVAL
;
8284 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8286 target_to_host_old_sigset(&set
, p
);
8287 unlock_user(p
, arg2
, 0);
8293 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8294 if (!is_error(ret
) && arg3
) {
8295 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8297 host_to_target_old_sigset(p
, &oldset
);
8298 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8304 case TARGET_NR_rt_sigprocmask
:
8307 sigset_t set
, oldset
, *set_ptr
;
8309 if (arg4
!= sizeof(target_sigset_t
)) {
8310 ret
= -TARGET_EINVAL
;
8316 case TARGET_SIG_BLOCK
:
8319 case TARGET_SIG_UNBLOCK
:
8322 case TARGET_SIG_SETMASK
:
8326 ret
= -TARGET_EINVAL
;
8329 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8331 target_to_host_sigset(&set
, p
);
8332 unlock_user(p
, arg2
, 0);
8338 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8339 if (!is_error(ret
) && arg3
) {
8340 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8342 host_to_target_sigset(p
, &oldset
);
8343 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8347 #ifdef TARGET_NR_sigpending
8348 case TARGET_NR_sigpending
:
8351 ret
= get_errno(sigpending(&set
));
8352 if (!is_error(ret
)) {
8353 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8355 host_to_target_old_sigset(p
, &set
);
8356 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8361 case TARGET_NR_rt_sigpending
:
8365 /* Yes, this check is >, not != like most. We follow the kernel's
8366 * logic and it does it like this because it implements
8367 * NR_sigpending through the same code path, and in that case
8368 * the old_sigset_t is smaller in size.
8370 if (arg2
> sizeof(target_sigset_t
)) {
8371 ret
= -TARGET_EINVAL
;
8375 ret
= get_errno(sigpending(&set
));
8376 if (!is_error(ret
)) {
8377 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8379 host_to_target_sigset(p
, &set
);
8380 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8384 #ifdef TARGET_NR_sigsuspend
8385 case TARGET_NR_sigsuspend
:
8387 TaskState
*ts
= cpu
->opaque
;
8388 #if defined(TARGET_ALPHA)
8389 abi_ulong mask
= arg1
;
8390 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
8392 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8394 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
8395 unlock_user(p
, arg1
, 0);
8397 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8399 if (ret
!= -TARGET_ERESTARTSYS
) {
8400 ts
->in_sigsuspend
= 1;
8405 case TARGET_NR_rt_sigsuspend
:
8407 TaskState
*ts
= cpu
->opaque
;
8409 if (arg2
!= sizeof(target_sigset_t
)) {
8410 ret
= -TARGET_EINVAL
;
8413 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8415 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
8416 unlock_user(p
, arg1
, 0);
8417 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8419 if (ret
!= -TARGET_ERESTARTSYS
) {
8420 ts
->in_sigsuspend
= 1;
8424 case TARGET_NR_rt_sigtimedwait
:
8427 struct timespec uts
, *puts
;
8430 if (arg4
!= sizeof(target_sigset_t
)) {
8431 ret
= -TARGET_EINVAL
;
8435 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8437 target_to_host_sigset(&set
, p
);
8438 unlock_user(p
, arg1
, 0);
8441 target_to_host_timespec(puts
, arg3
);
8445 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
8447 if (!is_error(ret
)) {
8449 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
8454 host_to_target_siginfo(p
, &uinfo
);
8455 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
8457 ret
= host_to_target_signal(ret
);
8461 case TARGET_NR_rt_sigqueueinfo
:
8465 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
8469 target_to_host_siginfo(&uinfo
, p
);
8470 unlock_user(p
, arg1
, 0);
8471 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
8474 #ifdef TARGET_NR_sigreturn
8475 case TARGET_NR_sigreturn
:
8476 if (block_signals()) {
8477 ret
= -TARGET_ERESTARTSYS
;
8479 ret
= do_sigreturn(cpu_env
);
8483 case TARGET_NR_rt_sigreturn
:
8484 if (block_signals()) {
8485 ret
= -TARGET_ERESTARTSYS
;
8487 ret
= do_rt_sigreturn(cpu_env
);
8490 case TARGET_NR_sethostname
:
8491 if (!(p
= lock_user_string(arg1
)))
8493 ret
= get_errno(sethostname(p
, arg2
));
8494 unlock_user(p
, arg1
, 0);
8496 case TARGET_NR_setrlimit
:
8498 int resource
= target_to_host_resource(arg1
);
8499 struct target_rlimit
*target_rlim
;
8501 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
8503 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
8504 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
8505 unlock_user_struct(target_rlim
, arg2
, 0);
8506 ret
= get_errno(setrlimit(resource
, &rlim
));
8509 case TARGET_NR_getrlimit
:
8511 int resource
= target_to_host_resource(arg1
);
8512 struct target_rlimit
*target_rlim
;
8515 ret
= get_errno(getrlimit(resource
, &rlim
));
8516 if (!is_error(ret
)) {
8517 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
8519 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
8520 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
8521 unlock_user_struct(target_rlim
, arg2
, 1);
8525 case TARGET_NR_getrusage
:
8527 struct rusage rusage
;
8528 ret
= get_errno(getrusage(arg1
, &rusage
));
8529 if (!is_error(ret
)) {
8530 ret
= host_to_target_rusage(arg2
, &rusage
);
8534 case TARGET_NR_gettimeofday
:
8537 ret
= get_errno(gettimeofday(&tv
, NULL
));
8538 if (!is_error(ret
)) {
8539 if (copy_to_user_timeval(arg1
, &tv
))
8544 case TARGET_NR_settimeofday
:
8546 struct timeval tv
, *ptv
= NULL
;
8547 struct timezone tz
, *ptz
= NULL
;
8550 if (copy_from_user_timeval(&tv
, arg1
)) {
8557 if (copy_from_user_timezone(&tz
, arg2
)) {
8563 ret
= get_errno(settimeofday(ptv
, ptz
));
8566 #if defined(TARGET_NR_select)
8567 case TARGET_NR_select
:
8568 #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
8569 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
8572 struct target_sel_arg_struct
*sel
;
8573 abi_ulong inp
, outp
, exp
, tvp
;
8576 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1))
8578 nsel
= tswapal(sel
->n
);
8579 inp
= tswapal(sel
->inp
);
8580 outp
= tswapal(sel
->outp
);
8581 exp
= tswapal(sel
->exp
);
8582 tvp
= tswapal(sel
->tvp
);
8583 unlock_user_struct(sel
, arg1
, 0);
8584 ret
= do_select(nsel
, inp
, outp
, exp
, tvp
);
8589 #ifdef TARGET_NR_pselect6
8590 case TARGET_NR_pselect6
:
8592 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
8593 fd_set rfds
, wfds
, efds
;
8594 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
8595 struct timespec ts
, *ts_ptr
;
8598 * The 6th arg is actually two args smashed together,
8599 * so we cannot use the C library.
8607 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
8608 target_sigset_t
*target_sigset
;
8616 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
8620 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
8624 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
8630 * This takes a timespec, and not a timeval, so we cannot
8631 * use the do_select() helper ...
8634 if (target_to_host_timespec(&ts
, ts_addr
)) {
8642 /* Extract the two packed args for the sigset */
8645 sig
.size
= SIGSET_T_SIZE
;
8647 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
8651 arg_sigset
= tswapal(arg7
[0]);
8652 arg_sigsize
= tswapal(arg7
[1]);
8653 unlock_user(arg7
, arg6
, 0);
8657 if (arg_sigsize
!= sizeof(*target_sigset
)) {
8658 /* Like the kernel, we enforce correct size sigsets */
8659 ret
= -TARGET_EINVAL
;
8662 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
8663 sizeof(*target_sigset
), 1);
8664 if (!target_sigset
) {
8667 target_to_host_sigset(&set
, target_sigset
);
8668 unlock_user(target_sigset
, arg_sigset
, 0);
8676 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
8679 if (!is_error(ret
)) {
8680 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
8682 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
8684 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
8687 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
8693 #ifdef TARGET_NR_symlink
8694 case TARGET_NR_symlink
:
8697 p
= lock_user_string(arg1
);
8698 p2
= lock_user_string(arg2
);
8700 ret
= -TARGET_EFAULT
;
8702 ret
= get_errno(symlink(p
, p2
));
8703 unlock_user(p2
, arg2
, 0);
8704 unlock_user(p
, arg1
, 0);
8708 #if defined(TARGET_NR_symlinkat)
8709 case TARGET_NR_symlinkat
:
8712 p
= lock_user_string(arg1
);
8713 p2
= lock_user_string(arg3
);
8715 ret
= -TARGET_EFAULT
;
8717 ret
= get_errno(symlinkat(p
, arg2
, p2
));
8718 unlock_user(p2
, arg3
, 0);
8719 unlock_user(p
, arg1
, 0);
8723 #ifdef TARGET_NR_oldlstat
8724 case TARGET_NR_oldlstat
:
8727 #ifdef TARGET_NR_readlink
8728 case TARGET_NR_readlink
:
8731 p
= lock_user_string(arg1
);
8732 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8734 ret
= -TARGET_EFAULT
;
8736 /* Short circuit this for the magic exe check. */
8737 ret
= -TARGET_EINVAL
;
8738 } else if (is_proc_myself((const char *)p
, "exe")) {
8739 char real
[PATH_MAX
], *temp
;
8740 temp
= realpath(exec_path
, real
);
8741 /* Return value is # of bytes that we wrote to the buffer. */
8743 ret
= get_errno(-1);
8745 /* Don't worry about sign mismatch as earlier mapping
8746 * logic would have thrown a bad address error. */
8747 ret
= MIN(strlen(real
), arg3
);
8748 /* We cannot NUL terminate the string. */
8749 memcpy(p2
, real
, ret
);
8752 ret
= get_errno(readlink(path(p
), p2
, arg3
));
8754 unlock_user(p2
, arg2
, ret
);
8755 unlock_user(p
, arg1
, 0);
8759 #if defined(TARGET_NR_readlinkat)
8760 case TARGET_NR_readlinkat
:
8763 p
= lock_user_string(arg2
);
8764 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8766 ret
= -TARGET_EFAULT
;
8767 } else if (is_proc_myself((const char *)p
, "exe")) {
8768 char real
[PATH_MAX
], *temp
;
8769 temp
= realpath(exec_path
, real
);
8770 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
8771 snprintf((char *)p2
, arg4
, "%s", real
);
8773 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
8775 unlock_user(p2
, arg3
, ret
);
8776 unlock_user(p
, arg2
, 0);
8780 #ifdef TARGET_NR_uselib
8781 case TARGET_NR_uselib
:
8784 #ifdef TARGET_NR_swapon
8785 case TARGET_NR_swapon
:
8786 if (!(p
= lock_user_string(arg1
)))
8788 ret
= get_errno(swapon(p
, arg2
));
8789 unlock_user(p
, arg1
, 0);
8792 case TARGET_NR_reboot
:
8793 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
8794 /* arg4 must be ignored in all other cases */
8795 p
= lock_user_string(arg4
);
8799 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
8800 unlock_user(p
, arg4
, 0);
8802 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
8805 #ifdef TARGET_NR_readdir
8806 case TARGET_NR_readdir
:
8809 #ifdef TARGET_NR_mmap
8810 case TARGET_NR_mmap
:
8811 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8812 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8813 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8814 || defined(TARGET_S390X)
8817 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
8818 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
8826 unlock_user(v
, arg1
, 0);
8827 ret
= get_errno(target_mmap(v1
, v2
, v3
,
8828 target_to_host_bitmask(v4
, mmap_flags_tbl
),
8832 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
8833 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8839 #ifdef TARGET_NR_mmap2
8840 case TARGET_NR_mmap2
:
8842 #define MMAP_SHIFT 12
8844 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
8845 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8847 arg6
<< MMAP_SHIFT
));
8850 case TARGET_NR_munmap
:
8851 ret
= get_errno(target_munmap(arg1
, arg2
));
8853 case TARGET_NR_mprotect
:
8855 TaskState
*ts
= cpu
->opaque
;
8856 /* Special hack to detect libc making the stack executable. */
8857 if ((arg3
& PROT_GROWSDOWN
)
8858 && arg1
>= ts
->info
->stack_limit
8859 && arg1
<= ts
->info
->start_stack
) {
8860 arg3
&= ~PROT_GROWSDOWN
;
8861 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
8862 arg1
= ts
->info
->stack_limit
;
8865 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
8867 #ifdef TARGET_NR_mremap
8868 case TARGET_NR_mremap
:
8869 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
8872 /* ??? msync/mlock/munlock are broken for softmmu. */
8873 #ifdef TARGET_NR_msync
8874 case TARGET_NR_msync
:
8875 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
8878 #ifdef TARGET_NR_mlock
8879 case TARGET_NR_mlock
:
8880 ret
= get_errno(mlock(g2h(arg1
), arg2
));
8883 #ifdef TARGET_NR_munlock
8884 case TARGET_NR_munlock
:
8885 ret
= get_errno(munlock(g2h(arg1
), arg2
));
8888 #ifdef TARGET_NR_mlockall
8889 case TARGET_NR_mlockall
:
8890 ret
= get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
8893 #ifdef TARGET_NR_munlockall
8894 case TARGET_NR_munlockall
:
8895 ret
= get_errno(munlockall());
8898 case TARGET_NR_truncate
:
8899 if (!(p
= lock_user_string(arg1
)))
8901 ret
= get_errno(truncate(p
, arg2
));
8902 unlock_user(p
, arg1
, 0);
8904 case TARGET_NR_ftruncate
:
8905 ret
= get_errno(ftruncate(arg1
, arg2
));
8907 case TARGET_NR_fchmod
:
8908 ret
= get_errno(fchmod(arg1
, arg2
));
8910 #if defined(TARGET_NR_fchmodat)
8911 case TARGET_NR_fchmodat
:
8912 if (!(p
= lock_user_string(arg2
)))
8914 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
8915 unlock_user(p
, arg2
, 0);
8918 case TARGET_NR_getpriority
:
8919 /* Note that negative values are valid for getpriority, so we must
8920 differentiate based on errno settings. */
8922 ret
= getpriority(arg1
, arg2
);
8923 if (ret
== -1 && errno
!= 0) {
8924 ret
= -host_to_target_errno(errno
);
8928 /* Return value is the unbiased priority. Signal no error. */
8929 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
8931 /* Return value is a biased priority to avoid negative numbers. */
8935 case TARGET_NR_setpriority
:
8936 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
8938 #ifdef TARGET_NR_profil
8939 case TARGET_NR_profil
:
8942 case TARGET_NR_statfs
:
8943 if (!(p
= lock_user_string(arg1
)))
8945 ret
= get_errno(statfs(path(p
), &stfs
));
8946 unlock_user(p
, arg1
, 0);
8948 if (!is_error(ret
)) {
8949 struct target_statfs
*target_stfs
;
8951 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
8953 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
8954 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
8955 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
8956 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
8957 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
8958 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
8959 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
8960 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
8961 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
8962 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
8963 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
8964 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
8965 unlock_user_struct(target_stfs
, arg2
, 1);
8968 case TARGET_NR_fstatfs
:
8969 ret
= get_errno(fstatfs(arg1
, &stfs
));
8970 goto convert_statfs
;
8971 #ifdef TARGET_NR_statfs64
8972 case TARGET_NR_statfs64
:
8973 if (!(p
= lock_user_string(arg1
)))
8975 ret
= get_errno(statfs(path(p
), &stfs
));
8976 unlock_user(p
, arg1
, 0);
8978 if (!is_error(ret
)) {
8979 struct target_statfs64
*target_stfs
;
8981 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
8983 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
8984 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
8985 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
8986 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
8987 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
8988 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
8989 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
8990 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
8991 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
8992 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
8993 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
8994 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
8995 unlock_user_struct(target_stfs
, arg3
, 1);
8998 case TARGET_NR_fstatfs64
:
8999 ret
= get_errno(fstatfs(arg1
, &stfs
));
9000 goto convert_statfs64
;
9002 #ifdef TARGET_NR_ioperm
9003 case TARGET_NR_ioperm
:
9006 #ifdef TARGET_NR_socketcall
9007 case TARGET_NR_socketcall
:
9008 ret
= do_socketcall(arg1
, arg2
);
9011 #ifdef TARGET_NR_accept
9012 case TARGET_NR_accept
:
9013 ret
= do_accept4(arg1
, arg2
, arg3
, 0);
9016 #ifdef TARGET_NR_accept4
9017 case TARGET_NR_accept4
:
9018 ret
= do_accept4(arg1
, arg2
, arg3
, arg4
);
9021 #ifdef TARGET_NR_bind
9022 case TARGET_NR_bind
:
9023 ret
= do_bind(arg1
, arg2
, arg3
);
9026 #ifdef TARGET_NR_connect
9027 case TARGET_NR_connect
:
9028 ret
= do_connect(arg1
, arg2
, arg3
);
9031 #ifdef TARGET_NR_getpeername
9032 case TARGET_NR_getpeername
:
9033 ret
= do_getpeername(arg1
, arg2
, arg3
);
9036 #ifdef TARGET_NR_getsockname
9037 case TARGET_NR_getsockname
:
9038 ret
= do_getsockname(arg1
, arg2
, arg3
);
9041 #ifdef TARGET_NR_getsockopt
9042 case TARGET_NR_getsockopt
:
9043 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
9046 #ifdef TARGET_NR_listen
9047 case TARGET_NR_listen
:
9048 ret
= get_errno(listen(arg1
, arg2
));
9051 #ifdef TARGET_NR_recv
9052 case TARGET_NR_recv
:
9053 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
9056 #ifdef TARGET_NR_recvfrom
9057 case TARGET_NR_recvfrom
:
9058 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9061 #ifdef TARGET_NR_recvmsg
9062 case TARGET_NR_recvmsg
:
9063 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
9066 #ifdef TARGET_NR_send
9067 case TARGET_NR_send
:
9068 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
9071 #ifdef TARGET_NR_sendmsg
9072 case TARGET_NR_sendmsg
:
9073 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
9076 #ifdef TARGET_NR_sendmmsg
9077 case TARGET_NR_sendmmsg
:
9078 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
9080 case TARGET_NR_recvmmsg
:
9081 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
9084 #ifdef TARGET_NR_sendto
9085 case TARGET_NR_sendto
:
9086 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9089 #ifdef TARGET_NR_shutdown
9090 case TARGET_NR_shutdown
:
9091 ret
= get_errno(shutdown(arg1
, arg2
));
9094 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9095 case TARGET_NR_getrandom
:
9096 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
9100 ret
= get_errno(getrandom(p
, arg2
, arg3
));
9101 unlock_user(p
, arg1
, ret
);
9104 #ifdef TARGET_NR_socket
9105 case TARGET_NR_socket
:
9106 ret
= do_socket(arg1
, arg2
, arg3
);
9107 fd_trans_unregister(ret
);
9110 #ifdef TARGET_NR_socketpair
9111 case TARGET_NR_socketpair
:
9112 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
9115 #ifdef TARGET_NR_setsockopt
9116 case TARGET_NR_setsockopt
:
9117 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
9121 case TARGET_NR_syslog
:
9122 if (!(p
= lock_user_string(arg2
)))
9124 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
9125 unlock_user(p
, arg2
, 0);
9128 case TARGET_NR_setitimer
:
9130 struct itimerval value
, ovalue
, *pvalue
;
9134 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
9135 || copy_from_user_timeval(&pvalue
->it_value
,
9136 arg2
+ sizeof(struct target_timeval
)))
9141 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
9142 if (!is_error(ret
) && arg3
) {
9143 if (copy_to_user_timeval(arg3
,
9144 &ovalue
.it_interval
)
9145 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
9151 case TARGET_NR_getitimer
:
9153 struct itimerval value
;
9155 ret
= get_errno(getitimer(arg1
, &value
));
9156 if (!is_error(ret
) && arg2
) {
9157 if (copy_to_user_timeval(arg2
,
9159 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
9165 #ifdef TARGET_NR_stat
9166 case TARGET_NR_stat
:
9167 if (!(p
= lock_user_string(arg1
)))
9169 ret
= get_errno(stat(path(p
), &st
));
9170 unlock_user(p
, arg1
, 0);
9173 #ifdef TARGET_NR_lstat
9174 case TARGET_NR_lstat
:
9175 if (!(p
= lock_user_string(arg1
)))
9177 ret
= get_errno(lstat(path(p
), &st
));
9178 unlock_user(p
, arg1
, 0);
9181 case TARGET_NR_fstat
:
9183 ret
= get_errno(fstat(arg1
, &st
));
9184 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9187 if (!is_error(ret
)) {
9188 struct target_stat
*target_st
;
9190 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
9192 memset(target_st
, 0, sizeof(*target_st
));
9193 __put_user(st
.st_dev
, &target_st
->st_dev
);
9194 __put_user(st
.st_ino
, &target_st
->st_ino
);
9195 __put_user(st
.st_mode
, &target_st
->st_mode
);
9196 __put_user(st
.st_uid
, &target_st
->st_uid
);
9197 __put_user(st
.st_gid
, &target_st
->st_gid
);
9198 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
9199 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
9200 __put_user(st
.st_size
, &target_st
->st_size
);
9201 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
9202 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
9203 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
9204 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
9205 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
9206 unlock_user_struct(target_st
, arg2
, 1);
9210 #ifdef TARGET_NR_olduname
9211 case TARGET_NR_olduname
:
9214 #ifdef TARGET_NR_iopl
9215 case TARGET_NR_iopl
:
9218 case TARGET_NR_vhangup
:
9219 ret
= get_errno(vhangup());
9221 #ifdef TARGET_NR_idle
9222 case TARGET_NR_idle
:
9225 #ifdef TARGET_NR_syscall
9226 case TARGET_NR_syscall
:
9227 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
9228 arg6
, arg7
, arg8
, 0);
9231 case TARGET_NR_wait4
:
9234 abi_long status_ptr
= arg2
;
9235 struct rusage rusage
, *rusage_ptr
;
9236 abi_ulong target_rusage
= arg4
;
9237 abi_long rusage_err
;
9239 rusage_ptr
= &rusage
;
9242 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
9243 if (!is_error(ret
)) {
9244 if (status_ptr
&& ret
) {
9245 status
= host_to_target_waitstatus(status
);
9246 if (put_user_s32(status
, status_ptr
))
9249 if (target_rusage
) {
9250 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
9258 #ifdef TARGET_NR_swapoff
9259 case TARGET_NR_swapoff
:
9260 if (!(p
= lock_user_string(arg1
)))
9262 ret
= get_errno(swapoff(p
));
9263 unlock_user(p
, arg1
, 0);
9266 case TARGET_NR_sysinfo
:
9268 struct target_sysinfo
*target_value
;
9269 struct sysinfo value
;
9270 ret
= get_errno(sysinfo(&value
));
9271 if (!is_error(ret
) && arg1
)
9273 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
9275 __put_user(value
.uptime
, &target_value
->uptime
);
9276 __put_user(value
.loads
[0], &target_value
->loads
[0]);
9277 __put_user(value
.loads
[1], &target_value
->loads
[1]);
9278 __put_user(value
.loads
[2], &target_value
->loads
[2]);
9279 __put_user(value
.totalram
, &target_value
->totalram
);
9280 __put_user(value
.freeram
, &target_value
->freeram
);
9281 __put_user(value
.sharedram
, &target_value
->sharedram
);
9282 __put_user(value
.bufferram
, &target_value
->bufferram
);
9283 __put_user(value
.totalswap
, &target_value
->totalswap
);
9284 __put_user(value
.freeswap
, &target_value
->freeswap
);
9285 __put_user(value
.procs
, &target_value
->procs
);
9286 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
9287 __put_user(value
.freehigh
, &target_value
->freehigh
);
9288 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
9289 unlock_user_struct(target_value
, arg1
, 1);
9293 #ifdef TARGET_NR_ipc
9295 ret
= do_ipc(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9298 #ifdef TARGET_NR_semget
9299 case TARGET_NR_semget
:
9300 ret
= get_errno(semget(arg1
, arg2
, arg3
));
9303 #ifdef TARGET_NR_semop
9304 case TARGET_NR_semop
:
9305 ret
= do_semop(arg1
, arg2
, arg3
);
9308 #ifdef TARGET_NR_semctl
9309 case TARGET_NR_semctl
:
9310 ret
= do_semctl(arg1
, arg2
, arg3
, arg4
);
9313 #ifdef TARGET_NR_msgctl
9314 case TARGET_NR_msgctl
:
9315 ret
= do_msgctl(arg1
, arg2
, arg3
);
9318 #ifdef TARGET_NR_msgget
9319 case TARGET_NR_msgget
:
9320 ret
= get_errno(msgget(arg1
, arg2
));
9323 #ifdef TARGET_NR_msgrcv
9324 case TARGET_NR_msgrcv
:
9325 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
9328 #ifdef TARGET_NR_msgsnd
9329 case TARGET_NR_msgsnd
:
9330 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
9333 #ifdef TARGET_NR_shmget
9334 case TARGET_NR_shmget
:
9335 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
9338 #ifdef TARGET_NR_shmctl
9339 case TARGET_NR_shmctl
:
9340 ret
= do_shmctl(arg1
, arg2
, arg3
);
9343 #ifdef TARGET_NR_shmat
9344 case TARGET_NR_shmat
:
9345 ret
= do_shmat(arg1
, arg2
, arg3
);
9348 #ifdef TARGET_NR_shmdt
9349 case TARGET_NR_shmdt
:
9350 ret
= do_shmdt(arg1
);
9353 case TARGET_NR_fsync
:
9354 ret
= get_errno(fsync(arg1
));
9356 case TARGET_NR_clone
:
9357 /* Linux manages to have three different orderings for its
9358 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9359 * match the kernel's CONFIG_CLONE_* settings.
9360 * Microblaze is further special in that it uses a sixth
9361 * implicit argument to clone for the TLS pointer.
9363 #if defined(TARGET_MICROBLAZE)
9364 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
9365 #elif defined(TARGET_CLONE_BACKWARDS)
9366 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
9367 #elif defined(TARGET_CLONE_BACKWARDS2)
9368 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
9370 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
9373 #ifdef __NR_exit_group
9374 /* new thread calls */
9375 case TARGET_NR_exit_group
:
9379 gdb_exit(cpu_env
, arg1
);
9380 ret
= get_errno(exit_group(arg1
));
9383 case TARGET_NR_setdomainname
:
9384 if (!(p
= lock_user_string(arg1
)))
9386 ret
= get_errno(setdomainname(p
, arg2
));
9387 unlock_user(p
, arg1
, 0);
9389 case TARGET_NR_uname
:
9390 /* no need to transcode because we use the linux syscall */
9392 struct new_utsname
* buf
;
9394 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
9396 ret
= get_errno(sys_uname(buf
));
9397 if (!is_error(ret
)) {
9398 /* Overwrite the native machine name with whatever is being
9400 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
9401 /* Allow the user to override the reported release. */
9402 if (qemu_uname_release
&& *qemu_uname_release
) {
9403 g_strlcpy(buf
->release
, qemu_uname_release
,
9404 sizeof(buf
->release
));
9407 unlock_user_struct(buf
, arg1
, 1);
9411 case TARGET_NR_modify_ldt
:
9412 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
9414 #if !defined(TARGET_X86_64)
9415 case TARGET_NR_vm86old
:
9417 case TARGET_NR_vm86
:
9418 ret
= do_vm86(cpu_env
, arg1
, arg2
);
9422 case TARGET_NR_adjtimex
:
9424 #ifdef TARGET_NR_create_module
9425 case TARGET_NR_create_module
:
9427 case TARGET_NR_init_module
:
9428 case TARGET_NR_delete_module
:
9429 #ifdef TARGET_NR_get_kernel_syms
9430 case TARGET_NR_get_kernel_syms
:
9433 case TARGET_NR_quotactl
:
9435 case TARGET_NR_getpgid
:
9436 ret
= get_errno(getpgid(arg1
));
9438 case TARGET_NR_fchdir
:
9439 ret
= get_errno(fchdir(arg1
));
9441 #ifdef TARGET_NR_bdflush /* not on x86_64 */
9442 case TARGET_NR_bdflush
:
9445 #ifdef TARGET_NR_sysfs
9446 case TARGET_NR_sysfs
:
9449 case TARGET_NR_personality
:
9450 ret
= get_errno(personality(arg1
));
9452 #ifdef TARGET_NR_afs_syscall
9453 case TARGET_NR_afs_syscall
:
9456 #ifdef TARGET_NR__llseek /* Not on alpha */
9457 case TARGET_NR__llseek
:
9460 #if !defined(__NR_llseek)
9461 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
9463 ret
= get_errno(res
);
9468 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
9470 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
9476 #ifdef TARGET_NR_getdents
9477 case TARGET_NR_getdents
:
9478 #ifdef __NR_getdents
9479 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9481 struct target_dirent
*target_dirp
;
9482 struct linux_dirent
*dirp
;
9483 abi_long count
= arg3
;
9485 dirp
= g_try_malloc(count
);
9487 ret
= -TARGET_ENOMEM
;
9491 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9492 if (!is_error(ret
)) {
9493 struct linux_dirent
*de
;
9494 struct target_dirent
*tde
;
9496 int reclen
, treclen
;
9497 int count1
, tnamelen
;
9501 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9505 reclen
= de
->d_reclen
;
9506 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
9507 assert(tnamelen
>= 0);
9508 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
9509 assert(count1
+ treclen
<= count
);
9510 tde
->d_reclen
= tswap16(treclen
);
9511 tde
->d_ino
= tswapal(de
->d_ino
);
9512 tde
->d_off
= tswapal(de
->d_off
);
9513 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
9514 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9516 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9520 unlock_user(target_dirp
, arg2
, ret
);
9526 struct linux_dirent
*dirp
;
9527 abi_long count
= arg3
;
9529 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9531 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9532 if (!is_error(ret
)) {
9533 struct linux_dirent
*de
;
9538 reclen
= de
->d_reclen
;
9541 de
->d_reclen
= tswap16(reclen
);
9542 tswapls(&de
->d_ino
);
9543 tswapls(&de
->d_off
);
9544 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9548 unlock_user(dirp
, arg2
, ret
);
9552 /* Implement getdents in terms of getdents64 */
9554 struct linux_dirent64
*dirp
;
9555 abi_long count
= arg3
;
9557 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
9561 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9562 if (!is_error(ret
)) {
9563 /* Convert the dirent64 structs to target dirent. We do this
9564 * in-place, since we can guarantee that a target_dirent is no
9565 * larger than a dirent64; however this means we have to be
9566 * careful to read everything before writing in the new format.
9568 struct linux_dirent64
*de
;
9569 struct target_dirent
*tde
;
9574 tde
= (struct target_dirent
*)dirp
;
9576 int namelen
, treclen
;
9577 int reclen
= de
->d_reclen
;
9578 uint64_t ino
= de
->d_ino
;
9579 int64_t off
= de
->d_off
;
9580 uint8_t type
= de
->d_type
;
9582 namelen
= strlen(de
->d_name
);
9583 treclen
= offsetof(struct target_dirent
, d_name
)
9585 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
9587 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
9588 tde
->d_ino
= tswapal(ino
);
9589 tde
->d_off
= tswapal(off
);
9590 tde
->d_reclen
= tswap16(treclen
);
9591 /* The target_dirent type is in what was formerly a padding
9592 * byte at the end of the structure:
9594 *(((char *)tde
) + treclen
- 1) = type
;
9596 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9597 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9603 unlock_user(dirp
, arg2
, ret
);
9607 #endif /* TARGET_NR_getdents */
9608 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9609 case TARGET_NR_getdents64
:
9611 struct linux_dirent64
*dirp
;
9612 abi_long count
= arg3
;
9613 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9615 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9616 if (!is_error(ret
)) {
9617 struct linux_dirent64
*de
;
9622 reclen
= de
->d_reclen
;
9625 de
->d_reclen
= tswap16(reclen
);
9626 tswap64s((uint64_t *)&de
->d_ino
);
9627 tswap64s((uint64_t *)&de
->d_off
);
9628 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9632 unlock_user(dirp
, arg2
, ret
);
9635 #endif /* TARGET_NR_getdents64 */
9636 #if defined(TARGET_NR__newselect)
9637 case TARGET_NR__newselect
:
9638 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9641 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9642 # ifdef TARGET_NR_poll
9643 case TARGET_NR_poll
:
9645 # ifdef TARGET_NR_ppoll
9646 case TARGET_NR_ppoll
:
9649 struct target_pollfd
*target_pfd
;
9650 unsigned int nfds
= arg2
;
9657 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
9658 sizeof(struct target_pollfd
) * nfds
, 1);
9663 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
9664 for (i
= 0; i
< nfds
; i
++) {
9665 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
9666 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
9671 # ifdef TARGET_NR_ppoll
9672 case TARGET_NR_ppoll
:
9674 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
9675 target_sigset_t
*target_set
;
9676 sigset_t _set
, *set
= &_set
;
9679 if (target_to_host_timespec(timeout_ts
, arg3
)) {
9680 unlock_user(target_pfd
, arg1
, 0);
9688 if (arg5
!= sizeof(target_sigset_t
)) {
9689 unlock_user(target_pfd
, arg1
, 0);
9690 ret
= -TARGET_EINVAL
;
9694 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
9696 unlock_user(target_pfd
, arg1
, 0);
9699 target_to_host_sigset(set
, target_set
);
9704 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
9705 set
, SIGSET_T_SIZE
));
9707 if (!is_error(ret
) && arg3
) {
9708 host_to_target_timespec(arg3
, timeout_ts
);
9711 unlock_user(target_set
, arg4
, 0);
9716 # ifdef TARGET_NR_poll
9717 case TARGET_NR_poll
:
9719 struct timespec ts
, *pts
;
9722 /* Convert ms to secs, ns */
9723 ts
.tv_sec
= arg3
/ 1000;
9724 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
9727 /* -ve poll() timeout means "infinite" */
9730 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
9735 g_assert_not_reached();
9738 if (!is_error(ret
)) {
9739 for(i
= 0; i
< nfds
; i
++) {
9740 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
9743 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
9747 case TARGET_NR_flock
:
9748 /* NOTE: the flock constant seems to be the same for every
9750 ret
= get_errno(safe_flock(arg1
, arg2
));
9752 case TARGET_NR_readv
:
9754 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
9756 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
9757 unlock_iovec(vec
, arg2
, arg3
, 1);
9759 ret
= -host_to_target_errno(errno
);
9763 case TARGET_NR_writev
:
9765 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
9767 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
9768 unlock_iovec(vec
, arg2
, arg3
, 0);
9770 ret
= -host_to_target_errno(errno
);
9774 case TARGET_NR_getsid
:
9775 ret
= get_errno(getsid(arg1
));
9777 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9778 case TARGET_NR_fdatasync
:
9779 ret
= get_errno(fdatasync(arg1
));
9782 #ifdef TARGET_NR__sysctl
9783 case TARGET_NR__sysctl
:
9784 /* We don't implement this, but ENOTDIR is always a safe
9786 ret
= -TARGET_ENOTDIR
;
9789 case TARGET_NR_sched_getaffinity
:
9791 unsigned int mask_size
;
9792 unsigned long *mask
;
9795 * sched_getaffinity needs multiples of ulong, so need to take
9796 * care of mismatches between target ulong and host ulong sizes.
9798 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9799 ret
= -TARGET_EINVAL
;
9802 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9804 mask
= alloca(mask_size
);
9805 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
9807 if (!is_error(ret
)) {
9809 /* More data returned than the caller's buffer will fit.
9810 * This only happens if sizeof(abi_long) < sizeof(long)
9811 * and the caller passed us a buffer holding an odd number
9812 * of abi_longs. If the host kernel is actually using the
9813 * extra 4 bytes then fail EINVAL; otherwise we can just
9814 * ignore them and only copy the interesting part.
9816 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
9817 if (numcpus
> arg2
* 8) {
9818 ret
= -TARGET_EINVAL
;
9824 if (copy_to_user(arg3
, mask
, ret
)) {
9830 case TARGET_NR_sched_setaffinity
:
9832 unsigned int mask_size
;
9833 unsigned long *mask
;
9836 * sched_setaffinity needs multiples of ulong, so need to take
9837 * care of mismatches between target ulong and host ulong sizes.
9839 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9840 ret
= -TARGET_EINVAL
;
9843 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9845 mask
= alloca(mask_size
);
9846 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
9849 memcpy(mask
, p
, arg2
);
9850 unlock_user_struct(p
, arg2
, 0);
9852 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
9855 case TARGET_NR_sched_setparam
:
9857 struct sched_param
*target_schp
;
9858 struct sched_param schp
;
9861 return -TARGET_EINVAL
;
9863 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
9865 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
9866 unlock_user_struct(target_schp
, arg2
, 0);
9867 ret
= get_errno(sched_setparam(arg1
, &schp
));
9870 case TARGET_NR_sched_getparam
:
9872 struct sched_param
*target_schp
;
9873 struct sched_param schp
;
9876 return -TARGET_EINVAL
;
9878 ret
= get_errno(sched_getparam(arg1
, &schp
));
9879 if (!is_error(ret
)) {
9880 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
9882 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
9883 unlock_user_struct(target_schp
, arg2
, 1);
9887 case TARGET_NR_sched_setscheduler
:
9889 struct sched_param
*target_schp
;
9890 struct sched_param schp
;
9892 return -TARGET_EINVAL
;
9894 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
9896 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
9897 unlock_user_struct(target_schp
, arg3
, 0);
9898 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
9901 case TARGET_NR_sched_getscheduler
:
9902 ret
= get_errno(sched_getscheduler(arg1
));
9904 case TARGET_NR_sched_yield
:
9905 ret
= get_errno(sched_yield());
9907 case TARGET_NR_sched_get_priority_max
:
9908 ret
= get_errno(sched_get_priority_max(arg1
));
9910 case TARGET_NR_sched_get_priority_min
:
9911 ret
= get_errno(sched_get_priority_min(arg1
));
9913 case TARGET_NR_sched_rr_get_interval
:
9916 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
9917 if (!is_error(ret
)) {
9918 ret
= host_to_target_timespec(arg2
, &ts
);
9922 case TARGET_NR_nanosleep
:
9924 struct timespec req
, rem
;
9925 target_to_host_timespec(&req
, arg1
);
9926 ret
= get_errno(safe_nanosleep(&req
, &rem
));
9927 if (is_error(ret
) && arg2
) {
9928 host_to_target_timespec(arg2
, &rem
);
9932 #ifdef TARGET_NR_query_module
9933 case TARGET_NR_query_module
:
9936 #ifdef TARGET_NR_nfsservctl
9937 case TARGET_NR_nfsservctl
:
9940 case TARGET_NR_prctl
:
9942 case PR_GET_PDEATHSIG
:
9945 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
9946 if (!is_error(ret
) && arg2
9947 && put_user_ual(deathsig
, arg2
)) {
9955 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
9959 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
9961 unlock_user(name
, arg2
, 16);
9966 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
9970 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
9972 unlock_user(name
, arg2
, 0);
9977 /* Most prctl options have no pointer arguments */
9978 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
9982 #ifdef TARGET_NR_arch_prctl
9983 case TARGET_NR_arch_prctl
:
9984 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
9985 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
9991 #ifdef TARGET_NR_pread64
9992 case TARGET_NR_pread64
:
9993 if (regpairs_aligned(cpu_env
)) {
9997 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
9999 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10000 unlock_user(p
, arg2
, ret
);
10002 case TARGET_NR_pwrite64
:
10003 if (regpairs_aligned(cpu_env
)) {
10007 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
10009 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10010 unlock_user(p
, arg2
, 0);
10013 case TARGET_NR_getcwd
:
10014 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
10016 ret
= get_errno(sys_getcwd1(p
, arg2
));
10017 unlock_user(p
, arg1
, ret
);
10019 case TARGET_NR_capget
:
10020 case TARGET_NR_capset
:
10022 struct target_user_cap_header
*target_header
;
10023 struct target_user_cap_data
*target_data
= NULL
;
10024 struct __user_cap_header_struct header
;
10025 struct __user_cap_data_struct data
[2];
10026 struct __user_cap_data_struct
*dataptr
= NULL
;
10027 int i
, target_datalen
;
10028 int data_items
= 1;
10030 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
10033 header
.version
= tswap32(target_header
->version
);
10034 header
.pid
= tswap32(target_header
->pid
);
10036 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
10037 /* Version 2 and up takes pointer to two user_data structs */
10041 target_datalen
= sizeof(*target_data
) * data_items
;
10044 if (num
== TARGET_NR_capget
) {
10045 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
10047 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
10049 if (!target_data
) {
10050 unlock_user_struct(target_header
, arg1
, 0);
10054 if (num
== TARGET_NR_capset
) {
10055 for (i
= 0; i
< data_items
; i
++) {
10056 data
[i
].effective
= tswap32(target_data
[i
].effective
);
10057 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
10058 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
10065 if (num
== TARGET_NR_capget
) {
10066 ret
= get_errno(capget(&header
, dataptr
));
10068 ret
= get_errno(capset(&header
, dataptr
));
10071 /* The kernel always updates version for both capget and capset */
10072 target_header
->version
= tswap32(header
.version
);
10073 unlock_user_struct(target_header
, arg1
, 1);
10076 if (num
== TARGET_NR_capget
) {
10077 for (i
= 0; i
< data_items
; i
++) {
10078 target_data
[i
].effective
= tswap32(data
[i
].effective
);
10079 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
10080 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
10082 unlock_user(target_data
, arg2
, target_datalen
);
10084 unlock_user(target_data
, arg2
, 0);
10089 case TARGET_NR_sigaltstack
:
10090 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
10093 #ifdef CONFIG_SENDFILE
10094 case TARGET_NR_sendfile
:
10096 off_t
*offp
= NULL
;
10099 ret
= get_user_sal(off
, arg3
);
10100 if (is_error(ret
)) {
10105 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10106 if (!is_error(ret
) && arg3
) {
10107 abi_long ret2
= put_user_sal(off
, arg3
);
10108 if (is_error(ret2
)) {
10114 #ifdef TARGET_NR_sendfile64
10115 case TARGET_NR_sendfile64
:
10117 off_t
*offp
= NULL
;
10120 ret
= get_user_s64(off
, arg3
);
10121 if (is_error(ret
)) {
10126 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10127 if (!is_error(ret
) && arg3
) {
10128 abi_long ret2
= put_user_s64(off
, arg3
);
10129 if (is_error(ret2
)) {
10137 case TARGET_NR_sendfile
:
10138 #ifdef TARGET_NR_sendfile64
10139 case TARGET_NR_sendfile64
:
10141 goto unimplemented
;
10144 #ifdef TARGET_NR_getpmsg
10145 case TARGET_NR_getpmsg
:
10146 goto unimplemented
;
10148 #ifdef TARGET_NR_putpmsg
10149 case TARGET_NR_putpmsg
:
10150 goto unimplemented
;
10152 #ifdef TARGET_NR_vfork
10153 case TARGET_NR_vfork
:
10154 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
10158 #ifdef TARGET_NR_ugetrlimit
10159 case TARGET_NR_ugetrlimit
:
10161 struct rlimit rlim
;
10162 int resource
= target_to_host_resource(arg1
);
10163 ret
= get_errno(getrlimit(resource
, &rlim
));
10164 if (!is_error(ret
)) {
10165 struct target_rlimit
*target_rlim
;
10166 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
10168 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
10169 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
10170 unlock_user_struct(target_rlim
, arg2
, 1);
10175 #ifdef TARGET_NR_truncate64
10176 case TARGET_NR_truncate64
:
10177 if (!(p
= lock_user_string(arg1
)))
10179 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
10180 unlock_user(p
, arg1
, 0);
10183 #ifdef TARGET_NR_ftruncate64
10184 case TARGET_NR_ftruncate64
:
10185 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
10188 #ifdef TARGET_NR_stat64
10189 case TARGET_NR_stat64
:
10190 if (!(p
= lock_user_string(arg1
)))
10192 ret
= get_errno(stat(path(p
), &st
));
10193 unlock_user(p
, arg1
, 0);
10194 if (!is_error(ret
))
10195 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10198 #ifdef TARGET_NR_lstat64
10199 case TARGET_NR_lstat64
:
10200 if (!(p
= lock_user_string(arg1
)))
10202 ret
= get_errno(lstat(path(p
), &st
));
10203 unlock_user(p
, arg1
, 0);
10204 if (!is_error(ret
))
10205 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10208 #ifdef TARGET_NR_fstat64
10209 case TARGET_NR_fstat64
:
10210 ret
= get_errno(fstat(arg1
, &st
));
10211 if (!is_error(ret
))
10212 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10215 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10216 #ifdef TARGET_NR_fstatat64
10217 case TARGET_NR_fstatat64
:
10219 #ifdef TARGET_NR_newfstatat
10220 case TARGET_NR_newfstatat
:
10222 if (!(p
= lock_user_string(arg2
)))
10224 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
10225 if (!is_error(ret
))
10226 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
10229 #ifdef TARGET_NR_lchown
10230 case TARGET_NR_lchown
:
10231 if (!(p
= lock_user_string(arg1
)))
10233 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10234 unlock_user(p
, arg1
, 0);
10237 #ifdef TARGET_NR_getuid
10238 case TARGET_NR_getuid
:
10239 ret
= get_errno(high2lowuid(getuid()));
10242 #ifdef TARGET_NR_getgid
10243 case TARGET_NR_getgid
:
10244 ret
= get_errno(high2lowgid(getgid()));
10247 #ifdef TARGET_NR_geteuid
10248 case TARGET_NR_geteuid
:
10249 ret
= get_errno(high2lowuid(geteuid()));
10252 #ifdef TARGET_NR_getegid
10253 case TARGET_NR_getegid
:
10254 ret
= get_errno(high2lowgid(getegid()));
10257 case TARGET_NR_setreuid
:
10258 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
10260 case TARGET_NR_setregid
:
10261 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
10263 case TARGET_NR_getgroups
:
10265 int gidsetsize
= arg1
;
10266 target_id
*target_grouplist
;
10270 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10271 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10272 if (gidsetsize
== 0)
10274 if (!is_error(ret
)) {
10275 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
10276 if (!target_grouplist
)
10278 for(i
= 0;i
< ret
; i
++)
10279 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
10280 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
10284 case TARGET_NR_setgroups
:
10286 int gidsetsize
= arg1
;
10287 target_id
*target_grouplist
;
10288 gid_t
*grouplist
= NULL
;
10291 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10292 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
10293 if (!target_grouplist
) {
10294 ret
= -TARGET_EFAULT
;
10297 for (i
= 0; i
< gidsetsize
; i
++) {
10298 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
10300 unlock_user(target_grouplist
, arg2
, 0);
10302 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
10305 case TARGET_NR_fchown
:
10306 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
10308 #if defined(TARGET_NR_fchownat)
10309 case TARGET_NR_fchownat
:
10310 if (!(p
= lock_user_string(arg2
)))
10312 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
10313 low2highgid(arg4
), arg5
));
10314 unlock_user(p
, arg2
, 0);
10317 #ifdef TARGET_NR_setresuid
10318 case TARGET_NR_setresuid
:
10319 ret
= get_errno(sys_setresuid(low2highuid(arg1
),
10321 low2highuid(arg3
)));
10324 #ifdef TARGET_NR_getresuid
10325 case TARGET_NR_getresuid
:
10327 uid_t ruid
, euid
, suid
;
10328 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10329 if (!is_error(ret
)) {
10330 if (put_user_id(high2lowuid(ruid
), arg1
)
10331 || put_user_id(high2lowuid(euid
), arg2
)
10332 || put_user_id(high2lowuid(suid
), arg3
))
10338 #ifdef TARGET_NR_getresgid
10339 case TARGET_NR_setresgid
:
10340 ret
= get_errno(sys_setresgid(low2highgid(arg1
),
10342 low2highgid(arg3
)));
10345 #ifdef TARGET_NR_getresgid
10346 case TARGET_NR_getresgid
:
10348 gid_t rgid
, egid
, sgid
;
10349 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10350 if (!is_error(ret
)) {
10351 if (put_user_id(high2lowgid(rgid
), arg1
)
10352 || put_user_id(high2lowgid(egid
), arg2
)
10353 || put_user_id(high2lowgid(sgid
), arg3
))
10359 #ifdef TARGET_NR_chown
10360 case TARGET_NR_chown
:
10361 if (!(p
= lock_user_string(arg1
)))
10363 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10364 unlock_user(p
, arg1
, 0);
10367 case TARGET_NR_setuid
:
10368 ret
= get_errno(sys_setuid(low2highuid(arg1
)));
10370 case TARGET_NR_setgid
:
10371 ret
= get_errno(sys_setgid(low2highgid(arg1
)));
10373 case TARGET_NR_setfsuid
:
10374 ret
= get_errno(setfsuid(arg1
));
10376 case TARGET_NR_setfsgid
:
10377 ret
= get_errno(setfsgid(arg1
));
10380 #ifdef TARGET_NR_lchown32
10381 case TARGET_NR_lchown32
:
10382 if (!(p
= lock_user_string(arg1
)))
10384 ret
= get_errno(lchown(p
, arg2
, arg3
));
10385 unlock_user(p
, arg1
, 0);
10388 #ifdef TARGET_NR_getuid32
10389 case TARGET_NR_getuid32
:
10390 ret
= get_errno(getuid());
10394 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10395 /* Alpha specific */
10396 case TARGET_NR_getxuid
:
10400 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
10402 ret
= get_errno(getuid());
10405 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10406 /* Alpha specific */
10407 case TARGET_NR_getxgid
:
10411 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
10413 ret
= get_errno(getgid());
10416 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10417 /* Alpha specific */
10418 case TARGET_NR_osf_getsysinfo
:
10419 ret
= -TARGET_EOPNOTSUPP
;
10421 case TARGET_GSI_IEEE_FP_CONTROL
:
10423 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
10425 /* Copied from linux ieee_fpcr_to_swcr. */
10426 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
10427 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
10428 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
10429 | SWCR_TRAP_ENABLE_DZE
10430 | SWCR_TRAP_ENABLE_OVF
);
10431 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
10432 | SWCR_TRAP_ENABLE_INE
);
10433 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
10434 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
10436 if (put_user_u64 (swcr
, arg2
))
10442 /* case GSI_IEEE_STATE_AT_SIGNAL:
10443 -- Not implemented in linux kernel.
10445 -- Retrieves current unaligned access state; not much used.
10446 case GSI_PROC_TYPE:
10447 -- Retrieves implver information; surely not used.
10448 case GSI_GET_HWRPB:
10449 -- Grabs a copy of the HWRPB; surely not used.
10454 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10455 /* Alpha specific */
10456 case TARGET_NR_osf_setsysinfo
:
10457 ret
= -TARGET_EOPNOTSUPP
;
10459 case TARGET_SSI_IEEE_FP_CONTROL
:
10461 uint64_t swcr
, fpcr
, orig_fpcr
;
10463 if (get_user_u64 (swcr
, arg2
)) {
10466 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10467 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
10469 /* Copied from linux ieee_swcr_to_fpcr. */
10470 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
10471 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
10472 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
10473 | SWCR_TRAP_ENABLE_DZE
10474 | SWCR_TRAP_ENABLE_OVF
)) << 48;
10475 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
10476 | SWCR_TRAP_ENABLE_INE
)) << 57;
10477 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
10478 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
10480 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10485 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
10487 uint64_t exc
, fpcr
, orig_fpcr
;
10490 if (get_user_u64(exc
, arg2
)) {
10494 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10496 /* We only add to the exception status here. */
10497 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
10499 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10502 /* Old exceptions are not signaled. */
10503 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
10505 /* If any exceptions set by this call,
10506 and are unmasked, send a signal. */
10508 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
10509 si_code
= TARGET_FPE_FLTRES
;
10511 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
10512 si_code
= TARGET_FPE_FLTUND
;
10514 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
10515 si_code
= TARGET_FPE_FLTOVF
;
10517 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
10518 si_code
= TARGET_FPE_FLTDIV
;
10520 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
10521 si_code
= TARGET_FPE_FLTINV
;
10523 if (si_code
!= 0) {
10524 target_siginfo_t info
;
10525 info
.si_signo
= SIGFPE
;
10527 info
.si_code
= si_code
;
10528 info
._sifields
._sigfault
._addr
10529 = ((CPUArchState
*)cpu_env
)->pc
;
10530 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
10535 /* case SSI_NVPAIRS:
10536 -- Used with SSIN_UACPROC to enable unaligned accesses.
10537 case SSI_IEEE_STATE_AT_SIGNAL:
10538 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10539 -- Not implemented in linux kernel
10544 #ifdef TARGET_NR_osf_sigprocmask
10545 /* Alpha specific. */
10546 case TARGET_NR_osf_sigprocmask
:
10550 sigset_t set
, oldset
;
10553 case TARGET_SIG_BLOCK
:
10556 case TARGET_SIG_UNBLOCK
:
10559 case TARGET_SIG_SETMASK
:
10563 ret
= -TARGET_EINVAL
;
10567 target_to_host_old_sigset(&set
, &mask
);
10568 ret
= do_sigprocmask(how
, &set
, &oldset
);
10570 host_to_target_old_sigset(&mask
, &oldset
);
10577 #ifdef TARGET_NR_getgid32
10578 case TARGET_NR_getgid32
:
10579 ret
= get_errno(getgid());
10582 #ifdef TARGET_NR_geteuid32
10583 case TARGET_NR_geteuid32
:
10584 ret
= get_errno(geteuid());
10587 #ifdef TARGET_NR_getegid32
10588 case TARGET_NR_getegid32
:
10589 ret
= get_errno(getegid());
10592 #ifdef TARGET_NR_setreuid32
10593 case TARGET_NR_setreuid32
:
10594 ret
= get_errno(setreuid(arg1
, arg2
));
10597 #ifdef TARGET_NR_setregid32
10598 case TARGET_NR_setregid32
:
10599 ret
= get_errno(setregid(arg1
, arg2
));
10602 #ifdef TARGET_NR_getgroups32
10603 case TARGET_NR_getgroups32
:
10605 int gidsetsize
= arg1
;
10606 uint32_t *target_grouplist
;
10610 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10611 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10612 if (gidsetsize
== 0)
10614 if (!is_error(ret
)) {
10615 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
10616 if (!target_grouplist
) {
10617 ret
= -TARGET_EFAULT
;
10620 for(i
= 0;i
< ret
; i
++)
10621 target_grouplist
[i
] = tswap32(grouplist
[i
]);
10622 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
10627 #ifdef TARGET_NR_setgroups32
10628 case TARGET_NR_setgroups32
:
10630 int gidsetsize
= arg1
;
10631 uint32_t *target_grouplist
;
10635 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10636 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
10637 if (!target_grouplist
) {
10638 ret
= -TARGET_EFAULT
;
10641 for(i
= 0;i
< gidsetsize
; i
++)
10642 grouplist
[i
] = tswap32(target_grouplist
[i
]);
10643 unlock_user(target_grouplist
, arg2
, 0);
10644 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
10648 #ifdef TARGET_NR_fchown32
10649 case TARGET_NR_fchown32
:
10650 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
10653 #ifdef TARGET_NR_setresuid32
10654 case TARGET_NR_setresuid32
:
10655 ret
= get_errno(sys_setresuid(arg1
, arg2
, arg3
));
10658 #ifdef TARGET_NR_getresuid32
10659 case TARGET_NR_getresuid32
:
10661 uid_t ruid
, euid
, suid
;
10662 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10663 if (!is_error(ret
)) {
10664 if (put_user_u32(ruid
, arg1
)
10665 || put_user_u32(euid
, arg2
)
10666 || put_user_u32(suid
, arg3
))
10672 #ifdef TARGET_NR_setresgid32
10673 case TARGET_NR_setresgid32
:
10674 ret
= get_errno(sys_setresgid(arg1
, arg2
, arg3
));
10677 #ifdef TARGET_NR_getresgid32
10678 case TARGET_NR_getresgid32
:
10680 gid_t rgid
, egid
, sgid
;
10681 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10682 if (!is_error(ret
)) {
10683 if (put_user_u32(rgid
, arg1
)
10684 || put_user_u32(egid
, arg2
)
10685 || put_user_u32(sgid
, arg3
))
10691 #ifdef TARGET_NR_chown32
10692 case TARGET_NR_chown32
:
10693 if (!(p
= lock_user_string(arg1
)))
10695 ret
= get_errno(chown(p
, arg2
, arg3
));
10696 unlock_user(p
, arg1
, 0);
10699 #ifdef TARGET_NR_setuid32
10700 case TARGET_NR_setuid32
:
10701 ret
= get_errno(sys_setuid(arg1
));
10704 #ifdef TARGET_NR_setgid32
10705 case TARGET_NR_setgid32
:
10706 ret
= get_errno(sys_setgid(arg1
));
10709 #ifdef TARGET_NR_setfsuid32
10710 case TARGET_NR_setfsuid32
:
10711 ret
= get_errno(setfsuid(arg1
));
10714 #ifdef TARGET_NR_setfsgid32
10715 case TARGET_NR_setfsgid32
:
10716 ret
= get_errno(setfsgid(arg1
));
10720 case TARGET_NR_pivot_root
:
10721 goto unimplemented
;
10722 #ifdef TARGET_NR_mincore
10723 case TARGET_NR_mincore
:
10726 ret
= -TARGET_EFAULT
;
10727 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
10729 if (!(p
= lock_user_string(arg3
)))
10731 ret
= get_errno(mincore(a
, arg2
, p
));
10732 unlock_user(p
, arg3
, ret
);
10734 unlock_user(a
, arg1
, 0);
10738 #ifdef TARGET_NR_arm_fadvise64_64
10739 case TARGET_NR_arm_fadvise64_64
:
10740 /* arm_fadvise64_64 looks like fadvise64_64 but
10741 * with different argument order: fd, advice, offset, len
10742 * rather than the usual fd, offset, len, advice.
10743 * Note that offset and len are both 64-bit so appear as
10744 * pairs of 32-bit registers.
10746 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
10747 target_offset64(arg5
, arg6
), arg2
);
10748 ret
= -host_to_target_errno(ret
);
10752 #if TARGET_ABI_BITS == 32
10754 #ifdef TARGET_NR_fadvise64_64
10755 case TARGET_NR_fadvise64_64
:
10756 /* 6 args: fd, offset (high, low), len (high, low), advice */
10757 if (regpairs_aligned(cpu_env
)) {
10758 /* offset is in (3,4), len in (5,6) and advice in 7 */
10765 ret
= -host_to_target_errno(posix_fadvise(arg1
,
10766 target_offset64(arg2
, arg3
),
10767 target_offset64(arg4
, arg5
),
10772 #ifdef TARGET_NR_fadvise64
10773 case TARGET_NR_fadvise64
:
10774 /* 5 args: fd, offset (high, low), len, advice */
10775 if (regpairs_aligned(cpu_env
)) {
10776 /* offset is in (3,4), len in 5 and advice in 6 */
10782 ret
= -host_to_target_errno(posix_fadvise(arg1
,
10783 target_offset64(arg2
, arg3
),
10788 #else /* not a 32-bit ABI */
10789 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10790 #ifdef TARGET_NR_fadvise64_64
10791 case TARGET_NR_fadvise64_64
:
10793 #ifdef TARGET_NR_fadvise64
10794 case TARGET_NR_fadvise64
:
10796 #ifdef TARGET_S390X
10798 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
10799 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
10800 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
10801 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
10805 ret
= -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
10808 #endif /* end of 64-bit ABI fadvise handling */
10810 #ifdef TARGET_NR_madvise
10811 case TARGET_NR_madvise
:
10812 /* A straight passthrough may not be safe because qemu sometimes
10813 turns private file-backed mappings into anonymous mappings.
10814 This will break MADV_DONTNEED.
10815 This is a hint, so ignoring and returning success is ok. */
10816 ret
= get_errno(0);
10819 #if TARGET_ABI_BITS == 32
10820 case TARGET_NR_fcntl64
:
10824 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
10825 to_flock64_fn
*copyto
= copy_to_user_flock64
;
10828 if (((CPUARMState
*)cpu_env
)->eabi
) {
10829 copyfrom
= copy_from_user_eabi_flock64
;
10830 copyto
= copy_to_user_eabi_flock64
;
10834 cmd
= target_to_host_fcntl_cmd(arg2
);
10835 if (cmd
== -TARGET_EINVAL
) {
10841 case TARGET_F_GETLK64
:
10842 ret
= copyfrom(&fl
, arg3
);
10846 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
10848 ret
= copyto(arg3
, &fl
);
10852 case TARGET_F_SETLK64
:
10853 case TARGET_F_SETLKW64
:
10854 ret
= copyfrom(&fl
, arg3
);
10858 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
10861 ret
= do_fcntl(arg1
, arg2
, arg3
);
10867 #ifdef TARGET_NR_cacheflush
10868 case TARGET_NR_cacheflush
:
10869 /* self-modifying code is handled automatically, so nothing needed */
10873 #ifdef TARGET_NR_security
10874 case TARGET_NR_security
:
10875 goto unimplemented
;
10877 #ifdef TARGET_NR_getpagesize
10878 case TARGET_NR_getpagesize
:
10879 ret
= TARGET_PAGE_SIZE
;
10882 case TARGET_NR_gettid
:
10883 ret
= get_errno(gettid());
10885 #ifdef TARGET_NR_readahead
10886 case TARGET_NR_readahead
:
10887 #if TARGET_ABI_BITS == 32
10888 if (regpairs_aligned(cpu_env
)) {
10893 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
10895 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
10900 #ifdef TARGET_NR_setxattr
10901 case TARGET_NR_listxattr
:
10902 case TARGET_NR_llistxattr
:
10906 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10908 ret
= -TARGET_EFAULT
;
10912 p
= lock_user_string(arg1
);
10914 if (num
== TARGET_NR_listxattr
) {
10915 ret
= get_errno(listxattr(p
, b
, arg3
));
10917 ret
= get_errno(llistxattr(p
, b
, arg3
));
10920 ret
= -TARGET_EFAULT
;
10922 unlock_user(p
, arg1
, 0);
10923 unlock_user(b
, arg2
, arg3
);
10926 case TARGET_NR_flistxattr
:
10930 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10932 ret
= -TARGET_EFAULT
;
10936 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
10937 unlock_user(b
, arg2
, arg3
);
10940 case TARGET_NR_setxattr
:
10941 case TARGET_NR_lsetxattr
:
10943 void *p
, *n
, *v
= 0;
10945 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
10947 ret
= -TARGET_EFAULT
;
10951 p
= lock_user_string(arg1
);
10952 n
= lock_user_string(arg2
);
10954 if (num
== TARGET_NR_setxattr
) {
10955 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
10957 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
10960 ret
= -TARGET_EFAULT
;
10962 unlock_user(p
, arg1
, 0);
10963 unlock_user(n
, arg2
, 0);
10964 unlock_user(v
, arg3
, 0);
10967 case TARGET_NR_fsetxattr
:
10971 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
10973 ret
= -TARGET_EFAULT
;
10977 n
= lock_user_string(arg2
);
10979 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
10981 ret
= -TARGET_EFAULT
;
10983 unlock_user(n
, arg2
, 0);
10984 unlock_user(v
, arg3
, 0);
10987 case TARGET_NR_getxattr
:
10988 case TARGET_NR_lgetxattr
:
10990 void *p
, *n
, *v
= 0;
10992 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
10994 ret
= -TARGET_EFAULT
;
10998 p
= lock_user_string(arg1
);
10999 n
= lock_user_string(arg2
);
11001 if (num
== TARGET_NR_getxattr
) {
11002 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
11004 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
11007 ret
= -TARGET_EFAULT
;
11009 unlock_user(p
, arg1
, 0);
11010 unlock_user(n
, arg2
, 0);
11011 unlock_user(v
, arg3
, arg4
);
11014 case TARGET_NR_fgetxattr
:
11018 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11020 ret
= -TARGET_EFAULT
;
11024 n
= lock_user_string(arg2
);
11026 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
11028 ret
= -TARGET_EFAULT
;
11030 unlock_user(n
, arg2
, 0);
11031 unlock_user(v
, arg3
, arg4
);
11034 case TARGET_NR_removexattr
:
11035 case TARGET_NR_lremovexattr
:
11038 p
= lock_user_string(arg1
);
11039 n
= lock_user_string(arg2
);
11041 if (num
== TARGET_NR_removexattr
) {
11042 ret
= get_errno(removexattr(p
, n
));
11044 ret
= get_errno(lremovexattr(p
, n
));
11047 ret
= -TARGET_EFAULT
;
11049 unlock_user(p
, arg1
, 0);
11050 unlock_user(n
, arg2
, 0);
11053 case TARGET_NR_fremovexattr
:
11056 n
= lock_user_string(arg2
);
11058 ret
= get_errno(fremovexattr(arg1
, n
));
11060 ret
= -TARGET_EFAULT
;
11062 unlock_user(n
, arg2
, 0);
11066 #endif /* CONFIG_ATTR */
11067 #ifdef TARGET_NR_set_thread_area
11068 case TARGET_NR_set_thread_area
:
11069 #if defined(TARGET_MIPS)
11070 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
11073 #elif defined(TARGET_CRIS)
11075 ret
= -TARGET_EINVAL
;
11077 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
11081 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11082 ret
= do_set_thread_area(cpu_env
, arg1
);
11084 #elif defined(TARGET_M68K)
11086 TaskState
*ts
= cpu
->opaque
;
11087 ts
->tp_value
= arg1
;
11092 goto unimplemented_nowarn
;
11095 #ifdef TARGET_NR_get_thread_area
11096 case TARGET_NR_get_thread_area
:
11097 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11098 ret
= do_get_thread_area(cpu_env
, arg1
);
11100 #elif defined(TARGET_M68K)
11102 TaskState
*ts
= cpu
->opaque
;
11103 ret
= ts
->tp_value
;
11107 goto unimplemented_nowarn
;
11110 #ifdef TARGET_NR_getdomainname
11111 case TARGET_NR_getdomainname
:
11112 goto unimplemented_nowarn
;
11115 #ifdef TARGET_NR_clock_gettime
11116 case TARGET_NR_clock_gettime
:
11118 struct timespec ts
;
11119 ret
= get_errno(clock_gettime(arg1
, &ts
));
11120 if (!is_error(ret
)) {
11121 host_to_target_timespec(arg2
, &ts
);
11126 #ifdef TARGET_NR_clock_getres
11127 case TARGET_NR_clock_getres
:
11129 struct timespec ts
;
11130 ret
= get_errno(clock_getres(arg1
, &ts
));
11131 if (!is_error(ret
)) {
11132 host_to_target_timespec(arg2
, &ts
);
11137 #ifdef TARGET_NR_clock_nanosleep
11138 case TARGET_NR_clock_nanosleep
:
11140 struct timespec ts
;
11141 target_to_host_timespec(&ts
, arg3
);
11142 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
11143 &ts
, arg4
? &ts
: NULL
));
11145 host_to_target_timespec(arg4
, &ts
);
11147 #if defined(TARGET_PPC)
11148 /* clock_nanosleep is odd in that it returns positive errno values.
11149 * On PPC, CR0 bit 3 should be set in such a situation. */
11150 if (ret
&& ret
!= -TARGET_ERESTARTSYS
) {
11151 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
11158 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11159 case TARGET_NR_set_tid_address
:
11160 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
11164 case TARGET_NR_tkill
:
11165 ret
= get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
11168 case TARGET_NR_tgkill
:
11169 ret
= get_errno(safe_tgkill((int)arg1
, (int)arg2
,
11170 target_to_host_signal(arg3
)));
11173 #ifdef TARGET_NR_set_robust_list
11174 case TARGET_NR_set_robust_list
:
11175 case TARGET_NR_get_robust_list
:
11176 /* The ABI for supporting robust futexes has userspace pass
11177 * the kernel a pointer to a linked list which is updated by
11178 * userspace after the syscall; the list is walked by the kernel
11179 * when the thread exits. Since the linked list in QEMU guest
11180 * memory isn't a valid linked list for the host and we have
11181 * no way to reliably intercept the thread-death event, we can't
11182 * support these. Silently return ENOSYS so that guest userspace
11183 * falls back to a non-robust futex implementation (which should
11184 * be OK except in the corner case of the guest crashing while
11185 * holding a mutex that is shared with another process via
11188 goto unimplemented_nowarn
;
11191 #if defined(TARGET_NR_utimensat)
11192 case TARGET_NR_utimensat
:
11194 struct timespec
*tsp
, ts
[2];
11198 target_to_host_timespec(ts
, arg3
);
11199 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
11203 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
11205 if (!(p
= lock_user_string(arg2
))) {
11206 ret
= -TARGET_EFAULT
;
11209 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
11210 unlock_user(p
, arg2
, 0);
11215 case TARGET_NR_futex
:
11216 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11218 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11219 case TARGET_NR_inotify_init
:
11220 ret
= get_errno(sys_inotify_init());
11223 #ifdef CONFIG_INOTIFY1
11224 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11225 case TARGET_NR_inotify_init1
:
11226 ret
= get_errno(sys_inotify_init1(arg1
));
11230 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11231 case TARGET_NR_inotify_add_watch
:
11232 p
= lock_user_string(arg2
);
11233 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
11234 unlock_user(p
, arg2
, 0);
11237 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11238 case TARGET_NR_inotify_rm_watch
:
11239 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
11243 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11244 case TARGET_NR_mq_open
:
11246 struct mq_attr posix_mq_attr
, *attrp
;
11248 p
= lock_user_string(arg1
- 1);
11250 copy_from_user_mq_attr (&posix_mq_attr
, arg4
);
11251 attrp
= &posix_mq_attr
;
11255 ret
= get_errno(mq_open(p
, arg2
, arg3
, attrp
));
11256 unlock_user (p
, arg1
, 0);
11260 case TARGET_NR_mq_unlink
:
11261 p
= lock_user_string(arg1
- 1);
11262 ret
= get_errno(mq_unlink(p
));
11263 unlock_user (p
, arg1
, 0);
11266 case TARGET_NR_mq_timedsend
:
11268 struct timespec ts
;
11270 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11272 target_to_host_timespec(&ts
, arg5
);
11273 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
11274 host_to_target_timespec(arg5
, &ts
);
11276 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
11278 unlock_user (p
, arg2
, arg3
);
11282 case TARGET_NR_mq_timedreceive
:
11284 struct timespec ts
;
11287 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11289 target_to_host_timespec(&ts
, arg5
);
11290 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11292 host_to_target_timespec(arg5
, &ts
);
11294 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11297 unlock_user (p
, arg2
, arg3
);
11299 put_user_u32(prio
, arg4
);
11303 /* Not implemented for now... */
11304 /* case TARGET_NR_mq_notify: */
11307 case TARGET_NR_mq_getsetattr
:
11309 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
11312 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
11313 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
11316 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
11317 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
11324 #ifdef CONFIG_SPLICE
11325 #ifdef TARGET_NR_tee
11326 case TARGET_NR_tee
:
11328 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
11332 #ifdef TARGET_NR_splice
11333 case TARGET_NR_splice
:
11335 loff_t loff_in
, loff_out
;
11336 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
11338 if (get_user_u64(loff_in
, arg2
)) {
11341 ploff_in
= &loff_in
;
11344 if (get_user_u64(loff_out
, arg4
)) {
11347 ploff_out
= &loff_out
;
11349 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
11351 if (put_user_u64(loff_in
, arg2
)) {
11356 if (put_user_u64(loff_out
, arg4
)) {
11363 #ifdef TARGET_NR_vmsplice
11364 case TARGET_NR_vmsplice
:
11366 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
11368 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
11369 unlock_iovec(vec
, arg2
, arg3
, 0);
11371 ret
= -host_to_target_errno(errno
);
11376 #endif /* CONFIG_SPLICE */
11377 #ifdef CONFIG_EVENTFD
11378 #if defined(TARGET_NR_eventfd)
11379 case TARGET_NR_eventfd
:
11380 ret
= get_errno(eventfd(arg1
, 0));
11381 fd_trans_unregister(ret
);
11384 #if defined(TARGET_NR_eventfd2)
11385 case TARGET_NR_eventfd2
:
11387 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
11388 if (arg2
& TARGET_O_NONBLOCK
) {
11389 host_flags
|= O_NONBLOCK
;
11391 if (arg2
& TARGET_O_CLOEXEC
) {
11392 host_flags
|= O_CLOEXEC
;
11394 ret
= get_errno(eventfd(arg1
, host_flags
));
11395 fd_trans_unregister(ret
);
11399 #endif /* CONFIG_EVENTFD */
11400 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11401 case TARGET_NR_fallocate
:
11402 #if TARGET_ABI_BITS == 32
11403 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
11404 target_offset64(arg5
, arg6
)));
11406 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
11410 #if defined(CONFIG_SYNC_FILE_RANGE)
11411 #if defined(TARGET_NR_sync_file_range)
11412 case TARGET_NR_sync_file_range
:
11413 #if TARGET_ABI_BITS == 32
11414 #if defined(TARGET_MIPS)
11415 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11416 target_offset64(arg5
, arg6
), arg7
));
11418 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
11419 target_offset64(arg4
, arg5
), arg6
));
11420 #endif /* !TARGET_MIPS */
11422 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
11426 #if defined(TARGET_NR_sync_file_range2)
11427 case TARGET_NR_sync_file_range2
:
11428 /* This is like sync_file_range but the arguments are reordered */
11429 #if TARGET_ABI_BITS == 32
11430 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11431 target_offset64(arg5
, arg6
), arg2
));
11433 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
11438 #if defined(TARGET_NR_signalfd4)
11439 case TARGET_NR_signalfd4
:
11440 ret
= do_signalfd4(arg1
, arg2
, arg4
);
11443 #if defined(TARGET_NR_signalfd)
11444 case TARGET_NR_signalfd
:
11445 ret
= do_signalfd4(arg1
, arg2
, 0);
11448 #if defined(CONFIG_EPOLL)
11449 #if defined(TARGET_NR_epoll_create)
11450 case TARGET_NR_epoll_create
:
11451 ret
= get_errno(epoll_create(arg1
));
11454 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11455 case TARGET_NR_epoll_create1
:
11456 ret
= get_errno(epoll_create1(arg1
));
11459 #if defined(TARGET_NR_epoll_ctl)
11460 case TARGET_NR_epoll_ctl
:
11462 struct epoll_event ep
;
11463 struct epoll_event
*epp
= 0;
11465 struct target_epoll_event
*target_ep
;
11466 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
11469 ep
.events
= tswap32(target_ep
->events
);
11470 /* The epoll_data_t union is just opaque data to the kernel,
11471 * so we transfer all 64 bits across and need not worry what
11472 * actual data type it is.
11474 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
11475 unlock_user_struct(target_ep
, arg4
, 0);
11478 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
11483 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11484 #if defined(TARGET_NR_epoll_wait)
11485 case TARGET_NR_epoll_wait
:
11487 #if defined(TARGET_NR_epoll_pwait)
11488 case TARGET_NR_epoll_pwait
:
11491 struct target_epoll_event
*target_ep
;
11492 struct epoll_event
*ep
;
11494 int maxevents
= arg3
;
11495 int timeout
= arg4
;
11497 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
11498 maxevents
* sizeof(struct target_epoll_event
), 1);
11503 ep
= alloca(maxevents
* sizeof(struct epoll_event
));
11506 #if defined(TARGET_NR_epoll_pwait)
11507 case TARGET_NR_epoll_pwait
:
11509 target_sigset_t
*target_set
;
11510 sigset_t _set
, *set
= &_set
;
11513 if (arg6
!= sizeof(target_sigset_t
)) {
11514 ret
= -TARGET_EINVAL
;
11518 target_set
= lock_user(VERIFY_READ
, arg5
,
11519 sizeof(target_sigset_t
), 1);
11521 unlock_user(target_ep
, arg2
, 0);
11524 target_to_host_sigset(set
, target_set
);
11525 unlock_user(target_set
, arg5
, 0);
11530 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
11531 set
, SIGSET_T_SIZE
));
11535 #if defined(TARGET_NR_epoll_wait)
11536 case TARGET_NR_epoll_wait
:
11537 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
11542 ret
= -TARGET_ENOSYS
;
11544 if (!is_error(ret
)) {
11546 for (i
= 0; i
< ret
; i
++) {
11547 target_ep
[i
].events
= tswap32(ep
[i
].events
);
11548 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
11551 unlock_user(target_ep
, arg2
, ret
* sizeof(struct target_epoll_event
));
11556 #ifdef TARGET_NR_prlimit64
11557 case TARGET_NR_prlimit64
:
11559 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11560 struct target_rlimit64
*target_rnew
, *target_rold
;
11561 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
11562 int resource
= target_to_host_resource(arg2
);
11564 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
11567 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
11568 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
11569 unlock_user_struct(target_rnew
, arg3
, 0);
11573 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
11574 if (!is_error(ret
) && arg4
) {
11575 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
11578 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
11579 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
11580 unlock_user_struct(target_rold
, arg4
, 1);
11585 #ifdef TARGET_NR_gethostname
11586 case TARGET_NR_gethostname
:
11588 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
11590 ret
= get_errno(gethostname(name
, arg2
));
11591 unlock_user(name
, arg1
, arg2
);
11593 ret
= -TARGET_EFAULT
;
11598 #ifdef TARGET_NR_atomic_cmpxchg_32
11599 case TARGET_NR_atomic_cmpxchg_32
:
11601 /* should use start_exclusive from main.c */
11602 abi_ulong mem_value
;
11603 if (get_user_u32(mem_value
, arg6
)) {
11604 target_siginfo_t info
;
11605 info
.si_signo
= SIGSEGV
;
11607 info
.si_code
= TARGET_SEGV_MAPERR
;
11608 info
._sifields
._sigfault
._addr
= arg6
;
11609 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
, &info
);
11613 if (mem_value
== arg2
)
11614 put_user_u32(arg1
, arg6
);
11619 #ifdef TARGET_NR_atomic_barrier
11620 case TARGET_NR_atomic_barrier
:
11622 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
11628 #ifdef TARGET_NR_timer_create
11629 case TARGET_NR_timer_create
:
11631 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11633 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
11636 int timer_index
= next_free_host_timer();
11638 if (timer_index
< 0) {
11639 ret
= -TARGET_EAGAIN
;
11641 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
11644 phost_sevp
= &host_sevp
;
11645 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
11651 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
11655 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
11664 #ifdef TARGET_NR_timer_settime
11665 case TARGET_NR_timer_settime
:
11667 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11668 * struct itimerspec * old_value */
11669 target_timer_t timerid
= get_timer_id(arg1
);
11673 } else if (arg3
== 0) {
11674 ret
= -TARGET_EINVAL
;
11676 timer_t htimer
= g_posix_timers
[timerid
];
11677 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
11679 target_to_host_itimerspec(&hspec_new
, arg3
);
11681 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
11682 host_to_target_itimerspec(arg2
, &hspec_old
);
11688 #ifdef TARGET_NR_timer_gettime
11689 case TARGET_NR_timer_gettime
:
11691 /* args: timer_t timerid, struct itimerspec *curr_value */
11692 target_timer_t timerid
= get_timer_id(arg1
);
11696 } else if (!arg2
) {
11697 ret
= -TARGET_EFAULT
;
11699 timer_t htimer
= g_posix_timers
[timerid
];
11700 struct itimerspec hspec
;
11701 ret
= get_errno(timer_gettime(htimer
, &hspec
));
11703 if (host_to_target_itimerspec(arg2
, &hspec
)) {
11704 ret
= -TARGET_EFAULT
;
11711 #ifdef TARGET_NR_timer_getoverrun
11712 case TARGET_NR_timer_getoverrun
:
11714 /* args: timer_t timerid */
11715 target_timer_t timerid
= get_timer_id(arg1
);
11720 timer_t htimer
= g_posix_timers
[timerid
];
11721 ret
= get_errno(timer_getoverrun(htimer
));
11723 fd_trans_unregister(ret
);
11728 #ifdef TARGET_NR_timer_delete
11729 case TARGET_NR_timer_delete
:
11731 /* args: timer_t timerid */
11732 target_timer_t timerid
= get_timer_id(arg1
);
11737 timer_t htimer
= g_posix_timers
[timerid
];
11738 ret
= get_errno(timer_delete(htimer
));
11739 g_posix_timers
[timerid
] = 0;
11745 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11746 case TARGET_NR_timerfd_create
:
11747 ret
= get_errno(timerfd_create(arg1
,
11748 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
11752 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11753 case TARGET_NR_timerfd_gettime
:
11755 struct itimerspec its_curr
;
11757 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
11759 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
11766 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11767 case TARGET_NR_timerfd_settime
:
11769 struct itimerspec its_new
, its_old
, *p_new
;
11772 if (target_to_host_itimerspec(&its_new
, arg3
)) {
11780 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
11782 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
11789 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11790 case TARGET_NR_ioprio_get
:
11791 ret
= get_errno(ioprio_get(arg1
, arg2
));
11795 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11796 case TARGET_NR_ioprio_set
:
11797 ret
= get_errno(ioprio_set(arg1
, arg2
, arg3
));
11801 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11802 case TARGET_NR_setns
:
11803 ret
= get_errno(setns(arg1
, arg2
));
11806 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11807 case TARGET_NR_unshare
:
11808 ret
= get_errno(unshare(arg1
));
11814 gemu_log("qemu: Unsupported syscall: %d\n", num
);
11815 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
11816 unimplemented_nowarn
:
11818 ret
= -TARGET_ENOSYS
;
11823 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
11826 print_syscall_ret(num
, ret
);
11827 trace_guest_user_syscall_ret(cpu
, num
, ret
);
11830 ret
= -TARGET_EFAULT
;