/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
36 #include <linux/capability.h>
38 #include <sys/timex.h>
40 int __clone2(int (*fn
)(void *), void *child_stack_base
,
41 size_t stack_size
, int flags
, void *arg
, ...);
43 #include <sys/socket.h>
47 #include <sys/times.h>
50 #include <sys/statfs.h>
53 #include <sys/sysinfo.h>
54 #include <sys/signalfd.h>
55 //#include <sys/user.h>
56 #include <netinet/ip.h>
57 #include <netinet/tcp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/errqueue.h>
62 #include "qemu-common.h"
64 #include <sys/timerfd.h>
70 #include <sys/eventfd.h>
73 #include <sys/epoll.h>
76 #include "qemu/xattr.h"
78 #ifdef CONFIG_SENDFILE
79 #include <sys/sendfile.h>
82 #define termios host_termios
83 #define winsize host_winsize
84 #define termio host_termio
85 #define sgttyb host_sgttyb /* same as target */
86 #define tchars host_tchars /* same as target */
87 #define ltchars host_ltchars /* same as target */
89 #include <linux/termios.h>
90 #include <linux/unistd.h>
91 #include <linux/cdrom.h>
92 #include <linux/hdreg.h>
93 #include <linux/soundcard.h>
95 #include <linux/mtio.h>
97 #if defined(CONFIG_FIEMAP)
98 #include <linux/fiemap.h>
100 #include <linux/fb.h>
101 #include <linux/vt.h>
102 #include <linux/dm-ioctl.h>
103 #include <linux/reboot.h>
104 #include <linux/route.h>
105 #include <linux/filter.h>
106 #include <linux/blkpg.h>
107 #include <netpacket/packet.h>
108 #include <linux/netlink.h>
109 #ifdef CONFIG_RTNETLINK
110 #include <linux/rtnetlink.h>
111 #include <linux/if_bridge.h>
113 #include <linux/audit.h>
114 #include "linux_loop.h"
/* Provide CLONE_IO for hosts whose headers predate it. */
#ifndef CLONE_IO
#define CLONE_IO                0x80000000      /* Clone io context */
#endif
/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */

/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
134 #define CLONE_THREAD_FLAGS \
135 (CLONE_VM | CLONE_FS | CLONE_FILES | \
136 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
142 #define CLONE_IGNORED_FLAGS \
143 (CLONE_DETACHED | CLONE_IO)
145 /* Flags for fork which we can implement within QEMU itself */
146 #define CLONE_OPTIONAL_FORK_FLAGS \
147 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
148 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
150 /* Flags for thread creation which we can implement within QEMU itself */
151 #define CLONE_OPTIONAL_THREAD_FLAGS \
152 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
153 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
155 #define CLONE_INVALID_FORK_FLAGS \
156 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
158 #define CLONE_INVALID_THREAD_FLAGS \
159 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
160 CLONE_IGNORED_FLAGS))
/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */

/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
174 //#define DEBUG_ERESTARTSYS
176 //#include <linux/msdos_fs.h>
177 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
178 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
/* Generators for static wrapper functions around raw host syscalls,
 * for zero to six arguments: _syscallN(type, name, type1, arg1, ...)
 * defines "static type name(...)" forwarding to syscall(__NR_<name>).
 */
#define _syscall0(type,name)            \
static type name (void)                 \
{                                       \
    return syscall(__NR_##name);        \
}

#define _syscall1(type,name,type1,arg1)         \
static type name (type1 arg1)                   \
{                                               \
    return syscall(__NR_##name, arg1);          \
}

#define _syscall2(type,name,type1,arg1,type2,arg2)      \
static type name (type1 arg1,type2 arg2)                \
{                                                       \
    return syscall(__NR_##name, arg1, arg2);            \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)   \
static type name (type1 arg1,type2 arg2,type3 arg3)             \
{                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3);              \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)           \
{                                                                        \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4);                 \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5)                                            \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{                                                                        \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);           \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6)                                 \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6)                                            \
{                                                                        \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);     \
}
235 #define __NR_sys_uname __NR_uname
236 #define __NR_sys_getcwd1 __NR_getcwd
237 #define __NR_sys_getdents __NR_getdents
238 #define __NR_sys_getdents64 __NR_getdents64
239 #define __NR_sys_getpriority __NR_getpriority
240 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
241 #define __NR_sys_syslog __NR_syslog
242 #define __NR_sys_futex __NR_futex
243 #define __NR_sys_inotify_init __NR_inotify_init
244 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
245 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
247 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
249 #define __NR__llseek __NR_lseek
252 /* Newer kernel ports have llseek() instead of _llseek() */
253 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
254 #define TARGET_NR__llseek TARGET_NR_llseek
/* Wrapper for the host gettid(): use the raw syscall where the host
 * provides the syscall number, otherwise fall back to a stub.
 */
#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    return -ENOSYS;
}
#endif
266 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
267 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
269 #if !defined(__NR_getdents) || \
270 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
271 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
273 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
274 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
275 loff_t
*, res
, uint
, wh
);
277 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
278 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
279 #ifdef __NR_exit_group
280 _syscall1(int,exit_group
,int,error_code
)
282 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
283 _syscall1(int,set_tid_address
,int *,tidptr
)
285 #if defined(TARGET_NR_futex) && defined(__NR_futex)
286 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
287 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
289 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
290 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
291 unsigned long *, user_mask_ptr
);
292 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
293 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
294 unsigned long *, user_mask_ptr
);
295 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
297 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
298 struct __user_cap_data_struct
*, data
);
299 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
300 struct __user_cap_data_struct
*, data
);
301 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
302 _syscall2(int, ioprio_get
, int, which
, int, who
)
304 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
305 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
307 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
308 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
311 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
312 _syscall5(int, kcmp
, pid_t
, pid1
, pid_t
, pid2
, int, type
,
313 unsigned long, idx1
, unsigned long, idx2
)
316 static bitmask_transtbl fcntl_flags_tbl
[] = {
317 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
318 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
319 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
320 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
321 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
322 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
323 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
324 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
325 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
326 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
327 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
328 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
329 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
330 #if defined(O_DIRECT)
331 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
333 #if defined(O_NOATIME)
334 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
336 #if defined(O_CLOEXEC)
337 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
340 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
342 /* Don't terminate the list prematurely on 64-bit host+guest. */
343 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
344 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
351 QEMU_IFLA_BR_FORWARD_DELAY
,
352 QEMU_IFLA_BR_HELLO_TIME
,
353 QEMU_IFLA_BR_MAX_AGE
,
354 QEMU_IFLA_BR_AGEING_TIME
,
355 QEMU_IFLA_BR_STP_STATE
,
356 QEMU_IFLA_BR_PRIORITY
,
357 QEMU_IFLA_BR_VLAN_FILTERING
,
358 QEMU_IFLA_BR_VLAN_PROTOCOL
,
359 QEMU_IFLA_BR_GROUP_FWD_MASK
,
360 QEMU_IFLA_BR_ROOT_ID
,
361 QEMU_IFLA_BR_BRIDGE_ID
,
362 QEMU_IFLA_BR_ROOT_PORT
,
363 QEMU_IFLA_BR_ROOT_PATH_COST
,
364 QEMU_IFLA_BR_TOPOLOGY_CHANGE
,
365 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED
,
366 QEMU_IFLA_BR_HELLO_TIMER
,
367 QEMU_IFLA_BR_TCN_TIMER
,
368 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER
,
369 QEMU_IFLA_BR_GC_TIMER
,
370 QEMU_IFLA_BR_GROUP_ADDR
,
371 QEMU_IFLA_BR_FDB_FLUSH
,
372 QEMU_IFLA_BR_MCAST_ROUTER
,
373 QEMU_IFLA_BR_MCAST_SNOOPING
,
374 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR
,
375 QEMU_IFLA_BR_MCAST_QUERIER
,
376 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY
,
377 QEMU_IFLA_BR_MCAST_HASH_MAX
,
378 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT
,
379 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT
,
380 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL
,
381 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL
,
382 QEMU_IFLA_BR_MCAST_QUERIER_INTVL
,
383 QEMU_IFLA_BR_MCAST_QUERY_INTVL
,
384 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
,
385 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL
,
386 QEMU_IFLA_BR_NF_CALL_IPTABLES
,
387 QEMU_IFLA_BR_NF_CALL_IP6TABLES
,
388 QEMU_IFLA_BR_NF_CALL_ARPTABLES
,
389 QEMU_IFLA_BR_VLAN_DEFAULT_PVID
,
391 QEMU_IFLA_BR_VLAN_STATS_ENABLED
,
392 QEMU_IFLA_BR_MCAST_STATS_ENABLED
,
416 QEMU_IFLA_NET_NS_PID
,
419 QEMU_IFLA_VFINFO_LIST
,
427 QEMU_IFLA_PROMISCUITY
,
428 QEMU_IFLA_NUM_TX_QUEUES
,
429 QEMU_IFLA_NUM_RX_QUEUES
,
431 QEMU_IFLA_PHYS_PORT_ID
,
432 QEMU_IFLA_CARRIER_CHANGES
,
433 QEMU_IFLA_PHYS_SWITCH_ID
,
434 QEMU_IFLA_LINK_NETNSID
,
435 QEMU_IFLA_PHYS_PORT_NAME
,
436 QEMU_IFLA_PROTO_DOWN
,
437 QEMU_IFLA_GSO_MAX_SEGS
,
438 QEMU_IFLA_GSO_MAX_SIZE
,
445 QEMU_IFLA_BRPORT_UNSPEC
,
446 QEMU_IFLA_BRPORT_STATE
,
447 QEMU_IFLA_BRPORT_PRIORITY
,
448 QEMU_IFLA_BRPORT_COST
,
449 QEMU_IFLA_BRPORT_MODE
,
450 QEMU_IFLA_BRPORT_GUARD
,
451 QEMU_IFLA_BRPORT_PROTECT
,
452 QEMU_IFLA_BRPORT_FAST_LEAVE
,
453 QEMU_IFLA_BRPORT_LEARNING
,
454 QEMU_IFLA_BRPORT_UNICAST_FLOOD
,
455 QEMU_IFLA_BRPORT_PROXYARP
,
456 QEMU_IFLA_BRPORT_LEARNING_SYNC
,
457 QEMU_IFLA_BRPORT_PROXYARP_WIFI
,
458 QEMU_IFLA_BRPORT_ROOT_ID
,
459 QEMU_IFLA_BRPORT_BRIDGE_ID
,
460 QEMU_IFLA_BRPORT_DESIGNATED_PORT
,
461 QEMU_IFLA_BRPORT_DESIGNATED_COST
,
464 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
,
465 QEMU_IFLA_BRPORT_CONFIG_PENDING
,
466 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER
,
467 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER
,
468 QEMU_IFLA_BRPORT_HOLD_TIMER
,
469 QEMU_IFLA_BRPORT_FLUSH
,
470 QEMU_IFLA_BRPORT_MULTICAST_ROUTER
,
471 QEMU_IFLA_BRPORT_PAD
,
472 QEMU___IFLA_BRPORT_MAX
476 QEMU_IFLA_INFO_UNSPEC
,
479 QEMU_IFLA_INFO_XSTATS
,
480 QEMU_IFLA_INFO_SLAVE_KIND
,
481 QEMU_IFLA_INFO_SLAVE_DATA
,
482 QEMU___IFLA_INFO_MAX
,
486 QEMU_IFLA_INET_UNSPEC
,
488 QEMU___IFLA_INET_MAX
,
492 QEMU_IFLA_INET6_UNSPEC
,
493 QEMU_IFLA_INET6_FLAGS
,
494 QEMU_IFLA_INET6_CONF
,
495 QEMU_IFLA_INET6_STATS
,
496 QEMU_IFLA_INET6_MCAST
,
497 QEMU_IFLA_INET6_CACHEINFO
,
498 QEMU_IFLA_INET6_ICMP6STATS
,
499 QEMU_IFLA_INET6_TOKEN
,
500 QEMU_IFLA_INET6_ADDR_GEN_MODE
,
501 QEMU___IFLA_INET6_MAX
504 typedef abi_long (*TargetFdDataFunc
)(void *, size_t);
505 typedef abi_long (*TargetFdAddrFunc
)(void *, abi_ulong
, socklen_t
);
506 typedef struct TargetFdTrans
{
507 TargetFdDataFunc host_to_target_data
;
508 TargetFdDataFunc target_to_host_data
;
509 TargetFdAddrFunc target_to_host_addr
;
512 static TargetFdTrans
**target_fd_trans
;
514 static unsigned int target_fd_max
;
516 static TargetFdDataFunc
fd_trans_target_to_host_data(int fd
)
518 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
519 return target_fd_trans
[fd
]->target_to_host_data
;
524 static TargetFdDataFunc
fd_trans_host_to_target_data(int fd
)
526 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
527 return target_fd_trans
[fd
]->host_to_target_data
;
532 static TargetFdAddrFunc
fd_trans_target_to_host_addr(int fd
)
534 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
535 return target_fd_trans
[fd
]->target_to_host_addr
;
540 static void fd_trans_register(int fd
, TargetFdTrans
*trans
)
544 if (fd
>= target_fd_max
) {
545 oldmax
= target_fd_max
;
546 target_fd_max
= ((fd
>> 6) + 1) << 6; /* by slice of 64 entries */
547 target_fd_trans
= g_renew(TargetFdTrans
*,
548 target_fd_trans
, target_fd_max
);
549 memset((void *)(target_fd_trans
+ oldmax
), 0,
550 (target_fd_max
- oldmax
) * sizeof(TargetFdTrans
*));
552 target_fd_trans
[fd
] = trans
;
555 static void fd_trans_unregister(int fd
)
557 if (fd
>= 0 && fd
< target_fd_max
) {
558 target_fd_trans
[fd
] = NULL
;
562 static void fd_trans_dup(int oldfd
, int newfd
)
564 fd_trans_unregister(newfd
);
565 if (oldfd
< target_fd_max
&& target_fd_trans
[oldfd
]) {
566 fd_trans_register(newfd
, target_fd_trans
[oldfd
]);
/* Emulate the getcwd syscall ABI on top of the libc wrapper: the raw
 * syscall returns the length of the path including the trailing NUL
 * on success, or -1 with errno set on failure.
 */
static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}
/* utimensat: use the raw host syscall when available, otherwise a stub
 * that fails with ENOSYS (host errno).
 */
#ifdef TARGET_NR_utimensat
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
/* inotify support: thin wrappers over the libc functions when the host
 * has inotify; otherwise drop the guest syscalls entirely (userspace
 * can usually survive without them).
 */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
/* prlimit64: always invoke the raw syscall with an explicit 64-bit
 * limit structure, since the glibc rlimit layout may differ from the
 * kernel's.
 */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maxiumum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

/* Claim and return the index of the first free timer slot, or -1 if
 * all 32 are in use.  The slot is marked busy with a dummy non-zero
 * value until timer_create() stores the real timer_t.
 */
static inline int next_free_host_timer(void)
{
    int k;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
/* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
681 #define ERRNO_TABLE_SIZE 1200
683 /* target_to_host_errno_table[] is initialized from
684 * host_to_target_errno_table[] in syscall_init(). */
685 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
689 * This list is the union of errno values overridden in asm-<arch>/errno.h
690 * minus the errnos that are not actually generic to all archs.
692 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
693 [EAGAIN
] = TARGET_EAGAIN
,
694 [EIDRM
] = TARGET_EIDRM
,
695 [ECHRNG
] = TARGET_ECHRNG
,
696 [EL2NSYNC
] = TARGET_EL2NSYNC
,
697 [EL3HLT
] = TARGET_EL3HLT
,
698 [EL3RST
] = TARGET_EL3RST
,
699 [ELNRNG
] = TARGET_ELNRNG
,
700 [EUNATCH
] = TARGET_EUNATCH
,
701 [ENOCSI
] = TARGET_ENOCSI
,
702 [EL2HLT
] = TARGET_EL2HLT
,
703 [EDEADLK
] = TARGET_EDEADLK
,
704 [ENOLCK
] = TARGET_ENOLCK
,
705 [EBADE
] = TARGET_EBADE
,
706 [EBADR
] = TARGET_EBADR
,
707 [EXFULL
] = TARGET_EXFULL
,
708 [ENOANO
] = TARGET_ENOANO
,
709 [EBADRQC
] = TARGET_EBADRQC
,
710 [EBADSLT
] = TARGET_EBADSLT
,
711 [EBFONT
] = TARGET_EBFONT
,
712 [ENOSTR
] = TARGET_ENOSTR
,
713 [ENODATA
] = TARGET_ENODATA
,
714 [ETIME
] = TARGET_ETIME
,
715 [ENOSR
] = TARGET_ENOSR
,
716 [ENONET
] = TARGET_ENONET
,
717 [ENOPKG
] = TARGET_ENOPKG
,
718 [EREMOTE
] = TARGET_EREMOTE
,
719 [ENOLINK
] = TARGET_ENOLINK
,
720 [EADV
] = TARGET_EADV
,
721 [ESRMNT
] = TARGET_ESRMNT
,
722 [ECOMM
] = TARGET_ECOMM
,
723 [EPROTO
] = TARGET_EPROTO
,
724 [EDOTDOT
] = TARGET_EDOTDOT
,
725 [EMULTIHOP
] = TARGET_EMULTIHOP
,
726 [EBADMSG
] = TARGET_EBADMSG
,
727 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
728 [EOVERFLOW
] = TARGET_EOVERFLOW
,
729 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
730 [EBADFD
] = TARGET_EBADFD
,
731 [EREMCHG
] = TARGET_EREMCHG
,
732 [ELIBACC
] = TARGET_ELIBACC
,
733 [ELIBBAD
] = TARGET_ELIBBAD
,
734 [ELIBSCN
] = TARGET_ELIBSCN
,
735 [ELIBMAX
] = TARGET_ELIBMAX
,
736 [ELIBEXEC
] = TARGET_ELIBEXEC
,
737 [EILSEQ
] = TARGET_EILSEQ
,
738 [ENOSYS
] = TARGET_ENOSYS
,
739 [ELOOP
] = TARGET_ELOOP
,
740 [ERESTART
] = TARGET_ERESTART
,
741 [ESTRPIPE
] = TARGET_ESTRPIPE
,
742 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
743 [EUSERS
] = TARGET_EUSERS
,
744 [ENOTSOCK
] = TARGET_ENOTSOCK
,
745 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
746 [EMSGSIZE
] = TARGET_EMSGSIZE
,
747 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
748 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
749 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
750 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
751 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
752 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
753 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
754 [EADDRINUSE
] = TARGET_EADDRINUSE
,
755 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
756 [ENETDOWN
] = TARGET_ENETDOWN
,
757 [ENETUNREACH
] = TARGET_ENETUNREACH
,
758 [ENETRESET
] = TARGET_ENETRESET
,
759 [ECONNABORTED
] = TARGET_ECONNABORTED
,
760 [ECONNRESET
] = TARGET_ECONNRESET
,
761 [ENOBUFS
] = TARGET_ENOBUFS
,
762 [EISCONN
] = TARGET_EISCONN
,
763 [ENOTCONN
] = TARGET_ENOTCONN
,
764 [EUCLEAN
] = TARGET_EUCLEAN
,
765 [ENOTNAM
] = TARGET_ENOTNAM
,
766 [ENAVAIL
] = TARGET_ENAVAIL
,
767 [EISNAM
] = TARGET_EISNAM
,
768 [EREMOTEIO
] = TARGET_EREMOTEIO
,
769 [EDQUOT
] = TARGET_EDQUOT
,
770 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
771 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
772 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
773 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
774 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
775 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
776 [EALREADY
] = TARGET_EALREADY
,
777 [EINPROGRESS
] = TARGET_EINPROGRESS
,
778 [ESTALE
] = TARGET_ESTALE
,
779 [ECANCELED
] = TARGET_ECANCELED
,
780 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
781 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
783 [ENOKEY
] = TARGET_ENOKEY
,
786 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
789 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
792 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
795 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
797 #ifdef ENOTRECOVERABLE
798 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
801 [ENOMSG
] = TARGET_ENOMSG
,
804 [ERFKILL
] = TARGET_ERFKILL
,
807 [EHWPOISON
] = TARGET_EHWPOISON
,
811 static inline int host_to_target_errno(int err
)
813 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
814 host_to_target_errno_table
[err
]) {
815 return host_to_target_errno_table
[err
];
820 static inline int target_to_host_errno(int err
)
822 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
823 target_to_host_errno_table
[err
]) {
824 return target_to_host_errno_table
[err
];
829 static inline abi_long
get_errno(abi_long ret
)
832 return -host_to_target_errno(errno
);
837 static inline int is_error(abi_long ret
)
839 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
842 const char *target_strerror(int err
)
844 if (err
== TARGET_ERESTARTSYS
) {
845 return "To be restarted";
847 if (err
== TARGET_QEMU_ESIGRETURN
) {
848 return "Successful exit from sigreturn";
851 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
854 return strerror(target_to_host_errno(err
));
/* Generators for safe_<name>() wrappers (0..6 arguments) that route
 * through safe_syscall(), which guarantees correct interaction with
 * guest signal handling around blocking syscalls.
 */
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
904 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
905 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
906 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
907 int, flags
, mode_t
, mode
)
908 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
909 struct rusage
*, rusage
)
910 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
911 int, options
, struct rusage
*, rusage
)
912 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
913 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
914 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
915 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
916 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
918 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
919 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
921 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
922 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
923 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
924 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
925 safe_syscall2(int, tkill
, int, tid
, int, sig
)
926 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
927 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
928 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
929 safe_syscall5(ssize_t
, preadv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
930 unsigned long, pos_l
, unsigned long, pos_h
)
931 safe_syscall5(ssize_t
, pwritev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
932 unsigned long, pos_l
, unsigned long, pos_h
)
933 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
935 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
936 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
937 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
938 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
939 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
940 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
941 safe_syscall2(int, flock
, int, fd
, int, operation
)
942 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
943 const struct timespec
*, uts
, size_t, sigsetsize
)
944 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
946 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
947 struct timespec
*, rem
)
948 #ifdef TARGET_NR_clock_nanosleep
949 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
950 const struct timespec
*, req
, struct timespec
*, rem
)
953 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
955 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
956 long, msgtype
, int, flags
)
957 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
958 unsigned, nsops
, const struct timespec
*, timeout
)
960 /* This host kernel architecture uses a single ipc syscall; fake up
961 * wrappers for the sub-operations to hide this implementation detail.
962 * Annoyingly we can't include linux/ipc.h to get the constant definitions
963 * for the call parameter because some structs in there conflict with the
964 * sys/ipc.h ones. So we just define them here, and rely on them being
965 * the same for all host architectures.
967 #define Q_SEMTIMEDOP 4
970 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
972 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
973 void *, ptr
, long, fifth
)
974 static int safe_msgsnd(int msgid
, const void *msgp
, size_t sz
, int flags
)
976 return safe_ipc(Q_IPCCALL(0, Q_MSGSND
), msgid
, sz
, flags
, (void *)msgp
, 0);
978 static int safe_msgrcv(int msgid
, void *msgp
, size_t sz
, long type
, int flags
)
980 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV
), msgid
, sz
, flags
, msgp
, type
);
982 static int safe_semtimedop(int semid
, struct sembuf
*tsops
, unsigned nsops
,
983 const struct timespec
*timeout
)
985 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP
), semid
, nsops
, 0, tsops
,
/* POSIX message queue blocking operations, only when both guest and
 * host support mq.
 */
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
1011 static inline int host_to_target_sock_type(int host_type
)
1015 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
1017 target_type
= TARGET_SOCK_DGRAM
;
1020 target_type
= TARGET_SOCK_STREAM
;
1023 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
1027 #if defined(SOCK_CLOEXEC)
1028 if (host_type
& SOCK_CLOEXEC
) {
1029 target_type
|= TARGET_SOCK_CLOEXEC
;
1033 #if defined(SOCK_NONBLOCK)
1034 if (host_type
& SOCK_NONBLOCK
) {
1035 target_type
|= TARGET_SOCK_NONBLOCK
;
1042 static abi_ulong target_brk
;
1043 static abi_ulong target_original_brk
;
1044 static abi_ulong brk_page
;
1046 void target_set_brk(abi_ulong new_brk
)
1048 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
1049 brk_page
= HOST_PAGE_ALIGN(target_brk
);
1052 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
1053 #define DEBUGF_BRK(message, args...)
1055 /* do_brk() must return target values and target errnos. */
1056 abi_long
do_brk(abi_ulong new_brk
)
1058 abi_long mapped_addr
;
1059 abi_ulong new_alloc_size
;
1061 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
1064 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
1067 if (new_brk
< target_original_brk
) {
1068 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
1073 /* If the new brk is less than the highest page reserved to the
1074 * target heap allocation, set it and we're almost done... */
1075 if (new_brk
<= brk_page
) {
1076 /* Heap contents are initialized to zero, as for anonymous
1078 if (new_brk
> target_brk
) {
1079 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
1081 target_brk
= new_brk
;
1082 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
1086 /* We need to allocate more memory after the brk... Note that
1087 * we don't use MAP_FIXED because that will map over the top of
1088 * any existing mapping (like the one with the host libc or qemu
1089 * itself); instead we treat "mapped but at wrong address" as
1090 * a failure and unmap again.
1092 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
1093 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
1094 PROT_READ
|PROT_WRITE
,
1095 MAP_ANON
|MAP_PRIVATE
, 0, 0));
1097 if (mapped_addr
== brk_page
) {
1098 /* Heap contents are initialized to zero, as for anonymous
1099 * mapped pages. Technically the new pages are already
1100 * initialized to zero since they *are* anonymous mapped
1101 * pages, however we have to take care with the contents that
1102 * come from the remaining part of the previous page: it may
1103 * contains garbage data due to a previous heap usage (grown
1104 * then shrunken). */
1105 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
1107 target_brk
= new_brk
;
1108 brk_page
= HOST_PAGE_ALIGN(target_brk
);
1109 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
1112 } else if (mapped_addr
!= -1) {
1113 /* Mapped but at wrong address, meaning there wasn't actually
1114 * enough space for this brk.
1116 target_munmap(mapped_addr
, new_alloc_size
);
1118 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
1121 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
1124 #if defined(TARGET_ALPHA)
1125 /* We (partially) emulate OSF/1 on Alpha, which requires we
1126 return a proper errno, not an unchanged brk value. */
1127 return -TARGET_ENOMEM
;
1129 /* For everything else, return the previous break. */
1133 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
1134 abi_ulong target_fds_addr
,
1138 abi_ulong b
, *target_fds
;
1140 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1141 if (!(target_fds
= lock_user(VERIFY_READ
,
1143 sizeof(abi_ulong
) * nw
,
1145 return -TARGET_EFAULT
;
1149 for (i
= 0; i
< nw
; i
++) {
1150 /* grab the abi_ulong */
1151 __get_user(b
, &target_fds
[i
]);
1152 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1153 /* check the bit inside the abi_ulong */
1160 unlock_user(target_fds
, target_fds_addr
, 0);
1165 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
1166 abi_ulong target_fds_addr
,
1169 if (target_fds_addr
) {
1170 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
1171 return -TARGET_EFAULT
;
1179 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
1185 abi_ulong
*target_fds
;
1187 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1188 if (!(target_fds
= lock_user(VERIFY_WRITE
,
1190 sizeof(abi_ulong
) * nw
,
1192 return -TARGET_EFAULT
;
1195 for (i
= 0; i
< nw
; i
++) {
1197 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1198 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
1201 __put_user(v
, &target_fds
[i
]);
1204 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
/* Host clock tick rate used when rescaling clock_t values for the guest. */
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif
1215 static inline abi_long
host_to_target_clock_t(long ticks
)
1217 #if HOST_HZ == TARGET_HZ
1220 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1224 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1225 const struct rusage
*rusage
)
1227 struct target_rusage
*target_rusage
;
1229 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1230 return -TARGET_EFAULT
;
1231 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1232 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1233 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1234 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1235 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1236 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1237 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1238 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1239 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1240 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1241 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1242 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1243 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1244 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1245 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1246 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1247 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1248 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1249 unlock_user_struct(target_rusage
, target_addr
, 1);
1254 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1256 abi_ulong target_rlim_swap
;
1259 target_rlim_swap
= tswapal(target_rlim
);
1260 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1261 return RLIM_INFINITY
;
1263 result
= target_rlim_swap
;
1264 if (target_rlim_swap
!= (rlim_t
)result
)
1265 return RLIM_INFINITY
;
1270 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1272 abi_ulong target_rlim_swap
;
1275 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1276 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1278 target_rlim_swap
= rlim
;
1279 result
= tswapal(target_rlim_swap
);
1284 static inline int target_to_host_resource(int code
)
1287 case TARGET_RLIMIT_AS
:
1289 case TARGET_RLIMIT_CORE
:
1291 case TARGET_RLIMIT_CPU
:
1293 case TARGET_RLIMIT_DATA
:
1295 case TARGET_RLIMIT_FSIZE
:
1296 return RLIMIT_FSIZE
;
1297 case TARGET_RLIMIT_LOCKS
:
1298 return RLIMIT_LOCKS
;
1299 case TARGET_RLIMIT_MEMLOCK
:
1300 return RLIMIT_MEMLOCK
;
1301 case TARGET_RLIMIT_MSGQUEUE
:
1302 return RLIMIT_MSGQUEUE
;
1303 case TARGET_RLIMIT_NICE
:
1305 case TARGET_RLIMIT_NOFILE
:
1306 return RLIMIT_NOFILE
;
1307 case TARGET_RLIMIT_NPROC
:
1308 return RLIMIT_NPROC
;
1309 case TARGET_RLIMIT_RSS
:
1311 case TARGET_RLIMIT_RTPRIO
:
1312 return RLIMIT_RTPRIO
;
1313 case TARGET_RLIMIT_SIGPENDING
:
1314 return RLIMIT_SIGPENDING
;
1315 case TARGET_RLIMIT_STACK
:
1316 return RLIMIT_STACK
;
1322 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1323 abi_ulong target_tv_addr
)
1325 struct target_timeval
*target_tv
;
1327 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
1328 return -TARGET_EFAULT
;
1330 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1331 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1333 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1338 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1339 const struct timeval
*tv
)
1341 struct target_timeval
*target_tv
;
1343 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
1344 return -TARGET_EFAULT
;
1346 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1347 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1349 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1354 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1355 abi_ulong target_tz_addr
)
1357 struct target_timezone
*target_tz
;
1359 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1360 return -TARGET_EFAULT
;
1363 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1364 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1366 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1371 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1374 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1375 abi_ulong target_mq_attr_addr
)
1377 struct target_mq_attr
*target_mq_attr
;
1379 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1380 target_mq_attr_addr
, 1))
1381 return -TARGET_EFAULT
;
1383 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1384 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1385 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1386 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1388 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1393 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1394 const struct mq_attr
*attr
)
1396 struct target_mq_attr
*target_mq_attr
;
1398 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1399 target_mq_attr_addr
, 0))
1400 return -TARGET_EFAULT
;
1402 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1403 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1404 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1405 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1407 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1413 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1414 /* do_select() must return target values and target errnos. */
1415 static abi_long
do_select(int n
,
1416 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1417 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1419 fd_set rfds
, wfds
, efds
;
1420 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1422 struct timespec ts
, *ts_ptr
;
1425 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1429 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1433 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1438 if (target_tv_addr
) {
1439 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1440 return -TARGET_EFAULT
;
1441 ts
.tv_sec
= tv
.tv_sec
;
1442 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1448 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1451 if (!is_error(ret
)) {
1452 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1453 return -TARGET_EFAULT
;
1454 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1455 return -TARGET_EFAULT
;
1456 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1457 return -TARGET_EFAULT
;
1459 if (target_tv_addr
) {
1460 tv
.tv_sec
= ts
.tv_sec
;
1461 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1462 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1463 return -TARGET_EFAULT
;
1471 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1472 static abi_long
do_old_select(abi_ulong arg1
)
1474 struct target_sel_arg_struct
*sel
;
1475 abi_ulong inp
, outp
, exp
, tvp
;
1478 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1)) {
1479 return -TARGET_EFAULT
;
1482 nsel
= tswapal(sel
->n
);
1483 inp
= tswapal(sel
->inp
);
1484 outp
= tswapal(sel
->outp
);
1485 exp
= tswapal(sel
->exp
);
1486 tvp
= tswapal(sel
->tvp
);
1488 unlock_user_struct(sel
, arg1
, 0);
1490 return do_select(nsel
, inp
, outp
, exp
, tvp
);
1495 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1498 return pipe2(host_pipe
, flags
);
1504 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1505 int flags
, int is_pipe2
)
1509 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1512 return get_errno(ret
);
1514 /* Several targets have special calling conventions for the original
1515 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1517 #if defined(TARGET_ALPHA)
1518 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1519 return host_pipe
[0];
1520 #elif defined(TARGET_MIPS)
1521 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1522 return host_pipe
[0];
1523 #elif defined(TARGET_SH4)
1524 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1525 return host_pipe
[0];
1526 #elif defined(TARGET_SPARC)
1527 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1528 return host_pipe
[0];
1532 if (put_user_s32(host_pipe
[0], pipedes
)
1533 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1534 return -TARGET_EFAULT
;
1535 return get_errno(ret
);
1538 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1539 abi_ulong target_addr
,
1542 struct target_ip_mreqn
*target_smreqn
;
1544 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1546 return -TARGET_EFAULT
;
1547 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1548 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1549 if (len
== sizeof(struct target_ip_mreqn
))
1550 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1551 unlock_user(target_smreqn
, target_addr
, 0);
1556 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1557 abi_ulong target_addr
,
1560 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1561 sa_family_t sa_family
;
1562 struct target_sockaddr
*target_saddr
;
1564 if (fd_trans_target_to_host_addr(fd
)) {
1565 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1568 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1570 return -TARGET_EFAULT
;
1572 sa_family
= tswap16(target_saddr
->sa_family
);
1574 /* Oops. The caller might send a incomplete sun_path; sun_path
1575 * must be terminated by \0 (see the manual page), but
1576 * unfortunately it is quite common to specify sockaddr_un
1577 * length as "strlen(x->sun_path)" while it should be
1578 * "strlen(...) + 1". We'll fix that here if needed.
1579 * Linux kernel has a similar feature.
1582 if (sa_family
== AF_UNIX
) {
1583 if (len
< unix_maxlen
&& len
> 0) {
1584 char *cp
= (char*)target_saddr
;
1586 if ( cp
[len
-1] && !cp
[len
] )
1589 if (len
> unix_maxlen
)
1593 memcpy(addr
, target_saddr
, len
);
1594 addr
->sa_family
= sa_family
;
1595 if (sa_family
== AF_NETLINK
) {
1596 struct sockaddr_nl
*nladdr
;
1598 nladdr
= (struct sockaddr_nl
*)addr
;
1599 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1600 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1601 } else if (sa_family
== AF_PACKET
) {
1602 struct target_sockaddr_ll
*lladdr
;
1604 lladdr
= (struct target_sockaddr_ll
*)addr
;
1605 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1606 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1608 unlock_user(target_saddr
, target_addr
, 0);
1613 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1614 struct sockaddr
*addr
,
1617 struct target_sockaddr
*target_saddr
;
1623 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1625 return -TARGET_EFAULT
;
1626 memcpy(target_saddr
, addr
, len
);
1627 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1628 sizeof(target_saddr
->sa_family
)) {
1629 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1631 if (addr
->sa_family
== AF_NETLINK
&& len
>= sizeof(struct sockaddr_nl
)) {
1632 struct sockaddr_nl
*target_nl
= (struct sockaddr_nl
*)target_saddr
;
1633 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1634 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1635 } else if (addr
->sa_family
== AF_PACKET
) {
1636 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1637 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1638 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1639 } else if (addr
->sa_family
== AF_INET6
&&
1640 len
>= sizeof(struct target_sockaddr_in6
)) {
1641 struct target_sockaddr_in6
*target_in6
=
1642 (struct target_sockaddr_in6
*)target_saddr
;
1643 target_in6
->sin6_scope_id
= tswap16(target_in6
->sin6_scope_id
);
1645 unlock_user(target_saddr
, target_addr
, len
);
1650 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1651 struct target_msghdr
*target_msgh
)
1653 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1654 abi_long msg_controllen
;
1655 abi_ulong target_cmsg_addr
;
1656 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1657 socklen_t space
= 0;
1659 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1660 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1662 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1663 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1664 target_cmsg_start
= target_cmsg
;
1666 return -TARGET_EFAULT
;
1668 while (cmsg
&& target_cmsg
) {
1669 void *data
= CMSG_DATA(cmsg
);
1670 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1672 int len
= tswapal(target_cmsg
->cmsg_len
)
1673 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1675 space
+= CMSG_SPACE(len
);
1676 if (space
> msgh
->msg_controllen
) {
1677 space
-= CMSG_SPACE(len
);
1678 /* This is a QEMU bug, since we allocated the payload
1679 * area ourselves (unlike overflow in host-to-target
1680 * conversion, which is just the guest giving us a buffer
1681 * that's too small). It can't happen for the payload types
1682 * we currently support; if it becomes an issue in future
1683 * we would need to improve our allocation strategy to
1684 * something more intelligent than "twice the size of the
1685 * target buffer we're reading from".
1687 gemu_log("Host cmsg overflow\n");
1691 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1692 cmsg
->cmsg_level
= SOL_SOCKET
;
1694 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1696 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1697 cmsg
->cmsg_len
= CMSG_LEN(len
);
1699 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1700 int *fd
= (int *)data
;
1701 int *target_fd
= (int *)target_data
;
1702 int i
, numfds
= len
/ sizeof(int);
1704 for (i
= 0; i
< numfds
; i
++) {
1705 __get_user(fd
[i
], target_fd
+ i
);
1707 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1708 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1709 struct ucred
*cred
= (struct ucred
*)data
;
1710 struct target_ucred
*target_cred
=
1711 (struct target_ucred
*)target_data
;
1713 __get_user(cred
->pid
, &target_cred
->pid
);
1714 __get_user(cred
->uid
, &target_cred
->uid
);
1715 __get_user(cred
->gid
, &target_cred
->gid
);
1717 gemu_log("Unsupported ancillary data: %d/%d\n",
1718 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1719 memcpy(data
, target_data
, len
);
1722 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1723 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1726 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1728 msgh
->msg_controllen
= space
;
1732 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1733 struct msghdr
*msgh
)
1735 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1736 abi_long msg_controllen
;
1737 abi_ulong target_cmsg_addr
;
1738 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1739 socklen_t space
= 0;
1741 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1742 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1744 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1745 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1746 target_cmsg_start
= target_cmsg
;
1748 return -TARGET_EFAULT
;
1750 while (cmsg
&& target_cmsg
) {
1751 void *data
= CMSG_DATA(cmsg
);
1752 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1754 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1755 int tgt_len
, tgt_space
;
1757 /* We never copy a half-header but may copy half-data;
1758 * this is Linux's behaviour in put_cmsg(). Note that
1759 * truncation here is a guest problem (which we report
1760 * to the guest via the CTRUNC bit), unlike truncation
1761 * in target_to_host_cmsg, which is a QEMU bug.
1763 if (msg_controllen
< sizeof(struct cmsghdr
)) {
1764 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1768 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1769 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1771 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1773 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1775 tgt_len
= TARGET_CMSG_LEN(len
);
1777 /* Payload types which need a different size of payload on
1778 * the target must adjust tgt_len here.
1780 switch (cmsg
->cmsg_level
) {
1782 switch (cmsg
->cmsg_type
) {
1784 tgt_len
= sizeof(struct target_timeval
);
1793 if (msg_controllen
< tgt_len
) {
1794 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1795 tgt_len
= msg_controllen
;
1798 /* We must now copy-and-convert len bytes of payload
1799 * into tgt_len bytes of destination space. Bear in mind
1800 * that in both source and destination we may be dealing
1801 * with a truncated value!
1803 switch (cmsg
->cmsg_level
) {
1805 switch (cmsg
->cmsg_type
) {
1808 int *fd
= (int *)data
;
1809 int *target_fd
= (int *)target_data
;
1810 int i
, numfds
= tgt_len
/ sizeof(int);
1812 for (i
= 0; i
< numfds
; i
++) {
1813 __put_user(fd
[i
], target_fd
+ i
);
1819 struct timeval
*tv
= (struct timeval
*)data
;
1820 struct target_timeval
*target_tv
=
1821 (struct target_timeval
*)target_data
;
1823 if (len
!= sizeof(struct timeval
) ||
1824 tgt_len
!= sizeof(struct target_timeval
)) {
1828 /* copy struct timeval to target */
1829 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1830 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1833 case SCM_CREDENTIALS
:
1835 struct ucred
*cred
= (struct ucred
*)data
;
1836 struct target_ucred
*target_cred
=
1837 (struct target_ucred
*)target_data
;
1839 __put_user(cred
->pid
, &target_cred
->pid
);
1840 __put_user(cred
->uid
, &target_cred
->uid
);
1841 __put_user(cred
->gid
, &target_cred
->gid
);
1850 switch (cmsg
->cmsg_type
) {
1853 uint32_t *v
= (uint32_t *)data
;
1854 uint32_t *t_int
= (uint32_t *)target_data
;
1856 __put_user(*v
, t_int
);
1862 struct sock_extended_err ee
;
1863 struct sockaddr_in offender
;
1865 struct errhdr_t
*errh
= (struct errhdr_t
*)data
;
1866 struct errhdr_t
*target_errh
=
1867 (struct errhdr_t
*)target_data
;
1869 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1870 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1871 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1872 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1873 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1874 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1875 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1876 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1877 (void *) &errh
->offender
, sizeof(errh
->offender
));
1886 switch (cmsg
->cmsg_type
) {
1889 uint32_t *v
= (uint32_t *)data
;
1890 uint32_t *t_int
= (uint32_t *)target_data
;
1892 __put_user(*v
, t_int
);
1898 struct sock_extended_err ee
;
1899 struct sockaddr_in6 offender
;
1901 struct errhdr6_t
*errh
= (struct errhdr6_t
*)data
;
1902 struct errhdr6_t
*target_errh
=
1903 (struct errhdr6_t
*)target_data
;
1905 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1906 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1907 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1908 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1909 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1910 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1911 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1912 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1913 (void *) &errh
->offender
, sizeof(errh
->offender
));
1923 gemu_log("Unsupported ancillary data: %d/%d\n",
1924 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1925 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1926 if (tgt_len
> len
) {
1927 memset(target_data
+ len
, 0, tgt_len
- len
);
1931 target_cmsg
->cmsg_len
= tswapal(tgt_len
);
1932 tgt_space
= TARGET_CMSG_SPACE(len
);
1933 if (msg_controllen
< tgt_space
) {
1934 tgt_space
= msg_controllen
;
1936 msg_controllen
-= tgt_space
;
1938 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1939 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1942 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1944 target_msgh
->msg_controllen
= tswapal(space
);
/* Byte-swap every field of a netlink message header in place. */
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
    nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
    nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
}
1957 static abi_long
host_to_target_for_each_nlmsg(struct nlmsghdr
*nlh
,
1959 abi_long (*host_to_target_nlmsg
)
1960 (struct nlmsghdr
*))
1965 while (len
> sizeof(struct nlmsghdr
)) {
1967 nlmsg_len
= nlh
->nlmsg_len
;
1968 if (nlmsg_len
< sizeof(struct nlmsghdr
) ||
1973 switch (nlh
->nlmsg_type
) {
1975 tswap_nlmsghdr(nlh
);
1981 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
1982 e
->error
= tswap32(e
->error
);
1983 tswap_nlmsghdr(&e
->msg
);
1984 tswap_nlmsghdr(nlh
);
1988 ret
= host_to_target_nlmsg(nlh
);
1990 tswap_nlmsghdr(nlh
);
1995 tswap_nlmsghdr(nlh
);
1996 len
-= NLMSG_ALIGN(nlmsg_len
);
1997 nlh
= (struct nlmsghdr
*)(((char*)nlh
) + NLMSG_ALIGN(nlmsg_len
));
2002 static abi_long
target_to_host_for_each_nlmsg(struct nlmsghdr
*nlh
,
2004 abi_long (*target_to_host_nlmsg
)
2005 (struct nlmsghdr
*))
2009 while (len
> sizeof(struct nlmsghdr
)) {
2010 if (tswap32(nlh
->nlmsg_len
) < sizeof(struct nlmsghdr
) ||
2011 tswap32(nlh
->nlmsg_len
) > len
) {
2014 tswap_nlmsghdr(nlh
);
2015 switch (nlh
->nlmsg_type
) {
2022 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
2023 e
->error
= tswap32(e
->error
);
2024 tswap_nlmsghdr(&e
->msg
);
2028 ret
= target_to_host_nlmsg(nlh
);
2033 len
-= NLMSG_ALIGN(nlh
->nlmsg_len
);
2034 nlh
= (struct nlmsghdr
*)(((char *)nlh
) + NLMSG_ALIGN(nlh
->nlmsg_len
));
2039 #ifdef CONFIG_RTNETLINK
2040 static abi_long
host_to_target_for_each_nlattr(struct nlattr
*nlattr
,
2041 size_t len
, void *context
,
2042 abi_long (*host_to_target_nlattr
)
2046 unsigned short nla_len
;
2049 while (len
> sizeof(struct nlattr
)) {
2050 nla_len
= nlattr
->nla_len
;
2051 if (nla_len
< sizeof(struct nlattr
) ||
2055 ret
= host_to_target_nlattr(nlattr
, context
);
2056 nlattr
->nla_len
= tswap16(nlattr
->nla_len
);
2057 nlattr
->nla_type
= tswap16(nlattr
->nla_type
);
2061 len
-= NLA_ALIGN(nla_len
);
2062 nlattr
= (struct nlattr
*)(((char *)nlattr
) + NLA_ALIGN(nla_len
));
2067 static abi_long
host_to_target_for_each_rtattr(struct rtattr
*rtattr
,
2069 abi_long (*host_to_target_rtattr
)
2072 unsigned short rta_len
;
2075 while (len
> sizeof(struct rtattr
)) {
2076 rta_len
= rtattr
->rta_len
;
2077 if (rta_len
< sizeof(struct rtattr
) ||
2081 ret
= host_to_target_rtattr(rtattr
);
2082 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
2083 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
2087 len
-= RTA_ALIGN(rta_len
);
2088 rtattr
= (struct rtattr
*)(((char *)rtattr
) + RTA_ALIGN(rta_len
));
/* Pointer to the payload that follows a netlink attribute header. */
#define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
2095 static abi_long
host_to_target_data_bridge_nlattr(struct nlattr
*nlattr
,
2102 switch (nlattr
->nla_type
) {
2104 case QEMU_IFLA_BR_FDB_FLUSH
:
2107 case QEMU_IFLA_BR_GROUP_ADDR
:
2110 case QEMU_IFLA_BR_VLAN_FILTERING
:
2111 case QEMU_IFLA_BR_TOPOLOGY_CHANGE
:
2112 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED
:
2113 case QEMU_IFLA_BR_MCAST_ROUTER
:
2114 case QEMU_IFLA_BR_MCAST_SNOOPING
:
2115 case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR
:
2116 case QEMU_IFLA_BR_MCAST_QUERIER
:
2117 case QEMU_IFLA_BR_NF_CALL_IPTABLES
:
2118 case QEMU_IFLA_BR_NF_CALL_IP6TABLES
:
2119 case QEMU_IFLA_BR_NF_CALL_ARPTABLES
:
2122 case QEMU_IFLA_BR_PRIORITY
:
2123 case QEMU_IFLA_BR_VLAN_PROTOCOL
:
2124 case QEMU_IFLA_BR_GROUP_FWD_MASK
:
2125 case QEMU_IFLA_BR_ROOT_PORT
:
2126 case QEMU_IFLA_BR_VLAN_DEFAULT_PVID
:
2127 u16
= NLA_DATA(nlattr
);
2128 *u16
= tswap16(*u16
);
2131 case QEMU_IFLA_BR_FORWARD_DELAY
:
2132 case QEMU_IFLA_BR_HELLO_TIME
:
2133 case QEMU_IFLA_BR_MAX_AGE
:
2134 case QEMU_IFLA_BR_AGEING_TIME
:
2135 case QEMU_IFLA_BR_STP_STATE
:
2136 case QEMU_IFLA_BR_ROOT_PATH_COST
:
2137 case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY
:
2138 case QEMU_IFLA_BR_MCAST_HASH_MAX
:
2139 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT
:
2140 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT
:
2141 u32
= NLA_DATA(nlattr
);
2142 *u32
= tswap32(*u32
);
2145 case QEMU_IFLA_BR_HELLO_TIMER
:
2146 case QEMU_IFLA_BR_TCN_TIMER
:
2147 case QEMU_IFLA_BR_GC_TIMER
:
2148 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER
:
2149 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL
:
2150 case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL
:
2151 case QEMU_IFLA_BR_MCAST_QUERIER_INTVL
:
2152 case QEMU_IFLA_BR_MCAST_QUERY_INTVL
:
2153 case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
:
2154 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL
:
2155 u64
= NLA_DATA(nlattr
);
2156 *u64
= tswap64(*u64
);
2158 /* ifla_bridge_id: uin8_t[] */
2159 case QEMU_IFLA_BR_ROOT_ID
:
2160 case QEMU_IFLA_BR_BRIDGE_ID
:
2163 gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr
->nla_type
);
2169 static abi_long
host_to_target_slave_data_bridge_nlattr(struct nlattr
*nlattr
,
2176 switch (nlattr
->nla_type
) {
2178 case QEMU_IFLA_BRPORT_STATE
:
2179 case QEMU_IFLA_BRPORT_MODE
:
2180 case QEMU_IFLA_BRPORT_GUARD
:
2181 case QEMU_IFLA_BRPORT_PROTECT
:
2182 case QEMU_IFLA_BRPORT_FAST_LEAVE
:
2183 case QEMU_IFLA_BRPORT_LEARNING
:
2184 case QEMU_IFLA_BRPORT_UNICAST_FLOOD
:
2185 case QEMU_IFLA_BRPORT_PROXYARP
:
2186 case QEMU_IFLA_BRPORT_LEARNING_SYNC
:
2187 case QEMU_IFLA_BRPORT_PROXYARP_WIFI
:
2188 case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
:
2189 case QEMU_IFLA_BRPORT_CONFIG_PENDING
:
2190 case QEMU_IFLA_BRPORT_MULTICAST_ROUTER
:
2193 case QEMU_IFLA_BRPORT_PRIORITY
:
2194 case QEMU_IFLA_BRPORT_DESIGNATED_PORT
:
2195 case QEMU_IFLA_BRPORT_DESIGNATED_COST
:
2196 case QEMU_IFLA_BRPORT_ID
:
2197 case QEMU_IFLA_BRPORT_NO
:
2198 u16
= NLA_DATA(nlattr
);
2199 *u16
= tswap16(*u16
);
2202 case QEMU_IFLA_BRPORT_COST
:
2203 u32
= NLA_DATA(nlattr
);
2204 *u32
= tswap32(*u32
);
2207 case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER
:
2208 case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER
:
2209 case QEMU_IFLA_BRPORT_HOLD_TIMER
:
2210 u64
= NLA_DATA(nlattr
);
2211 *u64
= tswap64(*u64
);
2213 /* ifla_bridge_id: uint8_t[] */
2214 case QEMU_IFLA_BRPORT_ROOT_ID
:
2215 case QEMU_IFLA_BRPORT_BRIDGE_ID
:
2218 gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr
->nla_type
);
/* Scratch state shared across one IFLA_LINKINFO attribute walk.
 * NOTE(review): the member list is not visible in this chunk of the file;
 * from the uses below it presumably carries name/len and
 * slave_name/slave_len for the kind strings -- confirm against the
 * full source. */
struct linkinfo_context {
2231 static abi_long
host_to_target_data_linkinfo_nlattr(struct nlattr
*nlattr
,
2234 struct linkinfo_context
*li_context
= context
;
2236 switch (nlattr
->nla_type
) {
2238 case QEMU_IFLA_INFO_KIND
:
2239 li_context
->name
= NLA_DATA(nlattr
);
2240 li_context
->len
= nlattr
->nla_len
- NLA_HDRLEN
;
2242 case QEMU_IFLA_INFO_SLAVE_KIND
:
2243 li_context
->slave_name
= NLA_DATA(nlattr
);
2244 li_context
->slave_len
= nlattr
->nla_len
- NLA_HDRLEN
;
2247 case QEMU_IFLA_INFO_XSTATS
:
2248 /* FIXME: only used by CAN */
2251 case QEMU_IFLA_INFO_DATA
:
2252 if (strncmp(li_context
->name
, "bridge",
2253 li_context
->len
) == 0) {
2254 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
2257 host_to_target_data_bridge_nlattr
);
2259 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context
->name
);
2262 case QEMU_IFLA_INFO_SLAVE_DATA
:
2263 if (strncmp(li_context
->slave_name
, "bridge",
2264 li_context
->slave_len
) == 0) {
2265 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
2268 host_to_target_slave_data_bridge_nlattr
);
2270 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2271 li_context
->slave_name
);
2275 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr
->nla_type
);
2282 static abi_long
host_to_target_data_inet_nlattr(struct nlattr
*nlattr
,
2288 switch (nlattr
->nla_type
) {
2289 case QEMU_IFLA_INET_CONF
:
2290 u32
= NLA_DATA(nlattr
);
2291 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u32
);
2293 u32
[i
] = tswap32(u32
[i
]);
2297 gemu_log("Unknown host AF_INET type: %d\n", nlattr
->nla_type
);
2302 static abi_long
host_to_target_data_inet6_nlattr(struct nlattr
*nlattr
,
2307 struct ifla_cacheinfo
*ci
;
2310 switch (nlattr
->nla_type
) {
2312 case QEMU_IFLA_INET6_TOKEN
:
2315 case QEMU_IFLA_INET6_ADDR_GEN_MODE
:
2318 case QEMU_IFLA_INET6_FLAGS
:
2319 u32
= NLA_DATA(nlattr
);
2320 *u32
= tswap32(*u32
);
2323 case QEMU_IFLA_INET6_CONF
:
2324 u32
= NLA_DATA(nlattr
);
2325 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u32
);
2327 u32
[i
] = tswap32(u32
[i
]);
2330 /* ifla_cacheinfo */
2331 case QEMU_IFLA_INET6_CACHEINFO
:
2332 ci
= NLA_DATA(nlattr
);
2333 ci
->max_reasm_len
= tswap32(ci
->max_reasm_len
);
2334 ci
->tstamp
= tswap32(ci
->tstamp
);
2335 ci
->reachable_time
= tswap32(ci
->reachable_time
);
2336 ci
->retrans_time
= tswap32(ci
->retrans_time
);
2339 case QEMU_IFLA_INET6_STATS
:
2340 case QEMU_IFLA_INET6_ICMP6STATS
:
2341 u64
= NLA_DATA(nlattr
);
2342 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u64
);
2344 u64
[i
] = tswap64(u64
[i
]);
2348 gemu_log("Unknown host AF_INET6 type: %d\n", nlattr
->nla_type
);
2353 static abi_long
host_to_target_data_spec_nlattr(struct nlattr
*nlattr
,
2356 switch (nlattr
->nla_type
) {
2358 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2360 host_to_target_data_inet_nlattr
);
2362 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2364 host_to_target_data_inet6_nlattr
);
2366 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr
->nla_type
);
2372 static abi_long
host_to_target_data_link_rtattr(struct rtattr
*rtattr
)
2375 struct rtnl_link_stats
*st
;
2376 struct rtnl_link_stats64
*st64
;
2377 struct rtnl_link_ifmap
*map
;
2378 struct linkinfo_context li_context
;
2380 switch (rtattr
->rta_type
) {
2382 case QEMU_IFLA_ADDRESS
:
2383 case QEMU_IFLA_BROADCAST
:
2385 case QEMU_IFLA_IFNAME
:
2386 case QEMU_IFLA_QDISC
:
2389 case QEMU_IFLA_OPERSTATE
:
2390 case QEMU_IFLA_LINKMODE
:
2391 case QEMU_IFLA_CARRIER
:
2392 case QEMU_IFLA_PROTO_DOWN
:
2396 case QEMU_IFLA_LINK
:
2397 case QEMU_IFLA_WEIGHT
:
2398 case QEMU_IFLA_TXQLEN
:
2399 case QEMU_IFLA_CARRIER_CHANGES
:
2400 case QEMU_IFLA_NUM_RX_QUEUES
:
2401 case QEMU_IFLA_NUM_TX_QUEUES
:
2402 case QEMU_IFLA_PROMISCUITY
:
2403 case QEMU_IFLA_EXT_MASK
:
2404 case QEMU_IFLA_LINK_NETNSID
:
2405 case QEMU_IFLA_GROUP
:
2406 case QEMU_IFLA_MASTER
:
2407 case QEMU_IFLA_NUM_VF
:
2408 case QEMU_IFLA_GSO_MAX_SEGS
:
2409 case QEMU_IFLA_GSO_MAX_SIZE
:
2410 u32
= RTA_DATA(rtattr
);
2411 *u32
= tswap32(*u32
);
2413 /* struct rtnl_link_stats */
2414 case QEMU_IFLA_STATS
:
2415 st
= RTA_DATA(rtattr
);
2416 st
->rx_packets
= tswap32(st
->rx_packets
);
2417 st
->tx_packets
= tswap32(st
->tx_packets
);
2418 st
->rx_bytes
= tswap32(st
->rx_bytes
);
2419 st
->tx_bytes
= tswap32(st
->tx_bytes
);
2420 st
->rx_errors
= tswap32(st
->rx_errors
);
2421 st
->tx_errors
= tswap32(st
->tx_errors
);
2422 st
->rx_dropped
= tswap32(st
->rx_dropped
);
2423 st
->tx_dropped
= tswap32(st
->tx_dropped
);
2424 st
->multicast
= tswap32(st
->multicast
);
2425 st
->collisions
= tswap32(st
->collisions
);
2427 /* detailed rx_errors: */
2428 st
->rx_length_errors
= tswap32(st
->rx_length_errors
);
2429 st
->rx_over_errors
= tswap32(st
->rx_over_errors
);
2430 st
->rx_crc_errors
= tswap32(st
->rx_crc_errors
);
2431 st
->rx_frame_errors
= tswap32(st
->rx_frame_errors
);
2432 st
->rx_fifo_errors
= tswap32(st
->rx_fifo_errors
);
2433 st
->rx_missed_errors
= tswap32(st
->rx_missed_errors
);
2435 /* detailed tx_errors */
2436 st
->tx_aborted_errors
= tswap32(st
->tx_aborted_errors
);
2437 st
->tx_carrier_errors
= tswap32(st
->tx_carrier_errors
);
2438 st
->tx_fifo_errors
= tswap32(st
->tx_fifo_errors
);
2439 st
->tx_heartbeat_errors
= tswap32(st
->tx_heartbeat_errors
);
2440 st
->tx_window_errors
= tswap32(st
->tx_window_errors
);
2443 st
->rx_compressed
= tswap32(st
->rx_compressed
);
2444 st
->tx_compressed
= tswap32(st
->tx_compressed
);
2446 /* struct rtnl_link_stats64 */
2447 case QEMU_IFLA_STATS64
:
2448 st64
= RTA_DATA(rtattr
);
2449 st64
->rx_packets
= tswap64(st64
->rx_packets
);
2450 st64
->tx_packets
= tswap64(st64
->tx_packets
);
2451 st64
->rx_bytes
= tswap64(st64
->rx_bytes
);
2452 st64
->tx_bytes
= tswap64(st64
->tx_bytes
);
2453 st64
->rx_errors
= tswap64(st64
->rx_errors
);
2454 st64
->tx_errors
= tswap64(st64
->tx_errors
);
2455 st64
->rx_dropped
= tswap64(st64
->rx_dropped
);
2456 st64
->tx_dropped
= tswap64(st64
->tx_dropped
);
2457 st64
->multicast
= tswap64(st64
->multicast
);
2458 st64
->collisions
= tswap64(st64
->collisions
);
2460 /* detailed rx_errors: */
2461 st64
->rx_length_errors
= tswap64(st64
->rx_length_errors
);
2462 st64
->rx_over_errors
= tswap64(st64
->rx_over_errors
);
2463 st64
->rx_crc_errors
= tswap64(st64
->rx_crc_errors
);
2464 st64
->rx_frame_errors
= tswap64(st64
->rx_frame_errors
);
2465 st64
->rx_fifo_errors
= tswap64(st64
->rx_fifo_errors
);
2466 st64
->rx_missed_errors
= tswap64(st64
->rx_missed_errors
);
2468 /* detailed tx_errors */
2469 st64
->tx_aborted_errors
= tswap64(st64
->tx_aborted_errors
);
2470 st64
->tx_carrier_errors
= tswap64(st64
->tx_carrier_errors
);
2471 st64
->tx_fifo_errors
= tswap64(st64
->tx_fifo_errors
);
2472 st64
->tx_heartbeat_errors
= tswap64(st64
->tx_heartbeat_errors
);
2473 st64
->tx_window_errors
= tswap64(st64
->tx_window_errors
);
2476 st64
->rx_compressed
= tswap64(st64
->rx_compressed
);
2477 st64
->tx_compressed
= tswap64(st64
->tx_compressed
);
2479 /* struct rtnl_link_ifmap */
2481 map
= RTA_DATA(rtattr
);
2482 map
->mem_start
= tswap64(map
->mem_start
);
2483 map
->mem_end
= tswap64(map
->mem_end
);
2484 map
->base_addr
= tswap64(map
->base_addr
);
2485 map
->irq
= tswap16(map
->irq
);
2488 case QEMU_IFLA_LINKINFO
:
2489 memset(&li_context
, 0, sizeof(li_context
));
2490 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2492 host_to_target_data_linkinfo_nlattr
);
2493 case QEMU_IFLA_AF_SPEC
:
2494 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2496 host_to_target_data_spec_nlattr
);
2498 gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr
->rta_type
);
2504 static abi_long
host_to_target_data_addr_rtattr(struct rtattr
*rtattr
)
2507 struct ifa_cacheinfo
*ci
;
2509 switch (rtattr
->rta_type
) {
2510 /* binary: depends on family type */
2520 u32
= RTA_DATA(rtattr
);
2521 *u32
= tswap32(*u32
);
2523 /* struct ifa_cacheinfo */
2525 ci
= RTA_DATA(rtattr
);
2526 ci
->ifa_prefered
= tswap32(ci
->ifa_prefered
);
2527 ci
->ifa_valid
= tswap32(ci
->ifa_valid
);
2528 ci
->cstamp
= tswap32(ci
->cstamp
);
2529 ci
->tstamp
= tswap32(ci
->tstamp
);
2532 gemu_log("Unknown host IFA type: %d\n", rtattr
->rta_type
);
2538 static abi_long
host_to_target_data_route_rtattr(struct rtattr
*rtattr
)
2541 switch (rtattr
->rta_type
) {
2542 /* binary: depends on family type */
2551 u32
= RTA_DATA(rtattr
);
2552 *u32
= tswap32(*u32
);
2555 gemu_log("Unknown host RTA type: %d\n", rtattr
->rta_type
);
2561 static abi_long
host_to_target_link_rtattr(struct rtattr
*rtattr
,
2562 uint32_t rtattr_len
)
2564 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2565 host_to_target_data_link_rtattr
);
2568 static abi_long
host_to_target_addr_rtattr(struct rtattr
*rtattr
,
2569 uint32_t rtattr_len
)
2571 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2572 host_to_target_data_addr_rtattr
);
2575 static abi_long
host_to_target_route_rtattr(struct rtattr
*rtattr
,
2576 uint32_t rtattr_len
)
2578 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2579 host_to_target_data_route_rtattr
);
2582 static abi_long
host_to_target_data_route(struct nlmsghdr
*nlh
)
2585 struct ifinfomsg
*ifi
;
2586 struct ifaddrmsg
*ifa
;
2589 nlmsg_len
= nlh
->nlmsg_len
;
2590 switch (nlh
->nlmsg_type
) {
2594 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2595 ifi
= NLMSG_DATA(nlh
);
2596 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2597 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2598 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2599 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2600 host_to_target_link_rtattr(IFLA_RTA(ifi
),
2601 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifi
)));
2607 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2608 ifa
= NLMSG_DATA(nlh
);
2609 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2610 host_to_target_addr_rtattr(IFA_RTA(ifa
),
2611 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifa
)));
2617 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2618 rtm
= NLMSG_DATA(nlh
);
2619 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2620 host_to_target_route_rtattr(RTM_RTA(rtm
),
2621 nlmsg_len
- NLMSG_LENGTH(sizeof(*rtm
)));
2625 return -TARGET_EINVAL
;
2630 static inline abi_long
host_to_target_nlmsg_route(struct nlmsghdr
*nlh
,
2633 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_route
);
2636 static abi_long
target_to_host_for_each_rtattr(struct rtattr
*rtattr
,
2638 abi_long (*target_to_host_rtattr
)
2643 while (len
>= sizeof(struct rtattr
)) {
2644 if (tswap16(rtattr
->rta_len
) < sizeof(struct rtattr
) ||
2645 tswap16(rtattr
->rta_len
) > len
) {
2648 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
2649 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
2650 ret
= target_to_host_rtattr(rtattr
);
2654 len
-= RTA_ALIGN(rtattr
->rta_len
);
2655 rtattr
= (struct rtattr
*)(((char *)rtattr
) +
2656 RTA_ALIGN(rtattr
->rta_len
));
2661 static abi_long
target_to_host_data_link_rtattr(struct rtattr
*rtattr
)
2663 switch (rtattr
->rta_type
) {
2665 gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr
->rta_type
);
2671 static abi_long
target_to_host_data_addr_rtattr(struct rtattr
*rtattr
)
2673 switch (rtattr
->rta_type
) {
2674 /* binary: depends on family type */
2679 gemu_log("Unknown target IFA type: %d\n", rtattr
->rta_type
);
2685 static abi_long
target_to_host_data_route_rtattr(struct rtattr
*rtattr
)
2688 switch (rtattr
->rta_type
) {
2689 /* binary: depends on family type */
2697 u32
= RTA_DATA(rtattr
);
2698 *u32
= tswap32(*u32
);
2701 gemu_log("Unknown target RTA type: %d\n", rtattr
->rta_type
);
2707 static void target_to_host_link_rtattr(struct rtattr
*rtattr
,
2708 uint32_t rtattr_len
)
2710 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2711 target_to_host_data_link_rtattr
);
2714 static void target_to_host_addr_rtattr(struct rtattr
*rtattr
,
2715 uint32_t rtattr_len
)
2717 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2718 target_to_host_data_addr_rtattr
);
2721 static void target_to_host_route_rtattr(struct rtattr
*rtattr
,
2722 uint32_t rtattr_len
)
2724 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2725 target_to_host_data_route_rtattr
);
2728 static abi_long
target_to_host_data_route(struct nlmsghdr
*nlh
)
2730 struct ifinfomsg
*ifi
;
2731 struct ifaddrmsg
*ifa
;
2734 switch (nlh
->nlmsg_type
) {
2739 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2740 ifi
= NLMSG_DATA(nlh
);
2741 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2742 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2743 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2744 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2745 target_to_host_link_rtattr(IFLA_RTA(ifi
), nlh
->nlmsg_len
-
2746 NLMSG_LENGTH(sizeof(*ifi
)));
2752 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2753 ifa
= NLMSG_DATA(nlh
);
2754 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2755 target_to_host_addr_rtattr(IFA_RTA(ifa
), nlh
->nlmsg_len
-
2756 NLMSG_LENGTH(sizeof(*ifa
)));
2763 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2764 rtm
= NLMSG_DATA(nlh
);
2765 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2766 target_to_host_route_rtattr(RTM_RTA(rtm
), nlh
->nlmsg_len
-
2767 NLMSG_LENGTH(sizeof(*rtm
)));
2771 return -TARGET_EOPNOTSUPP
;
2776 static abi_long
target_to_host_nlmsg_route(struct nlmsghdr
*nlh
, size_t len
)
2778 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_route
);
2780 #endif /* CONFIG_RTNETLINK */
2782 static abi_long
host_to_target_data_audit(struct nlmsghdr
*nlh
)
2784 switch (nlh
->nlmsg_type
) {
2786 gemu_log("Unknown host audit message type %d\n",
2788 return -TARGET_EINVAL
;
2793 static inline abi_long
host_to_target_nlmsg_audit(struct nlmsghdr
*nlh
,
2796 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_audit
);
2799 static abi_long
target_to_host_data_audit(struct nlmsghdr
*nlh
)
2801 switch (nlh
->nlmsg_type
) {
2803 case AUDIT_FIRST_USER_MSG
... AUDIT_LAST_USER_MSG
:
2804 case AUDIT_FIRST_USER_MSG2
... AUDIT_LAST_USER_MSG2
:
2807 gemu_log("Unknown target audit message type %d\n",
2809 return -TARGET_EINVAL
;
2815 static abi_long
target_to_host_nlmsg_audit(struct nlmsghdr
*nlh
, size_t len
)
2817 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_audit
);
2820 /* do_setsockopt() Must return target values and target errnos. */
2821 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
2822 abi_ulong optval_addr
, socklen_t optlen
)
2826 struct ip_mreqn
*ip_mreq
;
2827 struct ip_mreq_source
*ip_mreq_source
;
2831 /* TCP options all take an 'int' value. */
2832 if (optlen
< sizeof(uint32_t))
2833 return -TARGET_EINVAL
;
2835 if (get_user_u32(val
, optval_addr
))
2836 return -TARGET_EFAULT
;
2837 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2844 case IP_ROUTER_ALERT
:
2848 case IP_MTU_DISCOVER
:
2855 case IP_MULTICAST_TTL
:
2856 case IP_MULTICAST_LOOP
:
2858 if (optlen
>= sizeof(uint32_t)) {
2859 if (get_user_u32(val
, optval_addr
))
2860 return -TARGET_EFAULT
;
2861 } else if (optlen
>= 1) {
2862 if (get_user_u8(val
, optval_addr
))
2863 return -TARGET_EFAULT
;
2865 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2867 case IP_ADD_MEMBERSHIP
:
2868 case IP_DROP_MEMBERSHIP
:
2869 if (optlen
< sizeof (struct target_ip_mreq
) ||
2870 optlen
> sizeof (struct target_ip_mreqn
))
2871 return -TARGET_EINVAL
;
2873 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
2874 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
2875 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
2878 case IP_BLOCK_SOURCE
:
2879 case IP_UNBLOCK_SOURCE
:
2880 case IP_ADD_SOURCE_MEMBERSHIP
:
2881 case IP_DROP_SOURCE_MEMBERSHIP
:
2882 if (optlen
!= sizeof (struct target_ip_mreq_source
))
2883 return -TARGET_EINVAL
;
2885 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2886 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2887 unlock_user (ip_mreq_source
, optval_addr
, 0);
2896 case IPV6_MTU_DISCOVER
:
2899 case IPV6_RECVPKTINFO
:
2900 case IPV6_UNICAST_HOPS
:
2902 case IPV6_RECVHOPLIMIT
:
2903 case IPV6_2292HOPLIMIT
:
2906 if (optlen
< sizeof(uint32_t)) {
2907 return -TARGET_EINVAL
;
2909 if (get_user_u32(val
, optval_addr
)) {
2910 return -TARGET_EFAULT
;
2912 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2913 &val
, sizeof(val
)));
2917 struct in6_pktinfo pki
;
2919 if (optlen
< sizeof(pki
)) {
2920 return -TARGET_EINVAL
;
2923 if (copy_from_user(&pki
, optval_addr
, sizeof(pki
))) {
2924 return -TARGET_EFAULT
;
2927 pki
.ipi6_ifindex
= tswap32(pki
.ipi6_ifindex
);
2929 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2930 &pki
, sizeof(pki
)));
2941 struct icmp6_filter icmp6f
;
2943 if (optlen
> sizeof(icmp6f
)) {
2944 optlen
= sizeof(icmp6f
);
2947 if (copy_from_user(&icmp6f
, optval_addr
, optlen
)) {
2948 return -TARGET_EFAULT
;
2951 for (val
= 0; val
< 8; val
++) {
2952 icmp6f
.data
[val
] = tswap32(icmp6f
.data
[val
]);
2955 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2967 /* those take an u32 value */
2968 if (optlen
< sizeof(uint32_t)) {
2969 return -TARGET_EINVAL
;
2972 if (get_user_u32(val
, optval_addr
)) {
2973 return -TARGET_EFAULT
;
2975 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2976 &val
, sizeof(val
)));
2983 case TARGET_SOL_SOCKET
:
2985 case TARGET_SO_RCVTIMEO
:
2989 optname
= SO_RCVTIMEO
;
2992 if (optlen
!= sizeof(struct target_timeval
)) {
2993 return -TARGET_EINVAL
;
2996 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2997 return -TARGET_EFAULT
;
3000 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
3004 case TARGET_SO_SNDTIMEO
:
3005 optname
= SO_SNDTIMEO
;
3007 case TARGET_SO_ATTACH_FILTER
:
3009 struct target_sock_fprog
*tfprog
;
3010 struct target_sock_filter
*tfilter
;
3011 struct sock_fprog fprog
;
3012 struct sock_filter
*filter
;
3015 if (optlen
!= sizeof(*tfprog
)) {
3016 return -TARGET_EINVAL
;
3018 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
3019 return -TARGET_EFAULT
;
3021 if (!lock_user_struct(VERIFY_READ
, tfilter
,
3022 tswapal(tfprog
->filter
), 0)) {
3023 unlock_user_struct(tfprog
, optval_addr
, 1);
3024 return -TARGET_EFAULT
;
3027 fprog
.len
= tswap16(tfprog
->len
);
3028 filter
= g_try_new(struct sock_filter
, fprog
.len
);
3029 if (filter
== NULL
) {
3030 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
3031 unlock_user_struct(tfprog
, optval_addr
, 1);
3032 return -TARGET_ENOMEM
;
3034 for (i
= 0; i
< fprog
.len
; i
++) {
3035 filter
[i
].code
= tswap16(tfilter
[i
].code
);
3036 filter
[i
].jt
= tfilter
[i
].jt
;
3037 filter
[i
].jf
= tfilter
[i
].jf
;
3038 filter
[i
].k
= tswap32(tfilter
[i
].k
);
3040 fprog
.filter
= filter
;
3042 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
3043 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
3046 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
3047 unlock_user_struct(tfprog
, optval_addr
, 1);
3050 case TARGET_SO_BINDTODEVICE
:
3052 char *dev_ifname
, *addr_ifname
;
3054 if (optlen
> IFNAMSIZ
- 1) {
3055 optlen
= IFNAMSIZ
- 1;
3057 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
3059 return -TARGET_EFAULT
;
3061 optname
= SO_BINDTODEVICE
;
3062 addr_ifname
= alloca(IFNAMSIZ
);
3063 memcpy(addr_ifname
, dev_ifname
, optlen
);
3064 addr_ifname
[optlen
] = 0;
3065 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
3066 addr_ifname
, optlen
));
3067 unlock_user (dev_ifname
, optval_addr
, 0);
3070 /* Options with 'int' argument. */
3071 case TARGET_SO_DEBUG
:
3074 case TARGET_SO_REUSEADDR
:
3075 optname
= SO_REUSEADDR
;
3077 case TARGET_SO_TYPE
:
3080 case TARGET_SO_ERROR
:
3083 case TARGET_SO_DONTROUTE
:
3084 optname
= SO_DONTROUTE
;
3086 case TARGET_SO_BROADCAST
:
3087 optname
= SO_BROADCAST
;
3089 case TARGET_SO_SNDBUF
:
3090 optname
= SO_SNDBUF
;
3092 case TARGET_SO_SNDBUFFORCE
:
3093 optname
= SO_SNDBUFFORCE
;
3095 case TARGET_SO_RCVBUF
:
3096 optname
= SO_RCVBUF
;
3098 case TARGET_SO_RCVBUFFORCE
:
3099 optname
= SO_RCVBUFFORCE
;
3101 case TARGET_SO_KEEPALIVE
:
3102 optname
= SO_KEEPALIVE
;
3104 case TARGET_SO_OOBINLINE
:
3105 optname
= SO_OOBINLINE
;
3107 case TARGET_SO_NO_CHECK
:
3108 optname
= SO_NO_CHECK
;
3110 case TARGET_SO_PRIORITY
:
3111 optname
= SO_PRIORITY
;
3114 case TARGET_SO_BSDCOMPAT
:
3115 optname
= SO_BSDCOMPAT
;
3118 case TARGET_SO_PASSCRED
:
3119 optname
= SO_PASSCRED
;
3121 case TARGET_SO_PASSSEC
:
3122 optname
= SO_PASSSEC
;
3124 case TARGET_SO_TIMESTAMP
:
3125 optname
= SO_TIMESTAMP
;
3127 case TARGET_SO_RCVLOWAT
:
3128 optname
= SO_RCVLOWAT
;
3134 if (optlen
< sizeof(uint32_t))
3135 return -TARGET_EINVAL
;
3137 if (get_user_u32(val
, optval_addr
))
3138 return -TARGET_EFAULT
;
3139 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
3143 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
3144 ret
= -TARGET_ENOPROTOOPT
;
3149 /* do_getsockopt() Must return target values and target errnos. */
3150 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
3151 abi_ulong optval_addr
, abi_ulong optlen
)
3158 case TARGET_SOL_SOCKET
:
3161 /* These don't just return a single integer */
3162 case TARGET_SO_LINGER
:
3163 case TARGET_SO_RCVTIMEO
:
3164 case TARGET_SO_SNDTIMEO
:
3165 case TARGET_SO_PEERNAME
:
3167 case TARGET_SO_PEERCRED
: {
3170 struct target_ucred
*tcr
;
3172 if (get_user_u32(len
, optlen
)) {
3173 return -TARGET_EFAULT
;
3176 return -TARGET_EINVAL
;
3180 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
3188 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
3189 return -TARGET_EFAULT
;
3191 __put_user(cr
.pid
, &tcr
->pid
);
3192 __put_user(cr
.uid
, &tcr
->uid
);
3193 __put_user(cr
.gid
, &tcr
->gid
);
3194 unlock_user_struct(tcr
, optval_addr
, 1);
3195 if (put_user_u32(len
, optlen
)) {
3196 return -TARGET_EFAULT
;
3200 /* Options with 'int' argument. */
3201 case TARGET_SO_DEBUG
:
3204 case TARGET_SO_REUSEADDR
:
3205 optname
= SO_REUSEADDR
;
3207 case TARGET_SO_TYPE
:
3210 case TARGET_SO_ERROR
:
3213 case TARGET_SO_DONTROUTE
:
3214 optname
= SO_DONTROUTE
;
3216 case TARGET_SO_BROADCAST
:
3217 optname
= SO_BROADCAST
;
3219 case TARGET_SO_SNDBUF
:
3220 optname
= SO_SNDBUF
;
3222 case TARGET_SO_RCVBUF
:
3223 optname
= SO_RCVBUF
;
3225 case TARGET_SO_KEEPALIVE
:
3226 optname
= SO_KEEPALIVE
;
3228 case TARGET_SO_OOBINLINE
:
3229 optname
= SO_OOBINLINE
;
3231 case TARGET_SO_NO_CHECK
:
3232 optname
= SO_NO_CHECK
;
3234 case TARGET_SO_PRIORITY
:
3235 optname
= SO_PRIORITY
;
3238 case TARGET_SO_BSDCOMPAT
:
3239 optname
= SO_BSDCOMPAT
;
3242 case TARGET_SO_PASSCRED
:
3243 optname
= SO_PASSCRED
;
3245 case TARGET_SO_TIMESTAMP
:
3246 optname
= SO_TIMESTAMP
;
3248 case TARGET_SO_RCVLOWAT
:
3249 optname
= SO_RCVLOWAT
;
3251 case TARGET_SO_ACCEPTCONN
:
3252 optname
= SO_ACCEPTCONN
;
3259 /* TCP options all take an 'int' value. */
3261 if (get_user_u32(len
, optlen
))
3262 return -TARGET_EFAULT
;
3264 return -TARGET_EINVAL
;
3266 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3269 if (optname
== SO_TYPE
) {
3270 val
= host_to_target_sock_type(val
);
3275 if (put_user_u32(val
, optval_addr
))
3276 return -TARGET_EFAULT
;
3278 if (put_user_u8(val
, optval_addr
))
3279 return -TARGET_EFAULT
;
3281 if (put_user_u32(len
, optlen
))
3282 return -TARGET_EFAULT
;
3289 case IP_ROUTER_ALERT
:
3293 case IP_MTU_DISCOVER
:
3299 case IP_MULTICAST_TTL
:
3300 case IP_MULTICAST_LOOP
:
3301 if (get_user_u32(len
, optlen
))
3302 return -TARGET_EFAULT
;
3304 return -TARGET_EINVAL
;
3306 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3309 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
3311 if (put_user_u32(len
, optlen
)
3312 || put_user_u8(val
, optval_addr
))
3313 return -TARGET_EFAULT
;
3315 if (len
> sizeof(int))
3317 if (put_user_u32(len
, optlen
)
3318 || put_user_u32(val
, optval_addr
))
3319 return -TARGET_EFAULT
;
3323 ret
= -TARGET_ENOPROTOOPT
;
3329 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
3331 ret
= -TARGET_EOPNOTSUPP
;
3337 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
3338 abi_ulong count
, int copy
)
3340 struct target_iovec
*target_vec
;
3342 abi_ulong total_len
, max_len
;
3345 bool bad_address
= false;
3351 if (count
> IOV_MAX
) {
3356 vec
= g_try_new0(struct iovec
, count
);
3362 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3363 count
* sizeof(struct target_iovec
), 1);
3364 if (target_vec
== NULL
) {
3369 /* ??? If host page size > target page size, this will result in a
3370 value larger than what we can actually support. */
3371 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
3374 for (i
= 0; i
< count
; i
++) {
3375 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3376 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3381 } else if (len
== 0) {
3382 /* Zero length pointer is ignored. */
3383 vec
[i
].iov_base
= 0;
3385 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
3386 /* If the first buffer pointer is bad, this is a fault. But
3387 * subsequent bad buffers will result in a partial write; this
3388 * is realized by filling the vector with null pointers and
3390 if (!vec
[i
].iov_base
) {
3401 if (len
> max_len
- total_len
) {
3402 len
= max_len
- total_len
;
3405 vec
[i
].iov_len
= len
;
3409 unlock_user(target_vec
, target_addr
, 0);
3414 if (tswapal(target_vec
[i
].iov_len
) > 0) {
3415 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
3418 unlock_user(target_vec
, target_addr
, 0);
3425 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
3426 abi_ulong count
, int copy
)
3428 struct target_iovec
*target_vec
;
3431 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3432 count
* sizeof(struct target_iovec
), 1);
3434 for (i
= 0; i
< count
; i
++) {
3435 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3436 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3440 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
3442 unlock_user(target_vec
, target_addr
, 0);
3448 static inline int target_to_host_sock_type(int *type
)
3451 int target_type
= *type
;
3453 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
3454 case TARGET_SOCK_DGRAM
:
3455 host_type
= SOCK_DGRAM
;
3457 case TARGET_SOCK_STREAM
:
3458 host_type
= SOCK_STREAM
;
3461 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
3464 if (target_type
& TARGET_SOCK_CLOEXEC
) {
3465 #if defined(SOCK_CLOEXEC)
3466 host_type
|= SOCK_CLOEXEC
;
3468 return -TARGET_EINVAL
;
3471 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3472 #if defined(SOCK_NONBLOCK)
3473 host_type
|= SOCK_NONBLOCK
;
3474 #elif !defined(O_NONBLOCK)
3475 return -TARGET_EINVAL
;
/*
 * Try to emulate socket type flags after socket creation: on hosts
 * without SOCK_NONBLOCK, apply O_NONBLOCK via fcntl().  Returns the fd
 * on success; on fcntl failure the fd is closed and -TARGET_EINVAL is
 * returned.
 * NOTE(review): the close(fd) error path and the trailing return were
 * dropped by the extractor and have been reconstructed — confirm
 * against upstream.
 */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
3497 static abi_long
packet_target_to_host_sockaddr(void *host_addr
,
3498 abi_ulong target_addr
,
3501 struct sockaddr
*addr
= host_addr
;
3502 struct target_sockaddr
*target_saddr
;
3504 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
3505 if (!target_saddr
) {
3506 return -TARGET_EFAULT
;
3509 memcpy(addr
, target_saddr
, len
);
3510 addr
->sa_family
= tswap16(target_saddr
->sa_family
);
3511 /* spkt_protocol is big-endian */
3513 unlock_user(target_saddr
, target_addr
, 0);
3517 static TargetFdTrans target_packet_trans
= {
3518 .target_to_host_addr
= packet_target_to_host_sockaddr
,
3521 #ifdef CONFIG_RTNETLINK
3522 static abi_long
netlink_route_target_to_host(void *buf
, size_t len
)
3526 ret
= target_to_host_nlmsg_route(buf
, len
);
3534 static abi_long
netlink_route_host_to_target(void *buf
, size_t len
)
3538 ret
= host_to_target_nlmsg_route(buf
, len
);
3546 static TargetFdTrans target_netlink_route_trans
= {
3547 .target_to_host_data
= netlink_route_target_to_host
,
3548 .host_to_target_data
= netlink_route_host_to_target
,
3550 #endif /* CONFIG_RTNETLINK */
3552 static abi_long
netlink_audit_target_to_host(void *buf
, size_t len
)
3556 ret
= target_to_host_nlmsg_audit(buf
, len
);
3564 static abi_long
netlink_audit_host_to_target(void *buf
, size_t len
)
3568 ret
= host_to_target_nlmsg_audit(buf
, len
);
3576 static TargetFdTrans target_netlink_audit_trans
= {
3577 .target_to_host_data
= netlink_audit_target_to_host
,
3578 .host_to_target_data
= netlink_audit_host_to_target
,
3581 /* do_socket() Must return target values and target errnos. */
3582 static abi_long
do_socket(int domain
, int type
, int protocol
)
3584 int target_type
= type
;
3587 ret
= target_to_host_sock_type(&type
);
3592 if (domain
== PF_NETLINK
&& !(
3593 #ifdef CONFIG_RTNETLINK
3594 protocol
== NETLINK_ROUTE
||
3596 protocol
== NETLINK_KOBJECT_UEVENT
||
3597 protocol
== NETLINK_AUDIT
)) {
3598 return -EPFNOSUPPORT
;
3601 if (domain
== AF_PACKET
||
3602 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
3603 protocol
= tswap16(protocol
);
3606 ret
= get_errno(socket(domain
, type
, protocol
));
3608 ret
= sock_flags_fixup(ret
, target_type
);
3609 if (type
== SOCK_PACKET
) {
3610 /* Manage an obsolete case :
3611 * if socket type is SOCK_PACKET, bind by name
3613 fd_trans_register(ret
, &target_packet_trans
);
3614 } else if (domain
== PF_NETLINK
) {
3616 #ifdef CONFIG_RTNETLINK
3618 fd_trans_register(ret
, &target_netlink_route_trans
);
3621 case NETLINK_KOBJECT_UEVENT
:
3622 /* nothing to do: messages are strings */
3625 fd_trans_register(ret
, &target_netlink_audit_trans
);
3628 g_assert_not_reached();
3635 /* do_bind() Must return target values and target errnos. */
3636 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
3642 if ((int)addrlen
< 0) {
3643 return -TARGET_EINVAL
;
3646 addr
= alloca(addrlen
+1);
3648 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3652 return get_errno(bind(sockfd
, addr
, addrlen
));
3655 /* do_connect() Must return target values and target errnos. */
3656 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
3662 if ((int)addrlen
< 0) {
3663 return -TARGET_EINVAL
;
3666 addr
= alloca(addrlen
+1);
3668 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3672 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
3675 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3676 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
3677 int flags
, int send
)
3683 abi_ulong target_vec
;
3685 if (msgp
->msg_name
) {
3686 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
3687 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
3688 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
3689 tswapal(msgp
->msg_name
),
3691 if (ret
== -TARGET_EFAULT
) {
3692 /* For connected sockets msg_name and msg_namelen must
3693 * be ignored, so returning EFAULT immediately is wrong.
3694 * Instead, pass a bad msg_name to the host kernel, and
3695 * let it decide whether to return EFAULT or not.
3697 msg
.msg_name
= (void *)-1;
3702 msg
.msg_name
= NULL
;
3703 msg
.msg_namelen
= 0;
3705 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3706 msg
.msg_control
= alloca(msg
.msg_controllen
);
3707 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3709 count
= tswapal(msgp
->msg_iovlen
);
3710 target_vec
= tswapal(msgp
->msg_iov
);
3712 if (count
> IOV_MAX
) {
3713 /* sendrcvmsg returns a different errno for this condition than
3714 * readv/writev, so we must catch it here before lock_iovec() does.
3716 ret
= -TARGET_EMSGSIZE
;
3720 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3721 target_vec
, count
, send
);
3723 ret
= -host_to_target_errno(errno
);
3726 msg
.msg_iovlen
= count
;
3730 if (fd_trans_target_to_host_data(fd
)) {
3733 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3734 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3735 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3736 msg
.msg_iov
->iov_len
);
3738 msg
.msg_iov
->iov_base
= host_msg
;
3739 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3743 ret
= target_to_host_cmsg(&msg
, msgp
);
3745 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3749 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3750 if (!is_error(ret
)) {
3752 if (fd_trans_host_to_target_data(fd
)) {
3753 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3756 ret
= host_to_target_cmsg(msgp
, &msg
);
3758 if (!is_error(ret
)) {
3759 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3760 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
3761 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3762 msg
.msg_name
, msg
.msg_namelen
);
3774 unlock_iovec(vec
, target_vec
, count
, !send
);
3779 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3780 int flags
, int send
)
3783 struct target_msghdr
*msgp
;
3785 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3789 return -TARGET_EFAULT
;
3791 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3792 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3796 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3797 * so it might not have this *mmsg-specific flag either.
3799 #ifndef MSG_WAITFORONE
3800 #define MSG_WAITFORONE 0x10000
3803 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3804 unsigned int vlen
, unsigned int flags
,
3807 struct target_mmsghdr
*mmsgp
;
3811 if (vlen
> UIO_MAXIOV
) {
3815 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3817 return -TARGET_EFAULT
;
3820 for (i
= 0; i
< vlen
; i
++) {
3821 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3822 if (is_error(ret
)) {
3825 mmsgp
[i
].msg_len
= tswap32(ret
);
3826 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3827 if (flags
& MSG_WAITFORONE
) {
3828 flags
|= MSG_DONTWAIT
;
3832 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3834 /* Return number of datagrams sent if we sent any at all;
3835 * otherwise return the error.
3843 /* do_accept4() Must return target values and target errnos. */
3844 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3845 abi_ulong target_addrlen_addr
, int flags
)
3852 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3854 if (target_addr
== 0) {
3855 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3858 /* linux returns EINVAL if addrlen pointer is invalid */
3859 if (get_user_u32(addrlen
, target_addrlen_addr
))
3860 return -TARGET_EINVAL
;
3862 if ((int)addrlen
< 0) {
3863 return -TARGET_EINVAL
;
3866 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3867 return -TARGET_EINVAL
;
3869 addr
= alloca(addrlen
);
3871 ret
= get_errno(safe_accept4(fd
, addr
, &addrlen
, host_flags
));
3872 if (!is_error(ret
)) {
3873 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3874 if (put_user_u32(addrlen
, target_addrlen_addr
))
3875 ret
= -TARGET_EFAULT
;
3880 /* do_getpeername() Must return target values and target errnos. */
3881 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3882 abi_ulong target_addrlen_addr
)
3888 if (get_user_u32(addrlen
, target_addrlen_addr
))
3889 return -TARGET_EFAULT
;
3891 if ((int)addrlen
< 0) {
3892 return -TARGET_EINVAL
;
3895 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3896 return -TARGET_EFAULT
;
3898 addr
= alloca(addrlen
);
3900 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
3901 if (!is_error(ret
)) {
3902 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3903 if (put_user_u32(addrlen
, target_addrlen_addr
))
3904 ret
= -TARGET_EFAULT
;
3909 /* do_getsockname() Must return target values and target errnos. */
3910 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3911 abi_ulong target_addrlen_addr
)
3917 if (get_user_u32(addrlen
, target_addrlen_addr
))
3918 return -TARGET_EFAULT
;
3920 if ((int)addrlen
< 0) {
3921 return -TARGET_EINVAL
;
3924 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3925 return -TARGET_EFAULT
;
3927 addr
= alloca(addrlen
);
3929 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
3930 if (!is_error(ret
)) {
3931 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3932 if (put_user_u32(addrlen
, target_addrlen_addr
))
3933 ret
= -TARGET_EFAULT
;
3938 /* do_socketpair() Must return target values and target errnos. */
3939 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3940 abi_ulong target_tab_addr
)
3945 target_to_host_sock_type(&type
);
3947 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3948 if (!is_error(ret
)) {
3949 if (put_user_s32(tab
[0], target_tab_addr
)
3950 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3951 ret
= -TARGET_EFAULT
;
3956 /* do_sendto() Must return target values and target errnos. */
3957 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3958 abi_ulong target_addr
, socklen_t addrlen
)
3962 void *copy_msg
= NULL
;
3965 if ((int)addrlen
< 0) {
3966 return -TARGET_EINVAL
;
3969 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3971 return -TARGET_EFAULT
;
3972 if (fd_trans_target_to_host_data(fd
)) {
3973 copy_msg
= host_msg
;
3974 host_msg
= g_malloc(len
);
3975 memcpy(host_msg
, copy_msg
, len
);
3976 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3982 addr
= alloca(addrlen
+1);
3983 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3987 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3989 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3994 host_msg
= copy_msg
;
3996 unlock_user(host_msg
, msg
, 0);
4000 /* do_recvfrom() Must return target values and target errnos. */
4001 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
4002 abi_ulong target_addr
,
4003 abi_ulong target_addrlen
)
4010 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
4012 return -TARGET_EFAULT
;
4014 if (get_user_u32(addrlen
, target_addrlen
)) {
4015 ret
= -TARGET_EFAULT
;
4018 if ((int)addrlen
< 0) {
4019 ret
= -TARGET_EINVAL
;
4022 addr
= alloca(addrlen
);
4023 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
4026 addr
= NULL
; /* To keep compiler quiet. */
4027 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
4029 if (!is_error(ret
)) {
4030 if (fd_trans_host_to_target_data(fd
)) {
4031 ret
= fd_trans_host_to_target_data(fd
)(host_msg
, ret
);
4034 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
4035 if (put_user_u32(addrlen
, target_addrlen
)) {
4036 ret
= -TARGET_EFAULT
;
4040 unlock_user(host_msg
, msg
, len
);
4043 unlock_user(host_msg
, msg
, 0);
4048 #ifdef TARGET_NR_socketcall
4049 /* do_socketcall() must return target values and target errnos. */
4050 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
4052 static const unsigned nargs
[] = { /* number of arguments per operation */
4053 [TARGET_SYS_SOCKET
] = 3, /* domain, type, protocol */
4054 [TARGET_SYS_BIND
] = 3, /* fd, addr, addrlen */
4055 [TARGET_SYS_CONNECT
] = 3, /* fd, addr, addrlen */
4056 [TARGET_SYS_LISTEN
] = 2, /* fd, backlog */
4057 [TARGET_SYS_ACCEPT
] = 3, /* fd, addr, addrlen */
4058 [TARGET_SYS_GETSOCKNAME
] = 3, /* fd, addr, addrlen */
4059 [TARGET_SYS_GETPEERNAME
] = 3, /* fd, addr, addrlen */
4060 [TARGET_SYS_SOCKETPAIR
] = 4, /* domain, type, protocol, tab */
4061 [TARGET_SYS_SEND
] = 4, /* fd, msg, len, flags */
4062 [TARGET_SYS_RECV
] = 4, /* fd, msg, len, flags */
4063 [TARGET_SYS_SENDTO
] = 6, /* fd, msg, len, flags, addr, addrlen */
4064 [TARGET_SYS_RECVFROM
] = 6, /* fd, msg, len, flags, addr, addrlen */
4065 [TARGET_SYS_SHUTDOWN
] = 2, /* fd, how */
4066 [TARGET_SYS_SETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
4067 [TARGET_SYS_GETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
4068 [TARGET_SYS_SENDMSG
] = 3, /* fd, msg, flags */
4069 [TARGET_SYS_RECVMSG
] = 3, /* fd, msg, flags */
4070 [TARGET_SYS_ACCEPT4
] = 4, /* fd, addr, addrlen, flags */
4071 [TARGET_SYS_RECVMMSG
] = 4, /* fd, msgvec, vlen, flags */
4072 [TARGET_SYS_SENDMMSG
] = 4, /* fd, msgvec, vlen, flags */
4074 abi_long a
[6]; /* max 6 args */
4077 /* check the range of the first argument num */
4078 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
4079 if (num
< 1 || num
> TARGET_SYS_SENDMMSG
) {
4080 return -TARGET_EINVAL
;
4082 /* ensure we have space for args */
4083 if (nargs
[num
] > ARRAY_SIZE(a
)) {
4084 return -TARGET_EINVAL
;
4086 /* collect the arguments in a[] according to nargs[] */
4087 for (i
= 0; i
< nargs
[num
]; ++i
) {
4088 if (get_user_ual(a
[i
], vptr
+ i
* sizeof(abi_long
)) != 0) {
4089 return -TARGET_EFAULT
;
4092 /* now when we have the args, invoke the appropriate underlying function */
4094 case TARGET_SYS_SOCKET
: /* domain, type, protocol */
4095 return do_socket(a
[0], a
[1], a
[2]);
4096 case TARGET_SYS_BIND
: /* sockfd, addr, addrlen */
4097 return do_bind(a
[0], a
[1], a
[2]);
4098 case TARGET_SYS_CONNECT
: /* sockfd, addr, addrlen */
4099 return do_connect(a
[0], a
[1], a
[2]);
4100 case TARGET_SYS_LISTEN
: /* sockfd, backlog */
4101 return get_errno(listen(a
[0], a
[1]));
4102 case TARGET_SYS_ACCEPT
: /* sockfd, addr, addrlen */
4103 return do_accept4(a
[0], a
[1], a
[2], 0);
4104 case TARGET_SYS_GETSOCKNAME
: /* sockfd, addr, addrlen */
4105 return do_getsockname(a
[0], a
[1], a
[2]);
4106 case TARGET_SYS_GETPEERNAME
: /* sockfd, addr, addrlen */
4107 return do_getpeername(a
[0], a
[1], a
[2]);
4108 case TARGET_SYS_SOCKETPAIR
: /* domain, type, protocol, tab */
4109 return do_socketpair(a
[0], a
[1], a
[2], a
[3]);
4110 case TARGET_SYS_SEND
: /* sockfd, msg, len, flags */
4111 return do_sendto(a
[0], a
[1], a
[2], a
[3], 0, 0);
4112 case TARGET_SYS_RECV
: /* sockfd, msg, len, flags */
4113 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], 0, 0);
4114 case TARGET_SYS_SENDTO
: /* sockfd, msg, len, flags, addr, addrlen */
4115 return do_sendto(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
4116 case TARGET_SYS_RECVFROM
: /* sockfd, msg, len, flags, addr, addrlen */
4117 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
4118 case TARGET_SYS_SHUTDOWN
: /* sockfd, how */
4119 return get_errno(shutdown(a
[0], a
[1]));
4120 case TARGET_SYS_SETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
4121 return do_setsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
4122 case TARGET_SYS_GETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
4123 return do_getsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
4124 case TARGET_SYS_SENDMSG
: /* sockfd, msg, flags */
4125 return do_sendrecvmsg(a
[0], a
[1], a
[2], 1);
4126 case TARGET_SYS_RECVMSG
: /* sockfd, msg, flags */
4127 return do_sendrecvmsg(a
[0], a
[1], a
[2], 0);
4128 case TARGET_SYS_ACCEPT4
: /* sockfd, addr, addrlen, flags */
4129 return do_accept4(a
[0], a
[1], a
[2], a
[3]);
4130 case TARGET_SYS_RECVMMSG
: /* sockfd, msgvec, vlen, flags */
4131 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 0);
4132 case TARGET_SYS_SENDMMSG
: /* sockfd, msgvec, vlen, flags */
4133 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 1);
4135 gemu_log("Unsupported socketcall: %d\n", num
);
4136 return -TARGET_EINVAL
;
4141 #define N_SHM_REGIONS 32
4143 static struct shm_region
{
4147 } shm_regions
[N_SHM_REGIONS
];
4149 #ifndef TARGET_SEMID64_DS
4150 /* asm-generic version of this struct */
4151 struct target_semid64_ds
4153 struct target_ipc_perm sem_perm
;
4154 abi_ulong sem_otime
;
4155 #if TARGET_ABI_BITS == 32
4156 abi_ulong __unused1
;
4158 abi_ulong sem_ctime
;
4159 #if TARGET_ABI_BITS == 32
4160 abi_ulong __unused2
;
4162 abi_ulong sem_nsems
;
4163 abi_ulong __unused3
;
4164 abi_ulong __unused4
;
4168 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
4169 abi_ulong target_addr
)
4171 struct target_ipc_perm
*target_ip
;
4172 struct target_semid64_ds
*target_sd
;
4174 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4175 return -TARGET_EFAULT
;
4176 target_ip
= &(target_sd
->sem_perm
);
4177 host_ip
->__key
= tswap32(target_ip
->__key
);
4178 host_ip
->uid
= tswap32(target_ip
->uid
);
4179 host_ip
->gid
= tswap32(target_ip
->gid
);
4180 host_ip
->cuid
= tswap32(target_ip
->cuid
);
4181 host_ip
->cgid
= tswap32(target_ip
->cgid
);
4182 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4183 host_ip
->mode
= tswap32(target_ip
->mode
);
4185 host_ip
->mode
= tswap16(target_ip
->mode
);
4187 #if defined(TARGET_PPC)
4188 host_ip
->__seq
= tswap32(target_ip
->__seq
);
4190 host_ip
->__seq
= tswap16(target_ip
->__seq
);
4192 unlock_user_struct(target_sd
, target_addr
, 0);
4196 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
4197 struct ipc_perm
*host_ip
)
4199 struct target_ipc_perm
*target_ip
;
4200 struct target_semid64_ds
*target_sd
;
4202 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4203 return -TARGET_EFAULT
;
4204 target_ip
= &(target_sd
->sem_perm
);
4205 target_ip
->__key
= tswap32(host_ip
->__key
);
4206 target_ip
->uid
= tswap32(host_ip
->uid
);
4207 target_ip
->gid
= tswap32(host_ip
->gid
);
4208 target_ip
->cuid
= tswap32(host_ip
->cuid
);
4209 target_ip
->cgid
= tswap32(host_ip
->cgid
);
4210 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4211 target_ip
->mode
= tswap32(host_ip
->mode
);
4213 target_ip
->mode
= tswap16(host_ip
->mode
);
4215 #if defined(TARGET_PPC)
4216 target_ip
->__seq
= tswap32(host_ip
->__seq
);
4218 target_ip
->__seq
= tswap16(host_ip
->__seq
);
4220 unlock_user_struct(target_sd
, target_addr
, 1);
4224 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
4225 abi_ulong target_addr
)
4227 struct target_semid64_ds
*target_sd
;
4229 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4230 return -TARGET_EFAULT
;
4231 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
4232 return -TARGET_EFAULT
;
4233 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
4234 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
4235 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
4236 unlock_user_struct(target_sd
, target_addr
, 0);
4240 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
4241 struct semid_ds
*host_sd
)
4243 struct target_semid64_ds
*target_sd
;
4245 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4246 return -TARGET_EFAULT
;
4247 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
4248 return -TARGET_EFAULT
;
4249 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
4250 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
4251 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
4252 unlock_user_struct(target_sd
, target_addr
, 1);
4256 struct target_seminfo
{
4269 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
4270 struct seminfo
*host_seminfo
)
4272 struct target_seminfo
*target_seminfo
;
4273 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
4274 return -TARGET_EFAULT
;
4275 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
4276 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
4277 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
4278 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
4279 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
4280 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
4281 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
4282 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
4283 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
4284 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
4285 unlock_user_struct(target_seminfo
, target_addr
, 1);
4291 struct semid_ds
*buf
;
4292 unsigned short *array
;
4293 struct seminfo
*__buf
;
4296 union target_semun
{
4303 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
4304 abi_ulong target_addr
)
4307 unsigned short *array
;
4309 struct semid_ds semid_ds
;
4312 semun
.buf
= &semid_ds
;
4314 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4316 return get_errno(ret
);
4318 nsems
= semid_ds
.sem_nsems
;
4320 *host_array
= g_try_new(unsigned short, nsems
);
4322 return -TARGET_ENOMEM
;
4324 array
= lock_user(VERIFY_READ
, target_addr
,
4325 nsems
*sizeof(unsigned short), 1);
4327 g_free(*host_array
);
4328 return -TARGET_EFAULT
;
4331 for(i
=0; i
<nsems
; i
++) {
4332 __get_user((*host_array
)[i
], &array
[i
]);
4334 unlock_user(array
, target_addr
, 0);
4339 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
4340 unsigned short **host_array
)
4343 unsigned short *array
;
4345 struct semid_ds semid_ds
;
4348 semun
.buf
= &semid_ds
;
4350 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4352 return get_errno(ret
);
4354 nsems
= semid_ds
.sem_nsems
;
4356 array
= lock_user(VERIFY_WRITE
, target_addr
,
4357 nsems
*sizeof(unsigned short), 0);
4359 return -TARGET_EFAULT
;
4361 for(i
=0; i
<nsems
; i
++) {
4362 __put_user((*host_array
)[i
], &array
[i
]);
4364 g_free(*host_array
);
4365 unlock_user(array
, target_addr
, 1);
4370 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
4371 abi_ulong target_arg
)
4373 union target_semun target_su
= { .buf
= target_arg
};
4375 struct semid_ds dsarg
;
4376 unsigned short *array
= NULL
;
4377 struct seminfo seminfo
;
4378 abi_long ret
= -TARGET_EINVAL
;
4385 /* In 64 bit cross-endian situations, we will erroneously pick up
4386 * the wrong half of the union for the "val" element. To rectify
4387 * this, the entire 8-byte structure is byteswapped, followed by
4388 * a swap of the 4 byte val field. In other cases, the data is
4389 * already in proper host byte order. */
4390 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
4391 target_su
.buf
= tswapal(target_su
.buf
);
4392 arg
.val
= tswap32(target_su
.val
);
4394 arg
.val
= target_su
.val
;
4396 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4400 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
4404 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4405 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
4412 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
4416 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4417 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
4423 arg
.__buf
= &seminfo
;
4424 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4425 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
4433 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
4440 struct target_sembuf
{
4441 unsigned short sem_num
;
4446 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
4447 abi_ulong target_addr
,
4450 struct target_sembuf
*target_sembuf
;
4453 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
4454 nsops
*sizeof(struct target_sembuf
), 1);
4456 return -TARGET_EFAULT
;
4458 for(i
=0; i
<nsops
; i
++) {
4459 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
4460 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
4461 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
4464 unlock_user(target_sembuf
, target_addr
, 0);
4469 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
4471 struct sembuf sops
[nsops
];
4473 if (target_to_host_sembuf(sops
, ptr
, nsops
))
4474 return -TARGET_EFAULT
;
4476 return get_errno(safe_semtimedop(semid
, sops
, nsops
, NULL
));
4479 struct target_msqid_ds
4481 struct target_ipc_perm msg_perm
;
4482 abi_ulong msg_stime
;
4483 #if TARGET_ABI_BITS == 32
4484 abi_ulong __unused1
;
4486 abi_ulong msg_rtime
;
4487 #if TARGET_ABI_BITS == 32
4488 abi_ulong __unused2
;
4490 abi_ulong msg_ctime
;
4491 #if TARGET_ABI_BITS == 32
4492 abi_ulong __unused3
;
4494 abi_ulong __msg_cbytes
;
4496 abi_ulong msg_qbytes
;
4497 abi_ulong msg_lspid
;
4498 abi_ulong msg_lrpid
;
4499 abi_ulong __unused4
;
4500 abi_ulong __unused5
;
4503 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
4504 abi_ulong target_addr
)
4506 struct target_msqid_ds
*target_md
;
4508 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
4509 return -TARGET_EFAULT
;
4510 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
4511 return -TARGET_EFAULT
;
4512 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
4513 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
4514 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
4515 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
4516 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
4517 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
4518 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
4519 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
4520 unlock_user_struct(target_md
, target_addr
, 0);
4524 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
4525 struct msqid_ds
*host_md
)
4527 struct target_msqid_ds
*target_md
;
4529 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
4530 return -TARGET_EFAULT
;
4531 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
4532 return -TARGET_EFAULT
;
4533 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
4534 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
4535 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
4536 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
4537 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
4538 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
4539 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
4540 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
4541 unlock_user_struct(target_md
, target_addr
, 1);
4545 struct target_msginfo
{
4553 unsigned short int msgseg
;
4556 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
4557 struct msginfo
*host_msginfo
)
4559 struct target_msginfo
*target_msginfo
;
4560 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
4561 return -TARGET_EFAULT
;
4562 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
4563 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
4564 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
4565 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
4566 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
4567 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
4568 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
4569 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
4570 unlock_user_struct(target_msginfo
, target_addr
, 1);
4574 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
4576 struct msqid_ds dsarg
;
4577 struct msginfo msginfo
;
4578 abi_long ret
= -TARGET_EINVAL
;
4586 if (target_to_host_msqid_ds(&dsarg
,ptr
))
4587 return -TARGET_EFAULT
;
4588 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
4589 if (host_to_target_msqid_ds(ptr
,&dsarg
))
4590 return -TARGET_EFAULT
;
4593 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
4597 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
4598 if (host_to_target_msginfo(ptr
, &msginfo
))
4599 return -TARGET_EFAULT
;
4606 struct target_msgbuf
{
4611 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
4612 ssize_t msgsz
, int msgflg
)
4614 struct target_msgbuf
*target_mb
;
4615 struct msgbuf
*host_mb
;
4619 return -TARGET_EINVAL
;
4622 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
4623 return -TARGET_EFAULT
;
4624 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4626 unlock_user_struct(target_mb
, msgp
, 0);
4627 return -TARGET_ENOMEM
;
4629 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
4630 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
4631 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
4633 unlock_user_struct(target_mb
, msgp
, 0);
4638 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
4639 ssize_t msgsz
, abi_long msgtyp
,
4642 struct target_msgbuf
*target_mb
;
4644 struct msgbuf
*host_mb
;
4648 return -TARGET_EINVAL
;
4651 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
4652 return -TARGET_EFAULT
;
4654 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4656 ret
= -TARGET_ENOMEM
;
4659 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
4662 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
4663 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
4664 if (!target_mtext
) {
4665 ret
= -TARGET_EFAULT
;
4668 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
4669 unlock_user(target_mtext
, target_mtext_addr
, ret
);
4672 target_mb
->mtype
= tswapal(host_mb
->mtype
);
4676 unlock_user_struct(target_mb
, msgp
, 1);
4681 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
4682 abi_ulong target_addr
)
4684 struct target_shmid_ds
*target_sd
;
4686 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4687 return -TARGET_EFAULT
;
4688 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
4689 return -TARGET_EFAULT
;
4690 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4691 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4692 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4693 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4694 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4695 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4696 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4697 unlock_user_struct(target_sd
, target_addr
, 0);
4701 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
4702 struct shmid_ds
*host_sd
)
4704 struct target_shmid_ds
*target_sd
;
4706 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4707 return -TARGET_EFAULT
;
4708 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
4709 return -TARGET_EFAULT
;
4710 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4711 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4712 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4713 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4714 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4715 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4716 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4717 unlock_user_struct(target_sd
, target_addr
, 1);
4721 struct target_shminfo
{
4729 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
4730 struct shminfo
*host_shminfo
)
4732 struct target_shminfo
*target_shminfo
;
4733 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
4734 return -TARGET_EFAULT
;
4735 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
4736 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
4737 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
4738 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
4739 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
4740 unlock_user_struct(target_shminfo
, target_addr
, 1);
4744 struct target_shm_info
{
4749 abi_ulong swap_attempts
;
4750 abi_ulong swap_successes
;
4753 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
4754 struct shm_info
*host_shm_info
)
4756 struct target_shm_info
*target_shm_info
;
4757 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
4758 return -TARGET_EFAULT
;
4759 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
4760 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
4761 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
4762 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
4763 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
4764 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
4765 unlock_user_struct(target_shm_info
, target_addr
, 1);
4769 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
4771 struct shmid_ds dsarg
;
4772 struct shminfo shminfo
;
4773 struct shm_info shm_info
;
4774 abi_long ret
= -TARGET_EINVAL
;
4782 if (target_to_host_shmid_ds(&dsarg
, buf
))
4783 return -TARGET_EFAULT
;
4784 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
4785 if (host_to_target_shmid_ds(buf
, &dsarg
))
4786 return -TARGET_EFAULT
;
4789 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
4790 if (host_to_target_shminfo(buf
, &shminfo
))
4791 return -TARGET_EFAULT
;
4794 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
4795 if (host_to_target_shm_info(buf
, &shm_info
))
4796 return -TARGET_EFAULT
;
4801 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
4808 #ifndef TARGET_FORCE_SHMLBA
4809 /* For most architectures, SHMLBA is the same as the page size;
4810 * some architectures have larger values, in which case they should
4811 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4812 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4813 * and defining its own value for SHMLBA.
4815 * The kernel also permits SHMLBA to be set by the architecture to a
4816 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4817 * this means that addresses are rounded to the large size if
4818 * SHM_RND is set but addresses not aligned to that size are not rejected
4819 * as long as they are at least page-aligned. Since the only architecture
4820 * which uses this is ia64 this code doesn't provide for that oddity.
4822 static inline abi_ulong
target_shmlba(CPUArchState
*cpu_env
)
4824 return TARGET_PAGE_SIZE
;
4828 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
4829 int shmid
, abi_ulong shmaddr
, int shmflg
)
4833 struct shmid_ds shm_info
;
4837 /* find out the length of the shared memory segment */
4838 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4839 if (is_error(ret
)) {
4840 /* can't get length, bail out */
4844 shmlba
= target_shmlba(cpu_env
);
4846 if (shmaddr
& (shmlba
- 1)) {
4847 if (shmflg
& SHM_RND
) {
4848 shmaddr
&= ~(shmlba
- 1);
4850 return -TARGET_EINVAL
;
4857 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
4859 abi_ulong mmap_start
;
4861 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
4863 if (mmap_start
== -1) {
4865 host_raddr
= (void *)-1;
4867 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
4870 if (host_raddr
== (void *)-1) {
4872 return get_errno((long)host_raddr
);
4874 raddr
=h2g((unsigned long)host_raddr
);
4876 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
4877 PAGE_VALID
| PAGE_READ
|
4878 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
4880 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4881 if (!shm_regions
[i
].in_use
) {
4882 shm_regions
[i
].in_use
= true;
4883 shm_regions
[i
].start
= raddr
;
4884 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4894 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4898 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4899 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4900 shm_regions
[i
].in_use
= false;
4901 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
4906 return get_errno(shmdt(g2h(shmaddr
)));
4909 #ifdef TARGET_NR_ipc
4910 /* ??? This only works with linear mappings. */
4911 /* do_ipc() must return target values and target errnos. */
4912 static abi_long
do_ipc(CPUArchState
*cpu_env
,
4913 unsigned int call
, abi_long first
,
4914 abi_long second
, abi_long third
,
4915 abi_long ptr
, abi_long fifth
)
4920 version
= call
>> 16;
4925 ret
= do_semop(first
, ptr
, second
);
4929 ret
= get_errno(semget(first
, second
, third
));
4932 case IPCOP_semctl
: {
4933 /* The semun argument to semctl is passed by value, so dereference the
4936 get_user_ual(atptr
, ptr
);
4937 ret
= do_semctl(first
, second
, third
, atptr
);
4942 ret
= get_errno(msgget(first
, second
));
4946 ret
= do_msgsnd(first
, ptr
, second
, third
);
4950 ret
= do_msgctl(first
, second
, ptr
);
4957 struct target_ipc_kludge
{
4962 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
4963 ret
= -TARGET_EFAULT
;
4967 ret
= do_msgrcv(first
, tswapal(tmp
->msgp
), second
, tswapal(tmp
->msgtyp
), third
);
4969 unlock_user_struct(tmp
, ptr
, 0);
4973 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
4982 raddr
= do_shmat(cpu_env
, first
, ptr
, second
);
4983 if (is_error(raddr
))
4984 return get_errno(raddr
);
4985 if (put_user_ual(raddr
, third
))
4986 return -TARGET_EFAULT
;
4990 ret
= -TARGET_EINVAL
;
4995 ret
= do_shmdt(ptr
);
4999 /* IPC_* flag values are the same on all linux platforms */
5000 ret
= get_errno(shmget(first
, second
, third
));
5003 /* IPC_* and SHM_* command values are the same on all linux platforms */
5005 ret
= do_shmctl(first
, second
, ptr
);
5008 gemu_log("Unsupported ipc call: %d (version %d)\n", call
, version
);
5009 ret
= -TARGET_ENOSYS
;
5016 /* kernel structure types definitions */
5018 #define STRUCT(name, ...) STRUCT_ ## name,
5019 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
5021 #include "syscall_types.h"
5025 #undef STRUCT_SPECIAL
5027 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
5028 #define STRUCT_SPECIAL(name)
5029 #include "syscall_types.h"
5031 #undef STRUCT_SPECIAL
5033 typedef struct IOCTLEntry IOCTLEntry
;
5035 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5036 int fd
, int cmd
, abi_long arg
);
5040 unsigned int host_cmd
;
5043 do_ioctl_fn
*do_ioctl
;
5044 const argtype arg_type
[5];
5047 #define IOC_R 0x0001
5048 #define IOC_W 0x0002
5049 #define IOC_RW (IOC_R | IOC_W)
5051 #define MAX_STRUCT_SIZE 4096
5053 #ifdef CONFIG_FIEMAP
5054 /* So fiemap access checks don't overflow on 32 bit systems.
5055 * This is very slightly smaller than the limit imposed by
5056 * the underlying kernel.
5058 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
5059 / sizeof(struct fiemap_extent))
5061 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5062 int fd
, int cmd
, abi_long arg
)
5064 /* The parameter for this ioctl is a struct fiemap followed
5065 * by an array of struct fiemap_extent whose size is set
5066 * in fiemap->fm_extent_count. The array is filled in by the
5069 int target_size_in
, target_size_out
;
5071 const argtype
*arg_type
= ie
->arg_type
;
5072 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
5075 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
5079 assert(arg_type
[0] == TYPE_PTR
);
5080 assert(ie
->access
== IOC_RW
);
5082 target_size_in
= thunk_type_size(arg_type
, 0);
5083 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
5085 return -TARGET_EFAULT
;
5087 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5088 unlock_user(argptr
, arg
, 0);
5089 fm
= (struct fiemap
*)buf_temp
;
5090 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
5091 return -TARGET_EINVAL
;
5094 outbufsz
= sizeof (*fm
) +
5095 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
5097 if (outbufsz
> MAX_STRUCT_SIZE
) {
5098 /* We can't fit all the extents into the fixed size buffer.
5099 * Allocate one that is large enough and use it instead.
5101 fm
= g_try_malloc(outbufsz
);
5103 return -TARGET_ENOMEM
;
5105 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
5108 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, fm
));
5109 if (!is_error(ret
)) {
5110 target_size_out
= target_size_in
;
5111 /* An extent_count of 0 means we were only counting the extents
5112 * so there are no structs to copy
5114 if (fm
->fm_extent_count
!= 0) {
5115 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
5117 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
5119 ret
= -TARGET_EFAULT
;
5121 /* Convert the struct fiemap */
5122 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
5123 if (fm
->fm_extent_count
!= 0) {
5124 p
= argptr
+ target_size_in
;
5125 /* ...and then all the struct fiemap_extents */
5126 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
5127 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
5132 unlock_user(argptr
, arg
, target_size_out
);
5142 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5143 int fd
, int cmd
, abi_long arg
)
5145 const argtype
*arg_type
= ie
->arg_type
;
5149 struct ifconf
*host_ifconf
;
5151 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
5152 int target_ifreq_size
;
5157 abi_long target_ifc_buf
;
5161 assert(arg_type
[0] == TYPE_PTR
);
5162 assert(ie
->access
== IOC_RW
);
5165 target_size
= thunk_type_size(arg_type
, 0);
5167 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5169 return -TARGET_EFAULT
;
5170 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5171 unlock_user(argptr
, arg
, 0);
5173 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
5174 target_ifc_len
= host_ifconf
->ifc_len
;
5175 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
5177 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
5178 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
5179 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
5181 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
5182 if (outbufsz
> MAX_STRUCT_SIZE
) {
5183 /* We can't fit all the extents into the fixed size buffer.
5184 * Allocate one that is large enough and use it instead.
5186 host_ifconf
= malloc(outbufsz
);
5188 return -TARGET_ENOMEM
;
5190 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
5193 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
5195 host_ifconf
->ifc_len
= host_ifc_len
;
5196 host_ifconf
->ifc_buf
= host_ifc_buf
;
5198 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
5199 if (!is_error(ret
)) {
5200 /* convert host ifc_len to target ifc_len */
5202 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
5203 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
5204 host_ifconf
->ifc_len
= target_ifc_len
;
5206 /* restore target ifc_buf */
5208 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
5210 /* copy struct ifconf to target user */
5212 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5214 return -TARGET_EFAULT
;
5215 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
5216 unlock_user(argptr
, arg
, target_size
);
5218 /* copy ifreq[] to target user */
5220 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
5221 for (i
= 0; i
< nb_ifreq
; i
++) {
5222 thunk_convert(argptr
+ i
* target_ifreq_size
,
5223 host_ifc_buf
+ i
* sizeof(struct ifreq
),
5224 ifreq_arg_type
, THUNK_TARGET
);
5226 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
5236 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5237 int cmd
, abi_long arg
)
5240 struct dm_ioctl
*host_dm
;
5241 abi_long guest_data
;
5242 uint32_t guest_data_size
;
5244 const argtype
*arg_type
= ie
->arg_type
;
5246 void *big_buf
= NULL
;
5250 target_size
= thunk_type_size(arg_type
, 0);
5251 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5253 ret
= -TARGET_EFAULT
;
5256 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5257 unlock_user(argptr
, arg
, 0);
5259 /* buf_temp is too small, so fetch things into a bigger buffer */
5260 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
5261 memcpy(big_buf
, buf_temp
, target_size
);
5265 guest_data
= arg
+ host_dm
->data_start
;
5266 if ((guest_data
- arg
) < 0) {
5267 ret
= -TARGET_EINVAL
;
5270 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5271 host_data
= (char*)host_dm
+ host_dm
->data_start
;
5273 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
5275 ret
= -TARGET_EFAULT
;
5279 switch (ie
->host_cmd
) {
5281 case DM_LIST_DEVICES
:
5284 case DM_DEV_SUSPEND
:
5287 case DM_TABLE_STATUS
:
5288 case DM_TABLE_CLEAR
:
5290 case DM_LIST_VERSIONS
:
5294 case DM_DEV_SET_GEOMETRY
:
5295 /* data contains only strings */
5296 memcpy(host_data
, argptr
, guest_data_size
);
5299 memcpy(host_data
, argptr
, guest_data_size
);
5300 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
5304 void *gspec
= argptr
;
5305 void *cur_data
= host_data
;
5306 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5307 int spec_size
= thunk_type_size(arg_type
, 0);
5310 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5311 struct dm_target_spec
*spec
= cur_data
;
5315 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
5316 slen
= strlen((char*)gspec
+ spec_size
) + 1;
5318 spec
->next
= sizeof(*spec
) + slen
;
5319 strcpy((char*)&spec
[1], gspec
+ spec_size
);
5321 cur_data
+= spec
->next
;
5326 ret
= -TARGET_EINVAL
;
5327 unlock_user(argptr
, guest_data
, 0);
5330 unlock_user(argptr
, guest_data
, 0);
5332 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5333 if (!is_error(ret
)) {
5334 guest_data
= arg
+ host_dm
->data_start
;
5335 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5336 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
5337 switch (ie
->host_cmd
) {
5342 case DM_DEV_SUSPEND
:
5345 case DM_TABLE_CLEAR
:
5347 case DM_DEV_SET_GEOMETRY
:
5348 /* no return data */
5350 case DM_LIST_DEVICES
:
5352 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
5353 uint32_t remaining_data
= guest_data_size
;
5354 void *cur_data
= argptr
;
5355 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
5356 int nl_size
= 12; /* can't use thunk_size due to alignment */
5359 uint32_t next
= nl
->next
;
5361 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
5363 if (remaining_data
< nl
->next
) {
5364 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5367 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
5368 strcpy(cur_data
+ nl_size
, nl
->name
);
5369 cur_data
+= nl
->next
;
5370 remaining_data
-= nl
->next
;
5374 nl
= (void*)nl
+ next
;
5379 case DM_TABLE_STATUS
:
5381 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
5382 void *cur_data
= argptr
;
5383 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5384 int spec_size
= thunk_type_size(arg_type
, 0);
5387 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5388 uint32_t next
= spec
->next
;
5389 int slen
= strlen((char*)&spec
[1]) + 1;
5390 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
5391 if (guest_data_size
< spec
->next
) {
5392 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5395 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
5396 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
5397 cur_data
= argptr
+ spec
->next
;
5398 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
5404 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
5405 int count
= *(uint32_t*)hdata
;
5406 uint64_t *hdev
= hdata
+ 8;
5407 uint64_t *gdev
= argptr
+ 8;
5410 *(uint32_t*)argptr
= tswap32(count
);
5411 for (i
= 0; i
< count
; i
++) {
5412 *gdev
= tswap64(*hdev
);
5418 case DM_LIST_VERSIONS
:
5420 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
5421 uint32_t remaining_data
= guest_data_size
;
5422 void *cur_data
= argptr
;
5423 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
5424 int vers_size
= thunk_type_size(arg_type
, 0);
5427 uint32_t next
= vers
->next
;
5429 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
5431 if (remaining_data
< vers
->next
) {
5432 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5435 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
5436 strcpy(cur_data
+ vers_size
, vers
->name
);
5437 cur_data
+= vers
->next
;
5438 remaining_data
-= vers
->next
;
5442 vers
= (void*)vers
+ next
;
5447 unlock_user(argptr
, guest_data
, 0);
5448 ret
= -TARGET_EINVAL
;
5451 unlock_user(argptr
, guest_data
, guest_data_size
);
5453 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5455 ret
= -TARGET_EFAULT
;
5458 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5459 unlock_user(argptr
, arg
, target_size
);
5466 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5467 int cmd
, abi_long arg
)
5471 const argtype
*arg_type
= ie
->arg_type
;
5472 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
5475 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
5476 struct blkpg_partition host_part
;
5478 /* Read and convert blkpg */
5480 target_size
= thunk_type_size(arg_type
, 0);
5481 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5483 ret
= -TARGET_EFAULT
;
5486 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5487 unlock_user(argptr
, arg
, 0);
5489 switch (host_blkpg
->op
) {
5490 case BLKPG_ADD_PARTITION
:
5491 case BLKPG_DEL_PARTITION
:
5492 /* payload is struct blkpg_partition */
5495 /* Unknown opcode */
5496 ret
= -TARGET_EINVAL
;
5500 /* Read and convert blkpg->data */
5501 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
5502 target_size
= thunk_type_size(part_arg_type
, 0);
5503 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5505 ret
= -TARGET_EFAULT
;
5508 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
5509 unlock_user(argptr
, arg
, 0);
5511 /* Swizzle the data pointer to our local copy and call! */
5512 host_blkpg
->data
= &host_part
;
5513 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
5519 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5520 int fd
, int cmd
, abi_long arg
)
5522 const argtype
*arg_type
= ie
->arg_type
;
5523 const StructEntry
*se
;
5524 const argtype
*field_types
;
5525 const int *dst_offsets
, *src_offsets
;
5528 abi_ulong
*target_rt_dev_ptr
;
5529 unsigned long *host_rt_dev_ptr
;
5533 assert(ie
->access
== IOC_W
);
5534 assert(*arg_type
== TYPE_PTR
);
5536 assert(*arg_type
== TYPE_STRUCT
);
5537 target_size
= thunk_type_size(arg_type
, 0);
5538 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5540 return -TARGET_EFAULT
;
5543 assert(*arg_type
== (int)STRUCT_rtentry
);
5544 se
= struct_entries
+ *arg_type
++;
5545 assert(se
->convert
[0] == NULL
);
5546 /* convert struct here to be able to catch rt_dev string */
5547 field_types
= se
->field_types
;
5548 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
5549 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
5550 for (i
= 0; i
< se
->nb_fields
; i
++) {
5551 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
5552 assert(*field_types
== TYPE_PTRVOID
);
5553 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
5554 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
5555 if (*target_rt_dev_ptr
!= 0) {
5556 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
5557 tswapal(*target_rt_dev_ptr
));
5558 if (!*host_rt_dev_ptr
) {
5559 unlock_user(argptr
, arg
, 0);
5560 return -TARGET_EFAULT
;
5563 *host_rt_dev_ptr
= 0;
5568 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
5569 argptr
+ src_offsets
[i
],
5570 field_types
, THUNK_HOST
);
5572 unlock_user(argptr
, arg
, 0);
5574 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5575 if (*host_rt_dev_ptr
!= 0) {
5576 unlock_user((void *)*host_rt_dev_ptr
,
5577 *target_rt_dev_ptr
, 0);
5582 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5583 int fd
, int cmd
, abi_long arg
)
5585 int sig
= target_to_host_signal(arg
);
5586 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
5589 static IOCTLEntry ioctl_entries
[] = {
5590 #define IOCTL(cmd, access, ...) \
5591 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5592 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5593 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5594 #define IOCTL_IGNORE(cmd) \
5595 { TARGET_ ## cmd, 0, #cmd },
5600 /* ??? Implement proper locking for ioctls. */
5601 /* do_ioctl() Must return target values and target errnos. */
5602 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
5604 const IOCTLEntry
*ie
;
5605 const argtype
*arg_type
;
5607 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
5613 if (ie
->target_cmd
== 0) {
5614 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
5615 return -TARGET_ENOSYS
;
5617 if (ie
->target_cmd
== cmd
)
5621 arg_type
= ie
->arg_type
;
5623 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
5626 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
5627 } else if (!ie
->host_cmd
) {
5628 /* Some architectures define BSD ioctls in their headers
5629 that are not implemented in Linux. */
5630 return -TARGET_ENOSYS
;
5633 switch(arg_type
[0]) {
5636 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
5640 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
5644 target_size
= thunk_type_size(arg_type
, 0);
5645 switch(ie
->access
) {
5647 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5648 if (!is_error(ret
)) {
5649 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5651 return -TARGET_EFAULT
;
5652 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5653 unlock_user(argptr
, arg
, target_size
);
5657 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5659 return -TARGET_EFAULT
;
5660 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5661 unlock_user(argptr
, arg
, 0);
5662 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5666 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5668 return -TARGET_EFAULT
;
5669 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5670 unlock_user(argptr
, arg
, 0);
5671 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5672 if (!is_error(ret
)) {
5673 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5675 return -TARGET_EFAULT
;
5676 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5677 unlock_user(argptr
, arg
, target_size
);
5683 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5684 (long)cmd
, arg_type
[0]);
5685 ret
= -TARGET_ENOSYS
;
5691 static const bitmask_transtbl iflag_tbl
[] = {
5692 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
5693 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
5694 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
5695 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
5696 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
5697 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
5698 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
5699 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
5700 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
5701 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
5702 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
5703 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
5704 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
5705 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
5709 static const bitmask_transtbl oflag_tbl
[] = {
5710 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
5711 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
5712 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
5713 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
5714 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
5715 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
5716 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
5717 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
5718 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
5719 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
5720 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
5721 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
5722 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
5723 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
5724 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
5725 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
5726 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
5727 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
5728 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
5729 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
5730 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
5731 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
5732 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
5733 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
5737 static const bitmask_transtbl cflag_tbl
[] = {
5738 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
5739 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
5740 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
5741 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
5742 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
5743 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
5744 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
5745 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
5746 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
5747 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
5748 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
5749 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
5750 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
5751 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
5752 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
5753 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
5754 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
5755 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
5756 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
5757 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
5758 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
5759 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
5760 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
5761 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
5762 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
5763 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
5764 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
5765 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
5766 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
5767 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
5768 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
5772 static const bitmask_transtbl lflag_tbl
[] = {
5773 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
5774 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
5775 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
5776 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
5777 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
5778 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
5779 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
5780 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
5781 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
5782 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
5783 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
5784 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
5785 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
5786 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
5787 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5791 static void target_to_host_termios (void *dst
, const void *src
)
5793 struct host_termios
*host
= dst
;
5794 const struct target_termios
*target
= src
;
5797 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
5799 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
5801 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
5803 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
5804 host
->c_line
= target
->c_line
;
5806 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
5807 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
5808 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
5809 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
5810 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
5811 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
5812 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
5813 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
5814 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
5815 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
5816 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
5817 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
5818 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
5819 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
5820 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
5821 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
5822 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
5823 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
5826 static void host_to_target_termios (void *dst
, const void *src
)
5828 struct target_termios
*target
= dst
;
5829 const struct host_termios
*host
= src
;
5832 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
5834 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
5836 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
5838 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
5839 target
->c_line
= host
->c_line
;
5841 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
5842 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
5843 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
5844 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
5845 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
5846 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
5847 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
5848 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
5849 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
5850 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
5851 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
5852 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
5853 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
5854 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
5855 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
5856 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
5857 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
5858 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
5861 static const StructEntry struct_termios_def
= {
5862 .convert
= { host_to_target_termios
, target_to_host_termios
},
5863 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
5864 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
5867 static bitmask_transtbl mmap_flags_tbl
[] = {
5868 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
5869 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
5870 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
5871 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
5872 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
5873 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
5874 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
5875 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
5876 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
, MAP_NORESERVE
,
5881 #if defined(TARGET_I386)
5883 /* NOTE: there is really one LDT for all the threads */
5884 static uint8_t *ldt_table
;
5886 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
5893 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
5894 if (size
> bytecount
)
5896 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
5898 return -TARGET_EFAULT
;
5899 /* ??? Should this by byteswapped? */
5900 memcpy(p
, ldt_table
, size
);
5901 unlock_user(p
, ptr
, size
);
5905 /* XXX: add locking support */
5906 static abi_long
write_ldt(CPUX86State
*env
,
5907 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
5909 struct target_modify_ldt_ldt_s ldt_info
;
5910 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5911 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5912 int seg_not_present
, useable
, lm
;
5913 uint32_t *lp
, entry_1
, entry_2
;
5915 if (bytecount
!= sizeof(ldt_info
))
5916 return -TARGET_EINVAL
;
5917 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
5918 return -TARGET_EFAULT
;
5919 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5920 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5921 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5922 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5923 unlock_user_struct(target_ldt_info
, ptr
, 0);
5925 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
5926 return -TARGET_EINVAL
;
5927 seg_32bit
= ldt_info
.flags
& 1;
5928 contents
= (ldt_info
.flags
>> 1) & 3;
5929 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5930 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5931 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5932 useable
= (ldt_info
.flags
>> 6) & 1;
5936 lm
= (ldt_info
.flags
>> 7) & 1;
5938 if (contents
== 3) {
5940 return -TARGET_EINVAL
;
5941 if (seg_not_present
== 0)
5942 return -TARGET_EINVAL
;
5944 /* allocate the LDT */
5946 env
->ldt
.base
= target_mmap(0,
5947 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
5948 PROT_READ
|PROT_WRITE
,
5949 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
5950 if (env
->ldt
.base
== -1)
5951 return -TARGET_ENOMEM
;
5952 memset(g2h(env
->ldt
.base
), 0,
5953 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
5954 env
->ldt
.limit
= 0xffff;
5955 ldt_table
= g2h(env
->ldt
.base
);
5958 /* NOTE: same code as Linux kernel */
5959 /* Allow LDTs to be cleared by the user. */
5960 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5963 read_exec_only
== 1 &&
5965 limit_in_pages
== 0 &&
5966 seg_not_present
== 1 &&
5974 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5975 (ldt_info
.limit
& 0x0ffff);
5976 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5977 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5978 (ldt_info
.limit
& 0xf0000) |
5979 ((read_exec_only
^ 1) << 9) |
5981 ((seg_not_present
^ 1) << 15) |
5983 (limit_in_pages
<< 23) |
5987 entry_2
|= (useable
<< 20);
5989 /* Install the new entry ... */
5991 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
5992 lp
[0] = tswap32(entry_1
);
5993 lp
[1] = tswap32(entry_2
);
5997 /* specific and weird i386 syscalls */
5998 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
5999 unsigned long bytecount
)
6005 ret
= read_ldt(ptr
, bytecount
);
6008 ret
= write_ldt(env
, ptr
, bytecount
, 1);
6011 ret
= write_ldt(env
, ptr
, bytecount
, 0);
6014 ret
= -TARGET_ENOSYS
;
6020 #if defined(TARGET_I386) && defined(TARGET_ABI32)
6021 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6023 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
6024 struct target_modify_ldt_ldt_s ldt_info
;
6025 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6026 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
6027 int seg_not_present
, useable
, lm
;
6028 uint32_t *lp
, entry_1
, entry_2
;
6031 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6032 if (!target_ldt_info
)
6033 return -TARGET_EFAULT
;
6034 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
6035 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
6036 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
6037 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
6038 if (ldt_info
.entry_number
== -1) {
6039 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
6040 if (gdt_table
[i
] == 0) {
6041 ldt_info
.entry_number
= i
;
6042 target_ldt_info
->entry_number
= tswap32(i
);
6047 unlock_user_struct(target_ldt_info
, ptr
, 1);
6049 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
6050 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
6051 return -TARGET_EINVAL
;
6052 seg_32bit
= ldt_info
.flags
& 1;
6053 contents
= (ldt_info
.flags
>> 1) & 3;
6054 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
6055 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
6056 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
6057 useable
= (ldt_info
.flags
>> 6) & 1;
6061 lm
= (ldt_info
.flags
>> 7) & 1;
6064 if (contents
== 3) {
6065 if (seg_not_present
== 0)
6066 return -TARGET_EINVAL
;
6069 /* NOTE: same code as Linux kernel */
6070 /* Allow LDTs to be cleared by the user. */
6071 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
6072 if ((contents
== 0 &&
6073 read_exec_only
== 1 &&
6075 limit_in_pages
== 0 &&
6076 seg_not_present
== 1 &&
6084 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
6085 (ldt_info
.limit
& 0x0ffff);
6086 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
6087 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
6088 (ldt_info
.limit
& 0xf0000) |
6089 ((read_exec_only
^ 1) << 9) |
6091 ((seg_not_present
^ 1) << 15) |
6093 (limit_in_pages
<< 23) |
6098 /* Install the new entry ... */
6100 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
6101 lp
[0] = tswap32(entry_1
);
6102 lp
[1] = tswap32(entry_2
);
6106 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6108 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6109 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
6110 uint32_t base_addr
, limit
, flags
;
6111 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
6112 int seg_not_present
, useable
, lm
;
6113 uint32_t *lp
, entry_1
, entry_2
;
6115 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6116 if (!target_ldt_info
)
6117 return -TARGET_EFAULT
;
6118 idx
= tswap32(target_ldt_info
->entry_number
);
6119 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
6120 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
6121 unlock_user_struct(target_ldt_info
, ptr
, 1);
6122 return -TARGET_EINVAL
;
6124 lp
= (uint32_t *)(gdt_table
+ idx
);
6125 entry_1
= tswap32(lp
[0]);
6126 entry_2
= tswap32(lp
[1]);
6128 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
6129 contents
= (entry_2
>> 10) & 3;
6130 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
6131 seg_32bit
= (entry_2
>> 22) & 1;
6132 limit_in_pages
= (entry_2
>> 23) & 1;
6133 useable
= (entry_2
>> 20) & 1;
6137 lm
= (entry_2
>> 21) & 1;
6139 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
6140 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
6141 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
6142 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
6143 base_addr
= (entry_1
>> 16) |
6144 (entry_2
& 0xff000000) |
6145 ((entry_2
& 0xff) << 16);
6146 target_ldt_info
->base_addr
= tswapal(base_addr
);
6147 target_ldt_info
->limit
= tswap32(limit
);
6148 target_ldt_info
->flags
= tswap32(flags
);
6149 unlock_user_struct(target_ldt_info
, ptr
, 1);
6152 #endif /* TARGET_I386 && TARGET_ABI32 */
6154 #ifndef TARGET_ABI32
6155 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
6162 case TARGET_ARCH_SET_GS
:
6163 case TARGET_ARCH_SET_FS
:
6164 if (code
== TARGET_ARCH_SET_GS
)
6168 cpu_x86_load_seg(env
, idx
, 0);
6169 env
->segs
[idx
].base
= addr
;
6171 case TARGET_ARCH_GET_GS
:
6172 case TARGET_ARCH_GET_FS
:
6173 if (code
== TARGET_ARCH_GET_GS
)
6177 val
= env
->segs
[idx
].base
;
6178 if (put_user(val
, addr
, abi_ulong
))
6179 ret
= -TARGET_EFAULT
;
6182 ret
= -TARGET_EINVAL
;
6189 #endif /* defined(TARGET_I386) */
6191 #define NEW_STACK_SIZE 0x40000
6194 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
6197 pthread_mutex_t mutex
;
6198 pthread_cond_t cond
;
6201 abi_ulong child_tidptr
;
6202 abi_ulong parent_tidptr
;
6206 static void *clone_func(void *arg
)
6208 new_thread_info
*info
= arg
;
6213 rcu_register_thread();
6215 cpu
= ENV_GET_CPU(env
);
6217 ts
= (TaskState
*)cpu
->opaque
;
6218 info
->tid
= gettid();
6219 cpu
->host_tid
= info
->tid
;
6221 if (info
->child_tidptr
)
6222 put_user_u32(info
->tid
, info
->child_tidptr
);
6223 if (info
->parent_tidptr
)
6224 put_user_u32(info
->tid
, info
->parent_tidptr
);
6225 /* Enable signals. */
6226 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
6227 /* Signal to the parent that we're ready. */
6228 pthread_mutex_lock(&info
->mutex
);
6229 pthread_cond_broadcast(&info
->cond
);
6230 pthread_mutex_unlock(&info
->mutex
);
6231 /* Wait until the parent has finshed initializing the tls state. */
6232 pthread_mutex_lock(&clone_lock
);
6233 pthread_mutex_unlock(&clone_lock
);
6239 /* do_fork() Must return host values and target errnos (unlike most
6240 do_*() functions). */
6241 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
6242 abi_ulong parent_tidptr
, target_ulong newtls
,
6243 abi_ulong child_tidptr
)
6245 CPUState
*cpu
= ENV_GET_CPU(env
);
6249 CPUArchState
*new_env
;
6252 flags
&= ~CLONE_IGNORED_FLAGS
;
6254 /* Emulate vfork() with fork() */
6255 if (flags
& CLONE_VFORK
)
6256 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
6258 if (flags
& CLONE_VM
) {
6259 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
6260 new_thread_info info
;
6261 pthread_attr_t attr
;
6263 if (((flags
& CLONE_THREAD_FLAGS
) != CLONE_THREAD_FLAGS
) ||
6264 (flags
& CLONE_INVALID_THREAD_FLAGS
)) {
6265 return -TARGET_EINVAL
;
6268 ts
= g_new0(TaskState
, 1);
6269 init_task_state(ts
);
6270 /* we create a new CPU instance. */
6271 new_env
= cpu_copy(env
);
6272 /* Init regs that differ from the parent. */
6273 cpu_clone_regs(new_env
, newsp
);
6274 new_cpu
= ENV_GET_CPU(new_env
);
6275 new_cpu
->opaque
= ts
;
6276 ts
->bprm
= parent_ts
->bprm
;
6277 ts
->info
= parent_ts
->info
;
6278 ts
->signal_mask
= parent_ts
->signal_mask
;
6280 if (flags
& CLONE_CHILD_CLEARTID
) {
6281 ts
->child_tidptr
= child_tidptr
;
6284 if (flags
& CLONE_SETTLS
) {
6285 cpu_set_tls (new_env
, newtls
);
6288 /* Grab a mutex so that thread setup appears atomic. */
6289 pthread_mutex_lock(&clone_lock
);
6291 memset(&info
, 0, sizeof(info
));
6292 pthread_mutex_init(&info
.mutex
, NULL
);
6293 pthread_mutex_lock(&info
.mutex
);
6294 pthread_cond_init(&info
.cond
, NULL
);
6296 if (flags
& CLONE_CHILD_SETTID
) {
6297 info
.child_tidptr
= child_tidptr
;
6299 if (flags
& CLONE_PARENT_SETTID
) {
6300 info
.parent_tidptr
= parent_tidptr
;
6303 ret
= pthread_attr_init(&attr
);
6304 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
6305 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
6306 /* It is not safe to deliver signals until the child has finished
6307 initializing, so temporarily block all signals. */
6308 sigfillset(&sigmask
);
6309 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
6311 /* If this is our first additional thread, we need to ensure we
6312 * generate code for parallel execution and flush old translations.
6314 if (!parallel_cpus
) {
6315 parallel_cpus
= true;
6319 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
6320 /* TODO: Free new CPU state if thread creation failed. */
6322 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
6323 pthread_attr_destroy(&attr
);
6325 /* Wait for the child to initialize. */
6326 pthread_cond_wait(&info
.cond
, &info
.mutex
);
6331 pthread_mutex_unlock(&info
.mutex
);
6332 pthread_cond_destroy(&info
.cond
);
6333 pthread_mutex_destroy(&info
.mutex
);
6334 pthread_mutex_unlock(&clone_lock
);
6336 /* if no CLONE_VM, we consider it is a fork */
6337 if (flags
& CLONE_INVALID_FORK_FLAGS
) {
6338 return -TARGET_EINVAL
;
6341 /* We can't support custom termination signals */
6342 if ((flags
& CSIGNAL
) != TARGET_SIGCHLD
) {
6343 return -TARGET_EINVAL
;
6346 if (block_signals()) {
6347 return -TARGET_ERESTARTSYS
;
6353 /* Child Process. */
6355 cpu_clone_regs(env
, newsp
);
6357 /* There is a race condition here. The parent process could
6358 theoretically read the TID in the child process before the child
6359 tid is set. This would require using either ptrace
6360 (not implemented) or having *_tidptr to point at a shared memory
6361 mapping. We can't repeat the spinlock hack used above because
6362 the child process gets its own copy of the lock. */
6363 if (flags
& CLONE_CHILD_SETTID
)
6364 put_user_u32(gettid(), child_tidptr
);
6365 if (flags
& CLONE_PARENT_SETTID
)
6366 put_user_u32(gettid(), parent_tidptr
);
6367 ts
= (TaskState
*)cpu
->opaque
;
6368 if (flags
& CLONE_SETTLS
)
6369 cpu_set_tls (env
, newtls
);
6370 if (flags
& CLONE_CHILD_CLEARTID
)
6371 ts
->child_tidptr
= child_tidptr
;
6379 /* warning : doesn't handle linux specific flags... */
6380 static int target_to_host_fcntl_cmd(int cmd
)
6383 case TARGET_F_DUPFD
:
6384 case TARGET_F_GETFD
:
6385 case TARGET_F_SETFD
:
6386 case TARGET_F_GETFL
:
6387 case TARGET_F_SETFL
:
6389 case TARGET_F_GETLK
:
6391 case TARGET_F_SETLK
:
6393 case TARGET_F_SETLKW
:
6395 case TARGET_F_GETOWN
:
6397 case TARGET_F_SETOWN
:
6399 case TARGET_F_GETSIG
:
6401 case TARGET_F_SETSIG
:
6403 #if TARGET_ABI_BITS == 32
6404 case TARGET_F_GETLK64
:
6406 case TARGET_F_SETLK64
:
6408 case TARGET_F_SETLKW64
:
6411 case TARGET_F_SETLEASE
:
6413 case TARGET_F_GETLEASE
:
6415 #ifdef F_DUPFD_CLOEXEC
6416 case TARGET_F_DUPFD_CLOEXEC
:
6417 return F_DUPFD_CLOEXEC
;
6419 case TARGET_F_NOTIFY
:
6422 case TARGET_F_GETOWN_EX
:
6426 case TARGET_F_SETOWN_EX
:
6430 case TARGET_F_SETPIPE_SZ
:
6431 return F_SETPIPE_SZ
;
6432 case TARGET_F_GETPIPE_SZ
:
6433 return F_GETPIPE_SZ
;
6436 return -TARGET_EINVAL
;
6438 return -TARGET_EINVAL
;
6441 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
6442 static const bitmask_transtbl flock_tbl
[] = {
6443 TRANSTBL_CONVERT(F_RDLCK
),
6444 TRANSTBL_CONVERT(F_WRLCK
),
6445 TRANSTBL_CONVERT(F_UNLCK
),
6446 TRANSTBL_CONVERT(F_EXLCK
),
6447 TRANSTBL_CONVERT(F_SHLCK
),
6451 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
6452 abi_ulong target_flock_addr
)
6454 struct target_flock
*target_fl
;
6457 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6458 return -TARGET_EFAULT
;
6461 __get_user(l_type
, &target_fl
->l_type
);
6462 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6463 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6464 __get_user(fl
->l_start
, &target_fl
->l_start
);
6465 __get_user(fl
->l_len
, &target_fl
->l_len
);
6466 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6467 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6471 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
6472 const struct flock64
*fl
)
6474 struct target_flock
*target_fl
;
6477 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6478 return -TARGET_EFAULT
;
6481 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6482 __put_user(l_type
, &target_fl
->l_type
);
6483 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6484 __put_user(fl
->l_start
, &target_fl
->l_start
);
6485 __put_user(fl
->l_len
, &target_fl
->l_len
);
6486 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6487 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6491 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
6492 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
6494 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6495 static inline abi_long
copy_from_user_eabi_flock64(struct flock64
*fl
,
6496 abi_ulong target_flock_addr
)
6498 struct target_eabi_flock64
*target_fl
;
6501 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6502 return -TARGET_EFAULT
;
6505 __get_user(l_type
, &target_fl
->l_type
);
6506 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6507 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6508 __get_user(fl
->l_start
, &target_fl
->l_start
);
6509 __get_user(fl
->l_len
, &target_fl
->l_len
);
6510 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6511 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6515 static inline abi_long
copy_to_user_eabi_flock64(abi_ulong target_flock_addr
,
6516 const struct flock64
*fl
)
6518 struct target_eabi_flock64
*target_fl
;
6521 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6522 return -TARGET_EFAULT
;
6525 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6526 __put_user(l_type
, &target_fl
->l_type
);
6527 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6528 __put_user(fl
->l_start
, &target_fl
->l_start
);
6529 __put_user(fl
->l_len
, &target_fl
->l_len
);
6530 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6531 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6536 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
6537 abi_ulong target_flock_addr
)
6539 struct target_flock64
*target_fl
;
6542 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6543 return -TARGET_EFAULT
;
6546 __get_user(l_type
, &target_fl
->l_type
);
6547 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6548 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6549 __get_user(fl
->l_start
, &target_fl
->l_start
);
6550 __get_user(fl
->l_len
, &target_fl
->l_len
);
6551 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6552 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6556 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
6557 const struct flock64
*fl
)
6559 struct target_flock64
*target_fl
;
6562 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6563 return -TARGET_EFAULT
;
6566 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6567 __put_user(l_type
, &target_fl
->l_type
);
6568 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6569 __put_user(fl
->l_start
, &target_fl
->l_start
);
6570 __put_user(fl
->l_len
, &target_fl
->l_len
);
6571 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6572 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6576 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
6578 struct flock64 fl64
;
6580 struct f_owner_ex fox
;
6581 struct target_f_owner_ex
*target_fox
;
6584 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
6586 if (host_cmd
== -TARGET_EINVAL
)
6590 case TARGET_F_GETLK
:
6591 ret
= copy_from_user_flock(&fl64
, arg
);
6595 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6597 ret
= copy_to_user_flock(arg
, &fl64
);
6601 case TARGET_F_SETLK
:
6602 case TARGET_F_SETLKW
:
6603 ret
= copy_from_user_flock(&fl64
, arg
);
6607 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6610 case TARGET_F_GETLK64
:
6611 ret
= copy_from_user_flock64(&fl64
, arg
);
6615 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6617 ret
= copy_to_user_flock64(arg
, &fl64
);
6620 case TARGET_F_SETLK64
:
6621 case TARGET_F_SETLKW64
:
6622 ret
= copy_from_user_flock64(&fl64
, arg
);
6626 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6629 case TARGET_F_GETFL
:
6630 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6632 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
6636 case TARGET_F_SETFL
:
6637 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
6638 target_to_host_bitmask(arg
,
6643 case TARGET_F_GETOWN_EX
:
6644 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6646 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
6647 return -TARGET_EFAULT
;
6648 target_fox
->type
= tswap32(fox
.type
);
6649 target_fox
->pid
= tswap32(fox
.pid
);
6650 unlock_user_struct(target_fox
, arg
, 1);
6656 case TARGET_F_SETOWN_EX
:
6657 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
6658 return -TARGET_EFAULT
;
6659 fox
.type
= tswap32(target_fox
->type
);
6660 fox
.pid
= tswap32(target_fox
->pid
);
6661 unlock_user_struct(target_fox
, arg
, 0);
6662 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6666 case TARGET_F_SETOWN
:
6667 case TARGET_F_GETOWN
:
6668 case TARGET_F_SETSIG
:
6669 case TARGET_F_GETSIG
:
6670 case TARGET_F_SETLEASE
:
6671 case TARGET_F_GETLEASE
:
6672 case TARGET_F_SETPIPE_SZ
:
6673 case TARGET_F_GETPIPE_SZ
:
6674 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6678 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
#ifdef USE_UID16

/* 16-bit UID ABIs: clamp 32-bit host ids into 16-bit guest range. */
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

/* Widen a 16-bit guest id, preserving the -1 "unchanged" sentinel. */
static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}

static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */

/* 32-bit UID ABIs: ids pass through unchanged. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
6750 /* We must do direct syscalls for setting UID/GID, because we want to
6751 * implement the Linux system call semantics of "change only for this thread",
6752 * not the libc/POSIX semantics of "change for all threads in process".
6753 * (See http://ewontfix.com/17/ for more details.)
6754 * We use the 32-bit version of the syscalls if present; if it is not
6755 * then either the host architecture supports 32-bit UIDs natively with
6756 * the standard syscall, or the 16-bit UID is the best we can do.
6758 #ifdef __NR_setuid32
6759 #define __NR_sys_setuid __NR_setuid32
6761 #define __NR_sys_setuid __NR_setuid
6763 #ifdef __NR_setgid32
6764 #define __NR_sys_setgid __NR_setgid32
6766 #define __NR_sys_setgid __NR_setgid
6768 #ifdef __NR_setresuid32
6769 #define __NR_sys_setresuid __NR_setresuid32
6771 #define __NR_sys_setresuid __NR_setresuid
6773 #ifdef __NR_setresgid32
6774 #define __NR_sys_setresgid __NR_setresgid32
6776 #define __NR_sys_setresgid __NR_setresgid
6779 _syscall1(int, sys_setuid
, uid_t
, uid
)
6780 _syscall1(int, sys_setgid
, gid_t
, gid
)
6781 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
6782 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
6784 void syscall_init(void)
6787 const argtype
*arg_type
;
6791 thunk_init(STRUCT_MAX
);
6793 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6794 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6795 #include "syscall_types.h"
6797 #undef STRUCT_SPECIAL
6799 /* Build target_to_host_errno_table[] table from
6800 * host_to_target_errno_table[]. */
6801 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
6802 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
6805 /* we patch the ioctl size if necessary. We rely on the fact that
6806 no ioctl has all the bits at '1' in the size field */
6808 while (ie
->target_cmd
!= 0) {
6809 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
6810 TARGET_IOC_SIZEMASK
) {
6811 arg_type
= ie
->arg_type
;
6812 if (arg_type
[0] != TYPE_PTR
) {
6813 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
6818 size
= thunk_type_size(arg_type
, 0);
6819 ie
->target_cmd
= (ie
->target_cmd
&
6820 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
6821 (size
<< TARGET_IOC_SIZESHIFT
);
6824 /* automatic consistency check if same arch */
6825 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6826 (defined(__x86_64__) && defined(TARGET_X86_64))
6827 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
6828 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6829 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#if TARGET_ABI_BITS == 32
/* Combine two 32-bit guest register words into a 64-bit offset; word
 * order depends on guest endianness.  */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* On 64-bit ABIs the offset already fits in one register. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* truncate64: some 32-bit ABIs pass 64-bit values in aligned register
 * pairs, shifting the argument slots by one.  */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair alignment handling as truncate64. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
6880 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
6881 abi_ulong target_addr
)
6883 struct target_timespec
*target_ts
;
6885 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
6886 return -TARGET_EFAULT
;
6887 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6888 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6889 unlock_user_struct(target_ts
, target_addr
, 0);
6893 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
6894 struct timespec
*host_ts
)
6896 struct target_timespec
*target_ts
;
6898 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
6899 return -TARGET_EFAULT
;
6900 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6901 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6902 unlock_user_struct(target_ts
, target_addr
, 1);
6906 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
6907 abi_ulong target_addr
)
6909 struct target_itimerspec
*target_itspec
;
6911 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
6912 return -TARGET_EFAULT
;
6915 host_itspec
->it_interval
.tv_sec
=
6916 tswapal(target_itspec
->it_interval
.tv_sec
);
6917 host_itspec
->it_interval
.tv_nsec
=
6918 tswapal(target_itspec
->it_interval
.tv_nsec
);
6919 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
6920 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
6922 unlock_user_struct(target_itspec
, target_addr
, 1);
6926 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
6927 struct itimerspec
*host_its
)
6929 struct target_itimerspec
*target_itspec
;
6931 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
6932 return -TARGET_EFAULT
;
6935 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
6936 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
6938 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
6939 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
6941 unlock_user_struct(target_itspec
, target_addr
, 0);
6945 static inline abi_long
target_to_host_timex(struct timex
*host_tx
,
6946 abi_long target_addr
)
6948 struct target_timex
*target_tx
;
6950 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
6951 return -TARGET_EFAULT
;
6954 __get_user(host_tx
->modes
, &target_tx
->modes
);
6955 __get_user(host_tx
->offset
, &target_tx
->offset
);
6956 __get_user(host_tx
->freq
, &target_tx
->freq
);
6957 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6958 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
6959 __get_user(host_tx
->status
, &target_tx
->status
);
6960 __get_user(host_tx
->constant
, &target_tx
->constant
);
6961 __get_user(host_tx
->precision
, &target_tx
->precision
);
6962 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6963 __get_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6964 __get_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6965 __get_user(host_tx
->tick
, &target_tx
->tick
);
6966 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6967 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
6968 __get_user(host_tx
->shift
, &target_tx
->shift
);
6969 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
6970 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
6971 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
6972 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6973 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6974 __get_user(host_tx
->tai
, &target_tx
->tai
);
6976 unlock_user_struct(target_tx
, target_addr
, 0);
6980 static inline abi_long
host_to_target_timex(abi_long target_addr
,
6981 struct timex
*host_tx
)
6983 struct target_timex
*target_tx
;
6985 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
6986 return -TARGET_EFAULT
;
6989 __put_user(host_tx
->modes
, &target_tx
->modes
);
6990 __put_user(host_tx
->offset
, &target_tx
->offset
);
6991 __put_user(host_tx
->freq
, &target_tx
->freq
);
6992 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6993 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
6994 __put_user(host_tx
->status
, &target_tx
->status
);
6995 __put_user(host_tx
->constant
, &target_tx
->constant
);
6996 __put_user(host_tx
->precision
, &target_tx
->precision
);
6997 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6998 __put_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6999 __put_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
7000 __put_user(host_tx
->tick
, &target_tx
->tick
);
7001 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7002 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
7003 __put_user(host_tx
->shift
, &target_tx
->shift
);
7004 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
7005 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7006 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7007 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7008 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7009 __put_user(host_tx
->tai
, &target_tx
->tai
);
7011 unlock_user_struct(target_tx
, target_addr
, 1);
7016 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
7017 abi_ulong target_addr
)
7019 struct target_sigevent
*target_sevp
;
7021 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
7022 return -TARGET_EFAULT
;
7025 /* This union is awkward on 64 bit systems because it has a 32 bit
7026 * integer and a pointer in it; we follow the conversion approach
7027 * used for handling sigval types in signal.c so the guest should get
7028 * the correct value back even if we did a 64 bit byteswap and it's
7029 * using the 32 bit integer.
7031 host_sevp
->sigev_value
.sival_ptr
=
7032 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
7033 host_sevp
->sigev_signo
=
7034 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
7035 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
7036 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
7038 unlock_user_struct(target_sevp
, target_addr
, 1);
#if defined(TARGET_NR_mlockall)
/* Translate guest mlockall() flag bits to the host's MCL_* values. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }

    return result;
}
#endif
7057 static inline abi_long
host_to_target_stat64(void *cpu_env
,
7058 abi_ulong target_addr
,
7059 struct stat
*host_st
)
7061 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7062 if (((CPUARMState
*)cpu_env
)->eabi
) {
7063 struct target_eabi_stat64
*target_st
;
7065 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
7066 return -TARGET_EFAULT
;
7067 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
7068 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
7069 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
7070 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7071 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
7073 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
7074 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
7075 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
7076 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
7077 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
7078 __put_user(host_st
->st_size
, &target_st
->st_size
);
7079 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
7080 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
7081 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
7082 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
7083 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
7084 unlock_user_struct(target_st
, target_addr
, 1);
7088 #if defined(TARGET_HAS_STRUCT_STAT64)
7089 struct target_stat64
*target_st
;
7091 struct target_stat
*target_st
;
7094 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
7095 return -TARGET_EFAULT
;
7096 memset(target_st
, 0, sizeof(*target_st
));
7097 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
7098 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
7099 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7100 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
7102 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
7103 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
7104 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
7105 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
7106 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
7107 /* XXX: better use of kernel struct */
7108 __put_user(host_st
->st_size
, &target_st
->st_size
);
7109 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
7110 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
7111 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
7112 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
7113 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
7114 unlock_user_struct(target_st
, target_addr
, 1);
7120 /* ??? Using host futex calls even when target atomic operations
7121 are not really atomic probably breaks things. However implementing
7122 futexes locally would make futexes shared between multiple processes
7123 tricky. However they're probably useless because guest atomic
7124 operations won't work either. */
7125 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
7126 target_ulong uaddr2
, int val3
)
7128 struct timespec ts
, *pts
;
7131 /* ??? We assume FUTEX_* constants are the same on both host
7133 #ifdef FUTEX_CMD_MASK
7134 base_op
= op
& FUTEX_CMD_MASK
;
7140 case FUTEX_WAIT_BITSET
:
7143 target_to_host_timespec(pts
, timeout
);
7147 return get_errno(safe_futex(g2h(uaddr
), op
, tswap32(val
),
7150 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
7152 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
7154 case FUTEX_CMP_REQUEUE
:
7156 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7157 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7158 But the prototype takes a `struct timespec *'; insert casts
7159 to satisfy the compiler. We do not need to tswap TIMEOUT
7160 since it's not compared to guest memory. */
7161 pts
= (struct timespec
*)(uintptr_t) timeout
;
7162 return get_errno(safe_futex(g2h(uaddr
), op
, val
, pts
,
7164 (base_op
== FUTEX_CMP_REQUEUE
7168 return -TARGET_ENOSYS
;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate name_to_handle_at(2): fill in the guest's file_handle buffer
 * and mount id.  Returns the syscall result or a target errno.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate open_by_handle_at(2): copy the guest's opaque file handle to
 * a host buffer and translate the open flags.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
7259 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7261 /* signalfd siginfo conversion */
7264 host_to_target_signalfd_siginfo(struct signalfd_siginfo
*tinfo
,
7265 const struct signalfd_siginfo
*info
)
7267 int sig
= host_to_target_signal(info
->ssi_signo
);
7269 /* linux/signalfd.h defines a ssi_addr_lsb
7270 * not defined in sys/signalfd.h but used by some kernels
7273 #ifdef BUS_MCEERR_AO
7274 if (tinfo
->ssi_signo
== SIGBUS
&&
7275 (tinfo
->ssi_code
== BUS_MCEERR_AR
||
7276 tinfo
->ssi_code
== BUS_MCEERR_AO
)) {
7277 uint16_t *ssi_addr_lsb
= (uint16_t *)(&info
->ssi_addr
+ 1);
7278 uint16_t *tssi_addr_lsb
= (uint16_t *)(&tinfo
->ssi_addr
+ 1);
7279 *tssi_addr_lsb
= tswap16(*ssi_addr_lsb
);
7283 tinfo
->ssi_signo
= tswap32(sig
);
7284 tinfo
->ssi_errno
= tswap32(tinfo
->ssi_errno
);
7285 tinfo
->ssi_code
= tswap32(info
->ssi_code
);
7286 tinfo
->ssi_pid
= tswap32(info
->ssi_pid
);
7287 tinfo
->ssi_uid
= tswap32(info
->ssi_uid
);
7288 tinfo
->ssi_fd
= tswap32(info
->ssi_fd
);
7289 tinfo
->ssi_tid
= tswap32(info
->ssi_tid
);
7290 tinfo
->ssi_band
= tswap32(info
->ssi_band
);
7291 tinfo
->ssi_overrun
= tswap32(info
->ssi_overrun
);
7292 tinfo
->ssi_trapno
= tswap32(info
->ssi_trapno
);
7293 tinfo
->ssi_status
= tswap32(info
->ssi_status
);
7294 tinfo
->ssi_int
= tswap32(info
->ssi_int
);
7295 tinfo
->ssi_ptr
= tswap64(info
->ssi_ptr
);
7296 tinfo
->ssi_utime
= tswap64(info
->ssi_utime
);
7297 tinfo
->ssi_stime
= tswap64(info
->ssi_stime
);
7298 tinfo
->ssi_addr
= tswap64(info
->ssi_addr
);
/*
 * fd_trans hook: convert a buffer of signalfd_siginfo records, as read()
 * from a host signalfd, into guest format.  Each record is converted in
 * place (same pointer passed as source and destination).
 * NOTE(review): the function's braces/return are missing from this
 * fragmentary listing.
 */
7301 static abi_long
host_to_target_data_signalfd(void *buf
, size_t len
)
/* Walk the buffer one signalfd_siginfo record at a time. */
7305 for (i
= 0; i
< len
; i
+= sizeof(struct signalfd_siginfo
)) {
7306 host_to_target_signalfd_siginfo(buf
+ i
, buf
+ i
);
/*
 * Translator registered on signalfd file descriptors so data read from
 * them is byte-swapped into guest format (see do_signalfd4).
 */
7312 static TargetFdTrans target_signalfd_trans
= {
7313 .host_to_target_data
= host_to_target_data_signalfd
,
/*
 * Implement the guest signalfd4() syscall: validate flags, copy the guest
 * signal mask in, call the host signalfd(), and register the read-side
 * data translator on the resulting descriptor.
 * Returns a host fd wrapped by get_errno(), or a -TARGET_* error.
 * NOTE(review): fragmentary listing — some braces and the final return
 * are missing from the text below.
 */
7316 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
7319 target_sigset_t
*target_mask
;
/* Only the guest equivalents of SFD_NONBLOCK/SFD_CLOEXEC are accepted. */
7323 if (flags
& ~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
)) {
7324 return -TARGET_EINVAL
;
/* Map the guest sigset into guest memory for reading. */
7326 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
7327 return -TARGET_EFAULT
;
7330 target_to_host_sigset(&host_mask
, target_mask
);
/* Translate the O_* style flag bits to host values. */
7332 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
7334 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
/* On success, install the signalfd data translator on the new fd. */
7336 fd_trans_register(ret
, &target_signalfd_trans
);
7339 unlock_user_struct(target_mask
, mask
, 0);
7345 /* Map host to target signal numbers for the wait family of syscalls.
7346 Assume all other status bits are the same. */
7347 int host_to_target_waitstatus(int status
)
/* Terminated by signal: translate the low 7 signal bits, keep the rest
 * (core-dump flag etc.) unchanged. */
7349 if (WIFSIGNALED(status
)) {
7350 return host_to_target_signal(WTERMSIG(status
)) | (status
& ~0x7f);
/* Stopped: the stop signal lives in bits 8-15. */
7352 if (WIFSTOPPED(status
)) {
7353 return (host_to_target_signal(WSTOPSIG(status
)) << 8)
/*
 * Emulate /proc/self/cmdline: stream the host file into fd, but skip the
 * first NUL-terminated string (the path to the qemu binary itself) so the
 * guest sees its own argv.  Returns the result of closing the source fd.
 * NOTE(review): fragmentary listing — loop structure and error-handling
 * lines are missing from the text below.
 */
7359 static int open_self_cmdline(void *cpu_env
, int fd
)
7362 bool word_skipped
= false;
7364 fd_orig
= open("/proc/self/cmdline", O_RDONLY
);
/* Read the host cmdline in buf-sized chunks. */
7374 nb_read
= read(fd_orig
, buf
, sizeof(buf
));
7377 fd_orig
= close(fd_orig
);
7380 } else if (nb_read
== 0) {
7384 if (!word_skipped
) {
7385 /* Skip the first string, which is the path to qemu-*-static
7386 instead of the actual command. */
7387 cp_buf
= memchr(buf
, 0, nb_read
);
7389 /* Null byte found, skip one string */
/* Drop the bytes up to and including that NUL from this chunk. */
7391 nb_read
-= cp_buf
- buf
;
7392 word_skipped
= true;
/* Forward the (possibly trimmed) chunk to the caller's fd; a short
 * write is treated as an error. */
7397 if (write(fd
, cp_buf
, nb_read
) != nb_read
) {
7406 return close(fd_orig
);
/*
 * Emulate /proc/self/maps: parse each line of the host maps file, keep
 * only mappings that are valid in the guest address space, rewrite the
 * addresses with h2g(), and label the guest stack region " [stack]".
 * NOTE(review): fragmentary listing — some braces/continue paths are
 * missing from the text below.
 */
7409 static int open_self_maps(void *cpu_env
, int fd
)
7411 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7412 TaskState
*ts
= cpu
->opaque
;
7418 fp
= fopen("/proc/self/maps", "r");
/* getline() returns -1 at EOF/error; line is (re)allocated by getline. */
7423 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7424 int fields
, dev_maj
, dev_min
, inode
;
7425 uint64_t min
, max
, offset
;
7426 char flag_r
, flag_w
, flag_x
, flag_p
;
7427 char path
[512] = "";
/* Parse "start-end perms offset dev_maj:dev_min inode [path]". */
7428 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
7429 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
7430 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
/* A maps line has 10 fields, or 11 when a path is present. */
7432 if ((fields
< 10) || (fields
> 11)) {
/* Only emit regions whose start address maps into the guest space;
 * clamp the end to the last valid guest address. */
7435 if (h2g_valid(min
)) {
7436 int flags
= page_get_flags(h2g(min
));
7437 max
= h2g_valid(max
- 1) ? max
: (uintptr_t)g2h(GUEST_ADDR_MAX
);
7438 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
/* Relabel the region holding the guest stack. */
7441 if (h2g(min
) == ts
->info
->stack_limit
) {
7442 pstrcpy(path
, sizeof(path
), " [stack]");
/* Emit the line with guest (h2g) addresses in target ABI format. */
7444 dprintf(fd
, TARGET_ABI_FMT_lx
"-" TARGET_ABI_FMT_lx
7445 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
7446 h2g(min
), h2g(max
- 1) + 1, flag_r
, flag_w
,
7447 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
7448 path
[0] ? " " : "", path
);
/*
 * Fabricate /proc/self/stat for the guest: 44 space-separated fields are
 * written to fd.  Field 1 is the command name from the guest's argv[0],
 * field 27 is the guest start-stack value; most other fields are emitted
 * as literal "0" placeholders.
 * NOTE(review): fragmentary listing — the branch for the first field and
 * several braces are missing from the text below; presumably the first
 * numeric branch emits the pid — confirm against full source.
 */
7458 static int open_self_stat(void *cpu_env
, int fd
)
7460 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7461 TaskState
*ts
= cpu
->opaque
;
7462 abi_ulong start_stack
= ts
->info
->start_stack
;
/* Emit each of the 44 stat fields in turn. */
7465 for (i
= 0; i
< 44; i
++) {
7473 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
/* Field 1: command name, parenthesised, taken from guest argv[0]. */
7474 } else if (i
== 1) {
7476 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
/* Field 27: startstack. */
7477 } else if (i
== 27) {
7480 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
7482 /* for the rest, there is MasterCard */
/* Zero placeholder; last field ends the line with '\n'. */
7483 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
/* Short write is treated as an error. */
7487 if (write(fd
, buf
, len
) != len
) {
/*
 * Emulate /proc/self/auxv: copy the guest's saved auxiliary vector (kept
 * on the guest stack, recorded in the TaskState image info) out to fd,
 * then rewind fd so the caller reads from the start.
 * NOTE(review): fragmentary listing — error paths and the return value
 * are missing from the text below.
 */
7495 static int open_self_auxv(void *cpu_env
, int fd
)
7497 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7498 TaskState
*ts
= cpu
->opaque
;
7499 abi_ulong auxv
= ts
->info
->saved_auxv
;
7500 abi_ulong len
= ts
->info
->auxv_len
;
7504 * Auxiliary vector is stored in target process stack.
7505 * read in whole auxv vector and copy it to file
/* Map the guest auxv region for reading. */
7507 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
7511 r
= write(fd
, ptr
, len
);
/* Reset the offset so subsequent reads see the data from the start. */
7518 lseek(fd
, 0, SEEK_SET
);
7519 unlock_user(ptr
, auxv
, len
);
/*
 * Return nonzero when filename refers to this process's own /proc entry
 * named `entry`: either "/proc/self/<entry>" or "/proc/<getpid()>/<entry>".
 * NOTE(review): fragmentary listing — the early "return 0" paths and
 * closing braces are missing from the text below.
 */
7525 static int is_proc_myself(const char *filename
, const char *entry
)
7527 if (!strncmp(filename
, "/proc/", strlen("/proc/"))) {
7528 filename
+= strlen("/proc/");
/* Accept the "self/" form directly. */
7529 if (!strncmp(filename
, "self/", strlen("self/"))) {
7530 filename
+= strlen("self/");
/* Otherwise accept a numeric pid directory matching our own pid. */
7531 } else if (*filename
>= '1' && *filename
<= '9') {
7533 snprintf(myself
, sizeof(myself
), "%d/", getpid());
7534 if (!strncmp(filename
, myself
, strlen(myself
))) {
7535 filename
+= strlen(myself
);
/* Finally the remainder must equal the requested entry exactly. */
7542 if (!strcmp(filename
, entry
)) {
7549 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/*
 * Exact full-path comparator used by do_openat()'s fakes[] table for
 * whole-path /proc entries (only compiled when host and guest endianness
 * differ — see the enclosing #if).
 */
7550 static int is_proc(const char *filename
, const char *entry
)
7552 return strcmp(filename
, entry
) == 0;
/*
 * Emulate /proc/net/route for guests of opposite endianness: copy the
 * host file to fd, byte-swapping the dest/gateway/mask address fields of
 * every route line.  The header line is copied through verbatim.
 * NOTE(review): fragmentary listing — error handling, fclose and the
 * return are missing from the text below.
 */
7555 static int open_net_route(void *cpu_env
, int fd
)
7562 fp
= fopen("/proc/net/route", "r");
/* First line is the column header: forward it unchanged. */
7569 read
= getline(&line
, &len
, fp
);
7570 dprintf(fd
, "%s", line
);
/* Re-emit each route entry with the 32-bit address fields swapped. */
7574 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7576 uint32_t dest
, gw
, mask
;
7577 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
7578 sscanf(line
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7579 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
7580 &mask
, &mtu
, &window
, &irtt
);
/* Only dest, gw and mask are raw network addresses needing a swap. */
7581 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7582 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
7583 metric
, tswap32(mask
), mtu
, window
, irtt
);
/*
 * openat() front-end that intercepts guest opens of emulated /proc files:
 *   - "/proc/self/exe" resolves to the real guest executable (via the
 *     AT_EXECFD auxval when available, else exec_path);
 *   - entries in the fakes[] table are generated on demand into a
 *     mkstemp() temporary file by their fill() callback;
 *   - everything else falls through to safe_openat() on the real path.
 * NOTE(review): fragmentary listing — cleanup of the temp file, error
 * paths and some braces are missing from the text below.
 */
7593 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
7596 const char *filename
;
/* fill() produces the file contents; cmp() matches the pathname. */
7597 int (*fill
)(void *cpu_env
, int fd
);
7598 int (*cmp
)(const char *s1
, const char *s2
);
7600 const struct fake_open
*fake_open
;
/* Table of emulated /proc entries, NULL-terminated. */
7601 static const struct fake_open fakes
[] = {
7602 { "maps", open_self_maps
, is_proc_myself
},
7603 { "stat", open_self_stat
, is_proc_myself
},
7604 { "auxv", open_self_auxv
, is_proc_myself
},
7605 { "cmdline", open_self_cmdline
, is_proc_myself
},
7606 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7607 { "/proc/net/route", open_net_route
, is_proc
},
7609 { NULL
, NULL
, NULL
}
/* /proc/self/exe must name the guest binary, not qemu itself. */
7612 if (is_proc_myself(pathname
, "exe")) {
7613 int execfd
= qemu_getauxval(AT_EXECFD
);
7614 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
/* Scan the table for an entry whose comparator matches pathname. */
7617 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
7618 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
7623 if (fake_open
->filename
) {
7625 char filename
[PATH_MAX
];
7628 /* create temporary file to map stat to */
7629 tmpdir
= getenv("TMPDIR");
7632 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
7633 fd
= mkstemp(filename
);
/* Generate the emulated contents into the temp file. */
7639 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
/* Rewind so the guest reads from the beginning. */
7645 lseek(fd
, 0, SEEK_SET
);
/* Not an emulated entry: open the real (path-prefixed) file. */
7650 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
7653 #define TIMER_MAGIC 0x0caf0000
7654 #define TIMER_MAGIC_MASK 0xffff0000
7656 /* Convert QEMU provided timer ID back to internal 16bit index format */
/*
 * Convert a QEMU-provided POSIX timer ID back to the internal 16-bit
 * table index: the upper 16 bits must carry TIMER_MAGIC, and the index
 * must be within g_posix_timers[].  Returns -TARGET_EINVAL on either
 * failure.
 * NOTE(review): fragmentary listing — the masking of the low bits and
 * the final return of the index are missing from the text below.
 */
7657 static target_timer_t
get_timer_id(abi_long arg
)
7659 target_timer_t timerid
= arg
;
/* Reject IDs that do not carry the magic tag in the high half. */
7661 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
7662 return -TARGET_EINVAL
;
/* Reject indices beyond the static timer table. */
7667 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
7668 return -TARGET_EINVAL
;
7674 /* do_syscall() should always have a single exit point at the end so
7675 that actions, such as logging of syscall results, can be performed.
7676 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7677 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
7678 abi_long arg2
, abi_long arg3
, abi_long arg4
,
7679 abi_long arg5
, abi_long arg6
, abi_long arg7
,
7682 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
7688 #if defined(DEBUG_ERESTARTSYS)
7689 /* Debug-only code for exercising the syscall-restart code paths
7690 * in the per-architecture cpu main loops: restart every syscall
7691 * the guest makes once before letting it through.
7698 return -TARGET_ERESTARTSYS
;
7704 gemu_log("syscall %d", num
);
7706 trace_guest_user_syscall(cpu
, num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
7708 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7711 case TARGET_NR_exit
:
7712 /* In old applications this may be used to implement _exit(2).
7713 However in threaded applications it is used for thread termination,
7714 and _exit_group is used for application termination.
7715 Do thread termination if we have more than one thread. */
7717 if (block_signals()) {
7718 ret
= -TARGET_ERESTARTSYS
;
7724 if (CPU_NEXT(first_cpu
)) {
7727 /* Remove the CPU from the list. */
7728 QTAILQ_REMOVE(&cpus
, cpu
, node
);
7733 if (ts
->child_tidptr
) {
7734 put_user_u32(0, ts
->child_tidptr
);
7735 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
7739 object_unref(OBJECT(cpu
));
7741 rcu_unregister_thread();
7749 gdb_exit(cpu_env
, arg1
);
7751 ret
= 0; /* avoid warning */
7753 case TARGET_NR_read
:
7757 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7759 ret
= get_errno(safe_read(arg1
, p
, arg3
));
7761 fd_trans_host_to_target_data(arg1
)) {
7762 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
7764 unlock_user(p
, arg2
, ret
);
7767 case TARGET_NR_write
:
7768 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
7770 ret
= get_errno(safe_write(arg1
, p
, arg3
));
7771 unlock_user(p
, arg2
, 0);
7773 #ifdef TARGET_NR_open
7774 case TARGET_NR_open
:
7775 if (!(p
= lock_user_string(arg1
)))
7777 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
7778 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
7780 fd_trans_unregister(ret
);
7781 unlock_user(p
, arg1
, 0);
7784 case TARGET_NR_openat
:
7785 if (!(p
= lock_user_string(arg2
)))
7787 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
7788 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
7790 fd_trans_unregister(ret
);
7791 unlock_user(p
, arg2
, 0);
7793 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7794 case TARGET_NR_name_to_handle_at
:
7795 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
7798 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7799 case TARGET_NR_open_by_handle_at
:
7800 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
7801 fd_trans_unregister(ret
);
7804 case TARGET_NR_close
:
7805 fd_trans_unregister(arg1
);
7806 ret
= get_errno(close(arg1
));
7811 #ifdef TARGET_NR_fork
7812 case TARGET_NR_fork
:
7813 ret
= get_errno(do_fork(cpu_env
, TARGET_SIGCHLD
, 0, 0, 0, 0));
7816 #ifdef TARGET_NR_waitpid
7817 case TARGET_NR_waitpid
:
7820 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
7821 if (!is_error(ret
) && arg2
&& ret
7822 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
7827 #ifdef TARGET_NR_waitid
7828 case TARGET_NR_waitid
:
7832 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
7833 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
7834 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
7836 host_to_target_siginfo(p
, &info
);
7837 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
7842 #ifdef TARGET_NR_creat /* not on alpha */
7843 case TARGET_NR_creat
:
7844 if (!(p
= lock_user_string(arg1
)))
7846 ret
= get_errno(creat(p
, arg2
));
7847 fd_trans_unregister(ret
);
7848 unlock_user(p
, arg1
, 0);
7851 #ifdef TARGET_NR_link
7852 case TARGET_NR_link
:
7855 p
= lock_user_string(arg1
);
7856 p2
= lock_user_string(arg2
);
7858 ret
= -TARGET_EFAULT
;
7860 ret
= get_errno(link(p
, p2
));
7861 unlock_user(p2
, arg2
, 0);
7862 unlock_user(p
, arg1
, 0);
7866 #if defined(TARGET_NR_linkat)
7867 case TARGET_NR_linkat
:
7872 p
= lock_user_string(arg2
);
7873 p2
= lock_user_string(arg4
);
7875 ret
= -TARGET_EFAULT
;
7877 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
7878 unlock_user(p
, arg2
, 0);
7879 unlock_user(p2
, arg4
, 0);
7883 #ifdef TARGET_NR_unlink
7884 case TARGET_NR_unlink
:
7885 if (!(p
= lock_user_string(arg1
)))
7887 ret
= get_errno(unlink(p
));
7888 unlock_user(p
, arg1
, 0);
7891 #if defined(TARGET_NR_unlinkat)
7892 case TARGET_NR_unlinkat
:
7893 if (!(p
= lock_user_string(arg2
)))
7895 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
7896 unlock_user(p
, arg2
, 0);
7899 case TARGET_NR_execve
:
7901 char **argp
, **envp
;
7904 abi_ulong guest_argp
;
7905 abi_ulong guest_envp
;
7912 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
7913 if (get_user_ual(addr
, gp
))
7921 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
7922 if (get_user_ual(addr
, gp
))
7929 argp
= alloca((argc
+ 1) * sizeof(void *));
7930 envp
= alloca((envc
+ 1) * sizeof(void *));
7932 for (gp
= guest_argp
, q
= argp
; gp
;
7933 gp
+= sizeof(abi_ulong
), q
++) {
7934 if (get_user_ual(addr
, gp
))
7938 if (!(*q
= lock_user_string(addr
)))
7940 total_size
+= strlen(*q
) + 1;
7944 for (gp
= guest_envp
, q
= envp
; gp
;
7945 gp
+= sizeof(abi_ulong
), q
++) {
7946 if (get_user_ual(addr
, gp
))
7950 if (!(*q
= lock_user_string(addr
)))
7952 total_size
+= strlen(*q
) + 1;
7956 if (!(p
= lock_user_string(arg1
)))
7958 /* Although execve() is not an interruptible syscall it is
7959 * a special case where we must use the safe_syscall wrapper:
7960 * if we allow a signal to happen before we make the host
7961 * syscall then we will 'lose' it, because at the point of
7962 * execve the process leaves QEMU's control. So we use the
7963 * safe syscall wrapper to ensure that we either take the
7964 * signal as a guest signal, or else it does not happen
7965 * before the execve completes and makes it the other
7966 * program's problem.
7968 ret
= get_errno(safe_execve(p
, argp
, envp
));
7969 unlock_user(p
, arg1
, 0);
7974 ret
= -TARGET_EFAULT
;
7977 for (gp
= guest_argp
, q
= argp
; *q
;
7978 gp
+= sizeof(abi_ulong
), q
++) {
7979 if (get_user_ual(addr
, gp
)
7982 unlock_user(*q
, addr
, 0);
7984 for (gp
= guest_envp
, q
= envp
; *q
;
7985 gp
+= sizeof(abi_ulong
), q
++) {
7986 if (get_user_ual(addr
, gp
)
7989 unlock_user(*q
, addr
, 0);
7993 case TARGET_NR_chdir
:
7994 if (!(p
= lock_user_string(arg1
)))
7996 ret
= get_errno(chdir(p
));
7997 unlock_user(p
, arg1
, 0);
7999 #ifdef TARGET_NR_time
8000 case TARGET_NR_time
:
8003 ret
= get_errno(time(&host_time
));
8006 && put_user_sal(host_time
, arg1
))
8011 #ifdef TARGET_NR_mknod
8012 case TARGET_NR_mknod
:
8013 if (!(p
= lock_user_string(arg1
)))
8015 ret
= get_errno(mknod(p
, arg2
, arg3
));
8016 unlock_user(p
, arg1
, 0);
8019 #if defined(TARGET_NR_mknodat)
8020 case TARGET_NR_mknodat
:
8021 if (!(p
= lock_user_string(arg2
)))
8023 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
8024 unlock_user(p
, arg2
, 0);
8027 #ifdef TARGET_NR_chmod
8028 case TARGET_NR_chmod
:
8029 if (!(p
= lock_user_string(arg1
)))
8031 ret
= get_errno(chmod(p
, arg2
));
8032 unlock_user(p
, arg1
, 0);
8035 #ifdef TARGET_NR_break
8036 case TARGET_NR_break
:
8039 #ifdef TARGET_NR_oldstat
8040 case TARGET_NR_oldstat
:
8043 case TARGET_NR_lseek
:
8044 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
8046 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8047 /* Alpha specific */
8048 case TARGET_NR_getxpid
:
8049 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
8050 ret
= get_errno(getpid());
8053 #ifdef TARGET_NR_getpid
8054 case TARGET_NR_getpid
:
8055 ret
= get_errno(getpid());
8058 case TARGET_NR_mount
:
8060 /* need to look at the data field */
8064 p
= lock_user_string(arg1
);
8072 p2
= lock_user_string(arg2
);
8075 unlock_user(p
, arg1
, 0);
8081 p3
= lock_user_string(arg3
);
8084 unlock_user(p
, arg1
, 0);
8086 unlock_user(p2
, arg2
, 0);
8093 /* FIXME - arg5 should be locked, but it isn't clear how to
8094 * do that since it's not guaranteed to be a NULL-terminated
8098 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
8100 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
8102 ret
= get_errno(ret
);
8105 unlock_user(p
, arg1
, 0);
8107 unlock_user(p2
, arg2
, 0);
8109 unlock_user(p3
, arg3
, 0);
8113 #ifdef TARGET_NR_umount
8114 case TARGET_NR_umount
:
8115 if (!(p
= lock_user_string(arg1
)))
8117 ret
= get_errno(umount(p
));
8118 unlock_user(p
, arg1
, 0);
8121 #ifdef TARGET_NR_stime /* not on alpha */
8122 case TARGET_NR_stime
:
8125 if (get_user_sal(host_time
, arg1
))
8127 ret
= get_errno(stime(&host_time
));
8131 case TARGET_NR_ptrace
:
8133 #ifdef TARGET_NR_alarm /* not on alpha */
8134 case TARGET_NR_alarm
:
8138 #ifdef TARGET_NR_oldfstat
8139 case TARGET_NR_oldfstat
:
8142 #ifdef TARGET_NR_pause /* not on alpha */
8143 case TARGET_NR_pause
:
8144 if (!block_signals()) {
8145 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
8147 ret
= -TARGET_EINTR
;
8150 #ifdef TARGET_NR_utime
8151 case TARGET_NR_utime
:
8153 struct utimbuf tbuf
, *host_tbuf
;
8154 struct target_utimbuf
*target_tbuf
;
8156 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
8158 tbuf
.actime
= tswapal(target_tbuf
->actime
);
8159 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
8160 unlock_user_struct(target_tbuf
, arg2
, 0);
8165 if (!(p
= lock_user_string(arg1
)))
8167 ret
= get_errno(utime(p
, host_tbuf
));
8168 unlock_user(p
, arg1
, 0);
8172 #ifdef TARGET_NR_utimes
8173 case TARGET_NR_utimes
:
8175 struct timeval
*tvp
, tv
[2];
8177 if (copy_from_user_timeval(&tv
[0], arg2
)
8178 || copy_from_user_timeval(&tv
[1],
8179 arg2
+ sizeof(struct target_timeval
)))
8185 if (!(p
= lock_user_string(arg1
)))
8187 ret
= get_errno(utimes(p
, tvp
));
8188 unlock_user(p
, arg1
, 0);
8192 #if defined(TARGET_NR_futimesat)
8193 case TARGET_NR_futimesat
:
8195 struct timeval
*tvp
, tv
[2];
8197 if (copy_from_user_timeval(&tv
[0], arg3
)
8198 || copy_from_user_timeval(&tv
[1],
8199 arg3
+ sizeof(struct target_timeval
)))
8205 if (!(p
= lock_user_string(arg2
)))
8207 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
8208 unlock_user(p
, arg2
, 0);
8212 #ifdef TARGET_NR_stty
8213 case TARGET_NR_stty
:
8216 #ifdef TARGET_NR_gtty
8217 case TARGET_NR_gtty
:
8220 #ifdef TARGET_NR_access
8221 case TARGET_NR_access
:
8222 if (!(p
= lock_user_string(arg1
)))
8224 ret
= get_errno(access(path(p
), arg2
));
8225 unlock_user(p
, arg1
, 0);
8228 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8229 case TARGET_NR_faccessat
:
8230 if (!(p
= lock_user_string(arg2
)))
8232 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
8233 unlock_user(p
, arg2
, 0);
8236 #ifdef TARGET_NR_nice /* not on alpha */
8237 case TARGET_NR_nice
:
8238 ret
= get_errno(nice(arg1
));
8241 #ifdef TARGET_NR_ftime
8242 case TARGET_NR_ftime
:
8245 case TARGET_NR_sync
:
8249 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8250 case TARGET_NR_syncfs
:
8251 ret
= get_errno(syncfs(arg1
));
8254 case TARGET_NR_kill
:
8255 ret
= get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
8257 #ifdef TARGET_NR_rename
8258 case TARGET_NR_rename
:
8261 p
= lock_user_string(arg1
);
8262 p2
= lock_user_string(arg2
);
8264 ret
= -TARGET_EFAULT
;
8266 ret
= get_errno(rename(p
, p2
));
8267 unlock_user(p2
, arg2
, 0);
8268 unlock_user(p
, arg1
, 0);
8272 #if defined(TARGET_NR_renameat)
8273 case TARGET_NR_renameat
:
8276 p
= lock_user_string(arg2
);
8277 p2
= lock_user_string(arg4
);
8279 ret
= -TARGET_EFAULT
;
8281 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
8282 unlock_user(p2
, arg4
, 0);
8283 unlock_user(p
, arg2
, 0);
8287 #ifdef TARGET_NR_mkdir
8288 case TARGET_NR_mkdir
:
8289 if (!(p
= lock_user_string(arg1
)))
8291 ret
= get_errno(mkdir(p
, arg2
));
8292 unlock_user(p
, arg1
, 0);
8295 #if defined(TARGET_NR_mkdirat)
8296 case TARGET_NR_mkdirat
:
8297 if (!(p
= lock_user_string(arg2
)))
8299 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
8300 unlock_user(p
, arg2
, 0);
8303 #ifdef TARGET_NR_rmdir
8304 case TARGET_NR_rmdir
:
8305 if (!(p
= lock_user_string(arg1
)))
8307 ret
= get_errno(rmdir(p
));
8308 unlock_user(p
, arg1
, 0);
8312 ret
= get_errno(dup(arg1
));
8314 fd_trans_dup(arg1
, ret
);
8317 #ifdef TARGET_NR_pipe
8318 case TARGET_NR_pipe
:
8319 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
8322 #ifdef TARGET_NR_pipe2
8323 case TARGET_NR_pipe2
:
8324 ret
= do_pipe(cpu_env
, arg1
,
8325 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
8328 case TARGET_NR_times
:
8330 struct target_tms
*tmsp
;
8332 ret
= get_errno(times(&tms
));
8334 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
8337 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
8338 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
8339 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
8340 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
8343 ret
= host_to_target_clock_t(ret
);
8346 #ifdef TARGET_NR_prof
8347 case TARGET_NR_prof
:
8350 #ifdef TARGET_NR_signal
8351 case TARGET_NR_signal
:
8354 case TARGET_NR_acct
:
8356 ret
= get_errno(acct(NULL
));
8358 if (!(p
= lock_user_string(arg1
)))
8360 ret
= get_errno(acct(path(p
)));
8361 unlock_user(p
, arg1
, 0);
8364 #ifdef TARGET_NR_umount2
8365 case TARGET_NR_umount2
:
8366 if (!(p
= lock_user_string(arg1
)))
8368 ret
= get_errno(umount2(p
, arg2
));
8369 unlock_user(p
, arg1
, 0);
8372 #ifdef TARGET_NR_lock
8373 case TARGET_NR_lock
:
8376 case TARGET_NR_ioctl
:
8377 ret
= do_ioctl(arg1
, arg2
, arg3
);
8379 case TARGET_NR_fcntl
:
8380 ret
= do_fcntl(arg1
, arg2
, arg3
);
8382 #ifdef TARGET_NR_mpx
8386 case TARGET_NR_setpgid
:
8387 ret
= get_errno(setpgid(arg1
, arg2
));
8389 #ifdef TARGET_NR_ulimit
8390 case TARGET_NR_ulimit
:
8393 #ifdef TARGET_NR_oldolduname
8394 case TARGET_NR_oldolduname
:
8397 case TARGET_NR_umask
:
8398 ret
= get_errno(umask(arg1
));
8400 case TARGET_NR_chroot
:
8401 if (!(p
= lock_user_string(arg1
)))
8403 ret
= get_errno(chroot(p
));
8404 unlock_user(p
, arg1
, 0);
8406 #ifdef TARGET_NR_ustat
8407 case TARGET_NR_ustat
:
8410 #ifdef TARGET_NR_dup2
8411 case TARGET_NR_dup2
:
8412 ret
= get_errno(dup2(arg1
, arg2
));
8414 fd_trans_dup(arg1
, arg2
);
8418 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8419 case TARGET_NR_dup3
:
8420 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
8422 fd_trans_dup(arg1
, arg2
);
8426 #ifdef TARGET_NR_getppid /* not on alpha */
8427 case TARGET_NR_getppid
:
8428 ret
= get_errno(getppid());
8431 #ifdef TARGET_NR_getpgrp
8432 case TARGET_NR_getpgrp
:
8433 ret
= get_errno(getpgrp());
8436 case TARGET_NR_setsid
:
8437 ret
= get_errno(setsid());
8439 #ifdef TARGET_NR_sigaction
8440 case TARGET_NR_sigaction
:
8442 #if defined(TARGET_ALPHA)
8443 struct target_sigaction act
, oact
, *pact
= 0;
8444 struct target_old_sigaction
*old_act
;
8446 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8448 act
._sa_handler
= old_act
->_sa_handler
;
8449 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8450 act
.sa_flags
= old_act
->sa_flags
;
8451 act
.sa_restorer
= 0;
8452 unlock_user_struct(old_act
, arg2
, 0);
8455 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8456 if (!is_error(ret
) && arg3
) {
8457 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8459 old_act
->_sa_handler
= oact
._sa_handler
;
8460 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8461 old_act
->sa_flags
= oact
.sa_flags
;
8462 unlock_user_struct(old_act
, arg3
, 1);
8464 #elif defined(TARGET_MIPS)
8465 struct target_sigaction act
, oact
, *pact
, *old_act
;
8468 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8470 act
._sa_handler
= old_act
->_sa_handler
;
8471 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
8472 act
.sa_flags
= old_act
->sa_flags
;
8473 unlock_user_struct(old_act
, arg2
, 0);
8479 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8481 if (!is_error(ret
) && arg3
) {
8482 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8484 old_act
->_sa_handler
= oact
._sa_handler
;
8485 old_act
->sa_flags
= oact
.sa_flags
;
8486 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
8487 old_act
->sa_mask
.sig
[1] = 0;
8488 old_act
->sa_mask
.sig
[2] = 0;
8489 old_act
->sa_mask
.sig
[3] = 0;
8490 unlock_user_struct(old_act
, arg3
, 1);
8493 struct target_old_sigaction
*old_act
;
8494 struct target_sigaction act
, oact
, *pact
;
8496 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8498 act
._sa_handler
= old_act
->_sa_handler
;
8499 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8500 act
.sa_flags
= old_act
->sa_flags
;
8501 act
.sa_restorer
= old_act
->sa_restorer
;
8502 unlock_user_struct(old_act
, arg2
, 0);
8507 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8508 if (!is_error(ret
) && arg3
) {
8509 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8511 old_act
->_sa_handler
= oact
._sa_handler
;
8512 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8513 old_act
->sa_flags
= oact
.sa_flags
;
8514 old_act
->sa_restorer
= oact
.sa_restorer
;
8515 unlock_user_struct(old_act
, arg3
, 1);
8521 case TARGET_NR_rt_sigaction
:
8523 #if defined(TARGET_ALPHA)
8524 struct target_sigaction act
, oact
, *pact
= 0;
8525 struct target_rt_sigaction
*rt_act
;
8527 if (arg4
!= sizeof(target_sigset_t
)) {
8528 ret
= -TARGET_EINVAL
;
8532 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
8534 act
._sa_handler
= rt_act
->_sa_handler
;
8535 act
.sa_mask
= rt_act
->sa_mask
;
8536 act
.sa_flags
= rt_act
->sa_flags
;
8537 act
.sa_restorer
= arg5
;
8538 unlock_user_struct(rt_act
, arg2
, 0);
8541 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8542 if (!is_error(ret
) && arg3
) {
8543 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
8545 rt_act
->_sa_handler
= oact
._sa_handler
;
8546 rt_act
->sa_mask
= oact
.sa_mask
;
8547 rt_act
->sa_flags
= oact
.sa_flags
;
8548 unlock_user_struct(rt_act
, arg3
, 1);
8551 struct target_sigaction
*act
;
8552 struct target_sigaction
*oact
;
8554 if (arg4
!= sizeof(target_sigset_t
)) {
8555 ret
= -TARGET_EINVAL
;
8559 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
8564 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
8565 ret
= -TARGET_EFAULT
;
8566 goto rt_sigaction_fail
;
8570 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
8573 unlock_user_struct(act
, arg2
, 0);
8575 unlock_user_struct(oact
, arg3
, 1);
8579 #ifdef TARGET_NR_sgetmask /* not on alpha */
8580 case TARGET_NR_sgetmask
:
8583 abi_ulong target_set
;
8584 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8586 host_to_target_old_sigset(&target_set
, &cur_set
);
8592 #ifdef TARGET_NR_ssetmask /* not on alpha */
8593 case TARGET_NR_ssetmask
:
8595 sigset_t set
, oset
, cur_set
;
8596 abi_ulong target_set
= arg1
;
8597 /* We only have one word of the new mask so we must read
8598 * the rest of it with do_sigprocmask() and OR in this word.
8599 * We are guaranteed that a do_sigprocmask() that only queries
8600 * the signal mask will not fail.
8602 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8604 target_to_host_old_sigset(&set
, &target_set
);
8605 sigorset(&set
, &set
, &cur_set
);
8606 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
8608 host_to_target_old_sigset(&target_set
, &oset
);
8614 #ifdef TARGET_NR_sigprocmask
8615 case TARGET_NR_sigprocmask
:
8617 #if defined(TARGET_ALPHA)
8618 sigset_t set
, oldset
;
8623 case TARGET_SIG_BLOCK
:
8626 case TARGET_SIG_UNBLOCK
:
8629 case TARGET_SIG_SETMASK
:
8633 ret
= -TARGET_EINVAL
;
8637 target_to_host_old_sigset(&set
, &mask
);
8639 ret
= do_sigprocmask(how
, &set
, &oldset
);
8640 if (!is_error(ret
)) {
8641 host_to_target_old_sigset(&mask
, &oldset
);
8643 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
8646 sigset_t set
, oldset
, *set_ptr
;
8651 case TARGET_SIG_BLOCK
:
8654 case TARGET_SIG_UNBLOCK
:
8657 case TARGET_SIG_SETMASK
:
8661 ret
= -TARGET_EINVAL
;
8664 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8666 target_to_host_old_sigset(&set
, p
);
8667 unlock_user(p
, arg2
, 0);
8673 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8674 if (!is_error(ret
) && arg3
) {
8675 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8677 host_to_target_old_sigset(p
, &oldset
);
8678 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8684 case TARGET_NR_rt_sigprocmask
:
8687 sigset_t set
, oldset
, *set_ptr
;
8689 if (arg4
!= sizeof(target_sigset_t
)) {
8690 ret
= -TARGET_EINVAL
;
8696 case TARGET_SIG_BLOCK
:
8699 case TARGET_SIG_UNBLOCK
:
8702 case TARGET_SIG_SETMASK
:
8706 ret
= -TARGET_EINVAL
;
8709 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8711 target_to_host_sigset(&set
, p
);
8712 unlock_user(p
, arg2
, 0);
8718 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8719 if (!is_error(ret
) && arg3
) {
8720 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8722 host_to_target_sigset(p
, &oldset
);
8723 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8727 #ifdef TARGET_NR_sigpending
8728 case TARGET_NR_sigpending
:
8731 ret
= get_errno(sigpending(&set
));
8732 if (!is_error(ret
)) {
8733 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8735 host_to_target_old_sigset(p
, &set
);
8736 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8741 case TARGET_NR_rt_sigpending
:
8745 /* Yes, this check is >, not != like most. We follow the kernel's
8746 * logic and it does it like this because it implements
8747 * NR_sigpending through the same code path, and in that case
8748 * the old_sigset_t is smaller in size.
8750 if (arg2
> sizeof(target_sigset_t
)) {
8751 ret
= -TARGET_EINVAL
;
8755 ret
= get_errno(sigpending(&set
));
8756 if (!is_error(ret
)) {
8757 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8759 host_to_target_sigset(p
, &set
);
8760 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8764 #ifdef TARGET_NR_sigsuspend
8765 case TARGET_NR_sigsuspend
:
8767 TaskState
*ts
= cpu
->opaque
;
8768 #if defined(TARGET_ALPHA)
8769 abi_ulong mask
= arg1
;
8770 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
8772 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8774 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
8775 unlock_user(p
, arg1
, 0);
8777 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8779 if (ret
!= -TARGET_ERESTARTSYS
) {
8780 ts
->in_sigsuspend
= 1;
8785 case TARGET_NR_rt_sigsuspend
:
8787 TaskState
*ts
= cpu
->opaque
;
8789 if (arg2
!= sizeof(target_sigset_t
)) {
8790 ret
= -TARGET_EINVAL
;
8793 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8795 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
8796 unlock_user(p
, arg1
, 0);
8797 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8799 if (ret
!= -TARGET_ERESTARTSYS
) {
8800 ts
->in_sigsuspend
= 1;
8804 case TARGET_NR_rt_sigtimedwait
:
8807 struct timespec uts
, *puts
;
8810 if (arg4
!= sizeof(target_sigset_t
)) {
8811 ret
= -TARGET_EINVAL
;
8815 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8817 target_to_host_sigset(&set
, p
);
8818 unlock_user(p
, arg1
, 0);
8821 target_to_host_timespec(puts
, arg3
);
8825 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
8827 if (!is_error(ret
)) {
8829 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
8834 host_to_target_siginfo(p
, &uinfo
);
8835 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
8837 ret
= host_to_target_signal(ret
);
8841 case TARGET_NR_rt_sigqueueinfo
:
8845 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
8849 target_to_host_siginfo(&uinfo
, p
);
8850 unlock_user(p
, arg1
, 0);
8851 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
8854 #ifdef TARGET_NR_sigreturn
8855 case TARGET_NR_sigreturn
:
8856 if (block_signals()) {
8857 ret
= -TARGET_ERESTARTSYS
;
8859 ret
= do_sigreturn(cpu_env
);
8863 case TARGET_NR_rt_sigreturn
:
8864 if (block_signals()) {
8865 ret
= -TARGET_ERESTARTSYS
;
8867 ret
= do_rt_sigreturn(cpu_env
);
8870 case TARGET_NR_sethostname
:
8871 if (!(p
= lock_user_string(arg1
)))
8873 ret
= get_errno(sethostname(p
, arg2
));
8874 unlock_user(p
, arg1
, 0);
8876 case TARGET_NR_setrlimit
:
8878 int resource
= target_to_host_resource(arg1
);
8879 struct target_rlimit
*target_rlim
;
8881 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
8883 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
8884 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
8885 unlock_user_struct(target_rlim
, arg2
, 0);
8886 ret
= get_errno(setrlimit(resource
, &rlim
));
8889 case TARGET_NR_getrlimit
:
8891 int resource
= target_to_host_resource(arg1
);
8892 struct target_rlimit
*target_rlim
;
8895 ret
= get_errno(getrlimit(resource
, &rlim
));
8896 if (!is_error(ret
)) {
8897 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
8899 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
8900 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
8901 unlock_user_struct(target_rlim
, arg2
, 1);
8905 case TARGET_NR_getrusage
:
8907 struct rusage rusage
;
8908 ret
= get_errno(getrusage(arg1
, &rusage
));
8909 if (!is_error(ret
)) {
8910 ret
= host_to_target_rusage(arg2
, &rusage
);
8914 case TARGET_NR_gettimeofday
:
8917 ret
= get_errno(gettimeofday(&tv
, NULL
));
8918 if (!is_error(ret
)) {
8919 if (copy_to_user_timeval(arg1
, &tv
))
8924 case TARGET_NR_settimeofday
:
8926 struct timeval tv
, *ptv
= NULL
;
8927 struct timezone tz
, *ptz
= NULL
;
8930 if (copy_from_user_timeval(&tv
, arg1
)) {
8937 if (copy_from_user_timezone(&tz
, arg2
)) {
8943 ret
= get_errno(settimeofday(ptv
, ptz
));
8946 #if defined(TARGET_NR_select)
8947 case TARGET_NR_select
:
8948 #if defined(TARGET_WANT_NI_OLD_SELECT)
8949 /* some architectures used to have old_select here
8950 * but now ENOSYS it.
8952 ret
= -TARGET_ENOSYS
;
8953 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8954 ret
= do_old_select(arg1
);
8956 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
8960 #ifdef TARGET_NR_pselect6
8961 case TARGET_NR_pselect6
:
8963 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
8964 fd_set rfds
, wfds
, efds
;
8965 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
8966 struct timespec ts
, *ts_ptr
;
8969 * The 6th arg is actually two args smashed together,
8970 * so we cannot use the C library.
8978 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
8979 target_sigset_t
*target_sigset
;
8987 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
8991 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
8995 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
9001 * This takes a timespec, and not a timeval, so we cannot
9002 * use the do_select() helper ...
9005 if (target_to_host_timespec(&ts
, ts_addr
)) {
9013 /* Extract the two packed args for the sigset */
9016 sig
.size
= SIGSET_T_SIZE
;
9018 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
9022 arg_sigset
= tswapal(arg7
[0]);
9023 arg_sigsize
= tswapal(arg7
[1]);
9024 unlock_user(arg7
, arg6
, 0);
9028 if (arg_sigsize
!= sizeof(*target_sigset
)) {
9029 /* Like the kernel, we enforce correct size sigsets */
9030 ret
= -TARGET_EINVAL
;
9033 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
9034 sizeof(*target_sigset
), 1);
9035 if (!target_sigset
) {
9038 target_to_host_sigset(&set
, target_sigset
);
9039 unlock_user(target_sigset
, arg_sigset
, 0);
9047 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
9050 if (!is_error(ret
)) {
9051 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
9053 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
9055 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
9058 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
9064 #ifdef TARGET_NR_symlink
9065 case TARGET_NR_symlink
:
9068 p
= lock_user_string(arg1
);
9069 p2
= lock_user_string(arg2
);
9071 ret
= -TARGET_EFAULT
;
9073 ret
= get_errno(symlink(p
, p2
));
9074 unlock_user(p2
, arg2
, 0);
9075 unlock_user(p
, arg1
, 0);
9079 #if defined(TARGET_NR_symlinkat)
9080 case TARGET_NR_symlinkat
:
9083 p
= lock_user_string(arg1
);
9084 p2
= lock_user_string(arg3
);
9086 ret
= -TARGET_EFAULT
;
9088 ret
= get_errno(symlinkat(p
, arg2
, p2
));
9089 unlock_user(p2
, arg3
, 0);
9090 unlock_user(p
, arg1
, 0);
9094 #ifdef TARGET_NR_oldlstat
9095 case TARGET_NR_oldlstat
:
9098 #ifdef TARGET_NR_readlink
9099 case TARGET_NR_readlink
:
9102 p
= lock_user_string(arg1
);
9103 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9105 ret
= -TARGET_EFAULT
;
9107 /* Short circuit this for the magic exe check. */
9108 ret
= -TARGET_EINVAL
;
9109 } else if (is_proc_myself((const char *)p
, "exe")) {
9110 char real
[PATH_MAX
], *temp
;
9111 temp
= realpath(exec_path
, real
);
9112 /* Return value is # of bytes that we wrote to the buffer. */
9114 ret
= get_errno(-1);
9116 /* Don't worry about sign mismatch as earlier mapping
9117 * logic would have thrown a bad address error. */
9118 ret
= MIN(strlen(real
), arg3
);
9119 /* We cannot NUL terminate the string. */
9120 memcpy(p2
, real
, ret
);
9123 ret
= get_errno(readlink(path(p
), p2
, arg3
));
9125 unlock_user(p2
, arg2
, ret
);
9126 unlock_user(p
, arg1
, 0);
9130 #if defined(TARGET_NR_readlinkat)
9131 case TARGET_NR_readlinkat
:
9134 p
= lock_user_string(arg2
);
9135 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
9137 ret
= -TARGET_EFAULT
;
9138 } else if (is_proc_myself((const char *)p
, "exe")) {
9139 char real
[PATH_MAX
], *temp
;
9140 temp
= realpath(exec_path
, real
);
9141 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
9142 snprintf((char *)p2
, arg4
, "%s", real
);
9144 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
9146 unlock_user(p2
, arg3
, ret
);
9147 unlock_user(p
, arg2
, 0);
9151 #ifdef TARGET_NR_uselib
9152 case TARGET_NR_uselib
:
9155 #ifdef TARGET_NR_swapon
9156 case TARGET_NR_swapon
:
9157 if (!(p
= lock_user_string(arg1
)))
9159 ret
= get_errno(swapon(p
, arg2
));
9160 unlock_user(p
, arg1
, 0);
9163 case TARGET_NR_reboot
:
9164 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
9165 /* arg4 must be ignored in all other cases */
9166 p
= lock_user_string(arg4
);
9170 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
9171 unlock_user(p
, arg4
, 0);
9173 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
9176 #ifdef TARGET_NR_readdir
9177 case TARGET_NR_readdir
:
9180 #ifdef TARGET_NR_mmap
9181 case TARGET_NR_mmap
:
9182 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9183 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9184 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9185 || defined(TARGET_S390X)
9188 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
9189 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
9197 unlock_user(v
, arg1
, 0);
9198 ret
= get_errno(target_mmap(v1
, v2
, v3
,
9199 target_to_host_bitmask(v4
, mmap_flags_tbl
),
9203 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9204 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9210 #ifdef TARGET_NR_mmap2
9211 case TARGET_NR_mmap2
:
9213 #define MMAP_SHIFT 12
9215 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9216 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9218 arg6
<< MMAP_SHIFT
));
9221 case TARGET_NR_munmap
:
9222 ret
= get_errno(target_munmap(arg1
, arg2
));
9224 case TARGET_NR_mprotect
:
9226 TaskState
*ts
= cpu
->opaque
;
9227 /* Special hack to detect libc making the stack executable. */
9228 if ((arg3
& PROT_GROWSDOWN
)
9229 && arg1
>= ts
->info
->stack_limit
9230 && arg1
<= ts
->info
->start_stack
) {
9231 arg3
&= ~PROT_GROWSDOWN
;
9232 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
9233 arg1
= ts
->info
->stack_limit
;
9236 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
9238 #ifdef TARGET_NR_mremap
9239 case TARGET_NR_mremap
:
9240 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
9243 /* ??? msync/mlock/munlock are broken for softmmu. */
9244 #ifdef TARGET_NR_msync
9245 case TARGET_NR_msync
:
9246 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
9249 #ifdef TARGET_NR_mlock
9250 case TARGET_NR_mlock
:
9251 ret
= get_errno(mlock(g2h(arg1
), arg2
));
9254 #ifdef TARGET_NR_munlock
9255 case TARGET_NR_munlock
:
9256 ret
= get_errno(munlock(g2h(arg1
), arg2
));
9259 #ifdef TARGET_NR_mlockall
9260 case TARGET_NR_mlockall
:
9261 ret
= get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
9264 #ifdef TARGET_NR_munlockall
9265 case TARGET_NR_munlockall
:
9266 ret
= get_errno(munlockall());
9269 case TARGET_NR_truncate
:
9270 if (!(p
= lock_user_string(arg1
)))
9272 ret
= get_errno(truncate(p
, arg2
));
9273 unlock_user(p
, arg1
, 0);
9275 case TARGET_NR_ftruncate
:
9276 ret
= get_errno(ftruncate(arg1
, arg2
));
9278 case TARGET_NR_fchmod
:
9279 ret
= get_errno(fchmod(arg1
, arg2
));
9281 #if defined(TARGET_NR_fchmodat)
9282 case TARGET_NR_fchmodat
:
9283 if (!(p
= lock_user_string(arg2
)))
9285 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
9286 unlock_user(p
, arg2
, 0);
9289 case TARGET_NR_getpriority
:
9290 /* Note that negative values are valid for getpriority, so we must
9291 differentiate based on errno settings. */
9293 ret
= getpriority(arg1
, arg2
);
9294 if (ret
== -1 && errno
!= 0) {
9295 ret
= -host_to_target_errno(errno
);
9299 /* Return value is the unbiased priority. Signal no error. */
9300 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
9302 /* Return value is a biased priority to avoid negative numbers. */
9306 case TARGET_NR_setpriority
:
9307 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
9309 #ifdef TARGET_NR_profil
9310 case TARGET_NR_profil
:
9313 case TARGET_NR_statfs
:
9314 if (!(p
= lock_user_string(arg1
)))
9316 ret
= get_errno(statfs(path(p
), &stfs
));
9317 unlock_user(p
, arg1
, 0);
9319 if (!is_error(ret
)) {
9320 struct target_statfs
*target_stfs
;
9322 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
9324 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9325 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9326 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9327 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9328 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9329 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9330 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9331 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9332 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9333 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9334 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9335 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9336 unlock_user_struct(target_stfs
, arg2
, 1);
9339 case TARGET_NR_fstatfs
:
9340 ret
= get_errno(fstatfs(arg1
, &stfs
));
9341 goto convert_statfs
;
9342 #ifdef TARGET_NR_statfs64
9343 case TARGET_NR_statfs64
:
9344 if (!(p
= lock_user_string(arg1
)))
9346 ret
= get_errno(statfs(path(p
), &stfs
));
9347 unlock_user(p
, arg1
, 0);
9349 if (!is_error(ret
)) {
9350 struct target_statfs64
*target_stfs
;
9352 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
9354 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9355 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9356 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9357 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9358 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9359 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9360 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9361 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9362 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9363 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9364 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9365 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9366 unlock_user_struct(target_stfs
, arg3
, 1);
9369 case TARGET_NR_fstatfs64
:
9370 ret
= get_errno(fstatfs(arg1
, &stfs
));
9371 goto convert_statfs64
;
9373 #ifdef TARGET_NR_ioperm
9374 case TARGET_NR_ioperm
:
9377 #ifdef TARGET_NR_socketcall
9378 case TARGET_NR_socketcall
:
9379 ret
= do_socketcall(arg1
, arg2
);
9382 #ifdef TARGET_NR_accept
9383 case TARGET_NR_accept
:
9384 ret
= do_accept4(arg1
, arg2
, arg3
, 0);
9387 #ifdef TARGET_NR_accept4
9388 case TARGET_NR_accept4
:
9389 ret
= do_accept4(arg1
, arg2
, arg3
, arg4
);
9392 #ifdef TARGET_NR_bind
9393 case TARGET_NR_bind
:
9394 ret
= do_bind(arg1
, arg2
, arg3
);
9397 #ifdef TARGET_NR_connect
9398 case TARGET_NR_connect
:
9399 ret
= do_connect(arg1
, arg2
, arg3
);
9402 #ifdef TARGET_NR_getpeername
9403 case TARGET_NR_getpeername
:
9404 ret
= do_getpeername(arg1
, arg2
, arg3
);
9407 #ifdef TARGET_NR_getsockname
9408 case TARGET_NR_getsockname
:
9409 ret
= do_getsockname(arg1
, arg2
, arg3
);
9412 #ifdef TARGET_NR_getsockopt
9413 case TARGET_NR_getsockopt
:
9414 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
9417 #ifdef TARGET_NR_listen
9418 case TARGET_NR_listen
:
9419 ret
= get_errno(listen(arg1
, arg2
));
9422 #ifdef TARGET_NR_recv
9423 case TARGET_NR_recv
:
9424 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
9427 #ifdef TARGET_NR_recvfrom
9428 case TARGET_NR_recvfrom
:
9429 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9432 #ifdef TARGET_NR_recvmsg
9433 case TARGET_NR_recvmsg
:
9434 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
9437 #ifdef TARGET_NR_send
9438 case TARGET_NR_send
:
9439 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
9442 #ifdef TARGET_NR_sendmsg
9443 case TARGET_NR_sendmsg
:
9444 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
9447 #ifdef TARGET_NR_sendmmsg
9448 case TARGET_NR_sendmmsg
:
9449 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
9451 case TARGET_NR_recvmmsg
:
9452 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
9455 #ifdef TARGET_NR_sendto
9456 case TARGET_NR_sendto
:
9457 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9460 #ifdef TARGET_NR_shutdown
9461 case TARGET_NR_shutdown
:
9462 ret
= get_errno(shutdown(arg1
, arg2
));
9465 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9466 case TARGET_NR_getrandom
:
9467 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
9471 ret
= get_errno(getrandom(p
, arg2
, arg3
));
9472 unlock_user(p
, arg1
, ret
);
9475 #ifdef TARGET_NR_socket
9476 case TARGET_NR_socket
:
9477 ret
= do_socket(arg1
, arg2
, arg3
);
9480 #ifdef TARGET_NR_socketpair
9481 case TARGET_NR_socketpair
:
9482 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
9485 #ifdef TARGET_NR_setsockopt
9486 case TARGET_NR_setsockopt
:
9487 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
9490 #if defined(TARGET_NR_syslog)
9491 case TARGET_NR_syslog
:
9496 case TARGET_SYSLOG_ACTION_CLOSE
: /* Close log */
9497 case TARGET_SYSLOG_ACTION_OPEN
: /* Open log */
9498 case TARGET_SYSLOG_ACTION_CLEAR
: /* Clear ring buffer */
9499 case TARGET_SYSLOG_ACTION_CONSOLE_OFF
: /* Disable logging */
9500 case TARGET_SYSLOG_ACTION_CONSOLE_ON
: /* Enable logging */
9501 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL
: /* Set messages level */
9502 case TARGET_SYSLOG_ACTION_SIZE_UNREAD
: /* Number of chars */
9503 case TARGET_SYSLOG_ACTION_SIZE_BUFFER
: /* Size of the buffer */
9505 ret
= get_errno(sys_syslog((int)arg1
, NULL
, (int)arg3
));
9508 case TARGET_SYSLOG_ACTION_READ
: /* Read from log */
9509 case TARGET_SYSLOG_ACTION_READ_CLEAR
: /* Read/clear msgs */
9510 case TARGET_SYSLOG_ACTION_READ_ALL
: /* Read last messages */
9512 ret
= -TARGET_EINVAL
;
9520 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9522 ret
= -TARGET_EFAULT
;
9525 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
9526 unlock_user(p
, arg2
, arg3
);
9536 case TARGET_NR_setitimer
:
9538 struct itimerval value
, ovalue
, *pvalue
;
9542 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
9543 || copy_from_user_timeval(&pvalue
->it_value
,
9544 arg2
+ sizeof(struct target_timeval
)))
9549 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
9550 if (!is_error(ret
) && arg3
) {
9551 if (copy_to_user_timeval(arg3
,
9552 &ovalue
.it_interval
)
9553 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
9559 case TARGET_NR_getitimer
:
9561 struct itimerval value
;
9563 ret
= get_errno(getitimer(arg1
, &value
));
9564 if (!is_error(ret
) && arg2
) {
9565 if (copy_to_user_timeval(arg2
,
9567 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
9573 #ifdef TARGET_NR_stat
9574 case TARGET_NR_stat
:
9575 if (!(p
= lock_user_string(arg1
)))
9577 ret
= get_errno(stat(path(p
), &st
));
9578 unlock_user(p
, arg1
, 0);
9581 #ifdef TARGET_NR_lstat
9582 case TARGET_NR_lstat
:
9583 if (!(p
= lock_user_string(arg1
)))
9585 ret
= get_errno(lstat(path(p
), &st
));
9586 unlock_user(p
, arg1
, 0);
9589 case TARGET_NR_fstat
:
9591 ret
= get_errno(fstat(arg1
, &st
));
9592 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9595 if (!is_error(ret
)) {
9596 struct target_stat
*target_st
;
9598 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
9600 memset(target_st
, 0, sizeof(*target_st
));
9601 __put_user(st
.st_dev
, &target_st
->st_dev
);
9602 __put_user(st
.st_ino
, &target_st
->st_ino
);
9603 __put_user(st
.st_mode
, &target_st
->st_mode
);
9604 __put_user(st
.st_uid
, &target_st
->st_uid
);
9605 __put_user(st
.st_gid
, &target_st
->st_gid
);
9606 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
9607 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
9608 __put_user(st
.st_size
, &target_st
->st_size
);
9609 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
9610 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
9611 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
9612 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
9613 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
9614 unlock_user_struct(target_st
, arg2
, 1);
9618 #ifdef TARGET_NR_olduname
9619 case TARGET_NR_olduname
:
9622 #ifdef TARGET_NR_iopl
9623 case TARGET_NR_iopl
:
9626 case TARGET_NR_vhangup
:
9627 ret
= get_errno(vhangup());
9629 #ifdef TARGET_NR_idle
9630 case TARGET_NR_idle
:
9633 #ifdef TARGET_NR_syscall
9634 case TARGET_NR_syscall
:
9635 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
9636 arg6
, arg7
, arg8
, 0);
9639 case TARGET_NR_wait4
:
9642 abi_long status_ptr
= arg2
;
9643 struct rusage rusage
, *rusage_ptr
;
9644 abi_ulong target_rusage
= arg4
;
9645 abi_long rusage_err
;
9647 rusage_ptr
= &rusage
;
9650 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
9651 if (!is_error(ret
)) {
9652 if (status_ptr
&& ret
) {
9653 status
= host_to_target_waitstatus(status
);
9654 if (put_user_s32(status
, status_ptr
))
9657 if (target_rusage
) {
9658 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
9666 #ifdef TARGET_NR_swapoff
9667 case TARGET_NR_swapoff
:
9668 if (!(p
= lock_user_string(arg1
)))
9670 ret
= get_errno(swapoff(p
));
9671 unlock_user(p
, arg1
, 0);
9674 case TARGET_NR_sysinfo
:
9676 struct target_sysinfo
*target_value
;
9677 struct sysinfo value
;
9678 ret
= get_errno(sysinfo(&value
));
9679 if (!is_error(ret
) && arg1
)
9681 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
9683 __put_user(value
.uptime
, &target_value
->uptime
);
9684 __put_user(value
.loads
[0], &target_value
->loads
[0]);
9685 __put_user(value
.loads
[1], &target_value
->loads
[1]);
9686 __put_user(value
.loads
[2], &target_value
->loads
[2]);
9687 __put_user(value
.totalram
, &target_value
->totalram
);
9688 __put_user(value
.freeram
, &target_value
->freeram
);
9689 __put_user(value
.sharedram
, &target_value
->sharedram
);
9690 __put_user(value
.bufferram
, &target_value
->bufferram
);
9691 __put_user(value
.totalswap
, &target_value
->totalswap
);
9692 __put_user(value
.freeswap
, &target_value
->freeswap
);
9693 __put_user(value
.procs
, &target_value
->procs
);
9694 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
9695 __put_user(value
.freehigh
, &target_value
->freehigh
);
9696 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
9697 unlock_user_struct(target_value
, arg1
, 1);
9701 #ifdef TARGET_NR_ipc
9703 ret
= do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9706 #ifdef TARGET_NR_semget
9707 case TARGET_NR_semget
:
9708 ret
= get_errno(semget(arg1
, arg2
, arg3
));
9711 #ifdef TARGET_NR_semop
9712 case TARGET_NR_semop
:
9713 ret
= do_semop(arg1
, arg2
, arg3
);
9716 #ifdef TARGET_NR_semctl
9717 case TARGET_NR_semctl
:
9718 ret
= do_semctl(arg1
, arg2
, arg3
, arg4
);
9721 #ifdef TARGET_NR_msgctl
9722 case TARGET_NR_msgctl
:
9723 ret
= do_msgctl(arg1
, arg2
, arg3
);
9726 #ifdef TARGET_NR_msgget
9727 case TARGET_NR_msgget
:
9728 ret
= get_errno(msgget(arg1
, arg2
));
9731 #ifdef TARGET_NR_msgrcv
9732 case TARGET_NR_msgrcv
:
9733 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
9736 #ifdef TARGET_NR_msgsnd
9737 case TARGET_NR_msgsnd
:
9738 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
9741 #ifdef TARGET_NR_shmget
9742 case TARGET_NR_shmget
:
9743 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
9746 #ifdef TARGET_NR_shmctl
9747 case TARGET_NR_shmctl
:
9748 ret
= do_shmctl(arg1
, arg2
, arg3
);
9751 #ifdef TARGET_NR_shmat
9752 case TARGET_NR_shmat
:
9753 ret
= do_shmat(cpu_env
, arg1
, arg2
, arg3
);
9756 #ifdef TARGET_NR_shmdt
9757 case TARGET_NR_shmdt
:
9758 ret
= do_shmdt(arg1
);
9761 case TARGET_NR_fsync
:
9762 ret
= get_errno(fsync(arg1
));
9764 case TARGET_NR_clone
:
9765 /* Linux manages to have three different orderings for its
9766 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9767 * match the kernel's CONFIG_CLONE_* settings.
9768 * Microblaze is further special in that it uses a sixth
9769 * implicit argument to clone for the TLS pointer.
9771 #if defined(TARGET_MICROBLAZE)
9772 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
9773 #elif defined(TARGET_CLONE_BACKWARDS)
9774 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
9775 #elif defined(TARGET_CLONE_BACKWARDS2)
9776 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
9778 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
9781 #ifdef __NR_exit_group
9782 /* new thread calls */
9783 case TARGET_NR_exit_group
:
9787 gdb_exit(cpu_env
, arg1
);
9788 ret
= get_errno(exit_group(arg1
));
9791 case TARGET_NR_setdomainname
:
9792 if (!(p
= lock_user_string(arg1
)))
9794 ret
= get_errno(setdomainname(p
, arg2
));
9795 unlock_user(p
, arg1
, 0);
9797 case TARGET_NR_uname
:
9798 /* no need to transcode because we use the linux syscall */
9800 struct new_utsname
* buf
;
9802 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
9804 ret
= get_errno(sys_uname(buf
));
9805 if (!is_error(ret
)) {
9806 /* Overwrite the native machine name with whatever is being
9808 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
9809 /* Allow the user to override the reported release. */
9810 if (qemu_uname_release
&& *qemu_uname_release
) {
9811 g_strlcpy(buf
->release
, qemu_uname_release
,
9812 sizeof(buf
->release
));
9815 unlock_user_struct(buf
, arg1
, 1);
9819 case TARGET_NR_modify_ldt
:
9820 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
9822 #if !defined(TARGET_X86_64)
9823 case TARGET_NR_vm86old
:
9825 case TARGET_NR_vm86
:
9826 ret
= do_vm86(cpu_env
, arg1
, arg2
);
9830 case TARGET_NR_adjtimex
:
9832 struct timex host_buf
;
9834 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
9837 ret
= get_errno(adjtimex(&host_buf
));
9838 if (!is_error(ret
)) {
9839 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
9845 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9846 case TARGET_NR_clock_adjtime
:
9848 struct timex htx
, *phtx
= &htx
;
9850 if (target_to_host_timex(phtx
, arg2
) != 0) {
9853 ret
= get_errno(clock_adjtime(arg1
, phtx
));
9854 if (!is_error(ret
) && phtx
) {
9855 if (host_to_target_timex(arg2
, phtx
) != 0) {
9862 #ifdef TARGET_NR_create_module
9863 case TARGET_NR_create_module
:
9865 case TARGET_NR_init_module
:
9866 case TARGET_NR_delete_module
:
9867 #ifdef TARGET_NR_get_kernel_syms
9868 case TARGET_NR_get_kernel_syms
:
9871 case TARGET_NR_quotactl
:
9873 case TARGET_NR_getpgid
:
9874 ret
= get_errno(getpgid(arg1
));
9876 case TARGET_NR_fchdir
:
9877 ret
= get_errno(fchdir(arg1
));
9879 #ifdef TARGET_NR_bdflush /* not on x86_64 */
9880 case TARGET_NR_bdflush
:
9883 #ifdef TARGET_NR_sysfs
9884 case TARGET_NR_sysfs
:
9887 case TARGET_NR_personality
:
9888 ret
= get_errno(personality(arg1
));
9890 #ifdef TARGET_NR_afs_syscall
9891 case TARGET_NR_afs_syscall
:
9894 #ifdef TARGET_NR__llseek /* Not on alpha */
9895 case TARGET_NR__llseek
:
9898 #if !defined(__NR_llseek)
9899 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
9901 ret
= get_errno(res
);
9906 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
9908 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
9914 #ifdef TARGET_NR_getdents
9915 case TARGET_NR_getdents
:
9916 #ifdef __NR_getdents
9917 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9919 struct target_dirent
*target_dirp
;
9920 struct linux_dirent
*dirp
;
9921 abi_long count
= arg3
;
9923 dirp
= g_try_malloc(count
);
9925 ret
= -TARGET_ENOMEM
;
9929 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9930 if (!is_error(ret
)) {
9931 struct linux_dirent
*de
;
9932 struct target_dirent
*tde
;
9934 int reclen
, treclen
;
9935 int count1
, tnamelen
;
9939 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9943 reclen
= de
->d_reclen
;
9944 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
9945 assert(tnamelen
>= 0);
9946 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
9947 assert(count1
+ treclen
<= count
);
9948 tde
->d_reclen
= tswap16(treclen
);
9949 tde
->d_ino
= tswapal(de
->d_ino
);
9950 tde
->d_off
= tswapal(de
->d_off
);
9951 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
9952 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9954 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9958 unlock_user(target_dirp
, arg2
, ret
);
9964 struct linux_dirent
*dirp
;
9965 abi_long count
= arg3
;
9967 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9969 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9970 if (!is_error(ret
)) {
9971 struct linux_dirent
*de
;
9976 reclen
= de
->d_reclen
;
9979 de
->d_reclen
= tswap16(reclen
);
9980 tswapls(&de
->d_ino
);
9981 tswapls(&de
->d_off
);
9982 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9986 unlock_user(dirp
, arg2
, ret
);
9990 /* Implement getdents in terms of getdents64 */
9992 struct linux_dirent64
*dirp
;
9993 abi_long count
= arg3
;
9995 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
9999 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
10000 if (!is_error(ret
)) {
10001 /* Convert the dirent64 structs to target dirent. We do this
10002 * in-place, since we can guarantee that a target_dirent is no
10003 * larger than a dirent64; however this means we have to be
10004 * careful to read everything before writing in the new format.
10006 struct linux_dirent64
*de
;
10007 struct target_dirent
*tde
;
10012 tde
= (struct target_dirent
*)dirp
;
10014 int namelen
, treclen
;
10015 int reclen
= de
->d_reclen
;
10016 uint64_t ino
= de
->d_ino
;
10017 int64_t off
= de
->d_off
;
10018 uint8_t type
= de
->d_type
;
10020 namelen
= strlen(de
->d_name
);
10021 treclen
= offsetof(struct target_dirent
, d_name
)
10023 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
10025 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
10026 tde
->d_ino
= tswapal(ino
);
10027 tde
->d_off
= tswapal(off
);
10028 tde
->d_reclen
= tswap16(treclen
);
10029 /* The target_dirent type is in what was formerly a padding
10030 * byte at the end of the structure:
10032 *(((char *)tde
) + treclen
- 1) = type
;
10034 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
10035 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
10041 unlock_user(dirp
, arg2
, ret
);
10045 #endif /* TARGET_NR_getdents */
10046 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10047 case TARGET_NR_getdents64
:
10049 struct linux_dirent64
*dirp
;
10050 abi_long count
= arg3
;
10051 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10053 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
10054 if (!is_error(ret
)) {
10055 struct linux_dirent64
*de
;
10060 reclen
= de
->d_reclen
;
10063 de
->d_reclen
= tswap16(reclen
);
10064 tswap64s((uint64_t *)&de
->d_ino
);
10065 tswap64s((uint64_t *)&de
->d_off
);
10066 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
10070 unlock_user(dirp
, arg2
, ret
);
10073 #endif /* TARGET_NR_getdents64 */
10074 #if defined(TARGET_NR__newselect)
10075 case TARGET_NR__newselect
:
10076 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
10079 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10080 # ifdef TARGET_NR_poll
10081 case TARGET_NR_poll
:
10083 # ifdef TARGET_NR_ppoll
10084 case TARGET_NR_ppoll
:
10087 struct target_pollfd
*target_pfd
;
10088 unsigned int nfds
= arg2
;
10089 struct pollfd
*pfd
;
10095 if (nfds
> (INT_MAX
/ sizeof(struct target_pollfd
))) {
10096 ret
= -TARGET_EINVAL
;
10100 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
10101 sizeof(struct target_pollfd
) * nfds
, 1);
10106 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
10107 for (i
= 0; i
< nfds
; i
++) {
10108 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
10109 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
10114 # ifdef TARGET_NR_ppoll
10115 case TARGET_NR_ppoll
:
10117 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
10118 target_sigset_t
*target_set
;
10119 sigset_t _set
, *set
= &_set
;
10122 if (target_to_host_timespec(timeout_ts
, arg3
)) {
10123 unlock_user(target_pfd
, arg1
, 0);
10131 if (arg5
!= sizeof(target_sigset_t
)) {
10132 unlock_user(target_pfd
, arg1
, 0);
10133 ret
= -TARGET_EINVAL
;
10137 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
10139 unlock_user(target_pfd
, arg1
, 0);
10142 target_to_host_sigset(set
, target_set
);
10147 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
10148 set
, SIGSET_T_SIZE
));
10150 if (!is_error(ret
) && arg3
) {
10151 host_to_target_timespec(arg3
, timeout_ts
);
10154 unlock_user(target_set
, arg4
, 0);
10159 # ifdef TARGET_NR_poll
10160 case TARGET_NR_poll
:
10162 struct timespec ts
, *pts
;
10165 /* Convert ms to secs, ns */
10166 ts
.tv_sec
= arg3
/ 1000;
10167 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
10170 /* -ve poll() timeout means "infinite" */
10173 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
10178 g_assert_not_reached();
10181 if (!is_error(ret
)) {
10182 for(i
= 0; i
< nfds
; i
++) {
10183 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
10186 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
10190 case TARGET_NR_flock
:
10191 /* NOTE: the flock constant seems to be the same for every
10193 ret
= get_errno(safe_flock(arg1
, arg2
));
10195 case TARGET_NR_readv
:
10197 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10199 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
10200 unlock_iovec(vec
, arg2
, arg3
, 1);
10202 ret
= -host_to_target_errno(errno
);
10206 case TARGET_NR_writev
:
10208 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10210 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
10211 unlock_iovec(vec
, arg2
, arg3
, 0);
10213 ret
= -host_to_target_errno(errno
);
10217 #if defined(TARGET_NR_preadv)
10218 case TARGET_NR_preadv
:
10220 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10222 ret
= get_errno(safe_preadv(arg1
, vec
, arg3
, arg4
, arg5
));
10223 unlock_iovec(vec
, arg2
, arg3
, 1);
10225 ret
= -host_to_target_errno(errno
);
10230 #if defined(TARGET_NR_pwritev)
10231 case TARGET_NR_pwritev
:
10233 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10235 ret
= get_errno(safe_pwritev(arg1
, vec
, arg3
, arg4
, arg5
));
10236 unlock_iovec(vec
, arg2
, arg3
, 0);
10238 ret
= -host_to_target_errno(errno
);
10243 case TARGET_NR_getsid
:
10244 ret
= get_errno(getsid(arg1
));
10246 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10247 case TARGET_NR_fdatasync
:
10248 ret
= get_errno(fdatasync(arg1
));
10251 #ifdef TARGET_NR__sysctl
10252 case TARGET_NR__sysctl
:
10253 /* We don't implement this, but ENOTDIR is always a safe
10255 ret
= -TARGET_ENOTDIR
;
10258 case TARGET_NR_sched_getaffinity
:
10260 unsigned int mask_size
;
10261 unsigned long *mask
;
10264 * sched_getaffinity needs multiples of ulong, so need to take
10265 * care of mismatches between target ulong and host ulong sizes.
10267 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10268 ret
= -TARGET_EINVAL
;
10271 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10273 mask
= alloca(mask_size
);
10274 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
10276 if (!is_error(ret
)) {
10278 /* More data returned than the caller's buffer will fit.
10279 * This only happens if sizeof(abi_long) < sizeof(long)
10280 * and the caller passed us a buffer holding an odd number
10281 * of abi_longs. If the host kernel is actually using the
10282 * extra 4 bytes then fail EINVAL; otherwise we can just
10283 * ignore them and only copy the interesting part.
10285 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
10286 if (numcpus
> arg2
* 8) {
10287 ret
= -TARGET_EINVAL
;
10293 if (copy_to_user(arg3
, mask
, ret
)) {
10299 case TARGET_NR_sched_setaffinity
:
10301 unsigned int mask_size
;
10302 unsigned long *mask
;
10305 * sched_setaffinity needs multiples of ulong, so need to take
10306 * care of mismatches between target ulong and host ulong sizes.
10308 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10309 ret
= -TARGET_EINVAL
;
10312 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10314 mask
= alloca(mask_size
);
10315 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
10318 memcpy(mask
, p
, arg2
);
10319 unlock_user_struct(p
, arg2
, 0);
10321 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
10324 case TARGET_NR_sched_setparam
:
10326 struct sched_param
*target_schp
;
10327 struct sched_param schp
;
10330 return -TARGET_EINVAL
;
10332 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
10334 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10335 unlock_user_struct(target_schp
, arg2
, 0);
10336 ret
= get_errno(sched_setparam(arg1
, &schp
));
10339 case TARGET_NR_sched_getparam
:
10341 struct sched_param
*target_schp
;
10342 struct sched_param schp
;
10345 return -TARGET_EINVAL
;
10347 ret
= get_errno(sched_getparam(arg1
, &schp
));
10348 if (!is_error(ret
)) {
10349 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
10351 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
10352 unlock_user_struct(target_schp
, arg2
, 1);
10356 case TARGET_NR_sched_setscheduler
:
10358 struct sched_param
*target_schp
;
10359 struct sched_param schp
;
10361 return -TARGET_EINVAL
;
10363 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
10365 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10366 unlock_user_struct(target_schp
, arg3
, 0);
10367 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
10370 case TARGET_NR_sched_getscheduler
:
10371 ret
= get_errno(sched_getscheduler(arg1
));
10373 case TARGET_NR_sched_yield
:
10374 ret
= get_errno(sched_yield());
10376 case TARGET_NR_sched_get_priority_max
:
10377 ret
= get_errno(sched_get_priority_max(arg1
));
10379 case TARGET_NR_sched_get_priority_min
:
10380 ret
= get_errno(sched_get_priority_min(arg1
));
10382 case TARGET_NR_sched_rr_get_interval
:
10384 struct timespec ts
;
10385 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10386 if (!is_error(ret
)) {
10387 ret
= host_to_target_timespec(arg2
, &ts
);
10391 case TARGET_NR_nanosleep
:
10393 struct timespec req
, rem
;
10394 target_to_host_timespec(&req
, arg1
);
10395 ret
= get_errno(safe_nanosleep(&req
, &rem
));
10396 if (is_error(ret
) && arg2
) {
10397 host_to_target_timespec(arg2
, &rem
);
10401 #ifdef TARGET_NR_query_module
10402 case TARGET_NR_query_module
:
10403 goto unimplemented
;
10405 #ifdef TARGET_NR_nfsservctl
10406 case TARGET_NR_nfsservctl
:
10407 goto unimplemented
;
10409 case TARGET_NR_prctl
:
10411 case PR_GET_PDEATHSIG
:
10414 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
10415 if (!is_error(ret
) && arg2
10416 && put_user_ual(deathsig
, arg2
)) {
10424 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
10428 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10429 arg3
, arg4
, arg5
));
10430 unlock_user(name
, arg2
, 16);
10435 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
10439 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10440 arg3
, arg4
, arg5
));
10441 unlock_user(name
, arg2
, 0);
10446 /* Most prctl options have no pointer arguments */
10447 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
10451 #ifdef TARGET_NR_arch_prctl
10452 case TARGET_NR_arch_prctl
:
10453 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10454 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
10457 goto unimplemented
;
10460 #ifdef TARGET_NR_pread64
10461 case TARGET_NR_pread64
:
10462 if (regpairs_aligned(cpu_env
)) {
10466 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
10468 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10469 unlock_user(p
, arg2
, ret
);
10471 case TARGET_NR_pwrite64
:
10472 if (regpairs_aligned(cpu_env
)) {
10476 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
10478 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10479 unlock_user(p
, arg2
, 0);
10482 case TARGET_NR_getcwd
:
10483 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
10485 ret
= get_errno(sys_getcwd1(p
, arg2
));
10486 unlock_user(p
, arg1
, ret
);
10488 case TARGET_NR_capget
:
10489 case TARGET_NR_capset
:
10491 struct target_user_cap_header
*target_header
;
10492 struct target_user_cap_data
*target_data
= NULL
;
10493 struct __user_cap_header_struct header
;
10494 struct __user_cap_data_struct data
[2];
10495 struct __user_cap_data_struct
*dataptr
= NULL
;
10496 int i
, target_datalen
;
10497 int data_items
= 1;
10499 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
10502 header
.version
= tswap32(target_header
->version
);
10503 header
.pid
= tswap32(target_header
->pid
);
10505 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
10506 /* Version 2 and up takes pointer to two user_data structs */
10510 target_datalen
= sizeof(*target_data
) * data_items
;
10513 if (num
== TARGET_NR_capget
) {
10514 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
10516 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
10518 if (!target_data
) {
10519 unlock_user_struct(target_header
, arg1
, 0);
10523 if (num
== TARGET_NR_capset
) {
10524 for (i
= 0; i
< data_items
; i
++) {
10525 data
[i
].effective
= tswap32(target_data
[i
].effective
);
10526 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
10527 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
10534 if (num
== TARGET_NR_capget
) {
10535 ret
= get_errno(capget(&header
, dataptr
));
10537 ret
= get_errno(capset(&header
, dataptr
));
10540 /* The kernel always updates version for both capget and capset */
10541 target_header
->version
= tswap32(header
.version
);
10542 unlock_user_struct(target_header
, arg1
, 1);
10545 if (num
== TARGET_NR_capget
) {
10546 for (i
= 0; i
< data_items
; i
++) {
10547 target_data
[i
].effective
= tswap32(data
[i
].effective
);
10548 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
10549 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
10551 unlock_user(target_data
, arg2
, target_datalen
);
10553 unlock_user(target_data
, arg2
, 0);
10558 case TARGET_NR_sigaltstack
:
10559 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
10562 #ifdef CONFIG_SENDFILE
10563 case TARGET_NR_sendfile
:
10565 off_t
*offp
= NULL
;
10568 ret
= get_user_sal(off
, arg3
);
10569 if (is_error(ret
)) {
10574 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10575 if (!is_error(ret
) && arg3
) {
10576 abi_long ret2
= put_user_sal(off
, arg3
);
10577 if (is_error(ret2
)) {
10583 #ifdef TARGET_NR_sendfile64
10584 case TARGET_NR_sendfile64
:
10586 off_t
*offp
= NULL
;
10589 ret
= get_user_s64(off
, arg3
);
10590 if (is_error(ret
)) {
10595 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10596 if (!is_error(ret
) && arg3
) {
10597 abi_long ret2
= put_user_s64(off
, arg3
);
10598 if (is_error(ret2
)) {
10606 case TARGET_NR_sendfile
:
10607 #ifdef TARGET_NR_sendfile64
10608 case TARGET_NR_sendfile64
:
10610 goto unimplemented
;
10613 #ifdef TARGET_NR_getpmsg
10614 case TARGET_NR_getpmsg
:
10615 goto unimplemented
;
10617 #ifdef TARGET_NR_putpmsg
10618 case TARGET_NR_putpmsg
:
10619 goto unimplemented
;
10621 #ifdef TARGET_NR_vfork
10622 case TARGET_NR_vfork
:
10623 ret
= get_errno(do_fork(cpu_env
,
10624 CLONE_VFORK
| CLONE_VM
| TARGET_SIGCHLD
,
10628 #ifdef TARGET_NR_ugetrlimit
10629 case TARGET_NR_ugetrlimit
:
10631 struct rlimit rlim
;
10632 int resource
= target_to_host_resource(arg1
);
10633 ret
= get_errno(getrlimit(resource
, &rlim
));
10634 if (!is_error(ret
)) {
10635 struct target_rlimit
*target_rlim
;
10636 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
10638 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
10639 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
10640 unlock_user_struct(target_rlim
, arg2
, 1);
10645 #ifdef TARGET_NR_truncate64
10646 case TARGET_NR_truncate64
:
10647 if (!(p
= lock_user_string(arg1
)))
10649 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
10650 unlock_user(p
, arg1
, 0);
10653 #ifdef TARGET_NR_ftruncate64
10654 case TARGET_NR_ftruncate64
:
10655 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
10658 #ifdef TARGET_NR_stat64
10659 case TARGET_NR_stat64
:
10660 if (!(p
= lock_user_string(arg1
)))
10662 ret
= get_errno(stat(path(p
), &st
));
10663 unlock_user(p
, arg1
, 0);
10664 if (!is_error(ret
))
10665 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10668 #ifdef TARGET_NR_lstat64
10669 case TARGET_NR_lstat64
:
10670 if (!(p
= lock_user_string(arg1
)))
10672 ret
= get_errno(lstat(path(p
), &st
));
10673 unlock_user(p
, arg1
, 0);
10674 if (!is_error(ret
))
10675 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10678 #ifdef TARGET_NR_fstat64
10679 case TARGET_NR_fstat64
:
10680 ret
= get_errno(fstat(arg1
, &st
));
10681 if (!is_error(ret
))
10682 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10685 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10686 #ifdef TARGET_NR_fstatat64
10687 case TARGET_NR_fstatat64
:
10689 #ifdef TARGET_NR_newfstatat
10690 case TARGET_NR_newfstatat
:
10692 if (!(p
= lock_user_string(arg2
)))
10694 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
10695 if (!is_error(ret
))
10696 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
10699 #ifdef TARGET_NR_lchown
10700 case TARGET_NR_lchown
:
10701 if (!(p
= lock_user_string(arg1
)))
10703 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10704 unlock_user(p
, arg1
, 0);
10707 #ifdef TARGET_NR_getuid
10708 case TARGET_NR_getuid
:
10709 ret
= get_errno(high2lowuid(getuid()));
10712 #ifdef TARGET_NR_getgid
10713 case TARGET_NR_getgid
:
10714 ret
= get_errno(high2lowgid(getgid()));
10717 #ifdef TARGET_NR_geteuid
10718 case TARGET_NR_geteuid
:
10719 ret
= get_errno(high2lowuid(geteuid()));
10722 #ifdef TARGET_NR_getegid
10723 case TARGET_NR_getegid
:
10724 ret
= get_errno(high2lowgid(getegid()));
10727 case TARGET_NR_setreuid
:
10728 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
10730 case TARGET_NR_setregid
:
10731 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
10733 case TARGET_NR_getgroups
:
10735 int gidsetsize
= arg1
;
10736 target_id
*target_grouplist
;
10740 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10741 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10742 if (gidsetsize
== 0)
10744 if (!is_error(ret
)) {
10745 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
10746 if (!target_grouplist
)
10748 for(i
= 0;i
< ret
; i
++)
10749 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
10750 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
10754 case TARGET_NR_setgroups
:
10756 int gidsetsize
= arg1
;
10757 target_id
*target_grouplist
;
10758 gid_t
*grouplist
= NULL
;
10761 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10762 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
10763 if (!target_grouplist
) {
10764 ret
= -TARGET_EFAULT
;
10767 for (i
= 0; i
< gidsetsize
; i
++) {
10768 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
10770 unlock_user(target_grouplist
, arg2
, 0);
10772 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
10775 case TARGET_NR_fchown
:
10776 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
10778 #if defined(TARGET_NR_fchownat)
10779 case TARGET_NR_fchownat
:
10780 if (!(p
= lock_user_string(arg2
)))
10782 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
10783 low2highgid(arg4
), arg5
));
10784 unlock_user(p
, arg2
, 0);
10787 #ifdef TARGET_NR_setresuid
10788 case TARGET_NR_setresuid
:
10789 ret
= get_errno(sys_setresuid(low2highuid(arg1
),
10791 low2highuid(arg3
)));
10794 #ifdef TARGET_NR_getresuid
10795 case TARGET_NR_getresuid
:
10797 uid_t ruid
, euid
, suid
;
10798 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10799 if (!is_error(ret
)) {
10800 if (put_user_id(high2lowuid(ruid
), arg1
)
10801 || put_user_id(high2lowuid(euid
), arg2
)
10802 || put_user_id(high2lowuid(suid
), arg3
))
10808 #ifdef TARGET_NR_getresgid
10809 case TARGET_NR_setresgid
:
10810 ret
= get_errno(sys_setresgid(low2highgid(arg1
),
10812 low2highgid(arg3
)));
10815 #ifdef TARGET_NR_getresgid
10816 case TARGET_NR_getresgid
:
10818 gid_t rgid
, egid
, sgid
;
10819 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10820 if (!is_error(ret
)) {
10821 if (put_user_id(high2lowgid(rgid
), arg1
)
10822 || put_user_id(high2lowgid(egid
), arg2
)
10823 || put_user_id(high2lowgid(sgid
), arg3
))
10829 #ifdef TARGET_NR_chown
10830 case TARGET_NR_chown
:
10831 if (!(p
= lock_user_string(arg1
)))
10833 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10834 unlock_user(p
, arg1
, 0);
10837 case TARGET_NR_setuid
:
10838 ret
= get_errno(sys_setuid(low2highuid(arg1
)));
10840 case TARGET_NR_setgid
:
10841 ret
= get_errno(sys_setgid(low2highgid(arg1
)));
10843 case TARGET_NR_setfsuid
:
10844 ret
= get_errno(setfsuid(arg1
));
10846 case TARGET_NR_setfsgid
:
10847 ret
= get_errno(setfsgid(arg1
));
10850 #ifdef TARGET_NR_lchown32
10851 case TARGET_NR_lchown32
:
10852 if (!(p
= lock_user_string(arg1
)))
10854 ret
= get_errno(lchown(p
, arg2
, arg3
));
10855 unlock_user(p
, arg1
, 0);
10858 #ifdef TARGET_NR_getuid32
10859 case TARGET_NR_getuid32
:
10860 ret
= get_errno(getuid());
10864 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10865 /* Alpha specific */
10866 case TARGET_NR_getxuid
:
10870 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
10872 ret
= get_errno(getuid());
10875 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10876 /* Alpha specific */
10877 case TARGET_NR_getxgid
:
10881 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
10883 ret
= get_errno(getgid());
10886 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10887 /* Alpha specific */
10888 case TARGET_NR_osf_getsysinfo
:
10889 ret
= -TARGET_EOPNOTSUPP
;
10891 case TARGET_GSI_IEEE_FP_CONTROL
:
10893 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
10895 /* Copied from linux ieee_fpcr_to_swcr. */
10896 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
10897 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
10898 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
10899 | SWCR_TRAP_ENABLE_DZE
10900 | SWCR_TRAP_ENABLE_OVF
);
10901 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
10902 | SWCR_TRAP_ENABLE_INE
);
10903 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
10904 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
10906 if (put_user_u64 (swcr
, arg2
))
10912 /* case GSI_IEEE_STATE_AT_SIGNAL:
10913 -- Not implemented in linux kernel.
10915 -- Retrieves current unaligned access state; not much used.
10916 case GSI_PROC_TYPE:
10917 -- Retrieves implver information; surely not used.
10918 case GSI_GET_HWRPB:
10919 -- Grabs a copy of the HWRPB; surely not used.
10924 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10925 /* Alpha specific */
10926 case TARGET_NR_osf_setsysinfo
:
10927 ret
= -TARGET_EOPNOTSUPP
;
10929 case TARGET_SSI_IEEE_FP_CONTROL
:
10931 uint64_t swcr
, fpcr
, orig_fpcr
;
10933 if (get_user_u64 (swcr
, arg2
)) {
10936 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10937 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
10939 /* Copied from linux ieee_swcr_to_fpcr. */
10940 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
10941 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
10942 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
10943 | SWCR_TRAP_ENABLE_DZE
10944 | SWCR_TRAP_ENABLE_OVF
)) << 48;
10945 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
10946 | SWCR_TRAP_ENABLE_INE
)) << 57;
10947 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
10948 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
10950 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10955 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
10957 uint64_t exc
, fpcr
, orig_fpcr
;
10960 if (get_user_u64(exc
, arg2
)) {
10964 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10966 /* We only add to the exception status here. */
10967 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
10969 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10972 /* Old exceptions are not signaled. */
10973 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
10975 /* If any exceptions set by this call,
10976 and are unmasked, send a signal. */
10978 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
10979 si_code
= TARGET_FPE_FLTRES
;
10981 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
10982 si_code
= TARGET_FPE_FLTUND
;
10984 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
10985 si_code
= TARGET_FPE_FLTOVF
;
10987 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
10988 si_code
= TARGET_FPE_FLTDIV
;
10990 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
10991 si_code
= TARGET_FPE_FLTINV
;
10993 if (si_code
!= 0) {
10994 target_siginfo_t info
;
10995 info
.si_signo
= SIGFPE
;
10997 info
.si_code
= si_code
;
10998 info
._sifields
._sigfault
._addr
10999 = ((CPUArchState
*)cpu_env
)->pc
;
11000 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
11001 QEMU_SI_FAULT
, &info
);
11006 /* case SSI_NVPAIRS:
11007 -- Used with SSIN_UACPROC to enable unaligned accesses.
11008 case SSI_IEEE_STATE_AT_SIGNAL:
11009 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11010 -- Not implemented in linux kernel
11015 #ifdef TARGET_NR_osf_sigprocmask
11016 /* Alpha specific. */
11017 case TARGET_NR_osf_sigprocmask
:
11021 sigset_t set
, oldset
;
11024 case TARGET_SIG_BLOCK
:
11027 case TARGET_SIG_UNBLOCK
:
11030 case TARGET_SIG_SETMASK
:
11034 ret
= -TARGET_EINVAL
;
11038 target_to_host_old_sigset(&set
, &mask
);
11039 ret
= do_sigprocmask(how
, &set
, &oldset
);
11041 host_to_target_old_sigset(&mask
, &oldset
);
11048 #ifdef TARGET_NR_getgid32
11049 case TARGET_NR_getgid32
:
11050 ret
= get_errno(getgid());
11053 #ifdef TARGET_NR_geteuid32
11054 case TARGET_NR_geteuid32
:
11055 ret
= get_errno(geteuid());
11058 #ifdef TARGET_NR_getegid32
11059 case TARGET_NR_getegid32
:
11060 ret
= get_errno(getegid());
11063 #ifdef TARGET_NR_setreuid32
11064 case TARGET_NR_setreuid32
:
11065 ret
= get_errno(setreuid(arg1
, arg2
));
11068 #ifdef TARGET_NR_setregid32
11069 case TARGET_NR_setregid32
:
11070 ret
= get_errno(setregid(arg1
, arg2
));
11073 #ifdef TARGET_NR_getgroups32
11074 case TARGET_NR_getgroups32
:
11076 int gidsetsize
= arg1
;
11077 uint32_t *target_grouplist
;
11081 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11082 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11083 if (gidsetsize
== 0)
11085 if (!is_error(ret
)) {
11086 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
11087 if (!target_grouplist
) {
11088 ret
= -TARGET_EFAULT
;
11091 for(i
= 0;i
< ret
; i
++)
11092 target_grouplist
[i
] = tswap32(grouplist
[i
]);
11093 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
11098 #ifdef TARGET_NR_setgroups32
11099 case TARGET_NR_setgroups32
:
11101 int gidsetsize
= arg1
;
11102 uint32_t *target_grouplist
;
11106 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11107 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
11108 if (!target_grouplist
) {
11109 ret
= -TARGET_EFAULT
;
11112 for(i
= 0;i
< gidsetsize
; i
++)
11113 grouplist
[i
] = tswap32(target_grouplist
[i
]);
11114 unlock_user(target_grouplist
, arg2
, 0);
11115 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
11119 #ifdef TARGET_NR_fchown32
11120 case TARGET_NR_fchown32
:
11121 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
11124 #ifdef TARGET_NR_setresuid32
11125 case TARGET_NR_setresuid32
:
11126 ret
= get_errno(sys_setresuid(arg1
, arg2
, arg3
));
11129 #ifdef TARGET_NR_getresuid32
11130 case TARGET_NR_getresuid32
:
11132 uid_t ruid
, euid
, suid
;
11133 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11134 if (!is_error(ret
)) {
11135 if (put_user_u32(ruid
, arg1
)
11136 || put_user_u32(euid
, arg2
)
11137 || put_user_u32(suid
, arg3
))
11143 #ifdef TARGET_NR_setresgid32
11144 case TARGET_NR_setresgid32
:
11145 ret
= get_errno(sys_setresgid(arg1
, arg2
, arg3
));
11148 #ifdef TARGET_NR_getresgid32
11149 case TARGET_NR_getresgid32
:
11151 gid_t rgid
, egid
, sgid
;
11152 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11153 if (!is_error(ret
)) {
11154 if (put_user_u32(rgid
, arg1
)
11155 || put_user_u32(egid
, arg2
)
11156 || put_user_u32(sgid
, arg3
))
11162 #ifdef TARGET_NR_chown32
11163 case TARGET_NR_chown32
:
11164 if (!(p
= lock_user_string(arg1
)))
11166 ret
= get_errno(chown(p
, arg2
, arg3
));
11167 unlock_user(p
, arg1
, 0);
11170 #ifdef TARGET_NR_setuid32
11171 case TARGET_NR_setuid32
:
11172 ret
= get_errno(sys_setuid(arg1
));
11175 #ifdef TARGET_NR_setgid32
11176 case TARGET_NR_setgid32
:
11177 ret
= get_errno(sys_setgid(arg1
));
11180 #ifdef TARGET_NR_setfsuid32
11181 case TARGET_NR_setfsuid32
:
11182 ret
= get_errno(setfsuid(arg1
));
11185 #ifdef TARGET_NR_setfsgid32
11186 case TARGET_NR_setfsgid32
:
11187 ret
= get_errno(setfsgid(arg1
));
11191 case TARGET_NR_pivot_root
:
11192 goto unimplemented
;
11193 #ifdef TARGET_NR_mincore
11194 case TARGET_NR_mincore
:
11197 ret
= -TARGET_ENOMEM
;
11198 a
= lock_user(VERIFY_READ
, arg1
, arg2
, 0);
11202 ret
= -TARGET_EFAULT
;
11203 p
= lock_user_string(arg3
);
11207 ret
= get_errno(mincore(a
, arg2
, p
));
11208 unlock_user(p
, arg3
, ret
);
11210 unlock_user(a
, arg1
, 0);
11214 #ifdef TARGET_NR_arm_fadvise64_64
11215 case TARGET_NR_arm_fadvise64_64
:
11216 /* arm_fadvise64_64 looks like fadvise64_64 but
11217 * with different argument order: fd, advice, offset, len
11218 * rather than the usual fd, offset, len, advice.
11219 * Note that offset and len are both 64-bit so appear as
11220 * pairs of 32-bit registers.
11222 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
11223 target_offset64(arg5
, arg6
), arg2
);
11224 ret
= -host_to_target_errno(ret
);
11228 #if TARGET_ABI_BITS == 32
11230 #ifdef TARGET_NR_fadvise64_64
11231 case TARGET_NR_fadvise64_64
:
11232 /* 6 args: fd, offset (high, low), len (high, low), advice */
11233 if (regpairs_aligned(cpu_env
)) {
11234 /* offset is in (3,4), len in (5,6) and advice in 7 */
11241 ret
= -host_to_target_errno(posix_fadvise(arg1
,
11242 target_offset64(arg2
, arg3
),
11243 target_offset64(arg4
, arg5
),
11248 #ifdef TARGET_NR_fadvise64
11249 case TARGET_NR_fadvise64
:
11250 /* 5 args: fd, offset (high, low), len, advice */
11251 if (regpairs_aligned(cpu_env
)) {
11252 /* offset is in (3,4), len in 5 and advice in 6 */
11258 ret
= -host_to_target_errno(posix_fadvise(arg1
,
11259 target_offset64(arg2
, arg3
),
11264 #else /* not a 32-bit ABI */
11265 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11266 #ifdef TARGET_NR_fadvise64_64
11267 case TARGET_NR_fadvise64_64
:
11269 #ifdef TARGET_NR_fadvise64
11270 case TARGET_NR_fadvise64
:
11272 #ifdef TARGET_S390X
11274 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
11275 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
11276 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
11277 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
11281 ret
= -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
11284 #endif /* end of 64-bit ABI fadvise handling */
11286 #ifdef TARGET_NR_madvise
11287 case TARGET_NR_madvise
:
11288 /* A straight passthrough may not be safe because qemu sometimes
11289 turns private file-backed mappings into anonymous mappings.
11290 This will break MADV_DONTNEED.
11291 This is a hint, so ignoring and returning success is ok. */
11292 ret
= get_errno(0);
11295 #if TARGET_ABI_BITS == 32
11296 case TARGET_NR_fcntl64
:
11300 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
11301 to_flock64_fn
*copyto
= copy_to_user_flock64
;
11304 if (((CPUARMState
*)cpu_env
)->eabi
) {
11305 copyfrom
= copy_from_user_eabi_flock64
;
11306 copyto
= copy_to_user_eabi_flock64
;
11310 cmd
= target_to_host_fcntl_cmd(arg2
);
11311 if (cmd
== -TARGET_EINVAL
) {
11317 case TARGET_F_GETLK64
:
11318 ret
= copyfrom(&fl
, arg3
);
11322 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
11324 ret
= copyto(arg3
, &fl
);
11328 case TARGET_F_SETLK64
:
11329 case TARGET_F_SETLKW64
:
11330 ret
= copyfrom(&fl
, arg3
);
11334 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11337 ret
= do_fcntl(arg1
, arg2
, arg3
);
11343 #ifdef TARGET_NR_cacheflush
11344 case TARGET_NR_cacheflush
:
11345 /* self-modifying code is handled automatically, so nothing needed */
11349 #ifdef TARGET_NR_security
11350 case TARGET_NR_security
:
11351 goto unimplemented
;
11353 #ifdef TARGET_NR_getpagesize
11354 case TARGET_NR_getpagesize
:
11355 ret
= TARGET_PAGE_SIZE
;
11358 case TARGET_NR_gettid
:
11359 ret
= get_errno(gettid());
11361 #ifdef TARGET_NR_readahead
11362 case TARGET_NR_readahead
:
11363 #if TARGET_ABI_BITS == 32
11364 if (regpairs_aligned(cpu_env
)) {
11369 ret
= get_errno(readahead(arg1
, target_offset64(arg2
, arg3
) , arg4
));
11371 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
11376 #ifdef TARGET_NR_setxattr
11377 case TARGET_NR_listxattr
:
11378 case TARGET_NR_llistxattr
:
11382 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11384 ret
= -TARGET_EFAULT
;
11388 p
= lock_user_string(arg1
);
11390 if (num
== TARGET_NR_listxattr
) {
11391 ret
= get_errno(listxattr(p
, b
, arg3
));
11393 ret
= get_errno(llistxattr(p
, b
, arg3
));
11396 ret
= -TARGET_EFAULT
;
11398 unlock_user(p
, arg1
, 0);
11399 unlock_user(b
, arg2
, arg3
);
11402 case TARGET_NR_flistxattr
:
11406 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11408 ret
= -TARGET_EFAULT
;
11412 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
11413 unlock_user(b
, arg2
, arg3
);
11416 case TARGET_NR_setxattr
:
11417 case TARGET_NR_lsetxattr
:
11419 void *p
, *n
, *v
= 0;
11421 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11423 ret
= -TARGET_EFAULT
;
11427 p
= lock_user_string(arg1
);
11428 n
= lock_user_string(arg2
);
11430 if (num
== TARGET_NR_setxattr
) {
11431 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
11433 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
11436 ret
= -TARGET_EFAULT
;
11438 unlock_user(p
, arg1
, 0);
11439 unlock_user(n
, arg2
, 0);
11440 unlock_user(v
, arg3
, 0);
11443 case TARGET_NR_fsetxattr
:
11447 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11449 ret
= -TARGET_EFAULT
;
11453 n
= lock_user_string(arg2
);
11455 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
11457 ret
= -TARGET_EFAULT
;
11459 unlock_user(n
, arg2
, 0);
11460 unlock_user(v
, arg3
, 0);
11463 case TARGET_NR_getxattr
:
11464 case TARGET_NR_lgetxattr
:
11466 void *p
, *n
, *v
= 0;
11468 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11470 ret
= -TARGET_EFAULT
;
11474 p
= lock_user_string(arg1
);
11475 n
= lock_user_string(arg2
);
11477 if (num
== TARGET_NR_getxattr
) {
11478 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
11480 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
11483 ret
= -TARGET_EFAULT
;
11485 unlock_user(p
, arg1
, 0);
11486 unlock_user(n
, arg2
, 0);
11487 unlock_user(v
, arg3
, arg4
);
11490 case TARGET_NR_fgetxattr
:
11494 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11496 ret
= -TARGET_EFAULT
;
11500 n
= lock_user_string(arg2
);
11502 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
11504 ret
= -TARGET_EFAULT
;
11506 unlock_user(n
, arg2
, 0);
11507 unlock_user(v
, arg3
, arg4
);
11510 case TARGET_NR_removexattr
:
11511 case TARGET_NR_lremovexattr
:
11514 p
= lock_user_string(arg1
);
11515 n
= lock_user_string(arg2
);
11517 if (num
== TARGET_NR_removexattr
) {
11518 ret
= get_errno(removexattr(p
, n
));
11520 ret
= get_errno(lremovexattr(p
, n
));
11523 ret
= -TARGET_EFAULT
;
11525 unlock_user(p
, arg1
, 0);
11526 unlock_user(n
, arg2
, 0);
11529 case TARGET_NR_fremovexattr
:
11532 n
= lock_user_string(arg2
);
11534 ret
= get_errno(fremovexattr(arg1
, n
));
11536 ret
= -TARGET_EFAULT
;
11538 unlock_user(n
, arg2
, 0);
11542 #endif /* CONFIG_ATTR */
11543 #ifdef TARGET_NR_set_thread_area
11544 case TARGET_NR_set_thread_area
:
11545 #if defined(TARGET_MIPS)
11546 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
11549 #elif defined(TARGET_CRIS)
11551 ret
= -TARGET_EINVAL
;
11553 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
11557 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11558 ret
= do_set_thread_area(cpu_env
, arg1
);
11560 #elif defined(TARGET_M68K)
11562 TaskState
*ts
= cpu
->opaque
;
11563 ts
->tp_value
= arg1
;
11568 goto unimplemented_nowarn
;
11571 #ifdef TARGET_NR_get_thread_area
11572 case TARGET_NR_get_thread_area
:
11573 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11574 ret
= do_get_thread_area(cpu_env
, arg1
);
11576 #elif defined(TARGET_M68K)
11578 TaskState
*ts
= cpu
->opaque
;
11579 ret
= ts
->tp_value
;
11583 goto unimplemented_nowarn
;
11586 #ifdef TARGET_NR_getdomainname
11587 case TARGET_NR_getdomainname
:
11588 goto unimplemented_nowarn
;
11591 #ifdef TARGET_NR_clock_gettime
11592 case TARGET_NR_clock_gettime
:
11594 struct timespec ts
;
11595 ret
= get_errno(clock_gettime(arg1
, &ts
));
11596 if (!is_error(ret
)) {
11597 host_to_target_timespec(arg2
, &ts
);
11602 #ifdef TARGET_NR_clock_getres
11603 case TARGET_NR_clock_getres
:
11605 struct timespec ts
;
11606 ret
= get_errno(clock_getres(arg1
, &ts
));
11607 if (!is_error(ret
)) {
11608 host_to_target_timespec(arg2
, &ts
);
11613 #ifdef TARGET_NR_clock_nanosleep
11614 case TARGET_NR_clock_nanosleep
:
11616 struct timespec ts
;
11617 target_to_host_timespec(&ts
, arg3
);
11618 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
11619 &ts
, arg4
? &ts
: NULL
));
11621 host_to_target_timespec(arg4
, &ts
);
11623 #if defined(TARGET_PPC)
11624 /* clock_nanosleep is odd in that it returns positive errno values.
11625 * On PPC, CR0 bit 3 should be set in such a situation. */
11626 if (ret
&& ret
!= -TARGET_ERESTARTSYS
) {
11627 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
11634 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11635 case TARGET_NR_set_tid_address
:
11636 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
11640 case TARGET_NR_tkill
:
11641 ret
= get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
11644 case TARGET_NR_tgkill
:
11645 ret
= get_errno(safe_tgkill((int)arg1
, (int)arg2
,
11646 target_to_host_signal(arg3
)));
11649 #ifdef TARGET_NR_set_robust_list
11650 case TARGET_NR_set_robust_list
:
11651 case TARGET_NR_get_robust_list
:
11652 /* The ABI for supporting robust futexes has userspace pass
11653 * the kernel a pointer to a linked list which is updated by
11654 * userspace after the syscall; the list is walked by the kernel
11655 * when the thread exits. Since the linked list in QEMU guest
11656 * memory isn't a valid linked list for the host and we have
11657 * no way to reliably intercept the thread-death event, we can't
11658 * support these. Silently return ENOSYS so that guest userspace
11659 * falls back to a non-robust futex implementation (which should
11660 * be OK except in the corner case of the guest crashing while
11661 * holding a mutex that is shared with another process via
11664 goto unimplemented_nowarn
;
11667 #if defined(TARGET_NR_utimensat)
11668 case TARGET_NR_utimensat
:
11670 struct timespec
*tsp
, ts
[2];
11674 target_to_host_timespec(ts
, arg3
);
11675 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
11679 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
11681 if (!(p
= lock_user_string(arg2
))) {
11682 ret
= -TARGET_EFAULT
;
11685 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
11686 unlock_user(p
, arg2
, 0);
11691 case TARGET_NR_futex
:
11692 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11694 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11695 case TARGET_NR_inotify_init
:
11696 ret
= get_errno(sys_inotify_init());
11699 #ifdef CONFIG_INOTIFY1
11700 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11701 case TARGET_NR_inotify_init1
:
11702 ret
= get_errno(sys_inotify_init1(target_to_host_bitmask(arg1
,
11703 fcntl_flags_tbl
)));
11707 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11708 case TARGET_NR_inotify_add_watch
:
11709 p
= lock_user_string(arg2
);
11710 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
11711 unlock_user(p
, arg2
, 0);
11714 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11715 case TARGET_NR_inotify_rm_watch
:
11716 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
11720 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11721 case TARGET_NR_mq_open
:
11723 struct mq_attr posix_mq_attr
;
11724 struct mq_attr
*pposix_mq_attr
;
11727 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
11728 pposix_mq_attr
= NULL
;
11730 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
11733 pposix_mq_attr
= &posix_mq_attr
;
11735 p
= lock_user_string(arg1
- 1);
11739 ret
= get_errno(mq_open(p
, host_flags
, arg3
, pposix_mq_attr
));
11740 unlock_user (p
, arg1
, 0);
11744 case TARGET_NR_mq_unlink
:
11745 p
= lock_user_string(arg1
- 1);
11747 ret
= -TARGET_EFAULT
;
11750 ret
= get_errno(mq_unlink(p
));
11751 unlock_user (p
, arg1
, 0);
11754 case TARGET_NR_mq_timedsend
:
11756 struct timespec ts
;
11758 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11760 target_to_host_timespec(&ts
, arg5
);
11761 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
11762 host_to_target_timespec(arg5
, &ts
);
11764 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
11766 unlock_user (p
, arg2
, arg3
);
11770 case TARGET_NR_mq_timedreceive
:
11772 struct timespec ts
;
11775 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11777 target_to_host_timespec(&ts
, arg5
);
11778 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11780 host_to_target_timespec(arg5
, &ts
);
11782 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11785 unlock_user (p
, arg2
, arg3
);
11787 put_user_u32(prio
, arg4
);
11791 /* Not implemented for now... */
11792 /* case TARGET_NR_mq_notify: */
11795 case TARGET_NR_mq_getsetattr
:
11797 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
11800 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
11801 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
11804 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
11805 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
11812 #ifdef CONFIG_SPLICE
11813 #ifdef TARGET_NR_tee
11814 case TARGET_NR_tee
:
11816 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
11820 #ifdef TARGET_NR_splice
11821 case TARGET_NR_splice
:
11823 loff_t loff_in
, loff_out
;
11824 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
11826 if (get_user_u64(loff_in
, arg2
)) {
11829 ploff_in
= &loff_in
;
11832 if (get_user_u64(loff_out
, arg4
)) {
11835 ploff_out
= &loff_out
;
11837 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
11839 if (put_user_u64(loff_in
, arg2
)) {
11844 if (put_user_u64(loff_out
, arg4
)) {
11851 #ifdef TARGET_NR_vmsplice
11852 case TARGET_NR_vmsplice
:
11854 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
11856 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
11857 unlock_iovec(vec
, arg2
, arg3
, 0);
11859 ret
= -host_to_target_errno(errno
);
11864 #endif /* CONFIG_SPLICE */
11865 #ifdef CONFIG_EVENTFD
11866 #if defined(TARGET_NR_eventfd)
11867 case TARGET_NR_eventfd
:
11868 ret
= get_errno(eventfd(arg1
, 0));
11869 fd_trans_unregister(ret
);
11872 #if defined(TARGET_NR_eventfd2)
11873 case TARGET_NR_eventfd2
:
11875 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
11876 if (arg2
& TARGET_O_NONBLOCK
) {
11877 host_flags
|= O_NONBLOCK
;
11879 if (arg2
& TARGET_O_CLOEXEC
) {
11880 host_flags
|= O_CLOEXEC
;
11882 ret
= get_errno(eventfd(arg1
, host_flags
));
11883 fd_trans_unregister(ret
);
11887 #endif /* CONFIG_EVENTFD */
11888 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11889 case TARGET_NR_fallocate
:
11890 #if TARGET_ABI_BITS == 32
11891 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
11892 target_offset64(arg5
, arg6
)));
11894 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
11898 #if defined(CONFIG_SYNC_FILE_RANGE)
11899 #if defined(TARGET_NR_sync_file_range)
11900 case TARGET_NR_sync_file_range
:
11901 #if TARGET_ABI_BITS == 32
11902 #if defined(TARGET_MIPS)
11903 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11904 target_offset64(arg5
, arg6
), arg7
));
11906 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
11907 target_offset64(arg4
, arg5
), arg6
));
11908 #endif /* !TARGET_MIPS */
11910 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
11914 #if defined(TARGET_NR_sync_file_range2)
11915 case TARGET_NR_sync_file_range2
:
11916 /* This is like sync_file_range but the arguments are reordered */
11917 #if TARGET_ABI_BITS == 32
11918 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11919 target_offset64(arg5
, arg6
), arg2
));
11921 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
11926 #if defined(TARGET_NR_signalfd4)
11927 case TARGET_NR_signalfd4
:
11928 ret
= do_signalfd4(arg1
, arg2
, arg4
);
11931 #if defined(TARGET_NR_signalfd)
11932 case TARGET_NR_signalfd
:
11933 ret
= do_signalfd4(arg1
, arg2
, 0);
11936 #if defined(CONFIG_EPOLL)
11937 #if defined(TARGET_NR_epoll_create)
11938 case TARGET_NR_epoll_create
:
11939 ret
= get_errno(epoll_create(arg1
));
11942 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11943 case TARGET_NR_epoll_create1
:
11944 ret
= get_errno(epoll_create1(arg1
));
11947 #if defined(TARGET_NR_epoll_ctl)
11948 case TARGET_NR_epoll_ctl
:
11950 struct epoll_event ep
;
11951 struct epoll_event
*epp
= 0;
11953 struct target_epoll_event
*target_ep
;
11954 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
11957 ep
.events
= tswap32(target_ep
->events
);
11958 /* The epoll_data_t union is just opaque data to the kernel,
11959 * so we transfer all 64 bits across and need not worry what
11960 * actual data type it is.
11962 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
11963 unlock_user_struct(target_ep
, arg4
, 0);
11966 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
11971 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11972 #if defined(TARGET_NR_epoll_wait)
11973 case TARGET_NR_epoll_wait
:
11975 #if defined(TARGET_NR_epoll_pwait)
11976 case TARGET_NR_epoll_pwait
:
11979 struct target_epoll_event
*target_ep
;
11980 struct epoll_event
*ep
;
11982 int maxevents
= arg3
;
11983 int timeout
= arg4
;
11985 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
11986 ret
= -TARGET_EINVAL
;
11990 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
11991 maxevents
* sizeof(struct target_epoll_event
), 1);
11996 ep
= g_try_new(struct epoll_event
, maxevents
);
11998 unlock_user(target_ep
, arg2
, 0);
11999 ret
= -TARGET_ENOMEM
;
12004 #if defined(TARGET_NR_epoll_pwait)
12005 case TARGET_NR_epoll_pwait
:
12007 target_sigset_t
*target_set
;
12008 sigset_t _set
, *set
= &_set
;
12011 if (arg6
!= sizeof(target_sigset_t
)) {
12012 ret
= -TARGET_EINVAL
;
12016 target_set
= lock_user(VERIFY_READ
, arg5
,
12017 sizeof(target_sigset_t
), 1);
12019 ret
= -TARGET_EFAULT
;
12022 target_to_host_sigset(set
, target_set
);
12023 unlock_user(target_set
, arg5
, 0);
12028 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12029 set
, SIGSET_T_SIZE
));
12033 #if defined(TARGET_NR_epoll_wait)
12034 case TARGET_NR_epoll_wait
:
12035 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12040 ret
= -TARGET_ENOSYS
;
12042 if (!is_error(ret
)) {
12044 for (i
= 0; i
< ret
; i
++) {
12045 target_ep
[i
].events
= tswap32(ep
[i
].events
);
12046 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
12048 unlock_user(target_ep
, arg2
,
12049 ret
* sizeof(struct target_epoll_event
));
12051 unlock_user(target_ep
, arg2
, 0);
12058 #ifdef TARGET_NR_prlimit64
12059 case TARGET_NR_prlimit64
:
12061 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12062 struct target_rlimit64
*target_rnew
, *target_rold
;
12063 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
12064 int resource
= target_to_host_resource(arg2
);
12066 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
12069 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
12070 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
12071 unlock_user_struct(target_rnew
, arg3
, 0);
12075 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
12076 if (!is_error(ret
) && arg4
) {
12077 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
12080 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
12081 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
12082 unlock_user_struct(target_rold
, arg4
, 1);
12087 #ifdef TARGET_NR_gethostname
12088 case TARGET_NR_gethostname
:
12090 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
12092 ret
= get_errno(gethostname(name
, arg2
));
12093 unlock_user(name
, arg1
, arg2
);
12095 ret
= -TARGET_EFAULT
;
12100 #ifdef TARGET_NR_atomic_cmpxchg_32
12101 case TARGET_NR_atomic_cmpxchg_32
:
12103 /* should use start_exclusive from main.c */
12104 abi_ulong mem_value
;
12105 if (get_user_u32(mem_value
, arg6
)) {
12106 target_siginfo_t info
;
12107 info
.si_signo
= SIGSEGV
;
12109 info
.si_code
= TARGET_SEGV_MAPERR
;
12110 info
._sifields
._sigfault
._addr
= arg6
;
12111 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
12112 QEMU_SI_FAULT
, &info
);
12116 if (mem_value
== arg2
)
12117 put_user_u32(arg1
, arg6
);
12122 #ifdef TARGET_NR_atomic_barrier
12123 case TARGET_NR_atomic_barrier
:
12125 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
12131 #ifdef TARGET_NR_timer_create
12132 case TARGET_NR_timer_create
:
12134 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12136 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
12139 int timer_index
= next_free_host_timer();
12141 if (timer_index
< 0) {
12142 ret
= -TARGET_EAGAIN
;
12144 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
12147 phost_sevp
= &host_sevp
;
12148 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
12154 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
12158 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
12167 #ifdef TARGET_NR_timer_settime
12168 case TARGET_NR_timer_settime
:
12170 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12171 * struct itimerspec * old_value */
12172 target_timer_t timerid
= get_timer_id(arg1
);
12176 } else if (arg3
== 0) {
12177 ret
= -TARGET_EINVAL
;
12179 timer_t htimer
= g_posix_timers
[timerid
];
12180 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12182 if (target_to_host_itimerspec(&hspec_new
, arg3
)) {
12186 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12187 if (arg4
&& host_to_target_itimerspec(arg4
, &hspec_old
)) {
12195 #ifdef TARGET_NR_timer_gettime
12196 case TARGET_NR_timer_gettime
:
12198 /* args: timer_t timerid, struct itimerspec *curr_value */
12199 target_timer_t timerid
= get_timer_id(arg1
);
12203 } else if (!arg2
) {
12204 ret
= -TARGET_EFAULT
;
12206 timer_t htimer
= g_posix_timers
[timerid
];
12207 struct itimerspec hspec
;
12208 ret
= get_errno(timer_gettime(htimer
, &hspec
));
12210 if (host_to_target_itimerspec(arg2
, &hspec
)) {
12211 ret
= -TARGET_EFAULT
;
12218 #ifdef TARGET_NR_timer_getoverrun
12219 case TARGET_NR_timer_getoverrun
:
12221 /* args: timer_t timerid */
12222 target_timer_t timerid
= get_timer_id(arg1
);
12227 timer_t htimer
= g_posix_timers
[timerid
];
12228 ret
= get_errno(timer_getoverrun(htimer
));
12230 fd_trans_unregister(ret
);
12235 #ifdef TARGET_NR_timer_delete
12236 case TARGET_NR_timer_delete
:
12238 /* args: timer_t timerid */
12239 target_timer_t timerid
= get_timer_id(arg1
);
12244 timer_t htimer
= g_posix_timers
[timerid
];
12245 ret
= get_errno(timer_delete(htimer
));
12246 g_posix_timers
[timerid
] = 0;
12252 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12253 case TARGET_NR_timerfd_create
:
12254 ret
= get_errno(timerfd_create(arg1
,
12255 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
12259 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12260 case TARGET_NR_timerfd_gettime
:
12262 struct itimerspec its_curr
;
12264 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
12266 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
12273 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12274 case TARGET_NR_timerfd_settime
:
12276 struct itimerspec its_new
, its_old
, *p_new
;
12279 if (target_to_host_itimerspec(&its_new
, arg3
)) {
12287 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
12289 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
12296 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12297 case TARGET_NR_ioprio_get
:
12298 ret
= get_errno(ioprio_get(arg1
, arg2
));
12302 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12303 case TARGET_NR_ioprio_set
:
12304 ret
= get_errno(ioprio_set(arg1
, arg2
, arg3
));
12308 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12309 case TARGET_NR_setns
:
12310 ret
= get_errno(setns(arg1
, arg2
));
12313 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12314 case TARGET_NR_unshare
:
12315 ret
= get_errno(unshare(arg1
));
12318 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12319 case TARGET_NR_kcmp
:
12320 ret
= get_errno(kcmp(arg1
, arg2
, arg3
, arg4
, arg5
));
12326 gemu_log("qemu: Unsupported syscall: %d\n", num
);
12327 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
12328 unimplemented_nowarn
:
12330 ret
= -TARGET_ENOSYS
;
12335 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
12338 print_syscall_ret(num
, ret
);
12339 trace_guest_user_syscall_ret(cpu
, num
, ret
);
12342 ret
= -TARGET_EFAULT
;