4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
36 #include <linux/capability.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
43 #include <sys/times.h>
46 #include <sys/statfs.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #include "qemu-common.h"
60 #include <sys/timerfd.h>
66 #include <sys/eventfd.h>
69 #include <sys/epoll.h>
72 #include "qemu/xattr.h"
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
91 #include <linux/mtio.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
107 #include <linux/if_bridge.h>
109 #include <linux/audit.h>
110 #include "linux_loop.h"
116 #define CLONE_IO 0x80000000 /* Clone io context */
119 /* We can't directly call the host clone syscall, because this will
120 * badly confuse libc (breaking mutexes, for example). So we must
121 * divide clone flags into:
122 * * flag combinations that look like pthread_create()
123 * * flag combinations that look like fork()
124 * * flags we can implement within QEMU itself
125 * * flags we can't support and will return an error for
127 /* For thread creation, all these flags must be present; for
128 * fork, none must be present.
/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                             \
    (CLONE_DETACHED | CLONE_IO)

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS                       \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |               \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                     \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |               \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

/* Any flag bit outside these masks makes the corresponding clone
 * request unsupportable; do_fork() rejects it.
 */
#define CLONE_INVALID_FORK_FLAGS                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

#define CLONE_INVALID_THREAD_FLAGS                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS |                   \
       CLONE_OPTIONAL_THREAD_FLAGS | CLONE_IGNORED_FLAGS))
158 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
159 * have almost all been allocated. We cannot support any of
160 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
161 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
162 * The checks against the invalid thread masks above will catch these.
163 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
166 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
167 * once. This exercises the codepaths for restart.
169 //#define DEBUG_ERESTARTSYS
171 //#include <linux/msdos_fs.h>
/* vfat directory ioctls; defined locally because including
 * linux/msdos_fs.h conflicts with other headers (see the commented-out
 * include above). Values must match the kernel's definitions.
 */
#define VFAT_IOCTL_READDIR_BOTH  _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
/* Generators for thin host-syscall wrappers: _syscallN(ret, name, ...)
 * emits "static ret name(args...)" that traps straight to the host kernel
 * via syscall(2), bypassing any libc wrapper.
 */
#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
/* Alias the host syscall numbers under sys_* names so the _syscallN
 * macros above can generate wrappers named sys_foo (avoiding clashes
 * with libc's own foo()) while still pasting a valid __NR_ token.
 */
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
243 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
244 #define __NR__llseek __NR_lseek
247 /* Newer kernel ports have llseek() instead of _llseek() */
248 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
249 #define TARGET_NR__llseek TARGET_NR_llseek
253 _syscall0(int, gettid
)
255 /* This is a replacement for the host gettid() and must return a host
257 static int gettid(void) {
262 /* For the 64-bit guest on 32-bit host case we must emulate
263 * getdents using getdents64, because otherwise the host
264 * might hand us back more dirent records than we can fit
265 * into the guest buffer after structure format conversion.
266 * Otherwise we emulate getdents with getdents if the host has it.
268 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
269 #define EMULATE_GETDENTS_WITH_GETDENTS
272 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
273 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
275 #if (defined(TARGET_NR_getdents) && \
276 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
277 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
278 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
280 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
281 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
282 loff_t
*, res
, uint
, wh
);
284 _syscall3(int, sys_rt_sigqueueinfo
, pid_t
, pid
, int, sig
, siginfo_t
*, uinfo
)
285 _syscall4(int, sys_rt_tgsigqueueinfo
, pid_t
, pid
, pid_t
, tid
, int, sig
,
287 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
288 #ifdef __NR_exit_group
289 _syscall1(int,exit_group
,int,error_code
)
291 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
292 _syscall1(int,set_tid_address
,int *,tidptr
)
294 #if defined(TARGET_NR_futex) && defined(__NR_futex)
295 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
296 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
298 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
299 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
300 unsigned long *, user_mask_ptr
);
301 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
302 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
303 unsigned long *, user_mask_ptr
);
304 #define __NR_sys_getcpu __NR_getcpu
305 _syscall3(int, sys_getcpu
, unsigned *, cpu
, unsigned *, node
, void *, tcache
);
306 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
308 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
309 struct __user_cap_data_struct
*, data
);
310 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
311 struct __user_cap_data_struct
*, data
);
312 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
313 _syscall2(int, ioprio_get
, int, which
, int, who
)
315 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
316 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
318 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
319 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
322 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
323 _syscall5(int, kcmp
, pid_t
, pid1
, pid_t
, pid2
, int, type
,
324 unsigned long, idx1
, unsigned long, idx2
)
327 static bitmask_transtbl fcntl_flags_tbl
[] = {
328 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
329 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
330 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
331 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
332 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
333 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
334 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
335 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
336 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
337 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
338 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
339 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
340 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
341 #if defined(O_DIRECT)
342 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
344 #if defined(O_NOATIME)
345 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
347 #if defined(O_CLOEXEC)
348 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
351 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
353 #if defined(O_TMPFILE)
354 { TARGET_O_TMPFILE
, TARGET_O_TMPFILE
, O_TMPFILE
, O_TMPFILE
},
356 /* Don't terminate the list prematurely on 64-bit host+guest. */
357 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
358 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
365 QEMU_IFLA_BR_FORWARD_DELAY
,
366 QEMU_IFLA_BR_HELLO_TIME
,
367 QEMU_IFLA_BR_MAX_AGE
,
368 QEMU_IFLA_BR_AGEING_TIME
,
369 QEMU_IFLA_BR_STP_STATE
,
370 QEMU_IFLA_BR_PRIORITY
,
371 QEMU_IFLA_BR_VLAN_FILTERING
,
372 QEMU_IFLA_BR_VLAN_PROTOCOL
,
373 QEMU_IFLA_BR_GROUP_FWD_MASK
,
374 QEMU_IFLA_BR_ROOT_ID
,
375 QEMU_IFLA_BR_BRIDGE_ID
,
376 QEMU_IFLA_BR_ROOT_PORT
,
377 QEMU_IFLA_BR_ROOT_PATH_COST
,
378 QEMU_IFLA_BR_TOPOLOGY_CHANGE
,
379 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED
,
380 QEMU_IFLA_BR_HELLO_TIMER
,
381 QEMU_IFLA_BR_TCN_TIMER
,
382 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER
,
383 QEMU_IFLA_BR_GC_TIMER
,
384 QEMU_IFLA_BR_GROUP_ADDR
,
385 QEMU_IFLA_BR_FDB_FLUSH
,
386 QEMU_IFLA_BR_MCAST_ROUTER
,
387 QEMU_IFLA_BR_MCAST_SNOOPING
,
388 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR
,
389 QEMU_IFLA_BR_MCAST_QUERIER
,
390 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY
,
391 QEMU_IFLA_BR_MCAST_HASH_MAX
,
392 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT
,
393 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT
,
394 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL
,
395 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL
,
396 QEMU_IFLA_BR_MCAST_QUERIER_INTVL
,
397 QEMU_IFLA_BR_MCAST_QUERY_INTVL
,
398 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
,
399 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL
,
400 QEMU_IFLA_BR_NF_CALL_IPTABLES
,
401 QEMU_IFLA_BR_NF_CALL_IP6TABLES
,
402 QEMU_IFLA_BR_NF_CALL_ARPTABLES
,
403 QEMU_IFLA_BR_VLAN_DEFAULT_PVID
,
405 QEMU_IFLA_BR_VLAN_STATS_ENABLED
,
406 QEMU_IFLA_BR_MCAST_STATS_ENABLED
,
407 QEMU_IFLA_BR_MCAST_IGMP_VERSION
,
408 QEMU_IFLA_BR_MCAST_MLD_VERSION
,
432 QEMU_IFLA_NET_NS_PID
,
435 QEMU_IFLA_VFINFO_LIST
,
443 QEMU_IFLA_PROMISCUITY
,
444 QEMU_IFLA_NUM_TX_QUEUES
,
445 QEMU_IFLA_NUM_RX_QUEUES
,
447 QEMU_IFLA_PHYS_PORT_ID
,
448 QEMU_IFLA_CARRIER_CHANGES
,
449 QEMU_IFLA_PHYS_SWITCH_ID
,
450 QEMU_IFLA_LINK_NETNSID
,
451 QEMU_IFLA_PHYS_PORT_NAME
,
452 QEMU_IFLA_PROTO_DOWN
,
453 QEMU_IFLA_GSO_MAX_SEGS
,
454 QEMU_IFLA_GSO_MAX_SIZE
,
458 QEMU_IFLA_NEW_NETNSID
,
459 QEMU_IFLA_IF_NETNSID
,
460 QEMU_IFLA_CARRIER_UP_COUNT
,
461 QEMU_IFLA_CARRIER_DOWN_COUNT
,
462 QEMU_IFLA_NEW_IFINDEX
,
467 QEMU_IFLA_BRPORT_UNSPEC
,
468 QEMU_IFLA_BRPORT_STATE
,
469 QEMU_IFLA_BRPORT_PRIORITY
,
470 QEMU_IFLA_BRPORT_COST
,
471 QEMU_IFLA_BRPORT_MODE
,
472 QEMU_IFLA_BRPORT_GUARD
,
473 QEMU_IFLA_BRPORT_PROTECT
,
474 QEMU_IFLA_BRPORT_FAST_LEAVE
,
475 QEMU_IFLA_BRPORT_LEARNING
,
476 QEMU_IFLA_BRPORT_UNICAST_FLOOD
,
477 QEMU_IFLA_BRPORT_PROXYARP
,
478 QEMU_IFLA_BRPORT_LEARNING_SYNC
,
479 QEMU_IFLA_BRPORT_PROXYARP_WIFI
,
480 QEMU_IFLA_BRPORT_ROOT_ID
,
481 QEMU_IFLA_BRPORT_BRIDGE_ID
,
482 QEMU_IFLA_BRPORT_DESIGNATED_PORT
,
483 QEMU_IFLA_BRPORT_DESIGNATED_COST
,
486 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
,
487 QEMU_IFLA_BRPORT_CONFIG_PENDING
,
488 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER
,
489 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER
,
490 QEMU_IFLA_BRPORT_HOLD_TIMER
,
491 QEMU_IFLA_BRPORT_FLUSH
,
492 QEMU_IFLA_BRPORT_MULTICAST_ROUTER
,
493 QEMU_IFLA_BRPORT_PAD
,
494 QEMU_IFLA_BRPORT_MCAST_FLOOD
,
495 QEMU_IFLA_BRPORT_MCAST_TO_UCAST
,
496 QEMU_IFLA_BRPORT_VLAN_TUNNEL
,
497 QEMU_IFLA_BRPORT_BCAST_FLOOD
,
498 QEMU_IFLA_BRPORT_GROUP_FWD_MASK
,
499 QEMU_IFLA_BRPORT_NEIGH_SUPPRESS
,
500 QEMU___IFLA_BRPORT_MAX
504 QEMU_IFLA_TUN_UNSPEC
,
509 QEMU_IFLA_TUN_VNET_HDR
,
510 QEMU_IFLA_TUN_PERSIST
,
511 QEMU_IFLA_TUN_MULTI_QUEUE
,
512 QEMU_IFLA_TUN_NUM_QUEUES
,
513 QEMU_IFLA_TUN_NUM_DISABLED_QUEUES
,
518 QEMU_IFLA_INFO_UNSPEC
,
521 QEMU_IFLA_INFO_XSTATS
,
522 QEMU_IFLA_INFO_SLAVE_KIND
,
523 QEMU_IFLA_INFO_SLAVE_DATA
,
524 QEMU___IFLA_INFO_MAX
,
528 QEMU_IFLA_INET_UNSPEC
,
530 QEMU___IFLA_INET_MAX
,
534 QEMU_IFLA_INET6_UNSPEC
,
535 QEMU_IFLA_INET6_FLAGS
,
536 QEMU_IFLA_INET6_CONF
,
537 QEMU_IFLA_INET6_STATS
,
538 QEMU_IFLA_INET6_MCAST
,
539 QEMU_IFLA_INET6_CACHEINFO
,
540 QEMU_IFLA_INET6_ICMP6STATS
,
541 QEMU_IFLA_INET6_TOKEN
,
542 QEMU_IFLA_INET6_ADDR_GEN_MODE
,
543 QEMU___IFLA_INET6_MAX
547 QEMU_IFLA_XDP_UNSPEC
,
549 QEMU_IFLA_XDP_ATTACHED
,
551 QEMU_IFLA_XDP_PROG_ID
,
566 QEMU_RTA_PROTOINFO
, /* no longer used */
569 QEMU_RTA_SESSION
, /* no longer used */
570 QEMU_RTA_MP_ALGO
, /* no longer used */
582 QEMU_RTA_TTL_PROPAGATE
,
589 typedef abi_long (*TargetFdDataFunc
)(void *, size_t);
590 typedef abi_long (*TargetFdAddrFunc
)(void *, abi_ulong
, socklen_t
);
591 typedef struct TargetFdTrans
{
592 TargetFdDataFunc host_to_target_data
;
593 TargetFdDataFunc target_to_host_data
;
594 TargetFdAddrFunc target_to_host_addr
;
597 static TargetFdTrans
**target_fd_trans
;
599 static unsigned int target_fd_max
;
601 static TargetFdDataFunc
fd_trans_target_to_host_data(int fd
)
603 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
604 return target_fd_trans
[fd
]->target_to_host_data
;
609 static TargetFdDataFunc
fd_trans_host_to_target_data(int fd
)
611 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
612 return target_fd_trans
[fd
]->host_to_target_data
;
617 static TargetFdAddrFunc
fd_trans_target_to_host_addr(int fd
)
619 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
620 return target_fd_trans
[fd
]->target_to_host_addr
;
625 static void fd_trans_register(int fd
, TargetFdTrans
*trans
)
629 if (fd
>= target_fd_max
) {
630 oldmax
= target_fd_max
;
631 target_fd_max
= ((fd
>> 6) + 1) << 6; /* by slice of 64 entries */
632 target_fd_trans
= g_renew(TargetFdTrans
*,
633 target_fd_trans
, target_fd_max
);
634 memset((void *)(target_fd_trans
+ oldmax
), 0,
635 (target_fd_max
- oldmax
) * sizeof(TargetFdTrans
*));
637 target_fd_trans
[fd
] = trans
;
640 static void fd_trans_unregister(int fd
)
642 if (fd
>= 0 && fd
< target_fd_max
) {
643 target_fd_trans
[fd
] = NULL
;
647 static void fd_trans_dup(int oldfd
, int newfd
)
649 fd_trans_unregister(newfd
);
650 if (oldfd
< target_fd_max
&& target_fd_trans
[oldfd
]) {
651 fd_trans_register(newfd
, target_fd_trans
[oldfd
]);
/* getcwd() wrapper with raw-syscall semantics: on success return the
 * number of bytes stored in buf including the trailing NUL (what the
 * kernel getcwd syscall reports); on failure return -1 with errno set.
 */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return -1;
    }
    return strlen(buf) + 1;
}
664 #ifdef TARGET_NR_utimensat
665 #if defined(__NR_utimensat)
666 #define __NR_sys_utimensat __NR_utimensat
667 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
668 const struct timespec
*,tsp
,int,flags
)
670 static int sys_utimensat(int dirfd
, const char *pathname
,
671 const struct timespec times
[2], int flags
)
677 #endif /* TARGET_NR_utimensat */
679 #ifdef TARGET_NR_renameat2
680 #if defined(__NR_renameat2)
681 #define __NR_sys_renameat2 __NR_renameat2
682 _syscall5(int, sys_renameat2
, int, oldfd
, const char *, old
, int, newfd
,
683 const char *, new, unsigned int, flags
)
685 static int sys_renameat2(int oldfd
, const char *old
,
686 int newfd
, const char *new, int flags
)
689 return renameat(oldfd
, old
, newfd
, new);
695 #endif /* TARGET_NR_renameat2 */
697 #ifdef CONFIG_INOTIFY
698 #include <sys/inotify.h>
700 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
701 static int sys_inotify_init(void)
703 return (inotify_init());
706 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
707 static int sys_inotify_add_watch(int fd
,const char *pathname
, int32_t mask
)
709 return (inotify_add_watch(fd
, pathname
, mask
));
712 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
713 static int sys_inotify_rm_watch(int fd
, int32_t wd
)
715 return (inotify_rm_watch(fd
, wd
));
718 #ifdef CONFIG_INOTIFY1
719 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
720 static int sys_inotify_init1(int flags
)
722 return (inotify_init1(flags
));
727 /* Userspace can usually survive runtime without inotify */
728 #undef TARGET_NR_inotify_init
729 #undef TARGET_NR_inotify_init1
730 #undef TARGET_NR_inotify_add_watch
731 #undef TARGET_NR_inotify_rm_watch
732 #endif /* CONFIG_INOTIFY */
734 #if defined(TARGET_NR_prlimit64)
735 #ifndef __NR_prlimit64
736 # define __NR_prlimit64 -1
738 #define __NR_sys_prlimit64 __NR_prlimit64
739 /* The glibc rlimit structure may not be that used by the underlying syscall */
740 struct host_rlimit64
{
744 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
745 const struct host_rlimit64
*, new_limit
,
746 struct host_rlimit64
*, old_limit
)
750 #if defined(TARGET_NR_timer_create)
751 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
752 static timer_t g_posix_timers
[32] = { 0, } ;
754 static inline int next_free_host_timer(void)
757 /* FIXME: Does finding the next free slot require a lock? */
758 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
759 if (g_posix_timers
[k
] == 0) {
760 g_posix_timers
[k
] = (timer_t
) 1;
768 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
770 static inline int regpairs_aligned(void *cpu_env
, int num
)
772 return ((((CPUARMState
*)cpu_env
)->eabi
) == 1) ;
774 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
775 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
776 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
777 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
778 * of registers which translates to the same as ARM/MIPS, because we start with
780 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
781 #elif defined(TARGET_SH4)
782 /* SH4 doesn't align register pairs, except for p{read,write}64 */
783 static inline int regpairs_aligned(void *cpu_env
, int num
)
786 case TARGET_NR_pread64
:
787 case TARGET_NR_pwrite64
:
794 #elif defined(TARGET_XTENSA)
795 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
797 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 0; }
800 #define ERRNO_TABLE_SIZE 1200
802 /* target_to_host_errno_table[] is initialized from
803 * host_to_target_errno_table[] in syscall_init(). */
804 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
808 * This list is the union of errno values overridden in asm-<arch>/errno.h
809 * minus the errnos that are not actually generic to all archs.
811 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
812 [EAGAIN
] = TARGET_EAGAIN
,
813 [EIDRM
] = TARGET_EIDRM
,
814 [ECHRNG
] = TARGET_ECHRNG
,
815 [EL2NSYNC
] = TARGET_EL2NSYNC
,
816 [EL3HLT
] = TARGET_EL3HLT
,
817 [EL3RST
] = TARGET_EL3RST
,
818 [ELNRNG
] = TARGET_ELNRNG
,
819 [EUNATCH
] = TARGET_EUNATCH
,
820 [ENOCSI
] = TARGET_ENOCSI
,
821 [EL2HLT
] = TARGET_EL2HLT
,
822 [EDEADLK
] = TARGET_EDEADLK
,
823 [ENOLCK
] = TARGET_ENOLCK
,
824 [EBADE
] = TARGET_EBADE
,
825 [EBADR
] = TARGET_EBADR
,
826 [EXFULL
] = TARGET_EXFULL
,
827 [ENOANO
] = TARGET_ENOANO
,
828 [EBADRQC
] = TARGET_EBADRQC
,
829 [EBADSLT
] = TARGET_EBADSLT
,
830 [EBFONT
] = TARGET_EBFONT
,
831 [ENOSTR
] = TARGET_ENOSTR
,
832 [ENODATA
] = TARGET_ENODATA
,
833 [ETIME
] = TARGET_ETIME
,
834 [ENOSR
] = TARGET_ENOSR
,
835 [ENONET
] = TARGET_ENONET
,
836 [ENOPKG
] = TARGET_ENOPKG
,
837 [EREMOTE
] = TARGET_EREMOTE
,
838 [ENOLINK
] = TARGET_ENOLINK
,
839 [EADV
] = TARGET_EADV
,
840 [ESRMNT
] = TARGET_ESRMNT
,
841 [ECOMM
] = TARGET_ECOMM
,
842 [EPROTO
] = TARGET_EPROTO
,
843 [EDOTDOT
] = TARGET_EDOTDOT
,
844 [EMULTIHOP
] = TARGET_EMULTIHOP
,
845 [EBADMSG
] = TARGET_EBADMSG
,
846 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
847 [EOVERFLOW
] = TARGET_EOVERFLOW
,
848 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
849 [EBADFD
] = TARGET_EBADFD
,
850 [EREMCHG
] = TARGET_EREMCHG
,
851 [ELIBACC
] = TARGET_ELIBACC
,
852 [ELIBBAD
] = TARGET_ELIBBAD
,
853 [ELIBSCN
] = TARGET_ELIBSCN
,
854 [ELIBMAX
] = TARGET_ELIBMAX
,
855 [ELIBEXEC
] = TARGET_ELIBEXEC
,
856 [EILSEQ
] = TARGET_EILSEQ
,
857 [ENOSYS
] = TARGET_ENOSYS
,
858 [ELOOP
] = TARGET_ELOOP
,
859 [ERESTART
] = TARGET_ERESTART
,
860 [ESTRPIPE
] = TARGET_ESTRPIPE
,
861 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
862 [EUSERS
] = TARGET_EUSERS
,
863 [ENOTSOCK
] = TARGET_ENOTSOCK
,
864 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
865 [EMSGSIZE
] = TARGET_EMSGSIZE
,
866 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
867 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
868 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
869 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
870 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
871 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
872 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
873 [EADDRINUSE
] = TARGET_EADDRINUSE
,
874 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
875 [ENETDOWN
] = TARGET_ENETDOWN
,
876 [ENETUNREACH
] = TARGET_ENETUNREACH
,
877 [ENETRESET
] = TARGET_ENETRESET
,
878 [ECONNABORTED
] = TARGET_ECONNABORTED
,
879 [ECONNRESET
] = TARGET_ECONNRESET
,
880 [ENOBUFS
] = TARGET_ENOBUFS
,
881 [EISCONN
] = TARGET_EISCONN
,
882 [ENOTCONN
] = TARGET_ENOTCONN
,
883 [EUCLEAN
] = TARGET_EUCLEAN
,
884 [ENOTNAM
] = TARGET_ENOTNAM
,
885 [ENAVAIL
] = TARGET_ENAVAIL
,
886 [EISNAM
] = TARGET_EISNAM
,
887 [EREMOTEIO
] = TARGET_EREMOTEIO
,
888 [EDQUOT
] = TARGET_EDQUOT
,
889 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
890 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
891 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
892 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
893 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
894 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
895 [EALREADY
] = TARGET_EALREADY
,
896 [EINPROGRESS
] = TARGET_EINPROGRESS
,
897 [ESTALE
] = TARGET_ESTALE
,
898 [ECANCELED
] = TARGET_ECANCELED
,
899 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
900 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
902 [ENOKEY
] = TARGET_ENOKEY
,
905 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
908 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
911 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
914 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
916 #ifdef ENOTRECOVERABLE
917 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
920 [ENOMSG
] = TARGET_ENOMSG
,
923 [ERFKILL
] = TARGET_ERFKILL
,
926 [EHWPOISON
] = TARGET_EHWPOISON
,
930 static inline int host_to_target_errno(int err
)
932 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
933 host_to_target_errno_table
[err
]) {
934 return host_to_target_errno_table
[err
];
939 static inline int target_to_host_errno(int err
)
941 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
942 target_to_host_errno_table
[err
]) {
943 return target_to_host_errno_table
[err
];
948 static inline abi_long
get_errno(abi_long ret
)
951 return -host_to_target_errno(errno
);
956 const char *target_strerror(int err
)
958 if (err
== TARGET_ERESTARTSYS
) {
959 return "To be restarted";
961 if (err
== TARGET_QEMU_ESIGRETURN
) {
962 return "Successful exit from sigreturn";
965 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
968 return strerror(target_to_host_errno(err
));
/* Generators for safe_foo() wrappers: identical in shape to _syscallN
 * but routed through safe_syscall() so a guest signal arriving during a
 * blocking call restarts it correctly instead of confusing libc state.
 */
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
                      type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
                      type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
                        type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
                      type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
                        type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
1018 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
1019 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
1020 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
1021 int, flags
, mode_t
, mode
)
1022 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
1023 struct rusage
*, rusage
)
1024 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
1025 int, options
, struct rusage
*, rusage
)
1026 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
1027 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
1028 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
1029 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
1030 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
1032 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
1033 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
1035 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
1036 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
1037 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
1038 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
1039 safe_syscall2(int, tkill
, int, tid
, int, sig
)
1040 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
1041 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
1042 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
1043 safe_syscall5(ssize_t
, preadv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
1044 unsigned long, pos_l
, unsigned long, pos_h
)
1045 safe_syscall5(ssize_t
, pwritev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
1046 unsigned long, pos_l
, unsigned long, pos_h
)
1047 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
1049 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
1050 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
1051 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
1052 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
1053 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
1054 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
1055 safe_syscall2(int, flock
, int, fd
, int, operation
)
1056 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
1057 const struct timespec
*, uts
, size_t, sigsetsize
)
1058 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
1060 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
1061 struct timespec
*, rem
)
1062 #ifdef TARGET_NR_clock_nanosleep
1063 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
1064 const struct timespec
*, req
, struct timespec
*, rem
)
1067 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
1069 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
1070 long, msgtype
, int, flags
)
1071 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
1072 unsigned, nsops
, const struct timespec
*, timeout
)
1074 /* This host kernel architecture uses a single ipc syscall; fake up
1075 * wrappers for the sub-operations to hide this implementation detail.
1076 * Annoyingly we can't include linux/ipc.h to get the constant definitions
1077 * for the call parameter because some structs in there conflict with the
1078 * sys/ipc.h ones. So we just define them here, and rely on them being
1079 * the same for all host architectures.
1081 #define Q_SEMTIMEDOP 4
1084 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
1086 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
1087 void *, ptr
, long, fifth
)
1088 static int safe_msgsnd(int msgid
, const void *msgp
, size_t sz
, int flags
)
1090 return safe_ipc(Q_IPCCALL(0, Q_MSGSND
), msgid
, sz
, flags
, (void *)msgp
, 0);
1092 static int safe_msgrcv(int msgid
, void *msgp
, size_t sz
, long type
, int flags
)
1094 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV
), msgid
, sz
, flags
, msgp
, type
);
1096 static int safe_semtimedop(int semid
, struct sembuf
*tsops
, unsigned nsops
,
1097 const struct timespec
*timeout
)
1099 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP
), semid
, nsops
, 0, tsops
,
1103 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1104 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
1105 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
1106 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
1107 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
1109 /* We do ioctl like this rather than via safe_syscall3 to preserve the
1110 * "third argument might be integer or pointer or not present" behaviour of
1111 * the libc function.
1113 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
1114 /* Similarly for fcntl. Note that callers must always:
1115 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
1116 * use the flock64 struct rather than unsuffixed flock
1117 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
1120 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
1122 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
1125 static inline int host_to_target_sock_type(int host_type
)
1129 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
1131 target_type
= TARGET_SOCK_DGRAM
;
1134 target_type
= TARGET_SOCK_STREAM
;
1137 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
1141 #if defined(SOCK_CLOEXEC)
1142 if (host_type
& SOCK_CLOEXEC
) {
1143 target_type
|= TARGET_SOCK_CLOEXEC
;
1147 #if defined(SOCK_NONBLOCK)
1148 if (host_type
& SOCK_NONBLOCK
) {
1149 target_type
|= TARGET_SOCK_NONBLOCK
;
1156 static abi_ulong target_brk
;
1157 static abi_ulong target_original_brk
;
1158 static abi_ulong brk_page
;
1160 void target_set_brk(abi_ulong new_brk
)
1162 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
1163 brk_page
= HOST_PAGE_ALIGN(target_brk
);
1166 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
1167 #define DEBUGF_BRK(message, args...)
1169 /* do_brk() must return target values and target errnos. */
1170 abi_long
do_brk(abi_ulong new_brk
)
1172 abi_long mapped_addr
;
1173 abi_ulong new_alloc_size
;
1175 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
1178 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
1181 if (new_brk
< target_original_brk
) {
1182 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
1187 /* If the new brk is less than the highest page reserved to the
1188 * target heap allocation, set it and we're almost done... */
1189 if (new_brk
<= brk_page
) {
1190 /* Heap contents are initialized to zero, as for anonymous
1192 if (new_brk
> target_brk
) {
1193 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
1195 target_brk
= new_brk
;
1196 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
1200 /* We need to allocate more memory after the brk... Note that
1201 * we don't use MAP_FIXED because that will map over the top of
1202 * any existing mapping (like the one with the host libc or qemu
1203 * itself); instead we treat "mapped but at wrong address" as
1204 * a failure and unmap again.
1206 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
1207 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
1208 PROT_READ
|PROT_WRITE
,
1209 MAP_ANON
|MAP_PRIVATE
, 0, 0));
1211 if (mapped_addr
== brk_page
) {
1212 /* Heap contents are initialized to zero, as for anonymous
1213 * mapped pages. Technically the new pages are already
1214 * initialized to zero since they *are* anonymous mapped
1215 * pages, however we have to take care with the contents that
1216 * come from the remaining part of the previous page: it may
1217 * contains garbage data due to a previous heap usage (grown
1218 * then shrunken). */
1219 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
1221 target_brk
= new_brk
;
1222 brk_page
= HOST_PAGE_ALIGN(target_brk
);
1223 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
1226 } else if (mapped_addr
!= -1) {
1227 /* Mapped but at wrong address, meaning there wasn't actually
1228 * enough space for this brk.
1230 target_munmap(mapped_addr
, new_alloc_size
);
1232 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
1235 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
1238 #if defined(TARGET_ALPHA)
1239 /* We (partially) emulate OSF/1 on Alpha, which requires we
1240 return a proper errno, not an unchanged brk value. */
1241 return -TARGET_ENOMEM
;
1243 /* For everything else, return the previous break. */
1247 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
1248 abi_ulong target_fds_addr
,
1252 abi_ulong b
, *target_fds
;
1254 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1255 if (!(target_fds
= lock_user(VERIFY_READ
,
1257 sizeof(abi_ulong
) * nw
,
1259 return -TARGET_EFAULT
;
1263 for (i
= 0; i
< nw
; i
++) {
1264 /* grab the abi_ulong */
1265 __get_user(b
, &target_fds
[i
]);
1266 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1267 /* check the bit inside the abi_ulong */
1274 unlock_user(target_fds
, target_fds_addr
, 0);
1279 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
1280 abi_ulong target_fds_addr
,
1283 if (target_fds_addr
) {
1284 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
1285 return -TARGET_EFAULT
;
1293 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
1299 abi_ulong
*target_fds
;
1301 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1302 if (!(target_fds
= lock_user(VERIFY_WRITE
,
1304 sizeof(abi_ulong
) * nw
,
1306 return -TARGET_EFAULT
;
1309 for (i
= 0; i
< nw
; i
++) {
1311 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1312 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
1315 __put_user(v
, &target_fds
[i
]);
1318 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
1323 #if defined(__alpha__)
1324 #define HOST_HZ 1024
1329 static inline abi_long
host_to_target_clock_t(long ticks
)
1331 #if HOST_HZ == TARGET_HZ
1334 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1338 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1339 const struct rusage
*rusage
)
1341 struct target_rusage
*target_rusage
;
1343 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1344 return -TARGET_EFAULT
;
1345 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1346 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1347 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1348 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1349 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1350 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1351 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1352 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1353 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1354 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1355 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1356 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1357 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1358 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1359 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1360 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1361 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1362 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1363 unlock_user_struct(target_rusage
, target_addr
, 1);
1368 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1370 abi_ulong target_rlim_swap
;
1373 target_rlim_swap
= tswapal(target_rlim
);
1374 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1375 return RLIM_INFINITY
;
1377 result
= target_rlim_swap
;
1378 if (target_rlim_swap
!= (rlim_t
)result
)
1379 return RLIM_INFINITY
;
1384 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1386 abi_ulong target_rlim_swap
;
1389 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1390 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1392 target_rlim_swap
= rlim
;
1393 result
= tswapal(target_rlim_swap
);
1398 static inline int target_to_host_resource(int code
)
1401 case TARGET_RLIMIT_AS
:
1403 case TARGET_RLIMIT_CORE
:
1405 case TARGET_RLIMIT_CPU
:
1407 case TARGET_RLIMIT_DATA
:
1409 case TARGET_RLIMIT_FSIZE
:
1410 return RLIMIT_FSIZE
;
1411 case TARGET_RLIMIT_LOCKS
:
1412 return RLIMIT_LOCKS
;
1413 case TARGET_RLIMIT_MEMLOCK
:
1414 return RLIMIT_MEMLOCK
;
1415 case TARGET_RLIMIT_MSGQUEUE
:
1416 return RLIMIT_MSGQUEUE
;
1417 case TARGET_RLIMIT_NICE
:
1419 case TARGET_RLIMIT_NOFILE
:
1420 return RLIMIT_NOFILE
;
1421 case TARGET_RLIMIT_NPROC
:
1422 return RLIMIT_NPROC
;
1423 case TARGET_RLIMIT_RSS
:
1425 case TARGET_RLIMIT_RTPRIO
:
1426 return RLIMIT_RTPRIO
;
1427 case TARGET_RLIMIT_SIGPENDING
:
1428 return RLIMIT_SIGPENDING
;
1429 case TARGET_RLIMIT_STACK
:
1430 return RLIMIT_STACK
;
1436 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1437 abi_ulong target_tv_addr
)
1439 struct target_timeval
*target_tv
;
1441 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
1442 return -TARGET_EFAULT
;
1444 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1445 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1447 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1452 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1453 const struct timeval
*tv
)
1455 struct target_timeval
*target_tv
;
1457 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
1458 return -TARGET_EFAULT
;
1460 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1461 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1463 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1468 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1469 abi_ulong target_tz_addr
)
1471 struct target_timezone
*target_tz
;
1473 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1474 return -TARGET_EFAULT
;
1477 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1478 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1480 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1485 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1488 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1489 abi_ulong target_mq_attr_addr
)
1491 struct target_mq_attr
*target_mq_attr
;
1493 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1494 target_mq_attr_addr
, 1))
1495 return -TARGET_EFAULT
;
1497 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1498 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1499 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1500 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1502 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1507 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1508 const struct mq_attr
*attr
)
1510 struct target_mq_attr
*target_mq_attr
;
1512 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1513 target_mq_attr_addr
, 0))
1514 return -TARGET_EFAULT
;
1516 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1517 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1518 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1519 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1521 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1527 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1528 /* do_select() must return target values and target errnos. */
1529 static abi_long
do_select(int n
,
1530 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1531 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1533 fd_set rfds
, wfds
, efds
;
1534 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1536 struct timespec ts
, *ts_ptr
;
1539 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1543 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1547 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1552 if (target_tv_addr
) {
1553 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1554 return -TARGET_EFAULT
;
1555 ts
.tv_sec
= tv
.tv_sec
;
1556 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1562 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1565 if (!is_error(ret
)) {
1566 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1567 return -TARGET_EFAULT
;
1568 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1569 return -TARGET_EFAULT
;
1570 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1571 return -TARGET_EFAULT
;
1573 if (target_tv_addr
) {
1574 tv
.tv_sec
= ts
.tv_sec
;
1575 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1576 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1577 return -TARGET_EFAULT
;
1585 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1586 static abi_long
do_old_select(abi_ulong arg1
)
1588 struct target_sel_arg_struct
*sel
;
1589 abi_ulong inp
, outp
, exp
, tvp
;
1592 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1)) {
1593 return -TARGET_EFAULT
;
1596 nsel
= tswapal(sel
->n
);
1597 inp
= tswapal(sel
->inp
);
1598 outp
= tswapal(sel
->outp
);
1599 exp
= tswapal(sel
->exp
);
1600 tvp
= tswapal(sel
->tvp
);
1602 unlock_user_struct(sel
, arg1
, 0);
1604 return do_select(nsel
, inp
, outp
, exp
, tvp
);
1609 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1612 return pipe2(host_pipe
, flags
);
1618 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1619 int flags
, int is_pipe2
)
1623 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1626 return get_errno(ret
);
1628 /* Several targets have special calling conventions for the original
1629 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1631 #if defined(TARGET_ALPHA)
1632 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1633 return host_pipe
[0];
1634 #elif defined(TARGET_MIPS)
1635 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1636 return host_pipe
[0];
1637 #elif defined(TARGET_SH4)
1638 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1639 return host_pipe
[0];
1640 #elif defined(TARGET_SPARC)
1641 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1642 return host_pipe
[0];
1646 if (put_user_s32(host_pipe
[0], pipedes
)
1647 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1648 return -TARGET_EFAULT
;
1649 return get_errno(ret
);
1652 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1653 abi_ulong target_addr
,
1656 struct target_ip_mreqn
*target_smreqn
;
1658 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1660 return -TARGET_EFAULT
;
1661 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1662 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1663 if (len
== sizeof(struct target_ip_mreqn
))
1664 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1665 unlock_user(target_smreqn
, target_addr
, 0);
1670 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1671 abi_ulong target_addr
,
1674 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1675 sa_family_t sa_family
;
1676 struct target_sockaddr
*target_saddr
;
1678 if (fd_trans_target_to_host_addr(fd
)) {
1679 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1682 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1684 return -TARGET_EFAULT
;
1686 sa_family
= tswap16(target_saddr
->sa_family
);
1688 /* Oops. The caller might send a incomplete sun_path; sun_path
1689 * must be terminated by \0 (see the manual page), but
1690 * unfortunately it is quite common to specify sockaddr_un
1691 * length as "strlen(x->sun_path)" while it should be
1692 * "strlen(...) + 1". We'll fix that here if needed.
1693 * Linux kernel has a similar feature.
1696 if (sa_family
== AF_UNIX
) {
1697 if (len
< unix_maxlen
&& len
> 0) {
1698 char *cp
= (char*)target_saddr
;
1700 if ( cp
[len
-1] && !cp
[len
] )
1703 if (len
> unix_maxlen
)
1707 memcpy(addr
, target_saddr
, len
);
1708 addr
->sa_family
= sa_family
;
1709 if (sa_family
== AF_NETLINK
) {
1710 struct sockaddr_nl
*nladdr
;
1712 nladdr
= (struct sockaddr_nl
*)addr
;
1713 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1714 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1715 } else if (sa_family
== AF_PACKET
) {
1716 struct target_sockaddr_ll
*lladdr
;
1718 lladdr
= (struct target_sockaddr_ll
*)addr
;
1719 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1720 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1722 unlock_user(target_saddr
, target_addr
, 0);
1727 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1728 struct sockaddr
*addr
,
1731 struct target_sockaddr
*target_saddr
;
1738 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1740 return -TARGET_EFAULT
;
1741 memcpy(target_saddr
, addr
, len
);
1742 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1743 sizeof(target_saddr
->sa_family
)) {
1744 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1746 if (addr
->sa_family
== AF_NETLINK
&& len
>= sizeof(struct sockaddr_nl
)) {
1747 struct sockaddr_nl
*target_nl
= (struct sockaddr_nl
*)target_saddr
;
1748 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1749 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1750 } else if (addr
->sa_family
== AF_PACKET
) {
1751 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1752 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1753 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1754 } else if (addr
->sa_family
== AF_INET6
&&
1755 len
>= sizeof(struct target_sockaddr_in6
)) {
1756 struct target_sockaddr_in6
*target_in6
=
1757 (struct target_sockaddr_in6
*)target_saddr
;
1758 target_in6
->sin6_scope_id
= tswap16(target_in6
->sin6_scope_id
);
1760 unlock_user(target_saddr
, target_addr
, len
);
1765 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1766 struct target_msghdr
*target_msgh
)
1768 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1769 abi_long msg_controllen
;
1770 abi_ulong target_cmsg_addr
;
1771 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1772 socklen_t space
= 0;
1774 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1775 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1777 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1778 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1779 target_cmsg_start
= target_cmsg
;
1781 return -TARGET_EFAULT
;
1783 while (cmsg
&& target_cmsg
) {
1784 void *data
= CMSG_DATA(cmsg
);
1785 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1787 int len
= tswapal(target_cmsg
->cmsg_len
)
1788 - sizeof(struct target_cmsghdr
);
1790 space
+= CMSG_SPACE(len
);
1791 if (space
> msgh
->msg_controllen
) {
1792 space
-= CMSG_SPACE(len
);
1793 /* This is a QEMU bug, since we allocated the payload
1794 * area ourselves (unlike overflow in host-to-target
1795 * conversion, which is just the guest giving us a buffer
1796 * that's too small). It can't happen for the payload types
1797 * we currently support; if it becomes an issue in future
1798 * we would need to improve our allocation strategy to
1799 * something more intelligent than "twice the size of the
1800 * target buffer we're reading from".
1802 gemu_log("Host cmsg overflow\n");
1806 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1807 cmsg
->cmsg_level
= SOL_SOCKET
;
1809 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1811 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1812 cmsg
->cmsg_len
= CMSG_LEN(len
);
1814 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1815 int *fd
= (int *)data
;
1816 int *target_fd
= (int *)target_data
;
1817 int i
, numfds
= len
/ sizeof(int);
1819 for (i
= 0; i
< numfds
; i
++) {
1820 __get_user(fd
[i
], target_fd
+ i
);
1822 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1823 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1824 struct ucred
*cred
= (struct ucred
*)data
;
1825 struct target_ucred
*target_cred
=
1826 (struct target_ucred
*)target_data
;
1828 __get_user(cred
->pid
, &target_cred
->pid
);
1829 __get_user(cred
->uid
, &target_cred
->uid
);
1830 __get_user(cred
->gid
, &target_cred
->gid
);
1832 gemu_log("Unsupported ancillary data: %d/%d\n",
1833 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1834 memcpy(data
, target_data
, len
);
1837 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1838 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1841 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1843 msgh
->msg_controllen
= space
;
1847 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1848 struct msghdr
*msgh
)
1850 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1851 abi_long msg_controllen
;
1852 abi_ulong target_cmsg_addr
;
1853 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1854 socklen_t space
= 0;
1856 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1857 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1859 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1860 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1861 target_cmsg_start
= target_cmsg
;
1863 return -TARGET_EFAULT
;
1865 while (cmsg
&& target_cmsg
) {
1866 void *data
= CMSG_DATA(cmsg
);
1867 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1869 int len
= cmsg
->cmsg_len
- sizeof(struct cmsghdr
);
1870 int tgt_len
, tgt_space
;
1872 /* We never copy a half-header but may copy half-data;
1873 * this is Linux's behaviour in put_cmsg(). Note that
1874 * truncation here is a guest problem (which we report
1875 * to the guest via the CTRUNC bit), unlike truncation
1876 * in target_to_host_cmsg, which is a QEMU bug.
1878 if (msg_controllen
< sizeof(struct target_cmsghdr
)) {
1879 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1883 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1884 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1886 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1888 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1890 /* Payload types which need a different size of payload on
1891 * the target must adjust tgt_len here.
1894 switch (cmsg
->cmsg_level
) {
1896 switch (cmsg
->cmsg_type
) {
1898 tgt_len
= sizeof(struct target_timeval
);
1908 if (msg_controllen
< TARGET_CMSG_LEN(tgt_len
)) {
1909 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1910 tgt_len
= msg_controllen
- sizeof(struct target_cmsghdr
);
1913 /* We must now copy-and-convert len bytes of payload
1914 * into tgt_len bytes of destination space. Bear in mind
1915 * that in both source and destination we may be dealing
1916 * with a truncated value!
1918 switch (cmsg
->cmsg_level
) {
1920 switch (cmsg
->cmsg_type
) {
1923 int *fd
= (int *)data
;
1924 int *target_fd
= (int *)target_data
;
1925 int i
, numfds
= tgt_len
/ sizeof(int);
1927 for (i
= 0; i
< numfds
; i
++) {
1928 __put_user(fd
[i
], target_fd
+ i
);
1934 struct timeval
*tv
= (struct timeval
*)data
;
1935 struct target_timeval
*target_tv
=
1936 (struct target_timeval
*)target_data
;
1938 if (len
!= sizeof(struct timeval
) ||
1939 tgt_len
!= sizeof(struct target_timeval
)) {
1943 /* copy struct timeval to target */
1944 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1945 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1948 case SCM_CREDENTIALS
:
1950 struct ucred
*cred
= (struct ucred
*)data
;
1951 struct target_ucred
*target_cred
=
1952 (struct target_ucred
*)target_data
;
1954 __put_user(cred
->pid
, &target_cred
->pid
);
1955 __put_user(cred
->uid
, &target_cred
->uid
);
1956 __put_user(cred
->gid
, &target_cred
->gid
);
1965 switch (cmsg
->cmsg_type
) {
1968 uint32_t *v
= (uint32_t *)data
;
1969 uint32_t *t_int
= (uint32_t *)target_data
;
1971 if (len
!= sizeof(uint32_t) ||
1972 tgt_len
!= sizeof(uint32_t)) {
1975 __put_user(*v
, t_int
);
1981 struct sock_extended_err ee
;
1982 struct sockaddr_in offender
;
1984 struct errhdr_t
*errh
= (struct errhdr_t
*)data
;
1985 struct errhdr_t
*target_errh
=
1986 (struct errhdr_t
*)target_data
;
1988 if (len
!= sizeof(struct errhdr_t
) ||
1989 tgt_len
!= sizeof(struct errhdr_t
)) {
1992 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1993 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1994 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1995 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1996 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1997 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1998 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1999 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
2000 (void *) &errh
->offender
, sizeof(errh
->offender
));
2009 switch (cmsg
->cmsg_type
) {
2012 uint32_t *v
= (uint32_t *)data
;
2013 uint32_t *t_int
= (uint32_t *)target_data
;
2015 if (len
!= sizeof(uint32_t) ||
2016 tgt_len
!= sizeof(uint32_t)) {
2019 __put_user(*v
, t_int
);
2025 struct sock_extended_err ee
;
2026 struct sockaddr_in6 offender
;
2028 struct errhdr6_t
*errh
= (struct errhdr6_t
*)data
;
2029 struct errhdr6_t
*target_errh
=
2030 (struct errhdr6_t
*)target_data
;
2032 if (len
!= sizeof(struct errhdr6_t
) ||
2033 tgt_len
!= sizeof(struct errhdr6_t
)) {
2036 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
2037 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
2038 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
2039 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
2040 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
2041 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
2042 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
2043 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
2044 (void *) &errh
->offender
, sizeof(errh
->offender
));
2054 gemu_log("Unsupported ancillary data: %d/%d\n",
2055 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
2056 memcpy(target_data
, data
, MIN(len
, tgt_len
));
2057 if (tgt_len
> len
) {
2058 memset(target_data
+ len
, 0, tgt_len
- len
);
2062 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(tgt_len
));
2063 tgt_space
= TARGET_CMSG_SPACE(tgt_len
);
2064 if (msg_controllen
< tgt_space
) {
2065 tgt_space
= msg_controllen
;
2067 msg_controllen
-= tgt_space
;
2069 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
2070 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
2073 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
2075 target_msgh
->msg_controllen
= tswapal(space
);
/* Byte-swap every field of a netlink message header in place. */
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
    nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
    nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
}
2088 static abi_long
host_to_target_for_each_nlmsg(struct nlmsghdr
*nlh
,
2090 abi_long (*host_to_target_nlmsg
)
2091 (struct nlmsghdr
*))
2096 while (len
> sizeof(struct nlmsghdr
)) {
2098 nlmsg_len
= nlh
->nlmsg_len
;
2099 if (nlmsg_len
< sizeof(struct nlmsghdr
) ||
2104 switch (nlh
->nlmsg_type
) {
2106 tswap_nlmsghdr(nlh
);
2112 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
2113 e
->error
= tswap32(e
->error
);
2114 tswap_nlmsghdr(&e
->msg
);
2115 tswap_nlmsghdr(nlh
);
2119 ret
= host_to_target_nlmsg(nlh
);
2121 tswap_nlmsghdr(nlh
);
2126 tswap_nlmsghdr(nlh
);
2127 len
-= NLMSG_ALIGN(nlmsg_len
);
2128 nlh
= (struct nlmsghdr
*)(((char*)nlh
) + NLMSG_ALIGN(nlmsg_len
));
2133 static abi_long
target_to_host_for_each_nlmsg(struct nlmsghdr
*nlh
,
2135 abi_long (*target_to_host_nlmsg
)
2136 (struct nlmsghdr
*))
2140 while (len
> sizeof(struct nlmsghdr
)) {
2141 if (tswap32(nlh
->nlmsg_len
) < sizeof(struct nlmsghdr
) ||
2142 tswap32(nlh
->nlmsg_len
) > len
) {
2145 tswap_nlmsghdr(nlh
);
2146 switch (nlh
->nlmsg_type
) {
2153 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
2154 e
->error
= tswap32(e
->error
);
2155 tswap_nlmsghdr(&e
->msg
);
2159 ret
= target_to_host_nlmsg(nlh
);
2164 len
-= NLMSG_ALIGN(nlh
->nlmsg_len
);
2165 nlh
= (struct nlmsghdr
*)(((char *)nlh
) + NLMSG_ALIGN(nlh
->nlmsg_len
));
2170 #ifdef CONFIG_RTNETLINK
2171 static abi_long
host_to_target_for_each_nlattr(struct nlattr
*nlattr
,
2172 size_t len
, void *context
,
2173 abi_long (*host_to_target_nlattr
)
2177 unsigned short nla_len
;
2180 while (len
> sizeof(struct nlattr
)) {
2181 nla_len
= nlattr
->nla_len
;
2182 if (nla_len
< sizeof(struct nlattr
) ||
2186 ret
= host_to_target_nlattr(nlattr
, context
);
2187 nlattr
->nla_len
= tswap16(nlattr
->nla_len
);
2188 nlattr
->nla_type
= tswap16(nlattr
->nla_type
);
2192 len
-= NLA_ALIGN(nla_len
);
2193 nlattr
= (struct nlattr
*)(((char *)nlattr
) + NLA_ALIGN(nla_len
));
2198 static abi_long
host_to_target_for_each_rtattr(struct rtattr
*rtattr
,
2200 abi_long (*host_to_target_rtattr
)
2203 unsigned short rta_len
;
2206 while (len
> sizeof(struct rtattr
)) {
2207 rta_len
= rtattr
->rta_len
;
2208 if (rta_len
< sizeof(struct rtattr
) ||
2212 ret
= host_to_target_rtattr(rtattr
);
2213 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
2214 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
2218 len
-= RTA_ALIGN(rta_len
);
2219 rtattr
= (struct rtattr
*)(((char *)rtattr
) + RTA_ALIGN(rta_len
));
2224 #define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
2226 static abi_long
host_to_target_data_bridge_nlattr(struct nlattr
*nlattr
,
2233 switch (nlattr
->nla_type
) {
2235 case QEMU_IFLA_BR_FDB_FLUSH
:
2238 case QEMU_IFLA_BR_GROUP_ADDR
:
2241 case QEMU_IFLA_BR_VLAN_FILTERING
:
2242 case QEMU_IFLA_BR_TOPOLOGY_CHANGE
:
2243 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED
:
2244 case QEMU_IFLA_BR_MCAST_ROUTER
:
2245 case QEMU_IFLA_BR_MCAST_SNOOPING
:
2246 case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR
:
2247 case QEMU_IFLA_BR_MCAST_QUERIER
:
2248 case QEMU_IFLA_BR_NF_CALL_IPTABLES
:
2249 case QEMU_IFLA_BR_NF_CALL_IP6TABLES
:
2250 case QEMU_IFLA_BR_NF_CALL_ARPTABLES
:
2251 case QEMU_IFLA_BR_VLAN_STATS_ENABLED
:
2252 case QEMU_IFLA_BR_MCAST_STATS_ENABLED
:
2253 case QEMU_IFLA_BR_MCAST_IGMP_VERSION
:
2254 case QEMU_IFLA_BR_MCAST_MLD_VERSION
:
2257 case QEMU_IFLA_BR_PRIORITY
:
2258 case QEMU_IFLA_BR_VLAN_PROTOCOL
:
2259 case QEMU_IFLA_BR_GROUP_FWD_MASK
:
2260 case QEMU_IFLA_BR_ROOT_PORT
:
2261 case QEMU_IFLA_BR_VLAN_DEFAULT_PVID
:
2262 u16
= NLA_DATA(nlattr
);
2263 *u16
= tswap16(*u16
);
2266 case QEMU_IFLA_BR_FORWARD_DELAY
:
2267 case QEMU_IFLA_BR_HELLO_TIME
:
2268 case QEMU_IFLA_BR_MAX_AGE
:
2269 case QEMU_IFLA_BR_AGEING_TIME
:
2270 case QEMU_IFLA_BR_STP_STATE
:
2271 case QEMU_IFLA_BR_ROOT_PATH_COST
:
2272 case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY
:
2273 case QEMU_IFLA_BR_MCAST_HASH_MAX
:
2274 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT
:
2275 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT
:
2276 u32
= NLA_DATA(nlattr
);
2277 *u32
= tswap32(*u32
);
2280 case QEMU_IFLA_BR_HELLO_TIMER
:
2281 case QEMU_IFLA_BR_TCN_TIMER
:
2282 case QEMU_IFLA_BR_GC_TIMER
:
2283 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER
:
2284 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL
:
2285 case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL
:
2286 case QEMU_IFLA_BR_MCAST_QUERIER_INTVL
:
2287 case QEMU_IFLA_BR_MCAST_QUERY_INTVL
:
2288 case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
:
2289 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL
:
2290 u64
= NLA_DATA(nlattr
);
2291 *u64
= tswap64(*u64
);
2293 /* ifla_bridge_id: uin8_t[] */
2294 case QEMU_IFLA_BR_ROOT_ID
:
2295 case QEMU_IFLA_BR_BRIDGE_ID
:
2298 gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr
->nla_type
);
2304 static abi_long
host_to_target_slave_data_bridge_nlattr(struct nlattr
*nlattr
,
2311 switch (nlattr
->nla_type
) {
2313 case QEMU_IFLA_BRPORT_STATE
:
2314 case QEMU_IFLA_BRPORT_MODE
:
2315 case QEMU_IFLA_BRPORT_GUARD
:
2316 case QEMU_IFLA_BRPORT_PROTECT
:
2317 case QEMU_IFLA_BRPORT_FAST_LEAVE
:
2318 case QEMU_IFLA_BRPORT_LEARNING
:
2319 case QEMU_IFLA_BRPORT_UNICAST_FLOOD
:
2320 case QEMU_IFLA_BRPORT_PROXYARP
:
2321 case QEMU_IFLA_BRPORT_LEARNING_SYNC
:
2322 case QEMU_IFLA_BRPORT_PROXYARP_WIFI
:
2323 case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
:
2324 case QEMU_IFLA_BRPORT_CONFIG_PENDING
:
2325 case QEMU_IFLA_BRPORT_MULTICAST_ROUTER
:
2326 case QEMU_IFLA_BRPORT_MCAST_FLOOD
:
2327 case QEMU_IFLA_BRPORT_MCAST_TO_UCAST
:
2328 case QEMU_IFLA_BRPORT_VLAN_TUNNEL
:
2329 case QEMU_IFLA_BRPORT_BCAST_FLOOD
:
2330 case QEMU_IFLA_BRPORT_NEIGH_SUPPRESS
:
2333 case QEMU_IFLA_BRPORT_PRIORITY
:
2334 case QEMU_IFLA_BRPORT_DESIGNATED_PORT
:
2335 case QEMU_IFLA_BRPORT_DESIGNATED_COST
:
2336 case QEMU_IFLA_BRPORT_ID
:
2337 case QEMU_IFLA_BRPORT_NO
:
2338 case QEMU_IFLA_BRPORT_GROUP_FWD_MASK
:
2339 u16
= NLA_DATA(nlattr
);
2340 *u16
= tswap16(*u16
);
2343 case QEMU_IFLA_BRPORT_COST
:
2344 u32
= NLA_DATA(nlattr
);
2345 *u32
= tswap32(*u32
);
2348 case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER
:
2349 case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER
:
2350 case QEMU_IFLA_BRPORT_HOLD_TIMER
:
2351 u64
= NLA_DATA(nlattr
);
2352 *u64
= tswap64(*u64
);
2354 /* ifla_bridge_id: uint8_t[] */
2355 case QEMU_IFLA_BRPORT_ROOT_ID
:
2356 case QEMU_IFLA_BRPORT_BRIDGE_ID
:
2359 gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr
->nla_type
);
2365 static abi_long
host_to_target_data_tun_nlattr(struct nlattr
*nlattr
,
2370 switch (nlattr
->nla_type
) {
2372 case QEMU_IFLA_TUN_TYPE
:
2373 case QEMU_IFLA_TUN_PI
:
2374 case QEMU_IFLA_TUN_VNET_HDR
:
2375 case QEMU_IFLA_TUN_PERSIST
:
2376 case QEMU_IFLA_TUN_MULTI_QUEUE
:
2379 case QEMU_IFLA_TUN_NUM_QUEUES
:
2380 case QEMU_IFLA_TUN_NUM_DISABLED_QUEUES
:
2381 case QEMU_IFLA_TUN_OWNER
:
2382 case QEMU_IFLA_TUN_GROUP
:
2383 u32
= NLA_DATA(nlattr
);
2384 *u32
= tswap32(*u32
);
2387 gemu_log("Unknown QEMU_IFLA_TUN type %d\n", nlattr
->nla_type
);
/*
 * Context passed between IFLA_INFO_KIND/IFLA_INFO_DATA attribute handlers:
 * records the (not NUL-terminated) kind strings and their lengths so the
 * nested INFO_DATA payloads can be dispatched to the right converter.
 */
struct linkinfo_context {
    int len;            /* length of 'name' (no NUL) */
    char *name;         /* IFLA_INFO_KIND payload, e.g. "bridge" */
    int slave_len;      /* length of 'slave_name' (no NUL) */
    char *slave_name;   /* IFLA_INFO_SLAVE_KIND payload */
};
2400 static abi_long
host_to_target_data_linkinfo_nlattr(struct nlattr
*nlattr
,
2403 struct linkinfo_context
*li_context
= context
;
2405 switch (nlattr
->nla_type
) {
2407 case QEMU_IFLA_INFO_KIND
:
2408 li_context
->name
= NLA_DATA(nlattr
);
2409 li_context
->len
= nlattr
->nla_len
- NLA_HDRLEN
;
2411 case QEMU_IFLA_INFO_SLAVE_KIND
:
2412 li_context
->slave_name
= NLA_DATA(nlattr
);
2413 li_context
->slave_len
= nlattr
->nla_len
- NLA_HDRLEN
;
2416 case QEMU_IFLA_INFO_XSTATS
:
2417 /* FIXME: only used by CAN */
2420 case QEMU_IFLA_INFO_DATA
:
2421 if (strncmp(li_context
->name
, "bridge",
2422 li_context
->len
) == 0) {
2423 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
2426 host_to_target_data_bridge_nlattr
);
2427 } else if (strncmp(li_context
->name
, "tun",
2428 li_context
->len
) == 0) {
2429 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
2432 host_to_target_data_tun_nlattr
);
2434 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context
->name
);
2437 case QEMU_IFLA_INFO_SLAVE_DATA
:
2438 if (strncmp(li_context
->slave_name
, "bridge",
2439 li_context
->slave_len
) == 0) {
2440 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
2443 host_to_target_slave_data_bridge_nlattr
);
2445 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2446 li_context
->slave_name
);
2450 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr
->nla_type
);
2457 static abi_long
host_to_target_data_inet_nlattr(struct nlattr
*nlattr
,
2463 switch (nlattr
->nla_type
) {
2464 case QEMU_IFLA_INET_CONF
:
2465 u32
= NLA_DATA(nlattr
);
2466 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u32
);
2468 u32
[i
] = tswap32(u32
[i
]);
2472 gemu_log("Unknown host AF_INET type: %d\n", nlattr
->nla_type
);
2477 static abi_long
host_to_target_data_inet6_nlattr(struct nlattr
*nlattr
,
2482 struct ifla_cacheinfo
*ci
;
2485 switch (nlattr
->nla_type
) {
2487 case QEMU_IFLA_INET6_TOKEN
:
2490 case QEMU_IFLA_INET6_ADDR_GEN_MODE
:
2493 case QEMU_IFLA_INET6_FLAGS
:
2494 u32
= NLA_DATA(nlattr
);
2495 *u32
= tswap32(*u32
);
2498 case QEMU_IFLA_INET6_CONF
:
2499 u32
= NLA_DATA(nlattr
);
2500 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u32
);
2502 u32
[i
] = tswap32(u32
[i
]);
2505 /* ifla_cacheinfo */
2506 case QEMU_IFLA_INET6_CACHEINFO
:
2507 ci
= NLA_DATA(nlattr
);
2508 ci
->max_reasm_len
= tswap32(ci
->max_reasm_len
);
2509 ci
->tstamp
= tswap32(ci
->tstamp
);
2510 ci
->reachable_time
= tswap32(ci
->reachable_time
);
2511 ci
->retrans_time
= tswap32(ci
->retrans_time
);
2514 case QEMU_IFLA_INET6_STATS
:
2515 case QEMU_IFLA_INET6_ICMP6STATS
:
2516 u64
= NLA_DATA(nlattr
);
2517 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u64
);
2519 u64
[i
] = tswap64(u64
[i
]);
2523 gemu_log("Unknown host AF_INET6 type: %d\n", nlattr
->nla_type
);
2528 static abi_long
host_to_target_data_spec_nlattr(struct nlattr
*nlattr
,
2531 switch (nlattr
->nla_type
) {
2533 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2535 host_to_target_data_inet_nlattr
);
2537 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2539 host_to_target_data_inet6_nlattr
);
2541 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr
->nla_type
);
2547 static abi_long
host_to_target_data_xdp_nlattr(struct nlattr
*nlattr
,
2552 switch (nlattr
->nla_type
) {
2554 case QEMU_IFLA_XDP_ATTACHED
:
2557 case QEMU_IFLA_XDP_PROG_ID
:
2558 u32
= NLA_DATA(nlattr
);
2559 *u32
= tswap32(*u32
);
2562 gemu_log("Unknown host XDP type: %d\n", nlattr
->nla_type
);
2568 static abi_long
host_to_target_data_link_rtattr(struct rtattr
*rtattr
)
2571 struct rtnl_link_stats
*st
;
2572 struct rtnl_link_stats64
*st64
;
2573 struct rtnl_link_ifmap
*map
;
2574 struct linkinfo_context li_context
;
2576 switch (rtattr
->rta_type
) {
2578 case QEMU_IFLA_ADDRESS
:
2579 case QEMU_IFLA_BROADCAST
:
2581 case QEMU_IFLA_IFNAME
:
2582 case QEMU_IFLA_QDISC
:
2585 case QEMU_IFLA_OPERSTATE
:
2586 case QEMU_IFLA_LINKMODE
:
2587 case QEMU_IFLA_CARRIER
:
2588 case QEMU_IFLA_PROTO_DOWN
:
2592 case QEMU_IFLA_LINK
:
2593 case QEMU_IFLA_WEIGHT
:
2594 case QEMU_IFLA_TXQLEN
:
2595 case QEMU_IFLA_CARRIER_CHANGES
:
2596 case QEMU_IFLA_NUM_RX_QUEUES
:
2597 case QEMU_IFLA_NUM_TX_QUEUES
:
2598 case QEMU_IFLA_PROMISCUITY
:
2599 case QEMU_IFLA_EXT_MASK
:
2600 case QEMU_IFLA_LINK_NETNSID
:
2601 case QEMU_IFLA_GROUP
:
2602 case QEMU_IFLA_MASTER
:
2603 case QEMU_IFLA_NUM_VF
:
2604 case QEMU_IFLA_GSO_MAX_SEGS
:
2605 case QEMU_IFLA_GSO_MAX_SIZE
:
2606 case QEMU_IFLA_CARRIER_UP_COUNT
:
2607 case QEMU_IFLA_CARRIER_DOWN_COUNT
:
2608 u32
= RTA_DATA(rtattr
);
2609 *u32
= tswap32(*u32
);
2611 /* struct rtnl_link_stats */
2612 case QEMU_IFLA_STATS
:
2613 st
= RTA_DATA(rtattr
);
2614 st
->rx_packets
= tswap32(st
->rx_packets
);
2615 st
->tx_packets
= tswap32(st
->tx_packets
);
2616 st
->rx_bytes
= tswap32(st
->rx_bytes
);
2617 st
->tx_bytes
= tswap32(st
->tx_bytes
);
2618 st
->rx_errors
= tswap32(st
->rx_errors
);
2619 st
->tx_errors
= tswap32(st
->tx_errors
);
2620 st
->rx_dropped
= tswap32(st
->rx_dropped
);
2621 st
->tx_dropped
= tswap32(st
->tx_dropped
);
2622 st
->multicast
= tswap32(st
->multicast
);
2623 st
->collisions
= tswap32(st
->collisions
);
2625 /* detailed rx_errors: */
2626 st
->rx_length_errors
= tswap32(st
->rx_length_errors
);
2627 st
->rx_over_errors
= tswap32(st
->rx_over_errors
);
2628 st
->rx_crc_errors
= tswap32(st
->rx_crc_errors
);
2629 st
->rx_frame_errors
= tswap32(st
->rx_frame_errors
);
2630 st
->rx_fifo_errors
= tswap32(st
->rx_fifo_errors
);
2631 st
->rx_missed_errors
= tswap32(st
->rx_missed_errors
);
2633 /* detailed tx_errors */
2634 st
->tx_aborted_errors
= tswap32(st
->tx_aborted_errors
);
2635 st
->tx_carrier_errors
= tswap32(st
->tx_carrier_errors
);
2636 st
->tx_fifo_errors
= tswap32(st
->tx_fifo_errors
);
2637 st
->tx_heartbeat_errors
= tswap32(st
->tx_heartbeat_errors
);
2638 st
->tx_window_errors
= tswap32(st
->tx_window_errors
);
2641 st
->rx_compressed
= tswap32(st
->rx_compressed
);
2642 st
->tx_compressed
= tswap32(st
->tx_compressed
);
2644 /* struct rtnl_link_stats64 */
2645 case QEMU_IFLA_STATS64
:
2646 st64
= RTA_DATA(rtattr
);
2647 st64
->rx_packets
= tswap64(st64
->rx_packets
);
2648 st64
->tx_packets
= tswap64(st64
->tx_packets
);
2649 st64
->rx_bytes
= tswap64(st64
->rx_bytes
);
2650 st64
->tx_bytes
= tswap64(st64
->tx_bytes
);
2651 st64
->rx_errors
= tswap64(st64
->rx_errors
);
2652 st64
->tx_errors
= tswap64(st64
->tx_errors
);
2653 st64
->rx_dropped
= tswap64(st64
->rx_dropped
);
2654 st64
->tx_dropped
= tswap64(st64
->tx_dropped
);
2655 st64
->multicast
= tswap64(st64
->multicast
);
2656 st64
->collisions
= tswap64(st64
->collisions
);
2658 /* detailed rx_errors: */
2659 st64
->rx_length_errors
= tswap64(st64
->rx_length_errors
);
2660 st64
->rx_over_errors
= tswap64(st64
->rx_over_errors
);
2661 st64
->rx_crc_errors
= tswap64(st64
->rx_crc_errors
);
2662 st64
->rx_frame_errors
= tswap64(st64
->rx_frame_errors
);
2663 st64
->rx_fifo_errors
= tswap64(st64
->rx_fifo_errors
);
2664 st64
->rx_missed_errors
= tswap64(st64
->rx_missed_errors
);
2666 /* detailed tx_errors */
2667 st64
->tx_aborted_errors
= tswap64(st64
->tx_aborted_errors
);
2668 st64
->tx_carrier_errors
= tswap64(st64
->tx_carrier_errors
);
2669 st64
->tx_fifo_errors
= tswap64(st64
->tx_fifo_errors
);
2670 st64
->tx_heartbeat_errors
= tswap64(st64
->tx_heartbeat_errors
);
2671 st64
->tx_window_errors
= tswap64(st64
->tx_window_errors
);
2674 st64
->rx_compressed
= tswap64(st64
->rx_compressed
);
2675 st64
->tx_compressed
= tswap64(st64
->tx_compressed
);
2677 /* struct rtnl_link_ifmap */
2679 map
= RTA_DATA(rtattr
);
2680 map
->mem_start
= tswap64(map
->mem_start
);
2681 map
->mem_end
= tswap64(map
->mem_end
);
2682 map
->base_addr
= tswap64(map
->base_addr
);
2683 map
->irq
= tswap16(map
->irq
);
2686 case QEMU_IFLA_LINKINFO
:
2687 memset(&li_context
, 0, sizeof(li_context
));
2688 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2690 host_to_target_data_linkinfo_nlattr
);
2691 case QEMU_IFLA_AF_SPEC
:
2692 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2694 host_to_target_data_spec_nlattr
);
2696 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2698 host_to_target_data_xdp_nlattr
);
2700 gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr
->rta_type
);
2706 static abi_long
host_to_target_data_addr_rtattr(struct rtattr
*rtattr
)
2709 struct ifa_cacheinfo
*ci
;
2711 switch (rtattr
->rta_type
) {
2712 /* binary: depends on family type */
2722 u32
= RTA_DATA(rtattr
);
2723 *u32
= tswap32(*u32
);
2725 /* struct ifa_cacheinfo */
2727 ci
= RTA_DATA(rtattr
);
2728 ci
->ifa_prefered
= tswap32(ci
->ifa_prefered
);
2729 ci
->ifa_valid
= tswap32(ci
->ifa_valid
);
2730 ci
->cstamp
= tswap32(ci
->cstamp
);
2731 ci
->tstamp
= tswap32(ci
->tstamp
);
2734 gemu_log("Unknown host IFA type: %d\n", rtattr
->rta_type
);
2740 static abi_long
host_to_target_data_route_rtattr(struct rtattr
*rtattr
)
2743 struct rta_cacheinfo
*ci
;
2745 switch (rtattr
->rta_type
) {
2746 /* binary: depends on family type */
2747 case QEMU_RTA_GATEWAY
:
2749 case QEMU_RTA_PREFSRC
:
2755 case QEMU_RTA_PRIORITY
:
2756 case QEMU_RTA_TABLE
:
2758 u32
= RTA_DATA(rtattr
);
2759 *u32
= tswap32(*u32
);
2761 /* struct rta_cacheinfo */
2762 case QEMU_RTA_CACHEINFO
:
2763 ci
= RTA_DATA(rtattr
);
2764 ci
->rta_clntref
= tswap32(ci
->rta_clntref
);
2765 ci
->rta_lastuse
= tswap32(ci
->rta_lastuse
);
2766 ci
->rta_expires
= tswap32(ci
->rta_expires
);
2767 ci
->rta_error
= tswap32(ci
->rta_error
);
2768 ci
->rta_used
= tswap32(ci
->rta_used
);
2769 #if defined(RTNETLINK_HAVE_PEERINFO)
2770 ci
->rta_id
= tswap32(ci
->rta_id
);
2771 ci
->rta_ts
= tswap32(ci
->rta_ts
);
2772 ci
->rta_tsage
= tswap32(ci
->rta_tsage
);
2776 gemu_log("Unknown host RTA type: %d\n", rtattr
->rta_type
);
2782 static abi_long
host_to_target_link_rtattr(struct rtattr
*rtattr
,
2783 uint32_t rtattr_len
)
2785 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2786 host_to_target_data_link_rtattr
);
2789 static abi_long
host_to_target_addr_rtattr(struct rtattr
*rtattr
,
2790 uint32_t rtattr_len
)
2792 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2793 host_to_target_data_addr_rtattr
);
2796 static abi_long
host_to_target_route_rtattr(struct rtattr
*rtattr
,
2797 uint32_t rtattr_len
)
2799 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2800 host_to_target_data_route_rtattr
);
2803 static abi_long
host_to_target_data_route(struct nlmsghdr
*nlh
)
2806 struct ifinfomsg
*ifi
;
2807 struct ifaddrmsg
*ifa
;
2810 nlmsg_len
= nlh
->nlmsg_len
;
2811 switch (nlh
->nlmsg_type
) {
2815 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2816 ifi
= NLMSG_DATA(nlh
);
2817 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2818 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2819 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2820 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2821 host_to_target_link_rtattr(IFLA_RTA(ifi
),
2822 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifi
)));
2828 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2829 ifa
= NLMSG_DATA(nlh
);
2830 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2831 host_to_target_addr_rtattr(IFA_RTA(ifa
),
2832 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifa
)));
2838 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2839 rtm
= NLMSG_DATA(nlh
);
2840 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2841 host_to_target_route_rtattr(RTM_RTA(rtm
),
2842 nlmsg_len
- NLMSG_LENGTH(sizeof(*rtm
)));
2846 return -TARGET_EINVAL
;
2851 static inline abi_long
host_to_target_nlmsg_route(struct nlmsghdr
*nlh
,
2854 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_route
);
2857 static abi_long
target_to_host_for_each_rtattr(struct rtattr
*rtattr
,
2859 abi_long (*target_to_host_rtattr
)
2864 while (len
>= sizeof(struct rtattr
)) {
2865 if (tswap16(rtattr
->rta_len
) < sizeof(struct rtattr
) ||
2866 tswap16(rtattr
->rta_len
) > len
) {
2869 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
2870 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
2871 ret
= target_to_host_rtattr(rtattr
);
2875 len
-= RTA_ALIGN(rtattr
->rta_len
);
2876 rtattr
= (struct rtattr
*)(((char *)rtattr
) +
2877 RTA_ALIGN(rtattr
->rta_len
));
2882 static abi_long
target_to_host_data_link_rtattr(struct rtattr
*rtattr
)
2884 switch (rtattr
->rta_type
) {
2886 gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr
->rta_type
);
2892 static abi_long
target_to_host_data_addr_rtattr(struct rtattr
*rtattr
)
2894 switch (rtattr
->rta_type
) {
2895 /* binary: depends on family type */
2900 gemu_log("Unknown target IFA type: %d\n", rtattr
->rta_type
);
2906 static abi_long
target_to_host_data_route_rtattr(struct rtattr
*rtattr
)
2909 switch (rtattr
->rta_type
) {
2910 /* binary: depends on family type */
2913 case QEMU_RTA_GATEWAY
:
2916 case QEMU_RTA_PRIORITY
:
2918 u32
= RTA_DATA(rtattr
);
2919 *u32
= tswap32(*u32
);
2922 gemu_log("Unknown target RTA type: %d\n", rtattr
->rta_type
);
2928 static void target_to_host_link_rtattr(struct rtattr
*rtattr
,
2929 uint32_t rtattr_len
)
2931 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2932 target_to_host_data_link_rtattr
);
2935 static void target_to_host_addr_rtattr(struct rtattr
*rtattr
,
2936 uint32_t rtattr_len
)
2938 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2939 target_to_host_data_addr_rtattr
);
2942 static void target_to_host_route_rtattr(struct rtattr
*rtattr
,
2943 uint32_t rtattr_len
)
2945 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2946 target_to_host_data_route_rtattr
);
2949 static abi_long
target_to_host_data_route(struct nlmsghdr
*nlh
)
2951 struct ifinfomsg
*ifi
;
2952 struct ifaddrmsg
*ifa
;
2955 switch (nlh
->nlmsg_type
) {
2960 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2961 ifi
= NLMSG_DATA(nlh
);
2962 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2963 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2964 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2965 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2966 target_to_host_link_rtattr(IFLA_RTA(ifi
), nlh
->nlmsg_len
-
2967 NLMSG_LENGTH(sizeof(*ifi
)));
2973 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2974 ifa
= NLMSG_DATA(nlh
);
2975 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2976 target_to_host_addr_rtattr(IFA_RTA(ifa
), nlh
->nlmsg_len
-
2977 NLMSG_LENGTH(sizeof(*ifa
)));
2984 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2985 rtm
= NLMSG_DATA(nlh
);
2986 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2987 target_to_host_route_rtattr(RTM_RTA(rtm
), nlh
->nlmsg_len
-
2988 NLMSG_LENGTH(sizeof(*rtm
)));
2992 return -TARGET_EOPNOTSUPP
;
2997 static abi_long
target_to_host_nlmsg_route(struct nlmsghdr
*nlh
, size_t len
)
2999 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_route
);
3001 #endif /* CONFIG_RTNETLINK */
3003 static abi_long
host_to_target_data_audit(struct nlmsghdr
*nlh
)
3005 switch (nlh
->nlmsg_type
) {
3007 gemu_log("Unknown host audit message type %d\n",
3009 return -TARGET_EINVAL
;
3014 static inline abi_long
host_to_target_nlmsg_audit(struct nlmsghdr
*nlh
,
3017 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_audit
);
3020 static abi_long
target_to_host_data_audit(struct nlmsghdr
*nlh
)
3022 switch (nlh
->nlmsg_type
) {
3024 case AUDIT_FIRST_USER_MSG
... AUDIT_LAST_USER_MSG
:
3025 case AUDIT_FIRST_USER_MSG2
... AUDIT_LAST_USER_MSG2
:
3028 gemu_log("Unknown target audit message type %d\n",
3030 return -TARGET_EINVAL
;
3036 static abi_long
target_to_host_nlmsg_audit(struct nlmsghdr
*nlh
, size_t len
)
3038 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_audit
);
3041 /* do_setsockopt() Must return target values and target errnos. */
3042 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
3043 abi_ulong optval_addr
, socklen_t optlen
)
3047 struct ip_mreqn
*ip_mreq
;
3048 struct ip_mreq_source
*ip_mreq_source
;
3052 /* TCP options all take an 'int' value. */
3053 if (optlen
< sizeof(uint32_t))
3054 return -TARGET_EINVAL
;
3056 if (get_user_u32(val
, optval_addr
))
3057 return -TARGET_EFAULT
;
3058 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
3065 case IP_ROUTER_ALERT
:
3069 case IP_MTU_DISCOVER
:
3076 case IP_MULTICAST_TTL
:
3077 case IP_MULTICAST_LOOP
:
3079 if (optlen
>= sizeof(uint32_t)) {
3080 if (get_user_u32(val
, optval_addr
))
3081 return -TARGET_EFAULT
;
3082 } else if (optlen
>= 1) {
3083 if (get_user_u8(val
, optval_addr
))
3084 return -TARGET_EFAULT
;
3086 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
3088 case IP_ADD_MEMBERSHIP
:
3089 case IP_DROP_MEMBERSHIP
:
3090 if (optlen
< sizeof (struct target_ip_mreq
) ||
3091 optlen
> sizeof (struct target_ip_mreqn
))
3092 return -TARGET_EINVAL
;
3094 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
3095 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
3096 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
3099 case IP_BLOCK_SOURCE
:
3100 case IP_UNBLOCK_SOURCE
:
3101 case IP_ADD_SOURCE_MEMBERSHIP
:
3102 case IP_DROP_SOURCE_MEMBERSHIP
:
3103 if (optlen
!= sizeof (struct target_ip_mreq_source
))
3104 return -TARGET_EINVAL
;
3106 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
3107 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
3108 unlock_user (ip_mreq_source
, optval_addr
, 0);
3117 case IPV6_MTU_DISCOVER
:
3120 case IPV6_RECVPKTINFO
:
3121 case IPV6_UNICAST_HOPS
:
3122 case IPV6_MULTICAST_HOPS
:
3123 case IPV6_MULTICAST_LOOP
:
3125 case IPV6_RECVHOPLIMIT
:
3126 case IPV6_2292HOPLIMIT
:
3129 if (optlen
< sizeof(uint32_t)) {
3130 return -TARGET_EINVAL
;
3132 if (get_user_u32(val
, optval_addr
)) {
3133 return -TARGET_EFAULT
;
3135 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
3136 &val
, sizeof(val
)));
3140 struct in6_pktinfo pki
;
3142 if (optlen
< sizeof(pki
)) {
3143 return -TARGET_EINVAL
;
3146 if (copy_from_user(&pki
, optval_addr
, sizeof(pki
))) {
3147 return -TARGET_EFAULT
;
3150 pki
.ipi6_ifindex
= tswap32(pki
.ipi6_ifindex
);
3152 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
3153 &pki
, sizeof(pki
)));
3164 struct icmp6_filter icmp6f
;
3166 if (optlen
> sizeof(icmp6f
)) {
3167 optlen
= sizeof(icmp6f
);
3170 if (copy_from_user(&icmp6f
, optval_addr
, optlen
)) {
3171 return -TARGET_EFAULT
;
3174 for (val
= 0; val
< 8; val
++) {
3175 icmp6f
.data
[val
] = tswap32(icmp6f
.data
[val
]);
3178 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
3190 /* those take an u32 value */
3191 if (optlen
< sizeof(uint32_t)) {
3192 return -TARGET_EINVAL
;
3195 if (get_user_u32(val
, optval_addr
)) {
3196 return -TARGET_EFAULT
;
3198 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
3199 &val
, sizeof(val
)));
3206 case TARGET_SOL_SOCKET
:
3208 case TARGET_SO_RCVTIMEO
:
3212 optname
= SO_RCVTIMEO
;
3215 if (optlen
!= sizeof(struct target_timeval
)) {
3216 return -TARGET_EINVAL
;
3219 if (copy_from_user_timeval(&tv
, optval_addr
)) {
3220 return -TARGET_EFAULT
;
3223 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
3227 case TARGET_SO_SNDTIMEO
:
3228 optname
= SO_SNDTIMEO
;
3230 case TARGET_SO_ATTACH_FILTER
:
3232 struct target_sock_fprog
*tfprog
;
3233 struct target_sock_filter
*tfilter
;
3234 struct sock_fprog fprog
;
3235 struct sock_filter
*filter
;
3238 if (optlen
!= sizeof(*tfprog
)) {
3239 return -TARGET_EINVAL
;
3241 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
3242 return -TARGET_EFAULT
;
3244 if (!lock_user_struct(VERIFY_READ
, tfilter
,
3245 tswapal(tfprog
->filter
), 0)) {
3246 unlock_user_struct(tfprog
, optval_addr
, 1);
3247 return -TARGET_EFAULT
;
3250 fprog
.len
= tswap16(tfprog
->len
);
3251 filter
= g_try_new(struct sock_filter
, fprog
.len
);
3252 if (filter
== NULL
) {
3253 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
3254 unlock_user_struct(tfprog
, optval_addr
, 1);
3255 return -TARGET_ENOMEM
;
3257 for (i
= 0; i
< fprog
.len
; i
++) {
3258 filter
[i
].code
= tswap16(tfilter
[i
].code
);
3259 filter
[i
].jt
= tfilter
[i
].jt
;
3260 filter
[i
].jf
= tfilter
[i
].jf
;
3261 filter
[i
].k
= tswap32(tfilter
[i
].k
);
3263 fprog
.filter
= filter
;
3265 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
3266 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
3269 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
3270 unlock_user_struct(tfprog
, optval_addr
, 1);
3273 case TARGET_SO_BINDTODEVICE
:
3275 char *dev_ifname
, *addr_ifname
;
3277 if (optlen
> IFNAMSIZ
- 1) {
3278 optlen
= IFNAMSIZ
- 1;
3280 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
3282 return -TARGET_EFAULT
;
3284 optname
= SO_BINDTODEVICE
;
3285 addr_ifname
= alloca(IFNAMSIZ
);
3286 memcpy(addr_ifname
, dev_ifname
, optlen
);
3287 addr_ifname
[optlen
] = 0;
3288 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
3289 addr_ifname
, optlen
));
3290 unlock_user (dev_ifname
, optval_addr
, 0);
3293 /* Options with 'int' argument. */
3294 case TARGET_SO_DEBUG
:
3297 case TARGET_SO_REUSEADDR
:
3298 optname
= SO_REUSEADDR
;
3300 case TARGET_SO_TYPE
:
3303 case TARGET_SO_ERROR
:
3306 case TARGET_SO_DONTROUTE
:
3307 optname
= SO_DONTROUTE
;
3309 case TARGET_SO_BROADCAST
:
3310 optname
= SO_BROADCAST
;
3312 case TARGET_SO_SNDBUF
:
3313 optname
= SO_SNDBUF
;
3315 case TARGET_SO_SNDBUFFORCE
:
3316 optname
= SO_SNDBUFFORCE
;
3318 case TARGET_SO_RCVBUF
:
3319 optname
= SO_RCVBUF
;
3321 case TARGET_SO_RCVBUFFORCE
:
3322 optname
= SO_RCVBUFFORCE
;
3324 case TARGET_SO_KEEPALIVE
:
3325 optname
= SO_KEEPALIVE
;
3327 case TARGET_SO_OOBINLINE
:
3328 optname
= SO_OOBINLINE
;
3330 case TARGET_SO_NO_CHECK
:
3331 optname
= SO_NO_CHECK
;
3333 case TARGET_SO_PRIORITY
:
3334 optname
= SO_PRIORITY
;
3337 case TARGET_SO_BSDCOMPAT
:
3338 optname
= SO_BSDCOMPAT
;
3341 case TARGET_SO_PASSCRED
:
3342 optname
= SO_PASSCRED
;
3344 case TARGET_SO_PASSSEC
:
3345 optname
= SO_PASSSEC
;
3347 case TARGET_SO_TIMESTAMP
:
3348 optname
= SO_TIMESTAMP
;
3350 case TARGET_SO_RCVLOWAT
:
3351 optname
= SO_RCVLOWAT
;
3356 if (optlen
< sizeof(uint32_t))
3357 return -TARGET_EINVAL
;
3359 if (get_user_u32(val
, optval_addr
))
3360 return -TARGET_EFAULT
;
3361 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
3365 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
3366 ret
= -TARGET_ENOPROTOOPT
;
3371 /* do_getsockopt() Must return target values and target errnos. */
3372 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
3373 abi_ulong optval_addr
, abi_ulong optlen
)
3380 case TARGET_SOL_SOCKET
:
3383 /* These don't just return a single integer */
3384 case TARGET_SO_LINGER
:
3385 case TARGET_SO_RCVTIMEO
:
3386 case TARGET_SO_SNDTIMEO
:
3387 case TARGET_SO_PEERNAME
:
3389 case TARGET_SO_PEERCRED
: {
3392 struct target_ucred
*tcr
;
3394 if (get_user_u32(len
, optlen
)) {
3395 return -TARGET_EFAULT
;
3398 return -TARGET_EINVAL
;
3402 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
3410 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
3411 return -TARGET_EFAULT
;
3413 __put_user(cr
.pid
, &tcr
->pid
);
3414 __put_user(cr
.uid
, &tcr
->uid
);
3415 __put_user(cr
.gid
, &tcr
->gid
);
3416 unlock_user_struct(tcr
, optval_addr
, 1);
3417 if (put_user_u32(len
, optlen
)) {
3418 return -TARGET_EFAULT
;
3422 /* Options with 'int' argument. */
3423 case TARGET_SO_DEBUG
:
3426 case TARGET_SO_REUSEADDR
:
3427 optname
= SO_REUSEADDR
;
3429 case TARGET_SO_TYPE
:
3432 case TARGET_SO_ERROR
:
3435 case TARGET_SO_DONTROUTE
:
3436 optname
= SO_DONTROUTE
;
3438 case TARGET_SO_BROADCAST
:
3439 optname
= SO_BROADCAST
;
3441 case TARGET_SO_SNDBUF
:
3442 optname
= SO_SNDBUF
;
3444 case TARGET_SO_RCVBUF
:
3445 optname
= SO_RCVBUF
;
3447 case TARGET_SO_KEEPALIVE
:
3448 optname
= SO_KEEPALIVE
;
3450 case TARGET_SO_OOBINLINE
:
3451 optname
= SO_OOBINLINE
;
3453 case TARGET_SO_NO_CHECK
:
3454 optname
= SO_NO_CHECK
;
3456 case TARGET_SO_PRIORITY
:
3457 optname
= SO_PRIORITY
;
3460 case TARGET_SO_BSDCOMPAT
:
3461 optname
= SO_BSDCOMPAT
;
3464 case TARGET_SO_PASSCRED
:
3465 optname
= SO_PASSCRED
;
3467 case TARGET_SO_TIMESTAMP
:
3468 optname
= SO_TIMESTAMP
;
3470 case TARGET_SO_RCVLOWAT
:
3471 optname
= SO_RCVLOWAT
;
3473 case TARGET_SO_ACCEPTCONN
:
3474 optname
= SO_ACCEPTCONN
;
3481 /* TCP options all take an 'int' value. */
3483 if (get_user_u32(len
, optlen
))
3484 return -TARGET_EFAULT
;
3486 return -TARGET_EINVAL
;
3488 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3491 if (optname
== SO_TYPE
) {
3492 val
= host_to_target_sock_type(val
);
3497 if (put_user_u32(val
, optval_addr
))
3498 return -TARGET_EFAULT
;
3500 if (put_user_u8(val
, optval_addr
))
3501 return -TARGET_EFAULT
;
3503 if (put_user_u32(len
, optlen
))
3504 return -TARGET_EFAULT
;
3511 case IP_ROUTER_ALERT
:
3515 case IP_MTU_DISCOVER
:
3521 case IP_MULTICAST_TTL
:
3522 case IP_MULTICAST_LOOP
:
3523 if (get_user_u32(len
, optlen
))
3524 return -TARGET_EFAULT
;
3526 return -TARGET_EINVAL
;
3528 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3531 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
3533 if (put_user_u32(len
, optlen
)
3534 || put_user_u8(val
, optval_addr
))
3535 return -TARGET_EFAULT
;
3537 if (len
> sizeof(int))
3539 if (put_user_u32(len
, optlen
)
3540 || put_user_u32(val
, optval_addr
))
3541 return -TARGET_EFAULT
;
3545 ret
= -TARGET_ENOPROTOOPT
;
3551 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
3553 ret
= -TARGET_EOPNOTSUPP
;
3559 /* Convert target low/high pair representing file offset into the host
3560 * low/high pair. This function doesn't handle offsets bigger than 64 bits
3561 * as the kernel doesn't handle them either.
3563 static void target_to_host_low_high(abi_ulong tlow
,
3565 unsigned long *hlow
,
3566 unsigned long *hhigh
)
3568 uint64_t off
= tlow
|
3569 ((unsigned long long)thigh
<< TARGET_LONG_BITS
/ 2) <<
3570 TARGET_LONG_BITS
/ 2;
3573 *hhigh
= (off
>> HOST_LONG_BITS
/ 2) >> HOST_LONG_BITS
/ 2;
3576 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
3577 abi_ulong count
, int copy
)
3579 struct target_iovec
*target_vec
;
3581 abi_ulong total_len
, max_len
;
3584 bool bad_address
= false;
3590 if (count
> IOV_MAX
) {
3595 vec
= g_try_new0(struct iovec
, count
);
3601 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3602 count
* sizeof(struct target_iovec
), 1);
3603 if (target_vec
== NULL
) {
3608 /* ??? If host page size > target page size, this will result in a
3609 value larger than what we can actually support. */
3610 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
3613 for (i
= 0; i
< count
; i
++) {
3614 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3615 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3620 } else if (len
== 0) {
3621 /* Zero length pointer is ignored. */
3622 vec
[i
].iov_base
= 0;
3624 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
3625 /* If the first buffer pointer is bad, this is a fault. But
3626 * subsequent bad buffers will result in a partial write; this
3627 * is realized by filling the vector with null pointers and
3629 if (!vec
[i
].iov_base
) {
3640 if (len
> max_len
- total_len
) {
3641 len
= max_len
- total_len
;
3644 vec
[i
].iov_len
= len
;
3648 unlock_user(target_vec
, target_addr
, 0);
3653 if (tswapal(target_vec
[i
].iov_len
) > 0) {
3654 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
3657 unlock_user(target_vec
, target_addr
, 0);
3664 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
3665 abi_ulong count
, int copy
)
3667 struct target_iovec
*target_vec
;
3670 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3671 count
* sizeof(struct target_iovec
), 1);
3673 for (i
= 0; i
< count
; i
++) {
3674 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3675 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3679 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
3681 unlock_user(target_vec
, target_addr
, 0);
3687 static inline int target_to_host_sock_type(int *type
)
3690 int target_type
= *type
;
3692 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
3693 case TARGET_SOCK_DGRAM
:
3694 host_type
= SOCK_DGRAM
;
3696 case TARGET_SOCK_STREAM
:
3697 host_type
= SOCK_STREAM
;
3700 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
3703 if (target_type
& TARGET_SOCK_CLOEXEC
) {
3704 #if defined(SOCK_CLOEXEC)
3705 host_type
|= SOCK_CLOEXEC
;
3707 return -TARGET_EINVAL
;
3710 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3711 #if defined(SOCK_NONBLOCK)
3712 host_type
|= SOCK_NONBLOCK
;
3713 #elif !defined(O_NONBLOCK)
3714 return -TARGET_EINVAL
;
3721 /* Try to emulate socket type flags after socket creation. */
3722 static int sock_flags_fixup(int fd
, int target_type
)
3724 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3725 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3726 int flags
= fcntl(fd
, F_GETFL
);
3727 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
3729 return -TARGET_EINVAL
;
3736 static abi_long
packet_target_to_host_sockaddr(void *host_addr
,
3737 abi_ulong target_addr
,
3740 struct sockaddr
*addr
= host_addr
;
3741 struct target_sockaddr
*target_saddr
;
3743 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
3744 if (!target_saddr
) {
3745 return -TARGET_EFAULT
;
3748 memcpy(addr
, target_saddr
, len
);
3749 addr
->sa_family
= tswap16(target_saddr
->sa_family
);
3750 /* spkt_protocol is big-endian */
3752 unlock_user(target_saddr
, target_addr
, 0);
3756 static TargetFdTrans target_packet_trans
= {
3757 .target_to_host_addr
= packet_target_to_host_sockaddr
,
3760 #ifdef CONFIG_RTNETLINK
3761 static abi_long
netlink_route_target_to_host(void *buf
, size_t len
)
3765 ret
= target_to_host_nlmsg_route(buf
, len
);
3773 static abi_long
netlink_route_host_to_target(void *buf
, size_t len
)
3777 ret
= host_to_target_nlmsg_route(buf
, len
);
3785 static TargetFdTrans target_netlink_route_trans
= {
3786 .target_to_host_data
= netlink_route_target_to_host
,
3787 .host_to_target_data
= netlink_route_host_to_target
,
3789 #endif /* CONFIG_RTNETLINK */
3791 static abi_long
netlink_audit_target_to_host(void *buf
, size_t len
)
3795 ret
= target_to_host_nlmsg_audit(buf
, len
);
3803 static abi_long
netlink_audit_host_to_target(void *buf
, size_t len
)
3807 ret
= host_to_target_nlmsg_audit(buf
, len
);
3815 static TargetFdTrans target_netlink_audit_trans
= {
3816 .target_to_host_data
= netlink_audit_target_to_host
,
3817 .host_to_target_data
= netlink_audit_host_to_target
,
3820 /* do_socket() Must return target values and target errnos. */
3821 static abi_long
do_socket(int domain
, int type
, int protocol
)
3823 int target_type
= type
;
3826 ret
= target_to_host_sock_type(&type
);
3831 if (domain
== PF_NETLINK
&& !(
3832 #ifdef CONFIG_RTNETLINK
3833 protocol
== NETLINK_ROUTE
||
3835 protocol
== NETLINK_KOBJECT_UEVENT
||
3836 protocol
== NETLINK_AUDIT
)) {
3837 return -EPFNOSUPPORT
;
3840 if (domain
== AF_PACKET
||
3841 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
3842 protocol
= tswap16(protocol
);
3845 ret
= get_errno(socket(domain
, type
, protocol
));
3847 ret
= sock_flags_fixup(ret
, target_type
);
3848 if (type
== SOCK_PACKET
) {
3849 /* Manage an obsolete case :
3850 * if socket type is SOCK_PACKET, bind by name
3852 fd_trans_register(ret
, &target_packet_trans
);
3853 } else if (domain
== PF_NETLINK
) {
3855 #ifdef CONFIG_RTNETLINK
3857 fd_trans_register(ret
, &target_netlink_route_trans
);
3860 case NETLINK_KOBJECT_UEVENT
:
3861 /* nothing to do: messages are strings */
3864 fd_trans_register(ret
, &target_netlink_audit_trans
);
3867 g_assert_not_reached();
3874 /* do_bind() Must return target values and target errnos. */
3875 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
3881 if ((int)addrlen
< 0) {
3882 return -TARGET_EINVAL
;
3885 addr
= alloca(addrlen
+1);
3887 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3891 return get_errno(bind(sockfd
, addr
, addrlen
));
3894 /* do_connect() Must return target values and target errnos. */
3895 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
3901 if ((int)addrlen
< 0) {
3902 return -TARGET_EINVAL
;
3905 addr
= alloca(addrlen
+1);
3907 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3911 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
3914 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3915 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
3916 int flags
, int send
)
3922 abi_ulong target_vec
;
3924 if (msgp
->msg_name
) {
3925 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
3926 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
3927 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
3928 tswapal(msgp
->msg_name
),
3930 if (ret
== -TARGET_EFAULT
) {
3931 /* For connected sockets msg_name and msg_namelen must
3932 * be ignored, so returning EFAULT immediately is wrong.
3933 * Instead, pass a bad msg_name to the host kernel, and
3934 * let it decide whether to return EFAULT or not.
3936 msg
.msg_name
= (void *)-1;
3941 msg
.msg_name
= NULL
;
3942 msg
.msg_namelen
= 0;
3944 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3945 msg
.msg_control
= alloca(msg
.msg_controllen
);
3946 memset(msg
.msg_control
, 0, msg
.msg_controllen
);
3948 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3950 count
= tswapal(msgp
->msg_iovlen
);
3951 target_vec
= tswapal(msgp
->msg_iov
);
3953 if (count
> IOV_MAX
) {
3954 /* sendrcvmsg returns a different errno for this condition than
3955 * readv/writev, so we must catch it here before lock_iovec() does.
3957 ret
= -TARGET_EMSGSIZE
;
3961 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3962 target_vec
, count
, send
);
3964 ret
= -host_to_target_errno(errno
);
3967 msg
.msg_iovlen
= count
;
3971 if (fd_trans_target_to_host_data(fd
)) {
3974 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3975 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3976 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3977 msg
.msg_iov
->iov_len
);
3979 msg
.msg_iov
->iov_base
= host_msg
;
3980 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3984 ret
= target_to_host_cmsg(&msg
, msgp
);
3986 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3990 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3991 if (!is_error(ret
)) {
3993 if (fd_trans_host_to_target_data(fd
)) {
3994 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3995 MIN(msg
.msg_iov
->iov_len
, len
));
3997 ret
= host_to_target_cmsg(msgp
, &msg
);
3999 if (!is_error(ret
)) {
4000 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
4001 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
4002 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
4003 msg
.msg_name
, msg
.msg_namelen
);
4015 unlock_iovec(vec
, target_vec
, count
, !send
);
4020 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
4021 int flags
, int send
)
4024 struct target_msghdr
*msgp
;
4026 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
4030 return -TARGET_EFAULT
;
4032 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
4033 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
4037 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
4038 * so it might not have this *mmsg-specific flag either.
4040 #ifndef MSG_WAITFORONE
4041 #define MSG_WAITFORONE 0x10000
4044 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
4045 unsigned int vlen
, unsigned int flags
,
4048 struct target_mmsghdr
*mmsgp
;
4052 if (vlen
> UIO_MAXIOV
) {
4056 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
4058 return -TARGET_EFAULT
;
4061 for (i
= 0; i
< vlen
; i
++) {
4062 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
4063 if (is_error(ret
)) {
4066 mmsgp
[i
].msg_len
= tswap32(ret
);
4067 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
4068 if (flags
& MSG_WAITFORONE
) {
4069 flags
|= MSG_DONTWAIT
;
4073 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
4075 /* Return number of datagrams sent if we sent any at all;
4076 * otherwise return the error.
4084 /* do_accept4() Must return target values and target errnos. */
4085 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
4086 abi_ulong target_addrlen_addr
, int flags
)
4093 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
4095 if (target_addr
== 0) {
4096 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
4099 /* linux returns EINVAL if addrlen pointer is invalid */
4100 if (get_user_u32(addrlen
, target_addrlen_addr
))
4101 return -TARGET_EINVAL
;
4103 if ((int)addrlen
< 0) {
4104 return -TARGET_EINVAL
;
4107 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
4108 return -TARGET_EINVAL
;
4110 addr
= alloca(addrlen
);
4112 ret
= get_errno(safe_accept4(fd
, addr
, &addrlen
, host_flags
));
4113 if (!is_error(ret
)) {
4114 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
4115 if (put_user_u32(addrlen
, target_addrlen_addr
))
4116 ret
= -TARGET_EFAULT
;
4121 /* do_getpeername() Must return target values and target errnos. */
4122 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
4123 abi_ulong target_addrlen_addr
)
4129 if (get_user_u32(addrlen
, target_addrlen_addr
))
4130 return -TARGET_EFAULT
;
4132 if ((int)addrlen
< 0) {
4133 return -TARGET_EINVAL
;
4136 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
4137 return -TARGET_EFAULT
;
4139 addr
= alloca(addrlen
);
4141 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
4142 if (!is_error(ret
)) {
4143 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
4144 if (put_user_u32(addrlen
, target_addrlen_addr
))
4145 ret
= -TARGET_EFAULT
;
4150 /* do_getsockname() Must return target values and target errnos. */
4151 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
4152 abi_ulong target_addrlen_addr
)
4158 if (get_user_u32(addrlen
, target_addrlen_addr
))
4159 return -TARGET_EFAULT
;
4161 if ((int)addrlen
< 0) {
4162 return -TARGET_EINVAL
;
4165 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
4166 return -TARGET_EFAULT
;
4168 addr
= alloca(addrlen
);
4170 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
4171 if (!is_error(ret
)) {
4172 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
4173 if (put_user_u32(addrlen
, target_addrlen_addr
))
4174 ret
= -TARGET_EFAULT
;
4179 /* do_socketpair() Must return target values and target errnos. */
4180 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
4181 abi_ulong target_tab_addr
)
4186 target_to_host_sock_type(&type
);
4188 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
4189 if (!is_error(ret
)) {
4190 if (put_user_s32(tab
[0], target_tab_addr
)
4191 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
4192 ret
= -TARGET_EFAULT
;
4197 /* do_sendto() Must return target values and target errnos. */
4198 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
4199 abi_ulong target_addr
, socklen_t addrlen
)
4203 void *copy_msg
= NULL
;
4206 if ((int)addrlen
< 0) {
4207 return -TARGET_EINVAL
;
4210 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
4212 return -TARGET_EFAULT
;
4213 if (fd_trans_target_to_host_data(fd
)) {
4214 copy_msg
= host_msg
;
4215 host_msg
= g_malloc(len
);
4216 memcpy(host_msg
, copy_msg
, len
);
4217 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
4223 addr
= alloca(addrlen
+1);
4224 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
4228 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
4230 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
4235 host_msg
= copy_msg
;
4237 unlock_user(host_msg
, msg
, 0);
4241 /* do_recvfrom() Must return target values and target errnos. */
4242 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
4243 abi_ulong target_addr
,
4244 abi_ulong target_addrlen
)
4251 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
4253 return -TARGET_EFAULT
;
4255 if (get_user_u32(addrlen
, target_addrlen
)) {
4256 ret
= -TARGET_EFAULT
;
4259 if ((int)addrlen
< 0) {
4260 ret
= -TARGET_EINVAL
;
4263 addr
= alloca(addrlen
);
4264 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
4267 addr
= NULL
; /* To keep compiler quiet. */
4268 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
4270 if (!is_error(ret
)) {
4271 if (fd_trans_host_to_target_data(fd
)) {
4273 trans
= fd_trans_host_to_target_data(fd
)(host_msg
, MIN(ret
, len
));
4274 if (is_error(trans
)) {
4280 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
4281 if (put_user_u32(addrlen
, target_addrlen
)) {
4282 ret
= -TARGET_EFAULT
;
4286 unlock_user(host_msg
, msg
, len
);
4289 unlock_user(host_msg
, msg
, 0);
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
4387 #define N_SHM_REGIONS 32
4389 static struct shm_region
{
4393 } shm_regions
[N_SHM_REGIONS
];
4395 #ifndef TARGET_SEMID64_DS
4396 /* asm-generic version of this struct */
4397 struct target_semid64_ds
4399 struct target_ipc_perm sem_perm
;
4400 abi_ulong sem_otime
;
4401 #if TARGET_ABI_BITS == 32
4402 abi_ulong __unused1
;
4404 abi_ulong sem_ctime
;
4405 #if TARGET_ABI_BITS == 32
4406 abi_ulong __unused2
;
4408 abi_ulong sem_nsems
;
4409 abi_ulong __unused3
;
4410 abi_ulong __unused4
;
4414 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
4415 abi_ulong target_addr
)
4417 struct target_ipc_perm
*target_ip
;
4418 struct target_semid64_ds
*target_sd
;
4420 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4421 return -TARGET_EFAULT
;
4422 target_ip
= &(target_sd
->sem_perm
);
4423 host_ip
->__key
= tswap32(target_ip
->__key
);
4424 host_ip
->uid
= tswap32(target_ip
->uid
);
4425 host_ip
->gid
= tswap32(target_ip
->gid
);
4426 host_ip
->cuid
= tswap32(target_ip
->cuid
);
4427 host_ip
->cgid
= tswap32(target_ip
->cgid
);
4428 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4429 host_ip
->mode
= tswap32(target_ip
->mode
);
4431 host_ip
->mode
= tswap16(target_ip
->mode
);
4433 #if defined(TARGET_PPC)
4434 host_ip
->__seq
= tswap32(target_ip
->__seq
);
4436 host_ip
->__seq
= tswap16(target_ip
->__seq
);
4438 unlock_user_struct(target_sd
, target_addr
, 0);
4442 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
4443 struct ipc_perm
*host_ip
)
4445 struct target_ipc_perm
*target_ip
;
4446 struct target_semid64_ds
*target_sd
;
4448 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4449 return -TARGET_EFAULT
;
4450 target_ip
= &(target_sd
->sem_perm
);
4451 target_ip
->__key
= tswap32(host_ip
->__key
);
4452 target_ip
->uid
= tswap32(host_ip
->uid
);
4453 target_ip
->gid
= tswap32(host_ip
->gid
);
4454 target_ip
->cuid
= tswap32(host_ip
->cuid
);
4455 target_ip
->cgid
= tswap32(host_ip
->cgid
);
4456 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4457 target_ip
->mode
= tswap32(host_ip
->mode
);
4459 target_ip
->mode
= tswap16(host_ip
->mode
);
4461 #if defined(TARGET_PPC)
4462 target_ip
->__seq
= tswap32(host_ip
->__seq
);
4464 target_ip
->__seq
= tswap16(host_ip
->__seq
);
4466 unlock_user_struct(target_sd
, target_addr
, 1);
4470 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
4471 abi_ulong target_addr
)
4473 struct target_semid64_ds
*target_sd
;
4475 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4476 return -TARGET_EFAULT
;
4477 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
4478 return -TARGET_EFAULT
;
4479 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
4480 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
4481 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
4482 unlock_user_struct(target_sd
, target_addr
, 0);
4486 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
4487 struct semid_ds
*host_sd
)
4489 struct target_semid64_ds
*target_sd
;
4491 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4492 return -TARGET_EFAULT
;
4493 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
4494 return -TARGET_EFAULT
;
4495 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
4496 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
4497 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
4498 unlock_user_struct(target_sd
, target_addr
, 1);
/* Target view of struct seminfo (all plain ints, per asm-generic).
 * NOTE(review): field list reconstructed from upstream QEMU — the
 * original fields were elided by the extraction; confirm against it.
 */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
4515 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
4516 struct seminfo
*host_seminfo
)
4518 struct target_seminfo
*target_seminfo
;
4519 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
4520 return -TARGET_EFAULT
;
4521 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
4522 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
4523 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
4524 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
4525 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
4526 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
4527 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
4528 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
4529 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
4530 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
4531 unlock_user_struct(target_seminfo
, target_addr
, 1);
4537 struct semid_ds
*buf
;
4538 unsigned short *array
;
4539 struct seminfo
*__buf
;
4542 union target_semun
{
4549 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
4550 abi_ulong target_addr
)
4553 unsigned short *array
;
4555 struct semid_ds semid_ds
;
4558 semun
.buf
= &semid_ds
;
4560 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4562 return get_errno(ret
);
4564 nsems
= semid_ds
.sem_nsems
;
4566 *host_array
= g_try_new(unsigned short, nsems
);
4568 return -TARGET_ENOMEM
;
4570 array
= lock_user(VERIFY_READ
, target_addr
,
4571 nsems
*sizeof(unsigned short), 1);
4573 g_free(*host_array
);
4574 return -TARGET_EFAULT
;
4577 for(i
=0; i
<nsems
; i
++) {
4578 __get_user((*host_array
)[i
], &array
[i
]);
4580 unlock_user(array
, target_addr
, 0);
4585 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
4586 unsigned short **host_array
)
4589 unsigned short *array
;
4591 struct semid_ds semid_ds
;
4594 semun
.buf
= &semid_ds
;
4596 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4598 return get_errno(ret
);
4600 nsems
= semid_ds
.sem_nsems
;
4602 array
= lock_user(VERIFY_WRITE
, target_addr
,
4603 nsems
*sizeof(unsigned short), 0);
4605 return -TARGET_EFAULT
;
4607 for(i
=0; i
<nsems
; i
++) {
4608 __put_user((*host_array
)[i
], &array
[i
]);
4610 g_free(*host_array
);
4611 unlock_user(array
, target_addr
, 1);
4616 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
4617 abi_ulong target_arg
)
4619 union target_semun target_su
= { .buf
= target_arg
};
4621 struct semid_ds dsarg
;
4622 unsigned short *array
= NULL
;
4623 struct seminfo seminfo
;
4624 abi_long ret
= -TARGET_EINVAL
;
4631 /* In 64 bit cross-endian situations, we will erroneously pick up
4632 * the wrong half of the union for the "val" element. To rectify
4633 * this, the entire 8-byte structure is byteswapped, followed by
4634 * a swap of the 4 byte val field. In other cases, the data is
4635 * already in proper host byte order. */
4636 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
4637 target_su
.buf
= tswapal(target_su
.buf
);
4638 arg
.val
= tswap32(target_su
.val
);
4640 arg
.val
= target_su
.val
;
4642 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4646 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
4650 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4651 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
4658 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
4662 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4663 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
4669 arg
.__buf
= &seminfo
;
4670 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4671 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
4679 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
/* Target view of struct sembuf.
 * NOTE(review): sem_op/sem_flg reconstructed from upstream QEMU — the
 * original fields were elided by the extraction; confirm against it.
 */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};
4692 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
4693 abi_ulong target_addr
,
4696 struct target_sembuf
*target_sembuf
;
4699 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
4700 nsops
*sizeof(struct target_sembuf
), 1);
4702 return -TARGET_EFAULT
;
4704 for(i
=0; i
<nsops
; i
++) {
4705 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
4706 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
4707 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
4710 unlock_user(target_sembuf
, target_addr
, 0);
4715 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
4717 struct sembuf sops
[nsops
];
4719 if (target_to_host_sembuf(sops
, ptr
, nsops
))
4720 return -TARGET_EFAULT
;
4722 return get_errno(safe_semtimedop(semid
, sops
, nsops
, NULL
));
4725 struct target_msqid_ds
4727 struct target_ipc_perm msg_perm
;
4728 abi_ulong msg_stime
;
4729 #if TARGET_ABI_BITS == 32
4730 abi_ulong __unused1
;
4732 abi_ulong msg_rtime
;
4733 #if TARGET_ABI_BITS == 32
4734 abi_ulong __unused2
;
4736 abi_ulong msg_ctime
;
4737 #if TARGET_ABI_BITS == 32
4738 abi_ulong __unused3
;
4740 abi_ulong __msg_cbytes
;
4742 abi_ulong msg_qbytes
;
4743 abi_ulong msg_lspid
;
4744 abi_ulong msg_lrpid
;
4745 abi_ulong __unused4
;
4746 abi_ulong __unused5
;
4749 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
4750 abi_ulong target_addr
)
4752 struct target_msqid_ds
*target_md
;
4754 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
4755 return -TARGET_EFAULT
;
4756 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
4757 return -TARGET_EFAULT
;
4758 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
4759 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
4760 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
4761 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
4762 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
4763 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
4764 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
4765 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
4766 unlock_user_struct(target_md
, target_addr
, 0);
4770 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
4771 struct msqid_ds
*host_md
)
4773 struct target_msqid_ds
*target_md
;
4775 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
4776 return -TARGET_EFAULT
;
4777 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
4778 return -TARGET_EFAULT
;
4779 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
4780 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
4781 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
4782 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
4783 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
4784 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
4785 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
4786 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
4787 unlock_user_struct(target_md
, target_addr
, 1);
/* Target view of struct msginfo.
 * NOTE(review): int fields reconstructed from upstream QEMU — the
 * original fields were elided by the extraction; confirm against it.
 */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4802 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
4803 struct msginfo
*host_msginfo
)
4805 struct target_msginfo
*target_msginfo
;
4806 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
4807 return -TARGET_EFAULT
;
4808 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
4809 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
4810 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
4811 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
4812 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
4813 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
4814 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
4815 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
4816 unlock_user_struct(target_msginfo
, target_addr
, 1);
4820 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
4822 struct msqid_ds dsarg
;
4823 struct msginfo msginfo
;
4824 abi_long ret
= -TARGET_EINVAL
;
4832 if (target_to_host_msqid_ds(&dsarg
,ptr
))
4833 return -TARGET_EFAULT
;
4834 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
4835 if (host_to_target_msqid_ds(ptr
,&dsarg
))
4836 return -TARGET_EFAULT
;
4839 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
4843 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
4844 if (host_to_target_msginfo(ptr
, &msginfo
))
4845 return -TARGET_EFAULT
;
4852 struct target_msgbuf
{
4857 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
4858 ssize_t msgsz
, int msgflg
)
4860 struct target_msgbuf
*target_mb
;
4861 struct msgbuf
*host_mb
;
4865 return -TARGET_EINVAL
;
4868 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
4869 return -TARGET_EFAULT
;
4870 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4872 unlock_user_struct(target_mb
, msgp
, 0);
4873 return -TARGET_ENOMEM
;
4875 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
4876 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
4877 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
4879 unlock_user_struct(target_mb
, msgp
, 0);
4884 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
4885 ssize_t msgsz
, abi_long msgtyp
,
4888 struct target_msgbuf
*target_mb
;
4890 struct msgbuf
*host_mb
;
4894 return -TARGET_EINVAL
;
4897 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
4898 return -TARGET_EFAULT
;
4900 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4902 ret
= -TARGET_ENOMEM
;
4905 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
4908 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
4909 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
4910 if (!target_mtext
) {
4911 ret
= -TARGET_EFAULT
;
4914 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
4915 unlock_user(target_mtext
, target_mtext_addr
, ret
);
4918 target_mb
->mtype
= tswapal(host_mb
->mtype
);
4922 unlock_user_struct(target_mb
, msgp
, 1);
4927 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
4928 abi_ulong target_addr
)
4930 struct target_shmid_ds
*target_sd
;
4932 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4933 return -TARGET_EFAULT
;
4934 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
4935 return -TARGET_EFAULT
;
4936 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4937 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4938 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4939 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4940 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4941 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4942 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4943 unlock_user_struct(target_sd
, target_addr
, 0);
4947 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
4948 struct shmid_ds
*host_sd
)
4950 struct target_shmid_ds
*target_sd
;
4952 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4953 return -TARGET_EFAULT
;
4954 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
4955 return -TARGET_EFAULT
;
4956 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4957 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4958 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4959 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4960 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4961 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4962 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4963 unlock_user_struct(target_sd
, target_addr
, 1);
4967 struct target_shminfo
{
4975 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
4976 struct shminfo
*host_shminfo
)
4978 struct target_shminfo
*target_shminfo
;
4979 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
4980 return -TARGET_EFAULT
;
4981 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
4982 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
4983 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
4984 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
4985 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
4986 unlock_user_struct(target_shminfo
, target_addr
, 1);
4990 struct target_shm_info
{
4995 abi_ulong swap_attempts
;
4996 abi_ulong swap_successes
;
4999 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
5000 struct shm_info
*host_shm_info
)
5002 struct target_shm_info
*target_shm_info
;
5003 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
5004 return -TARGET_EFAULT
;
5005 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
5006 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
5007 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
5008 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
5009 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
5010 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
5011 unlock_user_struct(target_shm_info
, target_addr
, 1);
5015 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
5017 struct shmid_ds dsarg
;
5018 struct shminfo shminfo
;
5019 struct shm_info shm_info
;
5020 abi_long ret
= -TARGET_EINVAL
;
5028 if (target_to_host_shmid_ds(&dsarg
, buf
))
5029 return -TARGET_EFAULT
;
5030 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
5031 if (host_to_target_shmid_ds(buf
, &dsarg
))
5032 return -TARGET_EFAULT
;
5035 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
5036 if (host_to_target_shminfo(buf
, &shminfo
))
5037 return -TARGET_EFAULT
;
5040 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
5041 if (host_to_target_shm_info(buf
, &shm_info
))
5042 return -TARGET_EFAULT
;
5047 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
5054 #ifndef TARGET_FORCE_SHMLBA
5055 /* For most architectures, SHMLBA is the same as the page size;
5056 * some architectures have larger values, in which case they should
5057 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
5058 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
5059 * and defining its own value for SHMLBA.
5061 * The kernel also permits SHMLBA to be set by the architecture to a
5062 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
5063 * this means that addresses are rounded to the large size if
5064 * SHM_RND is set but addresses not aligned to that size are not rejected
5065 * as long as they are at least page-aligned. Since the only architecture
5066 * which uses this is ia64 this code doesn't provide for that oddity.
5068 static inline abi_ulong
target_shmlba(CPUArchState
*cpu_env
)
5070 return TARGET_PAGE_SIZE
;
5074 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
5075 int shmid
, abi_ulong shmaddr
, int shmflg
)
5079 struct shmid_ds shm_info
;
5083 /* find out the length of the shared memory segment */
5084 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
5085 if (is_error(ret
)) {
5086 /* can't get length, bail out */
5090 shmlba
= target_shmlba(cpu_env
);
5092 if (shmaddr
& (shmlba
- 1)) {
5093 if (shmflg
& SHM_RND
) {
5094 shmaddr
&= ~(shmlba
- 1);
5096 return -TARGET_EINVAL
;
5099 if (!guest_range_valid(shmaddr
, shm_info
.shm_segsz
)) {
5100 return -TARGET_EINVAL
;
5106 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
5108 abi_ulong mmap_start
;
5110 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
5112 if (mmap_start
== -1) {
5114 host_raddr
= (void *)-1;
5116 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
5119 if (host_raddr
== (void *)-1) {
5121 return get_errno((long)host_raddr
);
5123 raddr
=h2g((unsigned long)host_raddr
);
5125 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
5126 PAGE_VALID
| PAGE_READ
|
5127 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
5129 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
5130 if (!shm_regions
[i
].in_use
) {
5131 shm_regions
[i
].in_use
= true;
5132 shm_regions
[i
].start
= raddr
;
5133 shm_regions
[i
].size
= shm_info
.shm_segsz
;
5143 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
5150 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
5151 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
5152 shm_regions
[i
].in_use
= false;
5153 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
5157 rv
= get_errno(shmdt(g2h(shmaddr
)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos.
 * Demultiplexer for the legacy ipc(2) syscall: dispatches to the
 * individual sem*/msg*/shm* emulation helpers based on 'call'.
 */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* old-style call packs msgp/msgtyp into a kludge struct */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
5271 /* kernel structure types definitions */
5273 #define STRUCT(name, ...) STRUCT_ ## name,
5274 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
5276 #include "syscall_types.h"
5280 #undef STRUCT_SPECIAL
5282 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
5283 #define STRUCT_SPECIAL(name)
5284 #include "syscall_types.h"
5286 #undef STRUCT_SPECIAL
5288 typedef struct IOCTLEntry IOCTLEntry
;
5290 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5291 int fd
, int cmd
, abi_long arg
);
5295 unsigned int host_cmd
;
5298 do_ioctl_fn
*do_ioctl
;
5299 const argtype arg_type
[5];
5302 #define IOC_R 0x0001
5303 #define IOC_W 0x0002
5304 #define IOC_RW (IOC_R | IOC_W)
5306 #define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
                            / sizeof(struct fiemap_extent))

/* Conversion handler for FS_IOC_FIEMAP.
 * The parameter for this ioctl is a struct fiemap followed
 * by an array of struct fiemap_extent whose size is set
 * in fiemap->fm_extent_count. The array is filled in by the
 * ioctl.
 */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
5397 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5398 int fd
, int cmd
, abi_long arg
)
5400 const argtype
*arg_type
= ie
->arg_type
;
5404 struct ifconf
*host_ifconf
;
5406 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
5407 int target_ifreq_size
;
5412 abi_long target_ifc_buf
;
5416 assert(arg_type
[0] == TYPE_PTR
);
5417 assert(ie
->access
== IOC_RW
);
5420 target_size
= thunk_type_size(arg_type
, 0);
5422 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5424 return -TARGET_EFAULT
;
5425 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5426 unlock_user(argptr
, arg
, 0);
5428 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
5429 target_ifc_len
= host_ifconf
->ifc_len
;
5430 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
5432 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
5433 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
5434 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
5436 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
5437 if (outbufsz
> MAX_STRUCT_SIZE
) {
5438 /* We can't fit all the extents into the fixed size buffer.
5439 * Allocate one that is large enough and use it instead.
5441 host_ifconf
= malloc(outbufsz
);
5443 return -TARGET_ENOMEM
;
5445 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
5448 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
5450 host_ifconf
->ifc_len
= host_ifc_len
;
5451 host_ifconf
->ifc_buf
= host_ifc_buf
;
5453 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
5454 if (!is_error(ret
)) {
5455 /* convert host ifc_len to target ifc_len */
5457 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
5458 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
5459 host_ifconf
->ifc_len
= target_ifc_len
;
5461 /* restore target ifc_buf */
5463 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
5465 /* copy struct ifconf to target user */
5467 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5469 return -TARGET_EFAULT
;
5470 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
5471 unlock_user(argptr
, arg
, target_size
);
5473 /* copy ifreq[] to target user */
5475 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
5476 for (i
= 0; i
< nb_ifreq
; i
++) {
5477 thunk_convert(argptr
+ i
* target_ifreq_size
,
5478 host_ifc_buf
+ i
* sizeof(struct ifreq
),
5479 ifreq_arg_type
, THUNK_TARGET
);
5481 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
5491 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5492 int cmd
, abi_long arg
)
5495 struct dm_ioctl
*host_dm
;
5496 abi_long guest_data
;
5497 uint32_t guest_data_size
;
5499 const argtype
*arg_type
= ie
->arg_type
;
5501 void *big_buf
= NULL
;
5505 target_size
= thunk_type_size(arg_type
, 0);
5506 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5508 ret
= -TARGET_EFAULT
;
5511 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5512 unlock_user(argptr
, arg
, 0);
5514 /* buf_temp is too small, so fetch things into a bigger buffer */
5515 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
5516 memcpy(big_buf
, buf_temp
, target_size
);
5520 guest_data
= arg
+ host_dm
->data_start
;
5521 if ((guest_data
- arg
) < 0) {
5522 ret
= -TARGET_EINVAL
;
5525 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5526 host_data
= (char*)host_dm
+ host_dm
->data_start
;
5528 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
5530 ret
= -TARGET_EFAULT
;
5534 switch (ie
->host_cmd
) {
5536 case DM_LIST_DEVICES
:
5539 case DM_DEV_SUSPEND
:
5542 case DM_TABLE_STATUS
:
5543 case DM_TABLE_CLEAR
:
5545 case DM_LIST_VERSIONS
:
5549 case DM_DEV_SET_GEOMETRY
:
5550 /* data contains only strings */
5551 memcpy(host_data
, argptr
, guest_data_size
);
5554 memcpy(host_data
, argptr
, guest_data_size
);
5555 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
5559 void *gspec
= argptr
;
5560 void *cur_data
= host_data
;
5561 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5562 int spec_size
= thunk_type_size(arg_type
, 0);
5565 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5566 struct dm_target_spec
*spec
= cur_data
;
5570 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
5571 slen
= strlen((char*)gspec
+ spec_size
) + 1;
5573 spec
->next
= sizeof(*spec
) + slen
;
5574 strcpy((char*)&spec
[1], gspec
+ spec_size
);
5576 cur_data
+= spec
->next
;
5581 ret
= -TARGET_EINVAL
;
5582 unlock_user(argptr
, guest_data
, 0);
5585 unlock_user(argptr
, guest_data
, 0);
5587 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5588 if (!is_error(ret
)) {
5589 guest_data
= arg
+ host_dm
->data_start
;
5590 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5591 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
5592 switch (ie
->host_cmd
) {
5597 case DM_DEV_SUSPEND
:
5600 case DM_TABLE_CLEAR
:
5602 case DM_DEV_SET_GEOMETRY
:
5603 /* no return data */
5605 case DM_LIST_DEVICES
:
5607 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
5608 uint32_t remaining_data
= guest_data_size
;
5609 void *cur_data
= argptr
;
5610 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
5611 int nl_size
= 12; /* can't use thunk_size due to alignment */
5614 uint32_t next
= nl
->next
;
5616 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
5618 if (remaining_data
< nl
->next
) {
5619 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5622 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
5623 strcpy(cur_data
+ nl_size
, nl
->name
);
5624 cur_data
+= nl
->next
;
5625 remaining_data
-= nl
->next
;
5629 nl
= (void*)nl
+ next
;
5634 case DM_TABLE_STATUS
:
5636 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
5637 void *cur_data
= argptr
;
5638 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5639 int spec_size
= thunk_type_size(arg_type
, 0);
5642 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5643 uint32_t next
= spec
->next
;
5644 int slen
= strlen((char*)&spec
[1]) + 1;
5645 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
5646 if (guest_data_size
< spec
->next
) {
5647 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5650 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
5651 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
5652 cur_data
= argptr
+ spec
->next
;
5653 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
5659 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
5660 int count
= *(uint32_t*)hdata
;
5661 uint64_t *hdev
= hdata
+ 8;
5662 uint64_t *gdev
= argptr
+ 8;
5665 *(uint32_t*)argptr
= tswap32(count
);
5666 for (i
= 0; i
< count
; i
++) {
5667 *gdev
= tswap64(*hdev
);
5673 case DM_LIST_VERSIONS
:
5675 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
5676 uint32_t remaining_data
= guest_data_size
;
5677 void *cur_data
= argptr
;
5678 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
5679 int vers_size
= thunk_type_size(arg_type
, 0);
5682 uint32_t next
= vers
->next
;
5684 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
5686 if (remaining_data
< vers
->next
) {
5687 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5690 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
5691 strcpy(cur_data
+ vers_size
, vers
->name
);
5692 cur_data
+= vers
->next
;
5693 remaining_data
-= vers
->next
;
5697 vers
= (void*)vers
+ next
;
5702 unlock_user(argptr
, guest_data
, 0);
5703 ret
= -TARGET_EINVAL
;
5706 unlock_user(argptr
, guest_data
, guest_data_size
);
5708 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5710 ret
= -TARGET_EFAULT
;
5713 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5714 unlock_user(argptr
, arg
, target_size
);
5721 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5722 int cmd
, abi_long arg
)
5726 const argtype
*arg_type
= ie
->arg_type
;
5727 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
5730 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
5731 struct blkpg_partition host_part
;
5733 /* Read and convert blkpg */
5735 target_size
= thunk_type_size(arg_type
, 0);
5736 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5738 ret
= -TARGET_EFAULT
;
5741 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5742 unlock_user(argptr
, arg
, 0);
5744 switch (host_blkpg
->op
) {
5745 case BLKPG_ADD_PARTITION
:
5746 case BLKPG_DEL_PARTITION
:
5747 /* payload is struct blkpg_partition */
5750 /* Unknown opcode */
5751 ret
= -TARGET_EINVAL
;
5755 /* Read and convert blkpg->data */
5756 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
5757 target_size
= thunk_type_size(part_arg_type
, 0);
5758 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5760 ret
= -TARGET_EFAULT
;
5763 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
5764 unlock_user(argptr
, arg
, 0);
5766 /* Swizzle the data pointer to our local copy and call! */
5767 host_blkpg
->data
= &host_part
;
5768 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
5774 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5775 int fd
, int cmd
, abi_long arg
)
5777 const argtype
*arg_type
= ie
->arg_type
;
5778 const StructEntry
*se
;
5779 const argtype
*field_types
;
5780 const int *dst_offsets
, *src_offsets
;
5783 abi_ulong
*target_rt_dev_ptr
;
5784 unsigned long *host_rt_dev_ptr
;
5788 assert(ie
->access
== IOC_W
);
5789 assert(*arg_type
== TYPE_PTR
);
5791 assert(*arg_type
== TYPE_STRUCT
);
5792 target_size
= thunk_type_size(arg_type
, 0);
5793 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5795 return -TARGET_EFAULT
;
5798 assert(*arg_type
== (int)STRUCT_rtentry
);
5799 se
= struct_entries
+ *arg_type
++;
5800 assert(se
->convert
[0] == NULL
);
5801 /* convert struct here to be able to catch rt_dev string */
5802 field_types
= se
->field_types
;
5803 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
5804 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
5805 for (i
= 0; i
< se
->nb_fields
; i
++) {
5806 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
5807 assert(*field_types
== TYPE_PTRVOID
);
5808 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
5809 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
5810 if (*target_rt_dev_ptr
!= 0) {
5811 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
5812 tswapal(*target_rt_dev_ptr
));
5813 if (!*host_rt_dev_ptr
) {
5814 unlock_user(argptr
, arg
, 0);
5815 return -TARGET_EFAULT
;
5818 *host_rt_dev_ptr
= 0;
5823 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
5824 argptr
+ src_offsets
[i
],
5825 field_types
, THUNK_HOST
);
5827 unlock_user(argptr
, arg
, 0);
5829 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5830 if (*host_rt_dev_ptr
!= 0) {
5831 unlock_user((void *)*host_rt_dev_ptr
,
5832 *target_rt_dev_ptr
, 0);
5837 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5838 int fd
, int cmd
, abi_long arg
)
5840 int sig
= target_to_host_signal(arg
);
5841 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
5845 static abi_long
do_ioctl_tiocgptpeer(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5846 int fd
, int cmd
, abi_long arg
)
5848 int flags
= target_to_host_bitmask(arg
, fcntl_flags_tbl
);
5849 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, flags
));
5853 static IOCTLEntry ioctl_entries
[] = {
5854 #define IOCTL(cmd, access, ...) \
5855 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5856 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5857 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5858 #define IOCTL_IGNORE(cmd) \
5859 { TARGET_ ## cmd, 0, #cmd },
5864 /* ??? Implement proper locking for ioctls. */
5865 /* do_ioctl() Must return target values and target errnos. */
5866 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
5868 const IOCTLEntry
*ie
;
5869 const argtype
*arg_type
;
5871 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
5877 if (ie
->target_cmd
== 0) {
5878 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
5879 return -TARGET_ENOSYS
;
5881 if (ie
->target_cmd
== cmd
)
5885 arg_type
= ie
->arg_type
;
5887 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
5888 } else if (!ie
->host_cmd
) {
5889 /* Some architectures define BSD ioctls in their headers
5890 that are not implemented in Linux. */
5891 return -TARGET_ENOSYS
;
5894 switch(arg_type
[0]) {
5897 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
5901 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
5905 target_size
= thunk_type_size(arg_type
, 0);
5906 switch(ie
->access
) {
5908 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5909 if (!is_error(ret
)) {
5910 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5912 return -TARGET_EFAULT
;
5913 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5914 unlock_user(argptr
, arg
, target_size
);
5918 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5920 return -TARGET_EFAULT
;
5921 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5922 unlock_user(argptr
, arg
, 0);
5923 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5927 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5929 return -TARGET_EFAULT
;
5930 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5931 unlock_user(argptr
, arg
, 0);
5932 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5933 if (!is_error(ret
)) {
5934 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5936 return -TARGET_EFAULT
;
5937 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5938 unlock_user(argptr
, arg
, target_size
);
5944 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5945 (long)cmd
, arg_type
[0]);
5946 ret
= -TARGET_ENOSYS
;
5952 static const bitmask_transtbl iflag_tbl
[] = {
5953 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
5954 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
5955 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
5956 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
5957 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
5958 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
5959 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
5960 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
5961 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
5962 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
5963 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
5964 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
5965 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
5966 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
5970 static const bitmask_transtbl oflag_tbl
[] = {
5971 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
5972 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
5973 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
5974 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
5975 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
5976 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
5977 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
5978 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
5979 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
5980 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
5981 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
5982 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
5983 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
5984 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
5985 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
5986 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
5987 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
5988 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
5989 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
5990 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
5991 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
5992 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
5993 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
5994 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
5998 static const bitmask_transtbl cflag_tbl
[] = {
5999 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
6000 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
6001 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
6002 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
6003 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
6004 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
6005 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
6006 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
6007 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
6008 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
6009 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
6010 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
6011 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
6012 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
6013 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
6014 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
6015 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
6016 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
6017 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
6018 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
6019 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
6020 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
6021 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
6022 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
6023 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
6024 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
6025 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
6026 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
6027 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
6028 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
6029 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
6033 static const bitmask_transtbl lflag_tbl
[] = {
6034 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
6035 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
6036 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
6037 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
6038 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
6039 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
6040 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
6041 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
6042 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
6043 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
6044 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
6045 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
6046 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
6047 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
6048 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
6052 static void target_to_host_termios (void *dst
, const void *src
)
6054 struct host_termios
*host
= dst
;
6055 const struct target_termios
*target
= src
;
6058 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
6060 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
6062 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
6064 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
6065 host
->c_line
= target
->c_line
;
6067 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
6068 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
6069 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
6070 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
6071 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
6072 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
6073 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
6074 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
6075 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
6076 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
6077 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
6078 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
6079 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
6080 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
6081 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
6082 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
6083 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
6084 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
6087 static void host_to_target_termios (void *dst
, const void *src
)
6089 struct target_termios
*target
= dst
;
6090 const struct host_termios
*host
= src
;
6093 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
6095 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
6097 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
6099 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
6100 target
->c_line
= host
->c_line
;
6102 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
6103 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
6104 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
6105 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
6106 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
6107 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
6108 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
6109 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
6110 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
6111 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
6112 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
6113 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
6114 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
6115 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
6116 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
6117 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
6118 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
6119 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
6122 static const StructEntry struct_termios_def
= {
6123 .convert
= { host_to_target_termios
, target_to_host_termios
},
6124 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
6125 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
6128 static bitmask_transtbl mmap_flags_tbl
[] = {
6129 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
6130 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
6131 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
6132 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
,
6133 MAP_ANONYMOUS
, MAP_ANONYMOUS
},
6134 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
,
6135 MAP_GROWSDOWN
, MAP_GROWSDOWN
},
6136 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
,
6137 MAP_DENYWRITE
, MAP_DENYWRITE
},
6138 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
,
6139 MAP_EXECUTABLE
, MAP_EXECUTABLE
},
6140 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
6141 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
,
6142 MAP_NORESERVE
, MAP_NORESERVE
},
6143 { TARGET_MAP_HUGETLB
, TARGET_MAP_HUGETLB
, MAP_HUGETLB
, MAP_HUGETLB
},
6144 /* MAP_STACK had been ignored by the kernel for quite some time.
6145 Recognize it for the target insofar as we do not want to pass
6146 it through to the host. */
6147 { TARGET_MAP_STACK
, TARGET_MAP_STACK
, 0, 0 },
6151 #if defined(TARGET_I386)
6153 /* NOTE: there is really one LDT for all the threads */
6154 static uint8_t *ldt_table
;
6156 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
6163 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
6164 if (size
> bytecount
)
6166 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
6168 return -TARGET_EFAULT
;
6169 /* ??? Should this by byteswapped? */
6170 memcpy(p
, ldt_table
, size
);
6171 unlock_user(p
, ptr
, size
);
6175 /* XXX: add locking support */
6176 static abi_long
write_ldt(CPUX86State
*env
,
6177 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
6179 struct target_modify_ldt_ldt_s ldt_info
;
6180 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6181 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
6182 int seg_not_present
, useable
, lm
;
6183 uint32_t *lp
, entry_1
, entry_2
;
6185 if (bytecount
!= sizeof(ldt_info
))
6186 return -TARGET_EINVAL
;
6187 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
6188 return -TARGET_EFAULT
;
6189 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
6190 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
6191 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
6192 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
6193 unlock_user_struct(target_ldt_info
, ptr
, 0);
6195 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
6196 return -TARGET_EINVAL
;
6197 seg_32bit
= ldt_info
.flags
& 1;
6198 contents
= (ldt_info
.flags
>> 1) & 3;
6199 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
6200 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
6201 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
6202 useable
= (ldt_info
.flags
>> 6) & 1;
6206 lm
= (ldt_info
.flags
>> 7) & 1;
6208 if (contents
== 3) {
6210 return -TARGET_EINVAL
;
6211 if (seg_not_present
== 0)
6212 return -TARGET_EINVAL
;
6214 /* allocate the LDT */
6216 env
->ldt
.base
= target_mmap(0,
6217 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
6218 PROT_READ
|PROT_WRITE
,
6219 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
6220 if (env
->ldt
.base
== -1)
6221 return -TARGET_ENOMEM
;
6222 memset(g2h(env
->ldt
.base
), 0,
6223 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
6224 env
->ldt
.limit
= 0xffff;
6225 ldt_table
= g2h(env
->ldt
.base
);
6228 /* NOTE: same code as Linux kernel */
6229 /* Allow LDTs to be cleared by the user. */
6230 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
6233 read_exec_only
== 1 &&
6235 limit_in_pages
== 0 &&
6236 seg_not_present
== 1 &&
6244 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
6245 (ldt_info
.limit
& 0x0ffff);
6246 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
6247 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
6248 (ldt_info
.limit
& 0xf0000) |
6249 ((read_exec_only
^ 1) << 9) |
6251 ((seg_not_present
^ 1) << 15) |
6253 (limit_in_pages
<< 23) |
6257 entry_2
|= (useable
<< 20);
6259 /* Install the new entry ... */
6261 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
6262 lp
[0] = tswap32(entry_1
);
6263 lp
[1] = tswap32(entry_2
);
6267 /* specific and weird i386 syscalls */
6268 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
6269 unsigned long bytecount
)
6275 ret
= read_ldt(ptr
, bytecount
);
6278 ret
= write_ldt(env
, ptr
, bytecount
, 1);
6281 ret
= write_ldt(env
, ptr
, bytecount
, 0);
6284 ret
= -TARGET_ENOSYS
;
6290 #if defined(TARGET_I386) && defined(TARGET_ABI32)
6291 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6293 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
6294 struct target_modify_ldt_ldt_s ldt_info
;
6295 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6296 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
6297 int seg_not_present
, useable
, lm
;
6298 uint32_t *lp
, entry_1
, entry_2
;
6301 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6302 if (!target_ldt_info
)
6303 return -TARGET_EFAULT
;
6304 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
6305 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
6306 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
6307 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
6308 if (ldt_info
.entry_number
== -1) {
6309 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
6310 if (gdt_table
[i
] == 0) {
6311 ldt_info
.entry_number
= i
;
6312 target_ldt_info
->entry_number
= tswap32(i
);
6317 unlock_user_struct(target_ldt_info
, ptr
, 1);
6319 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
6320 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
6321 return -TARGET_EINVAL
;
6322 seg_32bit
= ldt_info
.flags
& 1;
6323 contents
= (ldt_info
.flags
>> 1) & 3;
6324 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
6325 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
6326 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
6327 useable
= (ldt_info
.flags
>> 6) & 1;
6331 lm
= (ldt_info
.flags
>> 7) & 1;
6334 if (contents
== 3) {
6335 if (seg_not_present
== 0)
6336 return -TARGET_EINVAL
;
6339 /* NOTE: same code as Linux kernel */
6340 /* Allow LDTs to be cleared by the user. */
6341 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
6342 if ((contents
== 0 &&
6343 read_exec_only
== 1 &&
6345 limit_in_pages
== 0 &&
6346 seg_not_present
== 1 &&
6354 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
6355 (ldt_info
.limit
& 0x0ffff);
6356 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
6357 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
6358 (ldt_info
.limit
& 0xf0000) |
6359 ((read_exec_only
^ 1) << 9) |
6361 ((seg_not_present
^ 1) << 15) |
6363 (limit_in_pages
<< 23) |
6368 /* Install the new entry ... */
6370 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
6371 lp
[0] = tswap32(entry_1
);
6372 lp
[1] = tswap32(entry_2
);
6376 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6378 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6379 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
6380 uint32_t base_addr
, limit
, flags
;
6381 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
6382 int seg_not_present
, useable
, lm
;
6383 uint32_t *lp
, entry_1
, entry_2
;
6385 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6386 if (!target_ldt_info
)
6387 return -TARGET_EFAULT
;
6388 idx
= tswap32(target_ldt_info
->entry_number
);
6389 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
6390 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
6391 unlock_user_struct(target_ldt_info
, ptr
, 1);
6392 return -TARGET_EINVAL
;
6394 lp
= (uint32_t *)(gdt_table
+ idx
);
6395 entry_1
= tswap32(lp
[0]);
6396 entry_2
= tswap32(lp
[1]);
6398 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
6399 contents
= (entry_2
>> 10) & 3;
6400 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
6401 seg_32bit
= (entry_2
>> 22) & 1;
6402 limit_in_pages
= (entry_2
>> 23) & 1;
6403 useable
= (entry_2
>> 20) & 1;
6407 lm
= (entry_2
>> 21) & 1;
6409 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
6410 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
6411 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
6412 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
6413 base_addr
= (entry_1
>> 16) |
6414 (entry_2
& 0xff000000) |
6415 ((entry_2
& 0xff) << 16);
6416 target_ldt_info
->base_addr
= tswapal(base_addr
);
6417 target_ldt_info
->limit
= tswap32(limit
);
6418 target_ldt_info
->flags
= tswap32(flags
);
6419 unlock_user_struct(target_ldt_info
, ptr
, 1);
6422 #endif /* TARGET_I386 && TARGET_ABI32 */
6424 #ifndef TARGET_ABI32
6425 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
6432 case TARGET_ARCH_SET_GS
:
6433 case TARGET_ARCH_SET_FS
:
6434 if (code
== TARGET_ARCH_SET_GS
)
6438 cpu_x86_load_seg(env
, idx
, 0);
6439 env
->segs
[idx
].base
= addr
;
6441 case TARGET_ARCH_GET_GS
:
6442 case TARGET_ARCH_GET_FS
:
6443 if (code
== TARGET_ARCH_GET_GS
)
6447 val
= env
->segs
[idx
].base
;
6448 if (put_user(val
, addr
, abi_ulong
))
6449 ret
= -TARGET_EFAULT
;
6452 ret
= -TARGET_EINVAL
;
6459 #endif /* defined(TARGET_I386) */
6461 #define NEW_STACK_SIZE 0x40000
6464 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
6467 pthread_mutex_t mutex
;
6468 pthread_cond_t cond
;
6471 abi_ulong child_tidptr
;
6472 abi_ulong parent_tidptr
;
6476 static void *clone_func(void *arg
)
6478 new_thread_info
*info
= arg
;
6483 rcu_register_thread();
6484 tcg_register_thread();
6486 cpu
= ENV_GET_CPU(env
);
6488 ts
= (TaskState
*)cpu
->opaque
;
6489 info
->tid
= gettid();
6491 if (info
->child_tidptr
)
6492 put_user_u32(info
->tid
, info
->child_tidptr
);
6493 if (info
->parent_tidptr
)
6494 put_user_u32(info
->tid
, info
->parent_tidptr
);
6495 /* Enable signals. */
6496 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
6497 /* Signal to the parent that we're ready. */
6498 pthread_mutex_lock(&info
->mutex
);
6499 pthread_cond_broadcast(&info
->cond
);
6500 pthread_mutex_unlock(&info
->mutex
);
6501 /* Wait until the parent has finished initializing the tls state. */
6502 pthread_mutex_lock(&clone_lock
);
6503 pthread_mutex_unlock(&clone_lock
);
6509 /* do_fork() Must return host values and target errnos (unlike most
6510 do_*() functions). */
6511 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
6512 abi_ulong parent_tidptr
, target_ulong newtls
,
6513 abi_ulong child_tidptr
)
6515 CPUState
*cpu
= ENV_GET_CPU(env
);
6519 CPUArchState
*new_env
;
6522 flags
&= ~CLONE_IGNORED_FLAGS
;
6524 /* Emulate vfork() with fork() */
6525 if (flags
& CLONE_VFORK
)
6526 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
6528 if (flags
& CLONE_VM
) {
6529 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
6530 new_thread_info info
;
6531 pthread_attr_t attr
;
6533 if (((flags
& CLONE_THREAD_FLAGS
) != CLONE_THREAD_FLAGS
) ||
6534 (flags
& CLONE_INVALID_THREAD_FLAGS
)) {
6535 return -TARGET_EINVAL
;
6538 ts
= g_new0(TaskState
, 1);
6539 init_task_state(ts
);
6541 /* Grab a mutex so that thread setup appears atomic. */
6542 pthread_mutex_lock(&clone_lock
);
6544 /* we create a new CPU instance. */
6545 new_env
= cpu_copy(env
);
6546 /* Init regs that differ from the parent. */
6547 cpu_clone_regs(new_env
, newsp
);
6548 new_cpu
= ENV_GET_CPU(new_env
);
6549 new_cpu
->opaque
= ts
;
6550 ts
->bprm
= parent_ts
->bprm
;
6551 ts
->info
= parent_ts
->info
;
6552 ts
->signal_mask
= parent_ts
->signal_mask
;
6554 if (flags
& CLONE_CHILD_CLEARTID
) {
6555 ts
->child_tidptr
= child_tidptr
;
6558 if (flags
& CLONE_SETTLS
) {
6559 cpu_set_tls (new_env
, newtls
);
6562 memset(&info
, 0, sizeof(info
));
6563 pthread_mutex_init(&info
.mutex
, NULL
);
6564 pthread_mutex_lock(&info
.mutex
);
6565 pthread_cond_init(&info
.cond
, NULL
);
6567 if (flags
& CLONE_CHILD_SETTID
) {
6568 info
.child_tidptr
= child_tidptr
;
6570 if (flags
& CLONE_PARENT_SETTID
) {
6571 info
.parent_tidptr
= parent_tidptr
;
6574 ret
= pthread_attr_init(&attr
);
6575 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
6576 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
6577 /* It is not safe to deliver signals until the child has finished
6578 initializing, so temporarily block all signals. */
6579 sigfillset(&sigmask
);
6580 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
6582 /* If this is our first additional thread, we need to ensure we
6583 * generate code for parallel execution and flush old translations.
6585 if (!parallel_cpus
) {
6586 parallel_cpus
= true;
6590 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
6591 /* TODO: Free new CPU state if thread creation failed. */
6593 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
6594 pthread_attr_destroy(&attr
);
6596 /* Wait for the child to initialize. */
6597 pthread_cond_wait(&info
.cond
, &info
.mutex
);
6602 pthread_mutex_unlock(&info
.mutex
);
6603 pthread_cond_destroy(&info
.cond
);
6604 pthread_mutex_destroy(&info
.mutex
);
6605 pthread_mutex_unlock(&clone_lock
);
6607 /* if no CLONE_VM, we consider it is a fork */
6608 if (flags
& CLONE_INVALID_FORK_FLAGS
) {
6609 return -TARGET_EINVAL
;
6612 /* We can't support custom termination signals */
6613 if ((flags
& CSIGNAL
) != TARGET_SIGCHLD
) {
6614 return -TARGET_EINVAL
;
6617 if (block_signals()) {
6618 return -TARGET_ERESTARTSYS
;
6624 /* Child Process. */
6625 cpu_clone_regs(env
, newsp
);
6627 /* There is a race condition here. The parent process could
6628 theoretically read the TID in the child process before the child
6629 tid is set. This would require using either ptrace
6630 (not implemented) or having *_tidptr to point at a shared memory
6631 mapping. We can't repeat the spinlock hack used above because
6632 the child process gets its own copy of the lock. */
6633 if (flags
& CLONE_CHILD_SETTID
)
6634 put_user_u32(gettid(), child_tidptr
);
6635 if (flags
& CLONE_PARENT_SETTID
)
6636 put_user_u32(gettid(), parent_tidptr
);
6637 ts
= (TaskState
*)cpu
->opaque
;
6638 if (flags
& CLONE_SETTLS
)
6639 cpu_set_tls (env
, newtls
);
6640 if (flags
& CLONE_CHILD_CLEARTID
)
6641 ts
->child_tidptr
= child_tidptr
;
6649 /* warning : doesn't handle linux specific flags... */
6650 static int target_to_host_fcntl_cmd(int cmd
)
6655 case TARGET_F_DUPFD
:
6656 case TARGET_F_GETFD
:
6657 case TARGET_F_SETFD
:
6658 case TARGET_F_GETFL
:
6659 case TARGET_F_SETFL
:
6662 case TARGET_F_GETLK
:
6665 case TARGET_F_SETLK
:
6668 case TARGET_F_SETLKW
:
6671 case TARGET_F_GETOWN
:
6674 case TARGET_F_SETOWN
:
6677 case TARGET_F_GETSIG
:
6680 case TARGET_F_SETSIG
:
6683 #if TARGET_ABI_BITS == 32
6684 case TARGET_F_GETLK64
:
6687 case TARGET_F_SETLK64
:
6690 case TARGET_F_SETLKW64
:
6694 case TARGET_F_SETLEASE
:
6697 case TARGET_F_GETLEASE
:
6700 #ifdef F_DUPFD_CLOEXEC
6701 case TARGET_F_DUPFD_CLOEXEC
:
6702 ret
= F_DUPFD_CLOEXEC
;
6705 case TARGET_F_NOTIFY
:
6709 case TARGET_F_GETOWN_EX
:
6714 case TARGET_F_SETOWN_EX
:
6719 case TARGET_F_SETPIPE_SZ
:
6722 case TARGET_F_GETPIPE_SZ
:
6727 ret
= -TARGET_EINVAL
;
6731 #if defined(__powerpc64__)
6732 /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6733 * is not supported by kernel. The glibc fcntl call actually adjusts
6734 * them to 5, 6 and 7 before making the syscall(). Since we make the
6735 * syscall directly, adjust to what is supported by the kernel.
6737 if (ret
>= F_GETLK64
&& ret
<= F_SETLKW64
) {
6738 ret
-= F_GETLK64
- 5;
6745 #define FLOCK_TRANSTBL \
6747 TRANSTBL_CONVERT(F_RDLCK); \
6748 TRANSTBL_CONVERT(F_WRLCK); \
6749 TRANSTBL_CONVERT(F_UNLCK); \
6750 TRANSTBL_CONVERT(F_EXLCK); \
6751 TRANSTBL_CONVERT(F_SHLCK); \
6754 static int target_to_host_flock(int type
)
6756 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6758 #undef TRANSTBL_CONVERT
6759 return -TARGET_EINVAL
;
6762 static int host_to_target_flock(int type
)
6764 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6766 #undef TRANSTBL_CONVERT
6767 /* if we don't know how to convert the value coming
6768 * from the host we copy to the target field as-is
6773 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
6774 abi_ulong target_flock_addr
)
6776 struct target_flock
*target_fl
;
6779 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6780 return -TARGET_EFAULT
;
6783 __get_user(l_type
, &target_fl
->l_type
);
6784 l_type
= target_to_host_flock(l_type
);
6788 fl
->l_type
= l_type
;
6789 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6790 __get_user(fl
->l_start
, &target_fl
->l_start
);
6791 __get_user(fl
->l_len
, &target_fl
->l_len
);
6792 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6793 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6797 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
6798 const struct flock64
*fl
)
6800 struct target_flock
*target_fl
;
6803 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6804 return -TARGET_EFAULT
;
6807 l_type
= host_to_target_flock(fl
->l_type
);
6808 __put_user(l_type
, &target_fl
->l_type
);
6809 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6810 __put_user(fl
->l_start
, &target_fl
->l_start
);
6811 __put_user(fl
->l_len
, &target_fl
->l_len
);
6812 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6813 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6817 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
6818 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
#if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/*
 * ARM OABI uses a differently laid-out struct flock64 (no padding before
 * l_type), so it needs its own copy-in/copy-out helpers.
 *
 * Fix: the early return on an unconvertible l_type previously skipped
 * unlock_user_struct(), leaking the locked buffer under DEBUG_REMAP.
 */
static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
                                                   abi_ulong target_flock_addr)
{
    struct target_oabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    l_type = target_to_host_flock(l_type);
    if (l_type < 0) {
        unlock_user_struct(target_fl, target_flock_addr, 0);
        return l_type;
    }
    fl->l_type = l_type;
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}

static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_oabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
#endif
6866 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
6867 abi_ulong target_flock_addr
)
6869 struct target_flock64
*target_fl
;
6872 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6873 return -TARGET_EFAULT
;
6876 __get_user(l_type
, &target_fl
->l_type
);
6877 l_type
= target_to_host_flock(l_type
);
6881 fl
->l_type
= l_type
;
6882 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6883 __get_user(fl
->l_start
, &target_fl
->l_start
);
6884 __get_user(fl
->l_len
, &target_fl
->l_len
);
6885 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6886 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6890 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
6891 const struct flock64
*fl
)
6893 struct target_flock64
*target_fl
;
6896 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6897 return -TARGET_EFAULT
;
6900 l_type
= host_to_target_flock(fl
->l_type
);
6901 __put_user(l_type
, &target_fl
->l_type
);
6902 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6903 __put_user(fl
->l_start
, &target_fl
->l_start
);
6904 __put_user(fl
->l_len
, &target_fl
->l_len
);
6905 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6906 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6910 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
6912 struct flock64 fl64
;
6914 struct f_owner_ex fox
;
6915 struct target_f_owner_ex
*target_fox
;
6918 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
6920 if (host_cmd
== -TARGET_EINVAL
)
6924 case TARGET_F_GETLK
:
6925 ret
= copy_from_user_flock(&fl64
, arg
);
6929 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6931 ret
= copy_to_user_flock(arg
, &fl64
);
6935 case TARGET_F_SETLK
:
6936 case TARGET_F_SETLKW
:
6937 ret
= copy_from_user_flock(&fl64
, arg
);
6941 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6944 case TARGET_F_GETLK64
:
6945 ret
= copy_from_user_flock64(&fl64
, arg
);
6949 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6951 ret
= copy_to_user_flock64(arg
, &fl64
);
6954 case TARGET_F_SETLK64
:
6955 case TARGET_F_SETLKW64
:
6956 ret
= copy_from_user_flock64(&fl64
, arg
);
6960 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6963 case TARGET_F_GETFL
:
6964 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6966 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
6970 case TARGET_F_SETFL
:
6971 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
6972 target_to_host_bitmask(arg
,
6977 case TARGET_F_GETOWN_EX
:
6978 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6980 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
6981 return -TARGET_EFAULT
;
6982 target_fox
->type
= tswap32(fox
.type
);
6983 target_fox
->pid
= tswap32(fox
.pid
);
6984 unlock_user_struct(target_fox
, arg
, 1);
6990 case TARGET_F_SETOWN_EX
:
6991 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
6992 return -TARGET_EFAULT
;
6993 fox
.type
= tswap32(target_fox
->type
);
6994 fox
.pid
= tswap32(target_fox
->pid
);
6995 unlock_user_struct(target_fox
, arg
, 0);
6996 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
7000 case TARGET_F_SETOWN
:
7001 case TARGET_F_GETOWN
:
7002 case TARGET_F_SETSIG
:
7003 case TARGET_F_GETSIG
:
7004 case TARGET_F_SETLEASE
:
7005 case TARGET_F_GETLEASE
:
7006 case TARGET_F_SETPIPE_SZ
:
7007 case TARGET_F_GETPIPE_SZ
:
7008 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
7012 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
7020 static inline int high2lowuid(int uid
)
7028 static inline int high2lowgid(int gid
)
7036 static inline int low2highuid(int uid
)
7038 if ((int16_t)uid
== -1)
7044 static inline int low2highgid(int gid
)
7046 if ((int16_t)gid
== -1)
7051 static inline int tswapid(int id
)
7056 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7058 #else /* !USE_UID16 */
7059 static inline int high2lowuid(int uid
)
7063 static inline int high2lowgid(int gid
)
7067 static inline int low2highuid(int uid
)
7071 static inline int low2highgid(int gid
)
7075 static inline int tswapid(int id
)
7080 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7082 #endif /* USE_UID16 */
7084 /* We must do direct syscalls for setting UID/GID, because we want to
7085 * implement the Linux system call semantics of "change only for this thread",
7086 * not the libc/POSIX semantics of "change for all threads in process".
7087 * (See http://ewontfix.com/17/ for more details.)
7088 * We use the 32-bit version of the syscalls if present; if it is not
7089 * then either the host architecture supports 32-bit UIDs natively with
7090 * the standard syscall, or the 16-bit UID is the best we can do.
7092 #ifdef __NR_setuid32
7093 #define __NR_sys_setuid __NR_setuid32
7095 #define __NR_sys_setuid __NR_setuid
7097 #ifdef __NR_setgid32
7098 #define __NR_sys_setgid __NR_setgid32
7100 #define __NR_sys_setgid __NR_setgid
7102 #ifdef __NR_setresuid32
7103 #define __NR_sys_setresuid __NR_setresuid32
7105 #define __NR_sys_setresuid __NR_setresuid
7107 #ifdef __NR_setresgid32
7108 #define __NR_sys_setresgid __NR_setresgid32
7110 #define __NR_sys_setresgid __NR_setresgid
7113 _syscall1(int, sys_setuid
, uid_t
, uid
)
7114 _syscall1(int, sys_setgid
, gid_t
, gid
)
7115 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
7116 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
7118 void syscall_init(void)
7121 const argtype
*arg_type
;
7125 thunk_init(STRUCT_MAX
);
7127 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7128 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7129 #include "syscall_types.h"
7131 #undef STRUCT_SPECIAL
7133 /* Build target_to_host_errno_table[] table from
7134 * host_to_target_errno_table[]. */
7135 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
7136 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
7139 /* we patch the ioctl size if necessary. We rely on the fact that
7140 no ioctl has all the bits at '1' in the size field */
7142 while (ie
->target_cmd
!= 0) {
7143 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
7144 TARGET_IOC_SIZEMASK
) {
7145 arg_type
= ie
->arg_type
;
7146 if (arg_type
[0] != TYPE_PTR
) {
7147 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
7152 size
= thunk_type_size(arg_type
, 0);
7153 ie
->target_cmd
= (ie
->target_cmd
&
7154 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
7155 (size
<< TARGET_IOC_SIZESHIFT
);
7158 /* automatic consistency check if same arch */
7159 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7160 (defined(__x86_64__) && defined(TARGET_X86_64))
7161 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
7162 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7163 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#if TARGET_ABI_BITS == 32
/* Combine two 32-bit register halves into the guest's 64-bit offset,
 * respecting the guest's word order. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABI: the offset arrives whole in the first argument. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* truncate64(2): reassemble the 64-bit length from the register pair,
 * skipping the alignment padding register where the ABI requires it. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* ftruncate64(2): same register-pair handling as target_truncate64. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
7214 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
7215 abi_ulong target_addr
)
7217 struct target_timespec
*target_ts
;
7219 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
7220 return -TARGET_EFAULT
;
7221 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
7222 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
7223 unlock_user_struct(target_ts
, target_addr
, 0);
7227 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
7228 struct timespec
*host_ts
)
7230 struct target_timespec
*target_ts
;
7232 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
7233 return -TARGET_EFAULT
;
7234 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
7235 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
7236 unlock_user_struct(target_ts
, target_addr
, 1);
7240 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
7241 abi_ulong target_addr
)
7243 struct target_itimerspec
*target_itspec
;
7245 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
7246 return -TARGET_EFAULT
;
7249 host_itspec
->it_interval
.tv_sec
=
7250 tswapal(target_itspec
->it_interval
.tv_sec
);
7251 host_itspec
->it_interval
.tv_nsec
=
7252 tswapal(target_itspec
->it_interval
.tv_nsec
);
7253 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
7254 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
7256 unlock_user_struct(target_itspec
, target_addr
, 1);
7260 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
7261 struct itimerspec
*host_its
)
7263 struct target_itimerspec
*target_itspec
;
7265 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
7266 return -TARGET_EFAULT
;
7269 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
7270 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
7272 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
7273 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
7275 unlock_user_struct(target_itspec
, target_addr
, 0);
7279 static inline abi_long
target_to_host_timex(struct timex
*host_tx
,
7280 abi_long target_addr
)
7282 struct target_timex
*target_tx
;
7284 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
7285 return -TARGET_EFAULT
;
7288 __get_user(host_tx
->modes
, &target_tx
->modes
);
7289 __get_user(host_tx
->offset
, &target_tx
->offset
);
7290 __get_user(host_tx
->freq
, &target_tx
->freq
);
7291 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7292 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
7293 __get_user(host_tx
->status
, &target_tx
->status
);
7294 __get_user(host_tx
->constant
, &target_tx
->constant
);
7295 __get_user(host_tx
->precision
, &target_tx
->precision
);
7296 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7297 __get_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
7298 __get_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
7299 __get_user(host_tx
->tick
, &target_tx
->tick
);
7300 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7301 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
7302 __get_user(host_tx
->shift
, &target_tx
->shift
);
7303 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
7304 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7305 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7306 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7307 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7308 __get_user(host_tx
->tai
, &target_tx
->tai
);
7310 unlock_user_struct(target_tx
, target_addr
, 0);
7314 static inline abi_long
host_to_target_timex(abi_long target_addr
,
7315 struct timex
*host_tx
)
7317 struct target_timex
*target_tx
;
7319 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
7320 return -TARGET_EFAULT
;
7323 __put_user(host_tx
->modes
, &target_tx
->modes
);
7324 __put_user(host_tx
->offset
, &target_tx
->offset
);
7325 __put_user(host_tx
->freq
, &target_tx
->freq
);
7326 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7327 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
7328 __put_user(host_tx
->status
, &target_tx
->status
);
7329 __put_user(host_tx
->constant
, &target_tx
->constant
);
7330 __put_user(host_tx
->precision
, &target_tx
->precision
);
7331 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7332 __put_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
7333 __put_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
7334 __put_user(host_tx
->tick
, &target_tx
->tick
);
7335 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7336 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
7337 __put_user(host_tx
->shift
, &target_tx
->shift
);
7338 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
7339 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7340 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7341 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7342 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7343 __put_user(host_tx
->tai
, &target_tx
->tai
);
7345 unlock_user_struct(target_tx
, target_addr
, 1);
7350 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
7351 abi_ulong target_addr
)
7353 struct target_sigevent
*target_sevp
;
7355 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
7356 return -TARGET_EFAULT
;
7359 /* This union is awkward on 64 bit systems because it has a 32 bit
7360 * integer and a pointer in it; we follow the conversion approach
7361 * used for handling sigval types in signal.c so the guest should get
7362 * the correct value back even if we did a 64 bit byteswap and it's
7363 * using the 32 bit integer.
7365 host_sevp
->sigev_value
.sival_ptr
=
7366 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
7367 host_sevp
->sigev_signo
=
7368 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
7369 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
7370 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
7372 unlock_user_struct(target_sevp
, target_addr
, 1);
#if defined(TARGET_NR_mlockall)
/* Translate mlockall(2) flag bits from target to host values. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }

    return result;
}
#endif
#if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
     defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
     defined(TARGET_NR_newfstatat))
/* Copy a host "struct stat" into the guest's stat64 (or plain stat)
 * layout at target_addr, byteswapping each field via __put_user.
 * Returns 0 on success or -TARGET_EFAULT if the guest buffer cannot
 * be locked for writing.
 */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    /* 32-bit ARM EABI processes use their own padded stat64 layout. */
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) {
            return -TARGET_EFAULT;
        }
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0)) {
            return -TARGET_EFAULT;
        }
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
7458 /* ??? Using host futex calls even when target atomic operations
7459 are not really atomic probably breaks things. However implementing
7460 futexes locally would make futexes shared between multiple processes
7461 tricky. However they're probably useless because guest atomic
7462 operations won't work either. */
7463 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
7464 target_ulong uaddr2
, int val3
)
7466 struct timespec ts
, *pts
;
7469 /* ??? We assume FUTEX_* constants are the same on both host
7471 #ifdef FUTEX_CMD_MASK
7472 base_op
= op
& FUTEX_CMD_MASK
;
7478 case FUTEX_WAIT_BITSET
:
7481 target_to_host_timespec(pts
, timeout
);
7485 return get_errno(safe_futex(g2h(uaddr
), op
, tswap32(val
),
7488 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
7490 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
7492 case FUTEX_CMP_REQUEUE
:
7494 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7495 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7496 But the prototype takes a `struct timespec *'; insert casts
7497 to satisfy the compiler. We do not need to tswap TIMEOUT
7498 since it's not compared to guest memory. */
7499 pts
= (struct timespec
*)(uintptr_t) timeout
;
7500 return get_errno(safe_futex(g2h(uaddr
), op
, val
, pts
,
7502 (base_op
== FUTEX_CMP_REQUEUE
7506 return -TARGET_ENOSYS
;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Implement name_to_handle_at(): read the guest-supplied handle_bytes,
 * call the host syscall with a scratch file_handle, then copy the
 * (opaque) handle back to guest memory with byteswapped header fields
 * and store the mount id.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Implement open_by_handle_at(): copy the guest file_handle into a host
 * buffer (byteswapping the header fields) and open it with translated
 * open flags.  Returns the new fd or a negative target errno.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
#if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)

/* signalfd siginfo conversion */

/* Convert one host signalfd_siginfo record into guest byte order,
 * translating the signal number.  tinfo and info may point at the same
 * record (callers pass the same buffer for in-place conversion).
 */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    if (tinfo->ssi_signo == SIGBUS &&
        (tinfo->ssi_code == BUS_MCEERR_AR ||
         tinfo->ssi_code == BUS_MCEERR_AO)) {
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    tinfo->ssi_errno = tswap32(tinfo->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}

/* Byteswap, in place, every signalfd_siginfo record read from the fd. */
static abi_long host_to_target_data_signalfd(void *buf, size_t len)
{
    int i;

    for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
        host_to_target_signalfd_siginfo(buf + i, buf + i);
    }

    return len;
}

static TargetFdTrans target_signalfd_trans = {
    .host_to_target_data = host_to_target_data_signalfd,
};

/* Implement signalfd()/signalfd4(): validate and translate guest flags
 * and sigset, create the host signalfd, and register a read-side data
 * translator for the resulting fd.
 */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    sigset_t host_mask;
    abi_long ret;
    target_sigset_t *target_mask;

    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
#endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
7697 static int open_self_cmdline(void *cpu_env
, int fd
)
7699 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7700 struct linux_binprm
*bprm
= ((TaskState
*)cpu
->opaque
)->bprm
;
7703 for (i
= 0; i
< bprm
->argc
; i
++) {
7704 size_t len
= strlen(bprm
->argv
[i
]) + 1;
7706 if (write(fd
, bprm
->argv
[i
], len
) != len
) {
7714 static int open_self_maps(void *cpu_env
, int fd
)
7716 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7717 TaskState
*ts
= cpu
->opaque
;
7723 fp
= fopen("/proc/self/maps", "r");
7728 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7729 int fields
, dev_maj
, dev_min
, inode
;
7730 uint64_t min
, max
, offset
;
7731 char flag_r
, flag_w
, flag_x
, flag_p
;
7732 char path
[512] = "";
7733 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
7734 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
7735 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
7737 if ((fields
< 10) || (fields
> 11)) {
7740 if (h2g_valid(min
)) {
7741 int flags
= page_get_flags(h2g(min
));
7742 max
= h2g_valid(max
- 1) ? max
: (uintptr_t)g2h(GUEST_ADDR_MAX
) + 1;
7743 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
7746 if (h2g(min
) == ts
->info
->stack_limit
) {
7747 pstrcpy(path
, sizeof(path
), " [stack]");
7749 dprintf(fd
, TARGET_ABI_FMT_ptr
"-" TARGET_ABI_FMT_ptr
7750 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
7751 h2g(min
), h2g(max
- 1) + 1, flag_r
, flag_w
,
7752 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
7753 path
[0] ? " " : "", path
);
7763 static int open_self_stat(void *cpu_env
, int fd
)
7765 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7766 TaskState
*ts
= cpu
->opaque
;
7767 abi_ulong start_stack
= ts
->info
->start_stack
;
7770 for (i
= 0; i
< 44; i
++) {
7778 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
7779 } else if (i
== 1) {
7781 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
7782 } else if (i
== 27) {
7785 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
7787 /* for the rest, there is MasterCard */
7788 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
7792 if (write(fd
, buf
, len
) != len
) {
7800 static int open_self_auxv(void *cpu_env
, int fd
)
7802 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7803 TaskState
*ts
= cpu
->opaque
;
7804 abi_ulong auxv
= ts
->info
->saved_auxv
;
7805 abi_ulong len
= ts
->info
->auxv_len
;
7809 * Auxiliary vector is stored in target process stack.
7810 * read in whole auxv vector and copy it to file
7812 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
7816 r
= write(fd
, ptr
, len
);
7823 lseek(fd
, 0, SEEK_SET
);
7824 unlock_user(ptr
, auxv
, len
);
/* Return 1 if filename names the given entry of this process's /proc
 * directory, i.e. "/proc/self/<entry>" or "/proc/<our pid>/<entry>";
 * 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-match comparator used by the fake-/proc open table. */
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}

/* Synthesize /proc/net/route for a cross-endian guest: copy the header
 * line verbatim, then byteswap the address/mask words of every route
 * entry.  Returns 0 on success, -1 if the host file cannot be opened.
 */
static int open_net_route(void *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */

    read = getline(&line, &len, fp);
    dprintf(fd, "%s", line);

    /* read routes */

    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
               iface, &dest, &gw, &flags, &refcnt, &use, &metric,
               &mask, &mtu, &window, &irtt);
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
#endif
7898 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
7901 const char *filename
;
7902 int (*fill
)(void *cpu_env
, int fd
);
7903 int (*cmp
)(const char *s1
, const char *s2
);
7905 const struct fake_open
*fake_open
;
7906 static const struct fake_open fakes
[] = {
7907 { "maps", open_self_maps
, is_proc_myself
},
7908 { "stat", open_self_stat
, is_proc_myself
},
7909 { "auxv", open_self_auxv
, is_proc_myself
},
7910 { "cmdline", open_self_cmdline
, is_proc_myself
},
7911 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7912 { "/proc/net/route", open_net_route
, is_proc
},
7914 { NULL
, NULL
, NULL
}
7917 if (is_proc_myself(pathname
, "exe")) {
7918 int execfd
= qemu_getauxval(AT_EXECFD
);
7919 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
7922 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
7923 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
7928 if (fake_open
->filename
) {
7930 char filename
[PATH_MAX
];
7933 /* create temporary file to map stat to */
7934 tmpdir
= getenv("TMPDIR");
7937 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
7938 fd
= mkstemp(filename
);
7944 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
7950 lseek(fd
, 0, SEEK_SET
);
7955 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
7958 #define TIMER_MAGIC 0x0caf0000
7959 #define TIMER_MAGIC_MASK 0xffff0000
7961 /* Convert QEMU provided timer ID back to internal 16bit index format */
7962 static target_timer_t
get_timer_id(abi_long arg
)
7964 target_timer_t timerid
= arg
;
7966 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
7967 return -TARGET_EINVAL
;
7972 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
7973 return -TARGET_EINVAL
;
7979 static abi_long
swap_data_eventfd(void *buf
, size_t len
)
7981 uint64_t *counter
= buf
;
7984 if (len
< sizeof(uint64_t)) {
7988 for (i
= 0; i
< len
; i
+= sizeof(uint64_t)) {
7989 *counter
= tswap64(*counter
);
7996 static TargetFdTrans target_eventfd_trans
= {
7997 .host_to_target_data
= swap_data_eventfd
,
7998 .target_to_host_data
= swap_data_eventfd
,
#if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
    (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
     defined(__NR_inotify_init1))
/* Byteswap, in place, each inotify_event record read from an inotify fd.
 * The trailing name bytes are left untouched; ev->len (saved in host
 * order before the swap) gives each record's variable stride.
 */
static abi_long host_to_target_data_inotify(void *buf, size_t len)
{
    struct inotify_event *ev;
    int i;
    uint32_t name_len;

    for (i = 0; i < len; i += sizeof(struct inotify_event) + name_len) {
        ev = (struct inotify_event *)((char *)buf + i);
        name_len = ev->len;

        ev->wd = tswap32(ev->wd);
        ev->mask = tswap32(ev->mask);
        ev->cookie = tswap32(ev->cookie);
        ev->len = tswap32(name_len);
    }

    return len;
}

static TargetFdTrans target_inotify_trans = {
    .host_to_target_data = host_to_target_data_inotify,
};
#endif
8028 static int target_to_host_cpu_mask(unsigned long *host_mask
,
8030 abi_ulong target_addr
,
8033 unsigned target_bits
= sizeof(abi_ulong
) * 8;
8034 unsigned host_bits
= sizeof(*host_mask
) * 8;
8035 abi_ulong
*target_mask
;
8038 assert(host_size
>= target_size
);
8040 target_mask
= lock_user(VERIFY_READ
, target_addr
, target_size
, 1);
8042 return -TARGET_EFAULT
;
8044 memset(host_mask
, 0, host_size
);
8046 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
8047 unsigned bit
= i
* target_bits
;
8050 __get_user(val
, &target_mask
[i
]);
8051 for (j
= 0; j
< target_bits
; j
++, bit
++) {
8052 if (val
& (1UL << j
)) {
8053 host_mask
[bit
/ host_bits
] |= 1UL << (bit
% host_bits
);
8058 unlock_user(target_mask
, target_addr
, 0);
8062 static int host_to_target_cpu_mask(const unsigned long *host_mask
,
8064 abi_ulong target_addr
,
8067 unsigned target_bits
= sizeof(abi_ulong
) * 8;
8068 unsigned host_bits
= sizeof(*host_mask
) * 8;
8069 abi_ulong
*target_mask
;
8072 assert(host_size
>= target_size
);
8074 target_mask
= lock_user(VERIFY_WRITE
, target_addr
, target_size
, 0);
8076 return -TARGET_EFAULT
;
8079 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
8080 unsigned bit
= i
* target_bits
;
8083 for (j
= 0; j
< target_bits
; j
++, bit
++) {
8084 if (host_mask
[bit
/ host_bits
] & (1UL << (bit
% host_bits
))) {
8088 __put_user(val
, &target_mask
[i
]);
8091 unlock_user(target_mask
, target_addr
, target_size
);
8095 /* This is an internal helper for do_syscall so that it is easier
8096 * to have a single return point, so that actions, such as logging
8097 * of syscall results, can be performed.
8098 * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8100 static abi_long
do_syscall1(void *cpu_env
, int num
, abi_long arg1
,
8101 abi_long arg2
, abi_long arg3
, abi_long arg4
,
8102 abi_long arg5
, abi_long arg6
, abi_long arg7
,
8105 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
8107 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8108 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8109 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64)
8112 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8113 || defined(TARGET_NR_fstatfs)
8119 case TARGET_NR_exit
:
8120 /* In old applications this may be used to implement _exit(2).
8121 However in threaded applictions it is used for thread termination,
8122 and _exit_group is used for application termination.
8123 Do thread termination if we have more then one thread. */
8125 if (block_signals()) {
8126 return -TARGET_ERESTARTSYS
;
8131 if (CPU_NEXT(first_cpu
)) {
8134 /* Remove the CPU from the list. */
8135 QTAILQ_REMOVE_RCU(&cpus
, cpu
, node
);
8140 if (ts
->child_tidptr
) {
8141 put_user_u32(0, ts
->child_tidptr
);
8142 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
8146 object_unref(OBJECT(cpu
));
8148 rcu_unregister_thread();
8153 preexit_cleanup(cpu_env
, arg1
);
8155 return 0; /* avoid warning */
8156 case TARGET_NR_read
:
8160 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
8161 return -TARGET_EFAULT
;
8162 ret
= get_errno(safe_read(arg1
, p
, arg3
));
8164 fd_trans_host_to_target_data(arg1
)) {
8165 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
8167 unlock_user(p
, arg2
, ret
);
8170 case TARGET_NR_write
:
8171 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
8172 return -TARGET_EFAULT
;
8173 if (fd_trans_target_to_host_data(arg1
)) {
8174 void *copy
= g_malloc(arg3
);
8175 memcpy(copy
, p
, arg3
);
8176 ret
= fd_trans_target_to_host_data(arg1
)(copy
, arg3
);
8178 ret
= get_errno(safe_write(arg1
, copy
, ret
));
8182 ret
= get_errno(safe_write(arg1
, p
, arg3
));
8184 unlock_user(p
, arg2
, 0);
8187 #ifdef TARGET_NR_open
8188 case TARGET_NR_open
:
8189 if (!(p
= lock_user_string(arg1
)))
8190 return -TARGET_EFAULT
;
8191 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
8192 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
8194 fd_trans_unregister(ret
);
8195 unlock_user(p
, arg1
, 0);
8198 case TARGET_NR_openat
:
8199 if (!(p
= lock_user_string(arg2
)))
8200 return -TARGET_EFAULT
;
8201 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
8202 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
8204 fd_trans_unregister(ret
);
8205 unlock_user(p
, arg2
, 0);
8207 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8208 case TARGET_NR_name_to_handle_at
:
8209 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
8212 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8213 case TARGET_NR_open_by_handle_at
:
8214 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
8215 fd_trans_unregister(ret
);
8218 case TARGET_NR_close
:
8219 fd_trans_unregister(arg1
);
8220 return get_errno(close(arg1
));
8223 return do_brk(arg1
);
8224 #ifdef TARGET_NR_fork
8225 case TARGET_NR_fork
:
8226 return get_errno(do_fork(cpu_env
, TARGET_SIGCHLD
, 0, 0, 0, 0));
8228 #ifdef TARGET_NR_waitpid
8229 case TARGET_NR_waitpid
:
8232 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
8233 if (!is_error(ret
) && arg2
&& ret
8234 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
8235 return -TARGET_EFAULT
;
8239 #ifdef TARGET_NR_waitid
8240 case TARGET_NR_waitid
:
8244 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
8245 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
8246 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
8247 return -TARGET_EFAULT
;
8248 host_to_target_siginfo(p
, &info
);
8249 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
8254 #ifdef TARGET_NR_creat /* not on alpha */
8255 case TARGET_NR_creat
:
8256 if (!(p
= lock_user_string(arg1
)))
8257 return -TARGET_EFAULT
;
8258 ret
= get_errno(creat(p
, arg2
));
8259 fd_trans_unregister(ret
);
8260 unlock_user(p
, arg1
, 0);
8263 #ifdef TARGET_NR_link
8264 case TARGET_NR_link
:
8267 p
= lock_user_string(arg1
);
8268 p2
= lock_user_string(arg2
);
8270 ret
= -TARGET_EFAULT
;
8272 ret
= get_errno(link(p
, p2
));
8273 unlock_user(p2
, arg2
, 0);
8274 unlock_user(p
, arg1
, 0);
8278 #if defined(TARGET_NR_linkat)
8279 case TARGET_NR_linkat
:
8283 return -TARGET_EFAULT
;
8284 p
= lock_user_string(arg2
);
8285 p2
= lock_user_string(arg4
);
8287 ret
= -TARGET_EFAULT
;
8289 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
8290 unlock_user(p
, arg2
, 0);
8291 unlock_user(p2
, arg4
, 0);
8295 #ifdef TARGET_NR_unlink
8296 case TARGET_NR_unlink
:
8297 if (!(p
= lock_user_string(arg1
)))
8298 return -TARGET_EFAULT
;
8299 ret
= get_errno(unlink(p
));
8300 unlock_user(p
, arg1
, 0);
8303 #if defined(TARGET_NR_unlinkat)
8304 case TARGET_NR_unlinkat
:
8305 if (!(p
= lock_user_string(arg2
)))
8306 return -TARGET_EFAULT
;
8307 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
8308 unlock_user(p
, arg2
, 0);
8311 case TARGET_NR_execve
:
8313 char **argp
, **envp
;
8316 abi_ulong guest_argp
;
8317 abi_ulong guest_envp
;
8324 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
8325 if (get_user_ual(addr
, gp
))
8326 return -TARGET_EFAULT
;
8333 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
8334 if (get_user_ual(addr
, gp
))
8335 return -TARGET_EFAULT
;
8341 argp
= g_new0(char *, argc
+ 1);
8342 envp
= g_new0(char *, envc
+ 1);
8344 for (gp
= guest_argp
, q
= argp
; gp
;
8345 gp
+= sizeof(abi_ulong
), q
++) {
8346 if (get_user_ual(addr
, gp
))
8350 if (!(*q
= lock_user_string(addr
)))
8352 total_size
+= strlen(*q
) + 1;
8356 for (gp
= guest_envp
, q
= envp
; gp
;
8357 gp
+= sizeof(abi_ulong
), q
++) {
8358 if (get_user_ual(addr
, gp
))
8362 if (!(*q
= lock_user_string(addr
)))
8364 total_size
+= strlen(*q
) + 1;
8368 if (!(p
= lock_user_string(arg1
)))
8370 /* Although execve() is not an interruptible syscall it is
8371 * a special case where we must use the safe_syscall wrapper:
8372 * if we allow a signal to happen before we make the host
8373 * syscall then we will 'lose' it, because at the point of
8374 * execve the process leaves QEMU's control. So we use the
8375 * safe syscall wrapper to ensure that we either take the
8376 * signal as a guest signal, or else it does not happen
8377 * before the execve completes and makes it the other
8378 * program's problem.
8380 ret
= get_errno(safe_execve(p
, argp
, envp
));
8381 unlock_user(p
, arg1
, 0);
8386 ret
= -TARGET_EFAULT
;
8389 for (gp
= guest_argp
, q
= argp
; *q
;
8390 gp
+= sizeof(abi_ulong
), q
++) {
8391 if (get_user_ual(addr
, gp
)
8394 unlock_user(*q
, addr
, 0);
8396 for (gp
= guest_envp
, q
= envp
; *q
;
8397 gp
+= sizeof(abi_ulong
), q
++) {
8398 if (get_user_ual(addr
, gp
)
8401 unlock_user(*q
, addr
, 0);
8408 case TARGET_NR_chdir
:
8409 if (!(p
= lock_user_string(arg1
)))
8410 return -TARGET_EFAULT
;
8411 ret
= get_errno(chdir(p
));
8412 unlock_user(p
, arg1
, 0);
8414 #ifdef TARGET_NR_time
8415 case TARGET_NR_time
:
8418 ret
= get_errno(time(&host_time
));
8421 && put_user_sal(host_time
, arg1
))
8422 return -TARGET_EFAULT
;
8426 #ifdef TARGET_NR_mknod
8427 case TARGET_NR_mknod
:
8428 if (!(p
= lock_user_string(arg1
)))
8429 return -TARGET_EFAULT
;
8430 ret
= get_errno(mknod(p
, arg2
, arg3
));
8431 unlock_user(p
, arg1
, 0);
8434 #if defined(TARGET_NR_mknodat)
8435 case TARGET_NR_mknodat
:
8436 if (!(p
= lock_user_string(arg2
)))
8437 return -TARGET_EFAULT
;
8438 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
8439 unlock_user(p
, arg2
, 0);
8442 #ifdef TARGET_NR_chmod
8443 case TARGET_NR_chmod
:
8444 if (!(p
= lock_user_string(arg1
)))
8445 return -TARGET_EFAULT
;
8446 ret
= get_errno(chmod(p
, arg2
));
8447 unlock_user(p
, arg1
, 0);
8450 #ifdef TARGET_NR_lseek
8451 case TARGET_NR_lseek
:
8452 return get_errno(lseek(arg1
, arg2
, arg3
));
8454 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8455 /* Alpha specific */
8456 case TARGET_NR_getxpid
:
8457 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
8458 return get_errno(getpid());
8460 #ifdef TARGET_NR_getpid
8461 case TARGET_NR_getpid
:
8462 return get_errno(getpid());
8464 case TARGET_NR_mount
:
8466 /* need to look at the data field */
8470 p
= lock_user_string(arg1
);
8472 return -TARGET_EFAULT
;
8478 p2
= lock_user_string(arg2
);
8481 unlock_user(p
, arg1
, 0);
8483 return -TARGET_EFAULT
;
8487 p3
= lock_user_string(arg3
);
8490 unlock_user(p
, arg1
, 0);
8492 unlock_user(p2
, arg2
, 0);
8493 return -TARGET_EFAULT
;
8499 /* FIXME - arg5 should be locked, but it isn't clear how to
8500 * do that since it's not guaranteed to be a NULL-terminated
8504 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
8506 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
8508 ret
= get_errno(ret
);
8511 unlock_user(p
, arg1
, 0);
8513 unlock_user(p2
, arg2
, 0);
8515 unlock_user(p3
, arg3
, 0);
8519 #ifdef TARGET_NR_umount
8520 case TARGET_NR_umount
:
8521 if (!(p
= lock_user_string(arg1
)))
8522 return -TARGET_EFAULT
;
8523 ret
= get_errno(umount(p
));
8524 unlock_user(p
, arg1
, 0);
8527 #ifdef TARGET_NR_stime /* not on alpha */
8528 case TARGET_NR_stime
:
8531 if (get_user_sal(host_time
, arg1
))
8532 return -TARGET_EFAULT
;
8533 return get_errno(stime(&host_time
));
8536 #ifdef TARGET_NR_alarm /* not on alpha */
8537 case TARGET_NR_alarm
:
8540 #ifdef TARGET_NR_pause /* not on alpha */
8541 case TARGET_NR_pause
:
8542 if (!block_signals()) {
8543 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
8545 return -TARGET_EINTR
;
8547 #ifdef TARGET_NR_utime
8548 case TARGET_NR_utime
:
8550 struct utimbuf tbuf
, *host_tbuf
;
8551 struct target_utimbuf
*target_tbuf
;
8553 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
8554 return -TARGET_EFAULT
;
8555 tbuf
.actime
= tswapal(target_tbuf
->actime
);
8556 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
8557 unlock_user_struct(target_tbuf
, arg2
, 0);
8562 if (!(p
= lock_user_string(arg1
)))
8563 return -TARGET_EFAULT
;
8564 ret
= get_errno(utime(p
, host_tbuf
));
8565 unlock_user(p
, arg1
, 0);
8569 #ifdef TARGET_NR_utimes
8570 case TARGET_NR_utimes
:
8572 struct timeval
*tvp
, tv
[2];
8574 if (copy_from_user_timeval(&tv
[0], arg2
)
8575 || copy_from_user_timeval(&tv
[1],
8576 arg2
+ sizeof(struct target_timeval
)))
8577 return -TARGET_EFAULT
;
8582 if (!(p
= lock_user_string(arg1
)))
8583 return -TARGET_EFAULT
;
8584 ret
= get_errno(utimes(p
, tvp
));
8585 unlock_user(p
, arg1
, 0);
8589 #if defined(TARGET_NR_futimesat)
8590 case TARGET_NR_futimesat
:
8592 struct timeval
*tvp
, tv
[2];
8594 if (copy_from_user_timeval(&tv
[0], arg3
)
8595 || copy_from_user_timeval(&tv
[1],
8596 arg3
+ sizeof(struct target_timeval
)))
8597 return -TARGET_EFAULT
;
8602 if (!(p
= lock_user_string(arg2
))) {
8603 return -TARGET_EFAULT
;
8605 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
8606 unlock_user(p
, arg2
, 0);
8610 #ifdef TARGET_NR_access
8611 case TARGET_NR_access
:
8612 if (!(p
= lock_user_string(arg1
))) {
8613 return -TARGET_EFAULT
;
8615 ret
= get_errno(access(path(p
), arg2
));
8616 unlock_user(p
, arg1
, 0);
8619 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8620 case TARGET_NR_faccessat
:
8621 if (!(p
= lock_user_string(arg2
))) {
8622 return -TARGET_EFAULT
;
8624 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
8625 unlock_user(p
, arg2
, 0);
8628 #ifdef TARGET_NR_nice /* not on alpha */
8629 case TARGET_NR_nice
:
8630 return get_errno(nice(arg1
));
8632 case TARGET_NR_sync
:
8635 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8636 case TARGET_NR_syncfs
:
8637 return get_errno(syncfs(arg1
));
8639 case TARGET_NR_kill
:
8640 return get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
8641 #ifdef TARGET_NR_rename
8642 case TARGET_NR_rename
:
8645 p
= lock_user_string(arg1
);
8646 p2
= lock_user_string(arg2
);
8648 ret
= -TARGET_EFAULT
;
8650 ret
= get_errno(rename(p
, p2
));
8651 unlock_user(p2
, arg2
, 0);
8652 unlock_user(p
, arg1
, 0);
8656 #if defined(TARGET_NR_renameat)
8657 case TARGET_NR_renameat
:
8660 p
= lock_user_string(arg2
);
8661 p2
= lock_user_string(arg4
);
8663 ret
= -TARGET_EFAULT
;
8665 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
8666 unlock_user(p2
, arg4
, 0);
8667 unlock_user(p
, arg2
, 0);
8671 #if defined(TARGET_NR_renameat2)
8672 case TARGET_NR_renameat2
:
8675 p
= lock_user_string(arg2
);
8676 p2
= lock_user_string(arg4
);
8678 ret
= -TARGET_EFAULT
;
8680 ret
= get_errno(sys_renameat2(arg1
, p
, arg3
, p2
, arg5
));
8682 unlock_user(p2
, arg4
, 0);
8683 unlock_user(p
, arg2
, 0);
8687 #ifdef TARGET_NR_mkdir
8688 case TARGET_NR_mkdir
:
8689 if (!(p
= lock_user_string(arg1
)))
8690 return -TARGET_EFAULT
;
8691 ret
= get_errno(mkdir(p
, arg2
));
8692 unlock_user(p
, arg1
, 0);
8695 #if defined(TARGET_NR_mkdirat)
8696 case TARGET_NR_mkdirat
:
8697 if (!(p
= lock_user_string(arg2
)))
8698 return -TARGET_EFAULT
;
8699 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
8700 unlock_user(p
, arg2
, 0);
8703 #ifdef TARGET_NR_rmdir
8704 case TARGET_NR_rmdir
:
8705 if (!(p
= lock_user_string(arg1
)))
8706 return -TARGET_EFAULT
;
8707 ret
= get_errno(rmdir(p
));
8708 unlock_user(p
, arg1
, 0);
8712 ret
= get_errno(dup(arg1
));
8714 fd_trans_dup(arg1
, ret
);
8717 #ifdef TARGET_NR_pipe
8718 case TARGET_NR_pipe
:
8719 return do_pipe(cpu_env
, arg1
, 0, 0);
8721 #ifdef TARGET_NR_pipe2
8722 case TARGET_NR_pipe2
:
8723 return do_pipe(cpu_env
, arg1
,
8724 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
8726 case TARGET_NR_times
:
8728 struct target_tms
*tmsp
;
8730 ret
= get_errno(times(&tms
));
8732 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
8734 return -TARGET_EFAULT
;
8735 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
8736 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
8737 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
8738 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
8741 ret
= host_to_target_clock_t(ret
);
8744 case TARGET_NR_acct
:
8746 ret
= get_errno(acct(NULL
));
8748 if (!(p
= lock_user_string(arg1
))) {
8749 return -TARGET_EFAULT
;
8751 ret
= get_errno(acct(path(p
)));
8752 unlock_user(p
, arg1
, 0);
8755 #ifdef TARGET_NR_umount2
8756 case TARGET_NR_umount2
:
8757 if (!(p
= lock_user_string(arg1
)))
8758 return -TARGET_EFAULT
;
8759 ret
= get_errno(umount2(p
, arg2
));
8760 unlock_user(p
, arg1
, 0);
8763 case TARGET_NR_ioctl
:
8764 return do_ioctl(arg1
, arg2
, arg3
);
8765 #ifdef TARGET_NR_fcntl
8766 case TARGET_NR_fcntl
:
8767 return do_fcntl(arg1
, arg2
, arg3
);
8769 case TARGET_NR_setpgid
:
8770 return get_errno(setpgid(arg1
, arg2
));
8771 case TARGET_NR_umask
:
8772 return get_errno(umask(arg1
));
8773 case TARGET_NR_chroot
:
8774 if (!(p
= lock_user_string(arg1
)))
8775 return -TARGET_EFAULT
;
8776 ret
= get_errno(chroot(p
));
8777 unlock_user(p
, arg1
, 0);
8779 #ifdef TARGET_NR_dup2
8780 case TARGET_NR_dup2
:
8781 ret
= get_errno(dup2(arg1
, arg2
));
8783 fd_trans_dup(arg1
, arg2
);
8787 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8788 case TARGET_NR_dup3
:
8792 if ((arg3
& ~TARGET_O_CLOEXEC
) != 0) {
8795 host_flags
= target_to_host_bitmask(arg3
, fcntl_flags_tbl
);
8796 ret
= get_errno(dup3(arg1
, arg2
, host_flags
));
8798 fd_trans_dup(arg1
, arg2
);
8803 #ifdef TARGET_NR_getppid /* not on alpha */
8804 case TARGET_NR_getppid
:
8805 return get_errno(getppid());
8807 #ifdef TARGET_NR_getpgrp
8808 case TARGET_NR_getpgrp
:
8809 return get_errno(getpgrp());
8811 case TARGET_NR_setsid
:
8812 return get_errno(setsid());
8813 #ifdef TARGET_NR_sigaction
8814 case TARGET_NR_sigaction
:
8816 #if defined(TARGET_ALPHA)
8817 struct target_sigaction act
, oact
, *pact
= 0;
8818 struct target_old_sigaction
*old_act
;
8820 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8821 return -TARGET_EFAULT
;
8822 act
._sa_handler
= old_act
->_sa_handler
;
8823 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8824 act
.sa_flags
= old_act
->sa_flags
;
8825 act
.sa_restorer
= 0;
8826 unlock_user_struct(old_act
, arg2
, 0);
8829 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8830 if (!is_error(ret
) && arg3
) {
8831 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8832 return -TARGET_EFAULT
;
8833 old_act
->_sa_handler
= oact
._sa_handler
;
8834 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8835 old_act
->sa_flags
= oact
.sa_flags
;
8836 unlock_user_struct(old_act
, arg3
, 1);
8838 #elif defined(TARGET_MIPS)
8839 struct target_sigaction act
, oact
, *pact
, *old_act
;
8842 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8843 return -TARGET_EFAULT
;
8844 act
._sa_handler
= old_act
->_sa_handler
;
8845 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
8846 act
.sa_flags
= old_act
->sa_flags
;
8847 unlock_user_struct(old_act
, arg2
, 0);
8853 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8855 if (!is_error(ret
) && arg3
) {
8856 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8857 return -TARGET_EFAULT
;
8858 old_act
->_sa_handler
= oact
._sa_handler
;
8859 old_act
->sa_flags
= oact
.sa_flags
;
8860 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
8861 old_act
->sa_mask
.sig
[1] = 0;
8862 old_act
->sa_mask
.sig
[2] = 0;
8863 old_act
->sa_mask
.sig
[3] = 0;
8864 unlock_user_struct(old_act
, arg3
, 1);
8867 struct target_old_sigaction
*old_act
;
8868 struct target_sigaction act
, oact
, *pact
;
8870 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8871 return -TARGET_EFAULT
;
8872 act
._sa_handler
= old_act
->_sa_handler
;
8873 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8874 act
.sa_flags
= old_act
->sa_flags
;
8875 act
.sa_restorer
= old_act
->sa_restorer
;
8876 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8877 act
.ka_restorer
= 0;
8879 unlock_user_struct(old_act
, arg2
, 0);
8884 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8885 if (!is_error(ret
) && arg3
) {
8886 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8887 return -TARGET_EFAULT
;
8888 old_act
->_sa_handler
= oact
._sa_handler
;
8889 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8890 old_act
->sa_flags
= oact
.sa_flags
;
8891 old_act
->sa_restorer
= oact
.sa_restorer
;
8892 unlock_user_struct(old_act
, arg3
, 1);
8898 case TARGET_NR_rt_sigaction
:
8900 #if defined(TARGET_ALPHA)
8901 /* For Alpha and SPARC this is a 5 argument syscall, with
8902 * a 'restorer' parameter which must be copied into the
8903 * sa_restorer field of the sigaction struct.
8904 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8905 * and arg5 is the sigsetsize.
8906 * Alpha also has a separate rt_sigaction struct that it uses
8907 * here; SPARC uses the usual sigaction struct.
8909 struct target_rt_sigaction
*rt_act
;
8910 struct target_sigaction act
, oact
, *pact
= 0;
8912 if (arg4
!= sizeof(target_sigset_t
)) {
8913 return -TARGET_EINVAL
;
8916 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
8917 return -TARGET_EFAULT
;
8918 act
._sa_handler
= rt_act
->_sa_handler
;
8919 act
.sa_mask
= rt_act
->sa_mask
;
8920 act
.sa_flags
= rt_act
->sa_flags
;
8921 act
.sa_restorer
= arg5
;
8922 unlock_user_struct(rt_act
, arg2
, 0);
8925 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8926 if (!is_error(ret
) && arg3
) {
8927 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
8928 return -TARGET_EFAULT
;
8929 rt_act
->_sa_handler
= oact
._sa_handler
;
8930 rt_act
->sa_mask
= oact
.sa_mask
;
8931 rt_act
->sa_flags
= oact
.sa_flags
;
8932 unlock_user_struct(rt_act
, arg3
, 1);
8936 target_ulong restorer
= arg4
;
8937 target_ulong sigsetsize
= arg5
;
8939 target_ulong sigsetsize
= arg4
;
8941 struct target_sigaction
*act
;
8942 struct target_sigaction
*oact
;
8944 if (sigsetsize
!= sizeof(target_sigset_t
)) {
8945 return -TARGET_EINVAL
;
8948 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1)) {
8949 return -TARGET_EFAULT
;
8951 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8952 act
->ka_restorer
= restorer
;
8958 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
8959 ret
= -TARGET_EFAULT
;
8960 goto rt_sigaction_fail
;
8964 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
8967 unlock_user_struct(act
, arg2
, 0);
8969 unlock_user_struct(oact
, arg3
, 1);
8973 #ifdef TARGET_NR_sgetmask /* not on alpha */
8974 case TARGET_NR_sgetmask
:
8977 abi_ulong target_set
;
8978 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8980 host_to_target_old_sigset(&target_set
, &cur_set
);
8986 #ifdef TARGET_NR_ssetmask /* not on alpha */
8987 case TARGET_NR_ssetmask
:
8990 abi_ulong target_set
= arg1
;
8991 target_to_host_old_sigset(&set
, &target_set
);
8992 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
8994 host_to_target_old_sigset(&target_set
, &oset
);
9000 #ifdef TARGET_NR_sigprocmask
9001 case TARGET_NR_sigprocmask
:
9003 #if defined(TARGET_ALPHA)
9004 sigset_t set
, oldset
;
9009 case TARGET_SIG_BLOCK
:
9012 case TARGET_SIG_UNBLOCK
:
9015 case TARGET_SIG_SETMASK
:
9019 return -TARGET_EINVAL
;
9022 target_to_host_old_sigset(&set
, &mask
);
9024 ret
= do_sigprocmask(how
, &set
, &oldset
);
9025 if (!is_error(ret
)) {
9026 host_to_target_old_sigset(&mask
, &oldset
);
9028 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
9031 sigset_t set
, oldset
, *set_ptr
;
9036 case TARGET_SIG_BLOCK
:
9039 case TARGET_SIG_UNBLOCK
:
9042 case TARGET_SIG_SETMASK
:
9046 return -TARGET_EINVAL
;
9048 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
9049 return -TARGET_EFAULT
;
9050 target_to_host_old_sigset(&set
, p
);
9051 unlock_user(p
, arg2
, 0);
9057 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
9058 if (!is_error(ret
) && arg3
) {
9059 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
9060 return -TARGET_EFAULT
;
9061 host_to_target_old_sigset(p
, &oldset
);
9062 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
9068 case TARGET_NR_rt_sigprocmask
:
9071 sigset_t set
, oldset
, *set_ptr
;
9073 if (arg4
!= sizeof(target_sigset_t
)) {
9074 return -TARGET_EINVAL
;
9079 case TARGET_SIG_BLOCK
:
9082 case TARGET_SIG_UNBLOCK
:
9085 case TARGET_SIG_SETMASK
:
9089 return -TARGET_EINVAL
;
9091 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
9092 return -TARGET_EFAULT
;
9093 target_to_host_sigset(&set
, p
);
9094 unlock_user(p
, arg2
, 0);
9100 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
9101 if (!is_error(ret
) && arg3
) {
9102 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
9103 return -TARGET_EFAULT
;
9104 host_to_target_sigset(p
, &oldset
);
9105 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
9109 #ifdef TARGET_NR_sigpending
9110 case TARGET_NR_sigpending
:
9113 ret
= get_errno(sigpending(&set
));
9114 if (!is_error(ret
)) {
9115 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
9116 return -TARGET_EFAULT
;
9117 host_to_target_old_sigset(p
, &set
);
9118 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
9123 case TARGET_NR_rt_sigpending
:
9127 /* Yes, this check is >, not != like most. We follow the kernel's
9128 * logic and it does it like this because it implements
9129 * NR_sigpending through the same code path, and in that case
9130 * the old_sigset_t is smaller in size.
9132 if (arg2
> sizeof(target_sigset_t
)) {
9133 return -TARGET_EINVAL
;
9136 ret
= get_errno(sigpending(&set
));
9137 if (!is_error(ret
)) {
9138 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
9139 return -TARGET_EFAULT
;
9140 host_to_target_sigset(p
, &set
);
9141 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
9145 #ifdef TARGET_NR_sigsuspend
9146 case TARGET_NR_sigsuspend
:
9148 TaskState
*ts
= cpu
->opaque
;
9149 #if defined(TARGET_ALPHA)
9150 abi_ulong mask
= arg1
;
9151 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
9153 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
9154 return -TARGET_EFAULT
;
9155 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
9156 unlock_user(p
, arg1
, 0);
9158 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
9160 if (ret
!= -TARGET_ERESTARTSYS
) {
9161 ts
->in_sigsuspend
= 1;
9166 case TARGET_NR_rt_sigsuspend
:
9168 TaskState
*ts
= cpu
->opaque
;
9170 if (arg2
!= sizeof(target_sigset_t
)) {
9171 return -TARGET_EINVAL
;
9173 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
9174 return -TARGET_EFAULT
;
9175 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
9176 unlock_user(p
, arg1
, 0);
9177 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
9179 if (ret
!= -TARGET_ERESTARTSYS
) {
9180 ts
->in_sigsuspend
= 1;
9184 case TARGET_NR_rt_sigtimedwait
:
9187 struct timespec uts
, *puts
;
9190 if (arg4
!= sizeof(target_sigset_t
)) {
9191 return -TARGET_EINVAL
;
9194 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
9195 return -TARGET_EFAULT
;
9196 target_to_host_sigset(&set
, p
);
9197 unlock_user(p
, arg1
, 0);
9200 target_to_host_timespec(puts
, arg3
);
9204 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
9206 if (!is_error(ret
)) {
9208 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
9211 return -TARGET_EFAULT
;
9213 host_to_target_siginfo(p
, &uinfo
);
9214 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
9216 ret
= host_to_target_signal(ret
);
9220 case TARGET_NR_rt_sigqueueinfo
:
9224 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
9226 return -TARGET_EFAULT
;
9228 target_to_host_siginfo(&uinfo
, p
);
9229 unlock_user(p
, arg3
, 0);
9230 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
9233 case TARGET_NR_rt_tgsigqueueinfo
:
9237 p
= lock_user(VERIFY_READ
, arg4
, sizeof(target_siginfo_t
), 1);
9239 return -TARGET_EFAULT
;
9241 target_to_host_siginfo(&uinfo
, p
);
9242 unlock_user(p
, arg4
, 0);
9243 ret
= get_errno(sys_rt_tgsigqueueinfo(arg1
, arg2
, arg3
, &uinfo
));
9246 #ifdef TARGET_NR_sigreturn
9247 case TARGET_NR_sigreturn
:
9248 if (block_signals()) {
9249 return -TARGET_ERESTARTSYS
;
9251 return do_sigreturn(cpu_env
);
9253 case TARGET_NR_rt_sigreturn
:
9254 if (block_signals()) {
9255 return -TARGET_ERESTARTSYS
;
9257 return do_rt_sigreturn(cpu_env
);
9258 case TARGET_NR_sethostname
:
9259 if (!(p
= lock_user_string(arg1
)))
9260 return -TARGET_EFAULT
;
9261 ret
= get_errno(sethostname(p
, arg2
));
9262 unlock_user(p
, arg1
, 0);
9264 #ifdef TARGET_NR_setrlimit
9265 case TARGET_NR_setrlimit
:
9267 int resource
= target_to_host_resource(arg1
);
9268 struct target_rlimit
*target_rlim
;
9270 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
9271 return -TARGET_EFAULT
;
9272 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
9273 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
9274 unlock_user_struct(target_rlim
, arg2
, 0);
9275 return get_errno(setrlimit(resource
, &rlim
));
9278 #ifdef TARGET_NR_getrlimit
9279 case TARGET_NR_getrlimit
:
9281 int resource
= target_to_host_resource(arg1
);
9282 struct target_rlimit
*target_rlim
;
9285 ret
= get_errno(getrlimit(resource
, &rlim
));
9286 if (!is_error(ret
)) {
9287 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
9288 return -TARGET_EFAULT
;
9289 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
9290 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
9291 unlock_user_struct(target_rlim
, arg2
, 1);
9296 case TARGET_NR_getrusage
:
9298 struct rusage rusage
;
9299 ret
= get_errno(getrusage(arg1
, &rusage
));
9300 if (!is_error(ret
)) {
9301 ret
= host_to_target_rusage(arg2
, &rusage
);
9305 case TARGET_NR_gettimeofday
:
9308 ret
= get_errno(gettimeofday(&tv
, NULL
));
9309 if (!is_error(ret
)) {
9310 if (copy_to_user_timeval(arg1
, &tv
))
9311 return -TARGET_EFAULT
;
9315 case TARGET_NR_settimeofday
:
9317 struct timeval tv
, *ptv
= NULL
;
9318 struct timezone tz
, *ptz
= NULL
;
9321 if (copy_from_user_timeval(&tv
, arg1
)) {
9322 return -TARGET_EFAULT
;
9328 if (copy_from_user_timezone(&tz
, arg2
)) {
9329 return -TARGET_EFAULT
;
9334 return get_errno(settimeofday(ptv
, ptz
));
9336 #if defined(TARGET_NR_select)
9337 case TARGET_NR_select
:
9338 #if defined(TARGET_WANT_NI_OLD_SELECT)
9339 /* some architectures used to have old_select here
9340 * but now ENOSYS it.
9342 ret
= -TARGET_ENOSYS
;
9343 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9344 ret
= do_old_select(arg1
);
9346 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9350 #ifdef TARGET_NR_pselect6
9351 case TARGET_NR_pselect6
:
9353 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
9354 fd_set rfds
, wfds
, efds
;
9355 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
9356 struct timespec ts
, *ts_ptr
;
9359 * The 6th arg is actually two args smashed together,
9360 * so we cannot use the C library.
9368 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
9369 target_sigset_t
*target_sigset
;
9377 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
9381 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
9385 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
9391 * This takes a timespec, and not a timeval, so we cannot
9392 * use the do_select() helper ...
9395 if (target_to_host_timespec(&ts
, ts_addr
)) {
9396 return -TARGET_EFAULT
;
9403 /* Extract the two packed args for the sigset */
9406 sig
.size
= SIGSET_T_SIZE
;
9408 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
9410 return -TARGET_EFAULT
;
9412 arg_sigset
= tswapal(arg7
[0]);
9413 arg_sigsize
= tswapal(arg7
[1]);
9414 unlock_user(arg7
, arg6
, 0);
9418 if (arg_sigsize
!= sizeof(*target_sigset
)) {
9419 /* Like the kernel, we enforce correct size sigsets */
9420 return -TARGET_EINVAL
;
9422 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
9423 sizeof(*target_sigset
), 1);
9424 if (!target_sigset
) {
9425 return -TARGET_EFAULT
;
9427 target_to_host_sigset(&set
, target_sigset
);
9428 unlock_user(target_sigset
, arg_sigset
, 0);
9436 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
9439 if (!is_error(ret
)) {
9440 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
9441 return -TARGET_EFAULT
;
9442 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
9443 return -TARGET_EFAULT
;
9444 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
9445 return -TARGET_EFAULT
;
9447 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
9448 return -TARGET_EFAULT
;
9453 #ifdef TARGET_NR_symlink
9454 case TARGET_NR_symlink
:
9457 p
= lock_user_string(arg1
);
9458 p2
= lock_user_string(arg2
);
9460 ret
= -TARGET_EFAULT
;
9462 ret
= get_errno(symlink(p
, p2
));
9463 unlock_user(p2
, arg2
, 0);
9464 unlock_user(p
, arg1
, 0);
9468 #if defined(TARGET_NR_symlinkat)
9469 case TARGET_NR_symlinkat
:
9472 p
= lock_user_string(arg1
);
9473 p2
= lock_user_string(arg3
);
9475 ret
= -TARGET_EFAULT
;
9477 ret
= get_errno(symlinkat(p
, arg2
, p2
));
9478 unlock_user(p2
, arg3
, 0);
9479 unlock_user(p
, arg1
, 0);
9483 #ifdef TARGET_NR_readlink
9484 case TARGET_NR_readlink
:
9487 p
= lock_user_string(arg1
);
9488 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9490 ret
= -TARGET_EFAULT
;
9492 /* Short circuit this for the magic exe check. */
9493 ret
= -TARGET_EINVAL
;
9494 } else if (is_proc_myself((const char *)p
, "exe")) {
9495 char real
[PATH_MAX
], *temp
;
9496 temp
= realpath(exec_path
, real
);
9497 /* Return value is # of bytes that we wrote to the buffer. */
9499 ret
= get_errno(-1);
9501 /* Don't worry about sign mismatch as earlier mapping
9502 * logic would have thrown a bad address error. */
9503 ret
= MIN(strlen(real
), arg3
);
9504 /* We cannot NUL terminate the string. */
9505 memcpy(p2
, real
, ret
);
9508 ret
= get_errno(readlink(path(p
), p2
, arg3
));
9510 unlock_user(p2
, arg2
, ret
);
9511 unlock_user(p
, arg1
, 0);
9515 #if defined(TARGET_NR_readlinkat)
9516 case TARGET_NR_readlinkat
:
9519 p
= lock_user_string(arg2
);
9520 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
9522 ret
= -TARGET_EFAULT
;
9523 } else if (is_proc_myself((const char *)p
, "exe")) {
9524 char real
[PATH_MAX
], *temp
;
9525 temp
= realpath(exec_path
, real
);
9526 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
9527 snprintf((char *)p2
, arg4
, "%s", real
);
9529 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
9531 unlock_user(p2
, arg3
, ret
);
9532 unlock_user(p
, arg2
, 0);
9536 #ifdef TARGET_NR_swapon
9537 case TARGET_NR_swapon
:
9538 if (!(p
= lock_user_string(arg1
)))
9539 return -TARGET_EFAULT
;
9540 ret
= get_errno(swapon(p
, arg2
));
9541 unlock_user(p
, arg1
, 0);
9544 case TARGET_NR_reboot
:
9545 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
9546 /* arg4 must be ignored in all other cases */
9547 p
= lock_user_string(arg4
);
9549 return -TARGET_EFAULT
;
9551 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
9552 unlock_user(p
, arg4
, 0);
9554 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
9557 #ifdef TARGET_NR_mmap
9558 case TARGET_NR_mmap
:
9559 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9560 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9561 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9562 || defined(TARGET_S390X)
9565 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
9566 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
9567 return -TARGET_EFAULT
;
9574 unlock_user(v
, arg1
, 0);
9575 ret
= get_errno(target_mmap(v1
, v2
, v3
,
9576 target_to_host_bitmask(v4
, mmap_flags_tbl
),
9580 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9581 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9587 #ifdef TARGET_NR_mmap2
9588 case TARGET_NR_mmap2
:
9590 #define MMAP_SHIFT 12
9592 ret
= target_mmap(arg1
, arg2
, arg3
,
9593 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9594 arg5
, arg6
<< MMAP_SHIFT
);
9595 return get_errno(ret
);
9597 case TARGET_NR_munmap
:
9598 return get_errno(target_munmap(arg1
, arg2
));
9599 case TARGET_NR_mprotect
:
9601 TaskState
*ts
= cpu
->opaque
;
9602 /* Special hack to detect libc making the stack executable. */
9603 if ((arg3
& PROT_GROWSDOWN
)
9604 && arg1
>= ts
->info
->stack_limit
9605 && arg1
<= ts
->info
->start_stack
) {
9606 arg3
&= ~PROT_GROWSDOWN
;
9607 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
9608 arg1
= ts
->info
->stack_limit
;
9611 return get_errno(target_mprotect(arg1
, arg2
, arg3
));
9612 #ifdef TARGET_NR_mremap
9613 case TARGET_NR_mremap
:
9614 return get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
9616 /* ??? msync/mlock/munlock are broken for softmmu. */
9617 #ifdef TARGET_NR_msync
9618 case TARGET_NR_msync
:
9619 return get_errno(msync(g2h(arg1
), arg2
, arg3
));
9621 #ifdef TARGET_NR_mlock
9622 case TARGET_NR_mlock
:
9623 return get_errno(mlock(g2h(arg1
), arg2
));
9625 #ifdef TARGET_NR_munlock
9626 case TARGET_NR_munlock
:
9627 return get_errno(munlock(g2h(arg1
), arg2
));
9629 #ifdef TARGET_NR_mlockall
9630 case TARGET_NR_mlockall
:
9631 return get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
9633 #ifdef TARGET_NR_munlockall
9634 case TARGET_NR_munlockall
:
9635 return get_errno(munlockall());
9637 #ifdef TARGET_NR_truncate
9638 case TARGET_NR_truncate
:
9639 if (!(p
= lock_user_string(arg1
)))
9640 return -TARGET_EFAULT
;
9641 ret
= get_errno(truncate(p
, arg2
));
9642 unlock_user(p
, arg1
, 0);
9645 #ifdef TARGET_NR_ftruncate
9646 case TARGET_NR_ftruncate
:
9647 return get_errno(ftruncate(arg1
, arg2
));
9649 case TARGET_NR_fchmod
:
9650 return get_errno(fchmod(arg1
, arg2
));
9651 #if defined(TARGET_NR_fchmodat)
9652 case TARGET_NR_fchmodat
:
9653 if (!(p
= lock_user_string(arg2
)))
9654 return -TARGET_EFAULT
;
9655 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
9656 unlock_user(p
, arg2
, 0);
9659 case TARGET_NR_getpriority
:
9660 /* Note that negative values are valid for getpriority, so we must
9661 differentiate based on errno settings. */
9663 ret
= getpriority(arg1
, arg2
);
9664 if (ret
== -1 && errno
!= 0) {
9665 return -host_to_target_errno(errno
);
9668 /* Return value is the unbiased priority. Signal no error. */
9669 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
9671 /* Return value is a biased priority to avoid negative numbers. */
9675 case TARGET_NR_setpriority
:
9676 return get_errno(setpriority(arg1
, arg2
, arg3
));
9677 #ifdef TARGET_NR_statfs
9678 case TARGET_NR_statfs
:
9679 if (!(p
= lock_user_string(arg1
))) {
9680 return -TARGET_EFAULT
;
9682 ret
= get_errno(statfs(path(p
), &stfs
));
9683 unlock_user(p
, arg1
, 0);
9685 if (!is_error(ret
)) {
9686 struct target_statfs
*target_stfs
;
9688 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
9689 return -TARGET_EFAULT
;
9690 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9691 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9692 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9693 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9694 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9695 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9696 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9697 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9698 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9699 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9700 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9701 #ifdef _STATFS_F_FLAGS
9702 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
9704 __put_user(0, &target_stfs
->f_flags
);
9706 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9707 unlock_user_struct(target_stfs
, arg2
, 1);
9711 #ifdef TARGET_NR_fstatfs
9712 case TARGET_NR_fstatfs
:
9713 ret
= get_errno(fstatfs(arg1
, &stfs
));
9714 goto convert_statfs
;
9716 #ifdef TARGET_NR_statfs64
9717 case TARGET_NR_statfs64
:
9718 if (!(p
= lock_user_string(arg1
))) {
9719 return -TARGET_EFAULT
;
9721 ret
= get_errno(statfs(path(p
), &stfs
));
9722 unlock_user(p
, arg1
, 0);
9724 if (!is_error(ret
)) {
9725 struct target_statfs64
*target_stfs
;
9727 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
9728 return -TARGET_EFAULT
;
9729 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9730 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9731 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9732 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9733 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9734 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9735 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9736 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9737 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9738 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9739 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9740 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9741 unlock_user_struct(target_stfs
, arg3
, 1);
9744 case TARGET_NR_fstatfs64
:
9745 ret
= get_errno(fstatfs(arg1
, &stfs
));
9746 goto convert_statfs64
;
9748 #ifdef TARGET_NR_socketcall
9749 case TARGET_NR_socketcall
:
9750 return do_socketcall(arg1
, arg2
);
9752 #ifdef TARGET_NR_accept
9753 case TARGET_NR_accept
:
9754 return do_accept4(arg1
, arg2
, arg3
, 0);
9756 #ifdef TARGET_NR_accept4
9757 case TARGET_NR_accept4
:
9758 return do_accept4(arg1
, arg2
, arg3
, arg4
);
9760 #ifdef TARGET_NR_bind
9761 case TARGET_NR_bind
:
9762 return do_bind(arg1
, arg2
, arg3
);
9764 #ifdef TARGET_NR_connect
9765 case TARGET_NR_connect
:
9766 return do_connect(arg1
, arg2
, arg3
);
9768 #ifdef TARGET_NR_getpeername
9769 case TARGET_NR_getpeername
:
9770 return do_getpeername(arg1
, arg2
, arg3
);
9772 #ifdef TARGET_NR_getsockname
9773 case TARGET_NR_getsockname
:
9774 return do_getsockname(arg1
, arg2
, arg3
);
9776 #ifdef TARGET_NR_getsockopt
9777 case TARGET_NR_getsockopt
:
9778 return do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
9780 #ifdef TARGET_NR_listen
9781 case TARGET_NR_listen
:
9782 return get_errno(listen(arg1
, arg2
));
9784 #ifdef TARGET_NR_recv
9785 case TARGET_NR_recv
:
9786 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
9788 #ifdef TARGET_NR_recvfrom
9789 case TARGET_NR_recvfrom
:
9790 return do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9792 #ifdef TARGET_NR_recvmsg
9793 case TARGET_NR_recvmsg
:
9794 return do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
9796 #ifdef TARGET_NR_send
9797 case TARGET_NR_send
:
9798 return do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
9800 #ifdef TARGET_NR_sendmsg
9801 case TARGET_NR_sendmsg
:
9802 return do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
9804 #ifdef TARGET_NR_sendmmsg
9805 case TARGET_NR_sendmmsg
:
9806 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
9807 case TARGET_NR_recvmmsg
:
9808 return do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
9810 #ifdef TARGET_NR_sendto
9811 case TARGET_NR_sendto
:
9812 return do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9814 #ifdef TARGET_NR_shutdown
9815 case TARGET_NR_shutdown
:
9816 return get_errno(shutdown(arg1
, arg2
));
9818 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9819 case TARGET_NR_getrandom
:
9820 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
9822 return -TARGET_EFAULT
;
9824 ret
= get_errno(getrandom(p
, arg2
, arg3
));
9825 unlock_user(p
, arg1
, ret
);
9828 #ifdef TARGET_NR_socket
9829 case TARGET_NR_socket
:
9830 return do_socket(arg1
, arg2
, arg3
);
9832 #ifdef TARGET_NR_socketpair
9833 case TARGET_NR_socketpair
:
9834 return do_socketpair(arg1
, arg2
, arg3
, arg4
);
9836 #ifdef TARGET_NR_setsockopt
9837 case TARGET_NR_setsockopt
:
9838 return do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
9840 #if defined(TARGET_NR_syslog)
9841 case TARGET_NR_syslog
:
9846 case TARGET_SYSLOG_ACTION_CLOSE
: /* Close log */
9847 case TARGET_SYSLOG_ACTION_OPEN
: /* Open log */
9848 case TARGET_SYSLOG_ACTION_CLEAR
: /* Clear ring buffer */
9849 case TARGET_SYSLOG_ACTION_CONSOLE_OFF
: /* Disable logging */
9850 case TARGET_SYSLOG_ACTION_CONSOLE_ON
: /* Enable logging */
9851 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL
: /* Set messages level */
9852 case TARGET_SYSLOG_ACTION_SIZE_UNREAD
: /* Number of chars */
9853 case TARGET_SYSLOG_ACTION_SIZE_BUFFER
: /* Size of the buffer */
9854 return get_errno(sys_syslog((int)arg1
, NULL
, (int)arg3
));
9855 case TARGET_SYSLOG_ACTION_READ
: /* Read from log */
9856 case TARGET_SYSLOG_ACTION_READ_CLEAR
: /* Read/clear msgs */
9857 case TARGET_SYSLOG_ACTION_READ_ALL
: /* Read last messages */
9860 return -TARGET_EINVAL
;
9865 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9867 return -TARGET_EFAULT
;
9869 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
9870 unlock_user(p
, arg2
, arg3
);
9874 return -TARGET_EINVAL
;
9879 case TARGET_NR_setitimer
:
9881 struct itimerval value
, ovalue
, *pvalue
;
9885 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
9886 || copy_from_user_timeval(&pvalue
->it_value
,
9887 arg2
+ sizeof(struct target_timeval
)))
9888 return -TARGET_EFAULT
;
9892 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
9893 if (!is_error(ret
) && arg3
) {
9894 if (copy_to_user_timeval(arg3
,
9895 &ovalue
.it_interval
)
9896 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
9898 return -TARGET_EFAULT
;
9902 case TARGET_NR_getitimer
:
9904 struct itimerval value
;
9906 ret
= get_errno(getitimer(arg1
, &value
));
9907 if (!is_error(ret
) && arg2
) {
9908 if (copy_to_user_timeval(arg2
,
9910 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
9912 return -TARGET_EFAULT
;
9916 #ifdef TARGET_NR_stat
9917 case TARGET_NR_stat
:
9918 if (!(p
= lock_user_string(arg1
))) {
9919 return -TARGET_EFAULT
;
9921 ret
= get_errno(stat(path(p
), &st
));
9922 unlock_user(p
, arg1
, 0);
9925 #ifdef TARGET_NR_lstat
9926 case TARGET_NR_lstat
:
9927 if (!(p
= lock_user_string(arg1
))) {
9928 return -TARGET_EFAULT
;
9930 ret
= get_errno(lstat(path(p
), &st
));
9931 unlock_user(p
, arg1
, 0);
9934 #ifdef TARGET_NR_fstat
9935 case TARGET_NR_fstat
:
9937 ret
= get_errno(fstat(arg1
, &st
));
9938 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9941 if (!is_error(ret
)) {
9942 struct target_stat
*target_st
;
9944 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
9945 return -TARGET_EFAULT
;
9946 memset(target_st
, 0, sizeof(*target_st
));
9947 __put_user(st
.st_dev
, &target_st
->st_dev
);
9948 __put_user(st
.st_ino
, &target_st
->st_ino
);
9949 __put_user(st
.st_mode
, &target_st
->st_mode
);
9950 __put_user(st
.st_uid
, &target_st
->st_uid
);
9951 __put_user(st
.st_gid
, &target_st
->st_gid
);
9952 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
9953 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
9954 __put_user(st
.st_size
, &target_st
->st_size
);
9955 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
9956 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
9957 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
9958 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
9959 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
9960 unlock_user_struct(target_st
, arg2
, 1);
9965 case TARGET_NR_vhangup
:
9966 return get_errno(vhangup());
9967 #ifdef TARGET_NR_syscall
9968 case TARGET_NR_syscall
:
9969 return do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
9970 arg6
, arg7
, arg8
, 0);
9972 case TARGET_NR_wait4
:
9975 abi_long status_ptr
= arg2
;
9976 struct rusage rusage
, *rusage_ptr
;
9977 abi_ulong target_rusage
= arg4
;
9978 abi_long rusage_err
;
9980 rusage_ptr
= &rusage
;
9983 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
9984 if (!is_error(ret
)) {
9985 if (status_ptr
&& ret
) {
9986 status
= host_to_target_waitstatus(status
);
9987 if (put_user_s32(status
, status_ptr
))
9988 return -TARGET_EFAULT
;
9990 if (target_rusage
) {
9991 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
9999 #ifdef TARGET_NR_swapoff
10000 case TARGET_NR_swapoff
:
10001 if (!(p
= lock_user_string(arg1
)))
10002 return -TARGET_EFAULT
;
10003 ret
= get_errno(swapoff(p
));
10004 unlock_user(p
, arg1
, 0);
10007 case TARGET_NR_sysinfo
:
10009 struct target_sysinfo
*target_value
;
10010 struct sysinfo value
;
10011 ret
= get_errno(sysinfo(&value
));
10012 if (!is_error(ret
) && arg1
)
10014 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
10015 return -TARGET_EFAULT
;
10016 __put_user(value
.uptime
, &target_value
->uptime
);
10017 __put_user(value
.loads
[0], &target_value
->loads
[0]);
10018 __put_user(value
.loads
[1], &target_value
->loads
[1]);
10019 __put_user(value
.loads
[2], &target_value
->loads
[2]);
10020 __put_user(value
.totalram
, &target_value
->totalram
);
10021 __put_user(value
.freeram
, &target_value
->freeram
);
10022 __put_user(value
.sharedram
, &target_value
->sharedram
);
10023 __put_user(value
.bufferram
, &target_value
->bufferram
);
10024 __put_user(value
.totalswap
, &target_value
->totalswap
);
10025 __put_user(value
.freeswap
, &target_value
->freeswap
);
10026 __put_user(value
.procs
, &target_value
->procs
);
10027 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
10028 __put_user(value
.freehigh
, &target_value
->freehigh
);
10029 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
10030 unlock_user_struct(target_value
, arg1
, 1);
10034 #ifdef TARGET_NR_ipc
10035 case TARGET_NR_ipc
:
10036 return do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
10038 #ifdef TARGET_NR_semget
10039 case TARGET_NR_semget
:
10040 return get_errno(semget(arg1
, arg2
, arg3
));
10042 #ifdef TARGET_NR_semop
10043 case TARGET_NR_semop
:
10044 return do_semop(arg1
, arg2
, arg3
);
10046 #ifdef TARGET_NR_semctl
10047 case TARGET_NR_semctl
:
10048 return do_semctl(arg1
, arg2
, arg3
, arg4
);
10050 #ifdef TARGET_NR_msgctl
10051 case TARGET_NR_msgctl
:
10052 return do_msgctl(arg1
, arg2
, arg3
);
10054 #ifdef TARGET_NR_msgget
10055 case TARGET_NR_msgget
:
10056 return get_errno(msgget(arg1
, arg2
));
10058 #ifdef TARGET_NR_msgrcv
10059 case TARGET_NR_msgrcv
:
10060 return do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
10062 #ifdef TARGET_NR_msgsnd
10063 case TARGET_NR_msgsnd
:
10064 return do_msgsnd(arg1
, arg2
, arg3
, arg4
);
10066 #ifdef TARGET_NR_shmget
10067 case TARGET_NR_shmget
:
10068 return get_errno(shmget(arg1
, arg2
, arg3
));
10070 #ifdef TARGET_NR_shmctl
10071 case TARGET_NR_shmctl
:
10072 return do_shmctl(arg1
, arg2
, arg3
);
10074 #ifdef TARGET_NR_shmat
10075 case TARGET_NR_shmat
:
10076 return do_shmat(cpu_env
, arg1
, arg2
, arg3
);
10078 #ifdef TARGET_NR_shmdt
10079 case TARGET_NR_shmdt
:
10080 return do_shmdt(arg1
);
10082 case TARGET_NR_fsync
:
10083 return get_errno(fsync(arg1
));
10084 case TARGET_NR_clone
:
10085 /* Linux manages to have three different orderings for its
10086 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10087 * match the kernel's CONFIG_CLONE_* settings.
10088 * Microblaze is further special in that it uses a sixth
10089 * implicit argument to clone for the TLS pointer.
10091 #if defined(TARGET_MICROBLAZE)
10092 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
10093 #elif defined(TARGET_CLONE_BACKWARDS)
10094 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
10095 #elif defined(TARGET_CLONE_BACKWARDS2)
10096 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
10098 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
10101 #ifdef __NR_exit_group
10102 /* new thread calls */
10103 case TARGET_NR_exit_group
:
10104 preexit_cleanup(cpu_env
, arg1
);
10105 return get_errno(exit_group(arg1
));
10107 case TARGET_NR_setdomainname
:
10108 if (!(p
= lock_user_string(arg1
)))
10109 return -TARGET_EFAULT
;
10110 ret
= get_errno(setdomainname(p
, arg2
));
10111 unlock_user(p
, arg1
, 0);
10113 case TARGET_NR_uname
:
10114 /* no need to transcode because we use the linux syscall */
10116 struct new_utsname
* buf
;
10118 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
10119 return -TARGET_EFAULT
;
10120 ret
= get_errno(sys_uname(buf
));
10121 if (!is_error(ret
)) {
10122 /* Overwrite the native machine name with whatever is being
10124 g_strlcpy(buf
->machine
, cpu_to_uname_machine(cpu_env
),
10125 sizeof(buf
->machine
));
10126 /* Allow the user to override the reported release. */
10127 if (qemu_uname_release
&& *qemu_uname_release
) {
10128 g_strlcpy(buf
->release
, qemu_uname_release
,
10129 sizeof(buf
->release
));
10132 unlock_user_struct(buf
, arg1
, 1);
10136 case TARGET_NR_modify_ldt
:
10137 return do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
10138 #if !defined(TARGET_X86_64)
10139 case TARGET_NR_vm86
:
10140 return do_vm86(cpu_env
, arg1
, arg2
);
10143 case TARGET_NR_adjtimex
:
10145 struct timex host_buf
;
10147 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
10148 return -TARGET_EFAULT
;
10150 ret
= get_errno(adjtimex(&host_buf
));
10151 if (!is_error(ret
)) {
10152 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
10153 return -TARGET_EFAULT
;
10158 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10159 case TARGET_NR_clock_adjtime
:
10161 struct timex htx
, *phtx
= &htx
;
10163 if (target_to_host_timex(phtx
, arg2
) != 0) {
10164 return -TARGET_EFAULT
;
10166 ret
= get_errno(clock_adjtime(arg1
, phtx
));
10167 if (!is_error(ret
) && phtx
) {
10168 if (host_to_target_timex(arg2
, phtx
) != 0) {
10169 return -TARGET_EFAULT
;
10175 case TARGET_NR_getpgid
:
10176 return get_errno(getpgid(arg1
));
10177 case TARGET_NR_fchdir
:
10178 return get_errno(fchdir(arg1
));
10179 case TARGET_NR_personality
:
10180 return get_errno(personality(arg1
));
10181 #ifdef TARGET_NR__llseek /* Not on alpha */
10182 case TARGET_NR__llseek
:
10185 #if !defined(__NR_llseek)
10186 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
10188 ret
= get_errno(res
);
10193 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
10195 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
10196 return -TARGET_EFAULT
;
10201 #ifdef TARGET_NR_getdents
10202 case TARGET_NR_getdents
:
10203 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10204 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10206 struct target_dirent
*target_dirp
;
10207 struct linux_dirent
*dirp
;
10208 abi_long count
= arg3
;
10210 dirp
= g_try_malloc(count
);
10212 return -TARGET_ENOMEM
;
10215 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
10216 if (!is_error(ret
)) {
10217 struct linux_dirent
*de
;
10218 struct target_dirent
*tde
;
10220 int reclen
, treclen
;
10221 int count1
, tnamelen
;
10225 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10226 return -TARGET_EFAULT
;
10229 reclen
= de
->d_reclen
;
10230 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
10231 assert(tnamelen
>= 0);
10232 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
10233 assert(count1
+ treclen
<= count
);
10234 tde
->d_reclen
= tswap16(treclen
);
10235 tde
->d_ino
= tswapal(de
->d_ino
);
10236 tde
->d_off
= tswapal(de
->d_off
);
10237 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
10238 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
10240 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
10244 unlock_user(target_dirp
, arg2
, ret
);
10250 struct linux_dirent
*dirp
;
10251 abi_long count
= arg3
;
10253 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10254 return -TARGET_EFAULT
;
10255 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
10256 if (!is_error(ret
)) {
10257 struct linux_dirent
*de
;
10262 reclen
= de
->d_reclen
;
10265 de
->d_reclen
= tswap16(reclen
);
10266 tswapls(&de
->d_ino
);
10267 tswapls(&de
->d_off
);
10268 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
10272 unlock_user(dirp
, arg2
, ret
);
10276 /* Implement getdents in terms of getdents64 */
10278 struct linux_dirent64
*dirp
;
10279 abi_long count
= arg3
;
10281 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
10283 return -TARGET_EFAULT
;
10285 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
10286 if (!is_error(ret
)) {
10287 /* Convert the dirent64 structs to target dirent. We do this
10288 * in-place, since we can guarantee that a target_dirent is no
10289 * larger than a dirent64; however this means we have to be
10290 * careful to read everything before writing in the new format.
10292 struct linux_dirent64
*de
;
10293 struct target_dirent
*tde
;
10298 tde
= (struct target_dirent
*)dirp
;
10300 int namelen
, treclen
;
10301 int reclen
= de
->d_reclen
;
10302 uint64_t ino
= de
->d_ino
;
10303 int64_t off
= de
->d_off
;
10304 uint8_t type
= de
->d_type
;
10306 namelen
= strlen(de
->d_name
);
10307 treclen
= offsetof(struct target_dirent
, d_name
)
10309 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
10311 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
10312 tde
->d_ino
= tswapal(ino
);
10313 tde
->d_off
= tswapal(off
);
10314 tde
->d_reclen
= tswap16(treclen
);
10315 /* The target_dirent type is in what was formerly a padding
10316 * byte at the end of the structure:
10318 *(((char *)tde
) + treclen
- 1) = type
;
10320 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
10321 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
10327 unlock_user(dirp
, arg2
, ret
);
10331 #endif /* TARGET_NR_getdents */
10332 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10333 case TARGET_NR_getdents64
:
10335 struct linux_dirent64
*dirp
;
10336 abi_long count
= arg3
;
10337 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10338 return -TARGET_EFAULT
;
10339 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
10340 if (!is_error(ret
)) {
10341 struct linux_dirent64
*de
;
10346 reclen
= de
->d_reclen
;
10349 de
->d_reclen
= tswap16(reclen
);
10350 tswap64s((uint64_t *)&de
->d_ino
);
10351 tswap64s((uint64_t *)&de
->d_off
);
10352 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
10356 unlock_user(dirp
, arg2
, ret
);
10359 #endif /* TARGET_NR_getdents64 */
10360 #if defined(TARGET_NR__newselect)
10361 case TARGET_NR__newselect
:
10362 return do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
10364 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10365 # ifdef TARGET_NR_poll
10366 case TARGET_NR_poll
:
10368 # ifdef TARGET_NR_ppoll
10369 case TARGET_NR_ppoll
:
10372 struct target_pollfd
*target_pfd
;
10373 unsigned int nfds
= arg2
;
10374 struct pollfd
*pfd
;
10380 if (nfds
> (INT_MAX
/ sizeof(struct target_pollfd
))) {
10381 return -TARGET_EINVAL
;
10384 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
10385 sizeof(struct target_pollfd
) * nfds
, 1);
10387 return -TARGET_EFAULT
;
10390 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
10391 for (i
= 0; i
< nfds
; i
++) {
10392 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
10393 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
10398 # ifdef TARGET_NR_ppoll
10399 case TARGET_NR_ppoll
:
10401 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
10402 target_sigset_t
*target_set
;
10403 sigset_t _set
, *set
= &_set
;
10406 if (target_to_host_timespec(timeout_ts
, arg3
)) {
10407 unlock_user(target_pfd
, arg1
, 0);
10408 return -TARGET_EFAULT
;
10415 if (arg5
!= sizeof(target_sigset_t
)) {
10416 unlock_user(target_pfd
, arg1
, 0);
10417 return -TARGET_EINVAL
;
10420 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
10422 unlock_user(target_pfd
, arg1
, 0);
10423 return -TARGET_EFAULT
;
10425 target_to_host_sigset(set
, target_set
);
10430 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
10431 set
, SIGSET_T_SIZE
));
10433 if (!is_error(ret
) && arg3
) {
10434 host_to_target_timespec(arg3
, timeout_ts
);
10437 unlock_user(target_set
, arg4
, 0);
10442 # ifdef TARGET_NR_poll
10443 case TARGET_NR_poll
:
10445 struct timespec ts
, *pts
;
10448 /* Convert ms to secs, ns */
10449 ts
.tv_sec
= arg3
/ 1000;
10450 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
10453 /* -ve poll() timeout means "infinite" */
10456 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
10461 g_assert_not_reached();
10464 if (!is_error(ret
)) {
10465 for(i
= 0; i
< nfds
; i
++) {
10466 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
10469 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
10473 case TARGET_NR_flock
:
10474 /* NOTE: the flock constant seems to be the same for every
10476 return get_errno(safe_flock(arg1
, arg2
));
10477 case TARGET_NR_readv
:
10479 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10481 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
10482 unlock_iovec(vec
, arg2
, arg3
, 1);
10484 ret
= -host_to_target_errno(errno
);
10488 case TARGET_NR_writev
:
10490 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10492 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
10493 unlock_iovec(vec
, arg2
, arg3
, 0);
10495 ret
= -host_to_target_errno(errno
);
10499 #if defined(TARGET_NR_preadv)
10500 case TARGET_NR_preadv
:
10502 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10504 unsigned long low
, high
;
10506 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10507 ret
= get_errno(safe_preadv(arg1
, vec
, arg3
, low
, high
));
10508 unlock_iovec(vec
, arg2
, arg3
, 1);
10510 ret
= -host_to_target_errno(errno
);
10515 #if defined(TARGET_NR_pwritev)
10516 case TARGET_NR_pwritev
:
10518 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10520 unsigned long low
, high
;
10522 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10523 ret
= get_errno(safe_pwritev(arg1
, vec
, arg3
, low
, high
));
10524 unlock_iovec(vec
, arg2
, arg3
, 0);
10526 ret
= -host_to_target_errno(errno
);
10531 case TARGET_NR_getsid
:
10532 return get_errno(getsid(arg1
));
10533 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10534 case TARGET_NR_fdatasync
:
10535 return get_errno(fdatasync(arg1
));
10537 #ifdef TARGET_NR__sysctl
10538 case TARGET_NR__sysctl
:
10539 /* We don't implement this, but ENOTDIR is always a safe
10541 return -TARGET_ENOTDIR
;
10543 case TARGET_NR_sched_getaffinity
:
10545 unsigned int mask_size
;
10546 unsigned long *mask
;
10549 * sched_getaffinity needs multiples of ulong, so need to take
10550 * care of mismatches between target ulong and host ulong sizes.
10552 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10553 return -TARGET_EINVAL
;
10555 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10557 mask
= alloca(mask_size
);
10558 memset(mask
, 0, mask_size
);
10559 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
10561 if (!is_error(ret
)) {
10563 /* More data returned than the caller's buffer will fit.
10564 * This only happens if sizeof(abi_long) < sizeof(long)
10565 * and the caller passed us a buffer holding an odd number
10566 * of abi_longs. If the host kernel is actually using the
10567 * extra 4 bytes then fail EINVAL; otherwise we can just
10568 * ignore them and only copy the interesting part.
10570 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
10571 if (numcpus
> arg2
* 8) {
10572 return -TARGET_EINVAL
;
10577 if (host_to_target_cpu_mask(mask
, mask_size
, arg3
, ret
)) {
10578 return -TARGET_EFAULT
;
10583 case TARGET_NR_sched_setaffinity
:
10585 unsigned int mask_size
;
10586 unsigned long *mask
;
10589 * sched_setaffinity needs multiples of ulong, so need to take
10590 * care of mismatches between target ulong and host ulong sizes.
10592 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10593 return -TARGET_EINVAL
;
10595 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10596 mask
= alloca(mask_size
);
10598 ret
= target_to_host_cpu_mask(mask
, mask_size
, arg3
, arg2
);
10603 return get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
10605 case TARGET_NR_getcpu
:
10607 unsigned cpu
, node
;
10608 ret
= get_errno(sys_getcpu(arg1
? &cpu
: NULL
,
10609 arg2
? &node
: NULL
,
10611 if (is_error(ret
)) {
10614 if (arg1
&& put_user_u32(cpu
, arg1
)) {
10615 return -TARGET_EFAULT
;
10617 if (arg2
&& put_user_u32(node
, arg2
)) {
10618 return -TARGET_EFAULT
;
10622 case TARGET_NR_sched_setparam
:
10624 struct sched_param
*target_schp
;
10625 struct sched_param schp
;
10628 return -TARGET_EINVAL
;
10630 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
10631 return -TARGET_EFAULT
;
10632 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10633 unlock_user_struct(target_schp
, arg2
, 0);
10634 return get_errno(sched_setparam(arg1
, &schp
));
10636 case TARGET_NR_sched_getparam
:
10638 struct sched_param
*target_schp
;
10639 struct sched_param schp
;
10642 return -TARGET_EINVAL
;
10644 ret
= get_errno(sched_getparam(arg1
, &schp
));
10645 if (!is_error(ret
)) {
10646 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
10647 return -TARGET_EFAULT
;
10648 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
10649 unlock_user_struct(target_schp
, arg2
, 1);
10653 case TARGET_NR_sched_setscheduler
:
10655 struct sched_param
*target_schp
;
10656 struct sched_param schp
;
10658 return -TARGET_EINVAL
;
10660 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
10661 return -TARGET_EFAULT
;
10662 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10663 unlock_user_struct(target_schp
, arg3
, 0);
10664 return get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
10666 case TARGET_NR_sched_getscheduler
:
10667 return get_errno(sched_getscheduler(arg1
));
10668 case TARGET_NR_sched_yield
:
10669 return get_errno(sched_yield());
10670 case TARGET_NR_sched_get_priority_max
:
10671 return get_errno(sched_get_priority_max(arg1
));
10672 case TARGET_NR_sched_get_priority_min
:
10673 return get_errno(sched_get_priority_min(arg1
));
10674 case TARGET_NR_sched_rr_get_interval
:
10676 struct timespec ts
;
10677 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10678 if (!is_error(ret
)) {
10679 ret
= host_to_target_timespec(arg2
, &ts
);
10683 case TARGET_NR_nanosleep
:
10685 struct timespec req
, rem
;
10686 target_to_host_timespec(&req
, arg1
);
10687 ret
= get_errno(safe_nanosleep(&req
, &rem
));
10688 if (is_error(ret
) && arg2
) {
10689 host_to_target_timespec(arg2
, &rem
);
10693 case TARGET_NR_prctl
:
10695 case PR_GET_PDEATHSIG
:
10698 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
10699 if (!is_error(ret
) && arg2
10700 && put_user_ual(deathsig
, arg2
)) {
10701 return -TARGET_EFAULT
;
10708 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
10710 return -TARGET_EFAULT
;
10712 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10713 arg3
, arg4
, arg5
));
10714 unlock_user(name
, arg2
, 16);
10719 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
10721 return -TARGET_EFAULT
;
10723 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10724 arg3
, arg4
, arg5
));
10725 unlock_user(name
, arg2
, 0);
10729 #ifdef TARGET_AARCH64
10730 case TARGET_PR_SVE_SET_VL
:
10732 * We cannot support either PR_SVE_SET_VL_ONEXEC or
10733 * PR_SVE_VL_INHERIT. Note the kernel definition
10734 * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10735 * even though the current architectural maximum is VQ=16.
10737 ret
= -TARGET_EINVAL
;
10738 if (arm_feature(cpu_env
, ARM_FEATURE_SVE
)
10739 && arg2
>= 0 && arg2
<= 512 * 16 && !(arg2
& 15)) {
10740 CPUARMState
*env
= cpu_env
;
10741 ARMCPU
*cpu
= arm_env_get_cpu(env
);
10742 uint32_t vq
, old_vq
;
10744 old_vq
= (env
->vfp
.zcr_el
[1] & 0xf) + 1;
10745 vq
= MAX(arg2
/ 16, 1);
10746 vq
= MIN(vq
, cpu
->sve_max_vq
);
10749 aarch64_sve_narrow_vq(env
, vq
);
10751 env
->vfp
.zcr_el
[1] = vq
- 1;
10755 case TARGET_PR_SVE_GET_VL
:
10756 ret
= -TARGET_EINVAL
;
10757 if (arm_feature(cpu_env
, ARM_FEATURE_SVE
)) {
10758 CPUARMState
*env
= cpu_env
;
10759 ret
= ((env
->vfp
.zcr_el
[1] & 0xf) + 1) * 16;
10762 #endif /* AARCH64 */
10763 case PR_GET_SECCOMP
:
10764 case PR_SET_SECCOMP
:
10765 /* Disable seccomp to prevent the target disabling syscalls we
10767 return -TARGET_EINVAL
;
10769 /* Most prctl options have no pointer arguments */
10770 return get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
10773 #ifdef TARGET_NR_arch_prctl
10774 case TARGET_NR_arch_prctl
:
10775 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10776 return do_arch_prctl(cpu_env
, arg1
, arg2
);
10781 #ifdef TARGET_NR_pread64
10782 case TARGET_NR_pread64
:
10783 if (regpairs_aligned(cpu_env
, num
)) {
10787 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
10788 return -TARGET_EFAULT
;
10789 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10790 unlock_user(p
, arg2
, ret
);
10792 case TARGET_NR_pwrite64
:
10793 if (regpairs_aligned(cpu_env
, num
)) {
10797 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
10798 return -TARGET_EFAULT
;
10799 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10800 unlock_user(p
, arg2
, 0);
10803 case TARGET_NR_getcwd
:
10804 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
10805 return -TARGET_EFAULT
;
10806 ret
= get_errno(sys_getcwd1(p
, arg2
));
10807 unlock_user(p
, arg1
, ret
);
10809 case TARGET_NR_capget
:
10810 case TARGET_NR_capset
:
10812 struct target_user_cap_header
*target_header
;
10813 struct target_user_cap_data
*target_data
= NULL
;
10814 struct __user_cap_header_struct header
;
10815 struct __user_cap_data_struct data
[2];
10816 struct __user_cap_data_struct
*dataptr
= NULL
;
10817 int i
, target_datalen
;
10818 int data_items
= 1;
10820 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
10821 return -TARGET_EFAULT
;
10823 header
.version
= tswap32(target_header
->version
);
10824 header
.pid
= tswap32(target_header
->pid
);
10826 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
10827 /* Version 2 and up takes pointer to two user_data structs */
10831 target_datalen
= sizeof(*target_data
) * data_items
;
10834 if (num
== TARGET_NR_capget
) {
10835 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
10837 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
10839 if (!target_data
) {
10840 unlock_user_struct(target_header
, arg1
, 0);
10841 return -TARGET_EFAULT
;
10844 if (num
== TARGET_NR_capset
) {
10845 for (i
= 0; i
< data_items
; i
++) {
10846 data
[i
].effective
= tswap32(target_data
[i
].effective
);
10847 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
10848 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
10855 if (num
== TARGET_NR_capget
) {
10856 ret
= get_errno(capget(&header
, dataptr
));
10858 ret
= get_errno(capset(&header
, dataptr
));
10861 /* The kernel always updates version for both capget and capset */
10862 target_header
->version
= tswap32(header
.version
);
10863 unlock_user_struct(target_header
, arg1
, 1);
10866 if (num
== TARGET_NR_capget
) {
10867 for (i
= 0; i
< data_items
; i
++) {
10868 target_data
[i
].effective
= tswap32(data
[i
].effective
);
10869 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
10870 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
10872 unlock_user(target_data
, arg2
, target_datalen
);
10874 unlock_user(target_data
, arg2
, 0);
10879 case TARGET_NR_sigaltstack
:
10880 return do_sigaltstack(arg1
, arg2
,
10881 get_sp_from_cpustate((CPUArchState
*)cpu_env
));
10883 #ifdef CONFIG_SENDFILE
10884 #ifdef TARGET_NR_sendfile
10885 case TARGET_NR_sendfile
:
10887 off_t
*offp
= NULL
;
10890 ret
= get_user_sal(off
, arg3
);
10891 if (is_error(ret
)) {
10896 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10897 if (!is_error(ret
) && arg3
) {
10898 abi_long ret2
= put_user_sal(off
, arg3
);
10899 if (is_error(ret2
)) {
10906 #ifdef TARGET_NR_sendfile64
10907 case TARGET_NR_sendfile64
:
10909 off_t
*offp
= NULL
;
10912 ret
= get_user_s64(off
, arg3
);
10913 if (is_error(ret
)) {
10918 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10919 if (!is_error(ret
) && arg3
) {
10920 abi_long ret2
= put_user_s64(off
, arg3
);
10921 if (is_error(ret2
)) {
10929 #ifdef TARGET_NR_vfork
10930 case TARGET_NR_vfork
:
10931 return get_errno(do_fork(cpu_env
,
10932 CLONE_VFORK
| CLONE_VM
| TARGET_SIGCHLD
,
10935 #ifdef TARGET_NR_ugetrlimit
10936 case TARGET_NR_ugetrlimit
:
10938 struct rlimit rlim
;
10939 int resource
= target_to_host_resource(arg1
);
10940 ret
= get_errno(getrlimit(resource
, &rlim
));
10941 if (!is_error(ret
)) {
10942 struct target_rlimit
*target_rlim
;
10943 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
10944 return -TARGET_EFAULT
;
10945 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
10946 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
10947 unlock_user_struct(target_rlim
, arg2
, 1);
10952 #ifdef TARGET_NR_truncate64
10953 case TARGET_NR_truncate64
:
10954 if (!(p
= lock_user_string(arg1
)))
10955 return -TARGET_EFAULT
;
10956 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
10957 unlock_user(p
, arg1
, 0);
10960 #ifdef TARGET_NR_ftruncate64
10961 case TARGET_NR_ftruncate64
:
10962 return target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
10964 #ifdef TARGET_NR_stat64
10965 case TARGET_NR_stat64
:
10966 if (!(p
= lock_user_string(arg1
))) {
10967 return -TARGET_EFAULT
;
10969 ret
= get_errno(stat(path(p
), &st
));
10970 unlock_user(p
, arg1
, 0);
10971 if (!is_error(ret
))
10972 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10975 #ifdef TARGET_NR_lstat64
10976 case TARGET_NR_lstat64
:
10977 if (!(p
= lock_user_string(arg1
))) {
10978 return -TARGET_EFAULT
;
10980 ret
= get_errno(lstat(path(p
), &st
));
10981 unlock_user(p
, arg1
, 0);
10982 if (!is_error(ret
))
10983 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10986 #ifdef TARGET_NR_fstat64
10987 case TARGET_NR_fstat64
:
10988 ret
= get_errno(fstat(arg1
, &st
));
10989 if (!is_error(ret
))
10990 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10993 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10994 #ifdef TARGET_NR_fstatat64
10995 case TARGET_NR_fstatat64
:
10997 #ifdef TARGET_NR_newfstatat
10998 case TARGET_NR_newfstatat
:
11000 if (!(p
= lock_user_string(arg2
))) {
11001 return -TARGET_EFAULT
;
11003 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
11004 unlock_user(p
, arg2
, 0);
11005 if (!is_error(ret
))
11006 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
11009 #ifdef TARGET_NR_lchown
11010 case TARGET_NR_lchown
:
11011 if (!(p
= lock_user_string(arg1
)))
11012 return -TARGET_EFAULT
;
11013 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
11014 unlock_user(p
, arg1
, 0);
11017 #ifdef TARGET_NR_getuid
11018 case TARGET_NR_getuid
:
11019 return get_errno(high2lowuid(getuid()));
11021 #ifdef TARGET_NR_getgid
11022 case TARGET_NR_getgid
:
11023 return get_errno(high2lowgid(getgid()));
11025 #ifdef TARGET_NR_geteuid
11026 case TARGET_NR_geteuid
:
11027 return get_errno(high2lowuid(geteuid()));
11029 #ifdef TARGET_NR_getegid
11030 case TARGET_NR_getegid
:
11031 return get_errno(high2lowgid(getegid()));
11033 case TARGET_NR_setreuid
:
11034 return get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
11035 case TARGET_NR_setregid
:
11036 return get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
11037 case TARGET_NR_getgroups
:
11039 int gidsetsize
= arg1
;
11040 target_id
*target_grouplist
;
11044 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11045 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11046 if (gidsetsize
== 0)
11048 if (!is_error(ret
)) {
11049 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
11050 if (!target_grouplist
)
11051 return -TARGET_EFAULT
;
11052 for(i
= 0;i
< ret
; i
++)
11053 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
11054 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
11058 case TARGET_NR_setgroups
:
11060 int gidsetsize
= arg1
;
11061 target_id
*target_grouplist
;
11062 gid_t
*grouplist
= NULL
;
11065 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11066 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
11067 if (!target_grouplist
) {
11068 return -TARGET_EFAULT
;
11070 for (i
= 0; i
< gidsetsize
; i
++) {
11071 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
11073 unlock_user(target_grouplist
, arg2
, 0);
11075 return get_errno(setgroups(gidsetsize
, grouplist
));
11077 case TARGET_NR_fchown
:
11078 return get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
11079 #if defined(TARGET_NR_fchownat)
11080 case TARGET_NR_fchownat
:
11081 if (!(p
= lock_user_string(arg2
)))
11082 return -TARGET_EFAULT
;
11083 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
11084 low2highgid(arg4
), arg5
));
11085 unlock_user(p
, arg2
, 0);
11088 #ifdef TARGET_NR_setresuid
11089 case TARGET_NR_setresuid
:
11090 return get_errno(sys_setresuid(low2highuid(arg1
),
11092 low2highuid(arg3
)));
11094 #ifdef TARGET_NR_getresuid
11095 case TARGET_NR_getresuid
:
11097 uid_t ruid
, euid
, suid
;
11098 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11099 if (!is_error(ret
)) {
11100 if (put_user_id(high2lowuid(ruid
), arg1
)
11101 || put_user_id(high2lowuid(euid
), arg2
)
11102 || put_user_id(high2lowuid(suid
), arg3
))
11103 return -TARGET_EFAULT
;
11108 #ifdef TARGET_NR_getresgid
11109 case TARGET_NR_setresgid
:
11110 return get_errno(sys_setresgid(low2highgid(arg1
),
11112 low2highgid(arg3
)));
11114 #ifdef TARGET_NR_getresgid
11115 case TARGET_NR_getresgid
:
11117 gid_t rgid
, egid
, sgid
;
11118 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11119 if (!is_error(ret
)) {
11120 if (put_user_id(high2lowgid(rgid
), arg1
)
11121 || put_user_id(high2lowgid(egid
), arg2
)
11122 || put_user_id(high2lowgid(sgid
), arg3
))
11123 return -TARGET_EFAULT
;
11128 #ifdef TARGET_NR_chown
11129 case TARGET_NR_chown
:
11130 if (!(p
= lock_user_string(arg1
)))
11131 return -TARGET_EFAULT
;
11132 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
11133 unlock_user(p
, arg1
, 0);
11136 case TARGET_NR_setuid
:
11137 return get_errno(sys_setuid(low2highuid(arg1
)));
11138 case TARGET_NR_setgid
:
11139 return get_errno(sys_setgid(low2highgid(arg1
)));
11140 case TARGET_NR_setfsuid
:
11141 return get_errno(setfsuid(arg1
));
11142 case TARGET_NR_setfsgid
:
11143 return get_errno(setfsgid(arg1
));
11145 #ifdef TARGET_NR_lchown32
11146 case TARGET_NR_lchown32
:
11147 if (!(p
= lock_user_string(arg1
)))
11148 return -TARGET_EFAULT
;
11149 ret
= get_errno(lchown(p
, arg2
, arg3
));
11150 unlock_user(p
, arg1
, 0);
11153 #ifdef TARGET_NR_getuid32
11154 case TARGET_NR_getuid32
:
11155 return get_errno(getuid());
11158 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11159 /* Alpha specific */
11160 case TARGET_NR_getxuid
:
11164 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
11166 return get_errno(getuid());
11168 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11169 /* Alpha specific */
11170 case TARGET_NR_getxgid
:
11174 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
11176 return get_errno(getgid());
11178 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11179 /* Alpha specific */
11180 case TARGET_NR_osf_getsysinfo
:
11181 ret
= -TARGET_EOPNOTSUPP
;
11183 case TARGET_GSI_IEEE_FP_CONTROL
:
11185 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
11187 /* Copied from linux ieee_fpcr_to_swcr. */
11188 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
11189 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
11190 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
11191 | SWCR_TRAP_ENABLE_DZE
11192 | SWCR_TRAP_ENABLE_OVF
);
11193 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
11194 | SWCR_TRAP_ENABLE_INE
);
11195 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
11196 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
11198 if (put_user_u64 (swcr
, arg2
))
11199 return -TARGET_EFAULT
;
11204 /* case GSI_IEEE_STATE_AT_SIGNAL:
11205 -- Not implemented in linux kernel.
11207 -- Retrieves current unaligned access state; not much used.
11208 case GSI_PROC_TYPE:
11209 -- Retrieves implver information; surely not used.
11210 case GSI_GET_HWRPB:
11211 -- Grabs a copy of the HWRPB; surely not used.
11216 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11217 /* Alpha specific */
11218 case TARGET_NR_osf_setsysinfo
:
11219 ret
= -TARGET_EOPNOTSUPP
;
11221 case TARGET_SSI_IEEE_FP_CONTROL
:
11223 uint64_t swcr
, fpcr
, orig_fpcr
;
11225 if (get_user_u64 (swcr
, arg2
)) {
11226 return -TARGET_EFAULT
;
11228 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11229 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
11231 /* Copied from linux ieee_swcr_to_fpcr. */
11232 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
11233 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
11234 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
11235 | SWCR_TRAP_ENABLE_DZE
11236 | SWCR_TRAP_ENABLE_OVF
)) << 48;
11237 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
11238 | SWCR_TRAP_ENABLE_INE
)) << 57;
11239 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
11240 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
11242 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11247 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
11249 uint64_t exc
, fpcr
, orig_fpcr
;
11252 if (get_user_u64(exc
, arg2
)) {
11253 return -TARGET_EFAULT
;
11256 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11258 /* We only add to the exception status here. */
11259 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
11261 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11264 /* Old exceptions are not signaled. */
11265 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
11267 /* If any exceptions set by this call,
11268 and are unmasked, send a signal. */
11270 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
11271 si_code
= TARGET_FPE_FLTRES
;
11273 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
11274 si_code
= TARGET_FPE_FLTUND
;
11276 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
11277 si_code
= TARGET_FPE_FLTOVF
;
11279 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
11280 si_code
= TARGET_FPE_FLTDIV
;
11282 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
11283 si_code
= TARGET_FPE_FLTINV
;
11285 if (si_code
!= 0) {
11286 target_siginfo_t info
;
11287 info
.si_signo
= SIGFPE
;
11289 info
.si_code
= si_code
;
11290 info
._sifields
._sigfault
._addr
11291 = ((CPUArchState
*)cpu_env
)->pc
;
11292 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
11293 QEMU_SI_FAULT
, &info
);
11298 /* case SSI_NVPAIRS:
11299 -- Used with SSIN_UACPROC to enable unaligned accesses.
11300 case SSI_IEEE_STATE_AT_SIGNAL:
11301 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11302 -- Not implemented in linux kernel
11307 #ifdef TARGET_NR_osf_sigprocmask
11308 /* Alpha specific. */
11309 case TARGET_NR_osf_sigprocmask
:
11313 sigset_t set
, oldset
;
11316 case TARGET_SIG_BLOCK
:
11319 case TARGET_SIG_UNBLOCK
:
11322 case TARGET_SIG_SETMASK
:
11326 return -TARGET_EINVAL
;
11329 target_to_host_old_sigset(&set
, &mask
);
11330 ret
= do_sigprocmask(how
, &set
, &oldset
);
11332 host_to_target_old_sigset(&mask
, &oldset
);
11339 #ifdef TARGET_NR_getgid32
11340 case TARGET_NR_getgid32
:
11341 return get_errno(getgid());
11343 #ifdef TARGET_NR_geteuid32
11344 case TARGET_NR_geteuid32
:
11345 return get_errno(geteuid());
11347 #ifdef TARGET_NR_getegid32
11348 case TARGET_NR_getegid32
:
11349 return get_errno(getegid());
11351 #ifdef TARGET_NR_setreuid32
11352 case TARGET_NR_setreuid32
:
11353 return get_errno(setreuid(arg1
, arg2
));
11355 #ifdef TARGET_NR_setregid32
11356 case TARGET_NR_setregid32
:
11357 return get_errno(setregid(arg1
, arg2
));
11359 #ifdef TARGET_NR_getgroups32
11360 case TARGET_NR_getgroups32
:
11362 int gidsetsize
= arg1
;
11363 uint32_t *target_grouplist
;
11367 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11368 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11369 if (gidsetsize
== 0)
11371 if (!is_error(ret
)) {
11372 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
11373 if (!target_grouplist
) {
11374 return -TARGET_EFAULT
;
11376 for(i
= 0;i
< ret
; i
++)
11377 target_grouplist
[i
] = tswap32(grouplist
[i
]);
11378 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
11383 #ifdef TARGET_NR_setgroups32
11384 case TARGET_NR_setgroups32
:
11386 int gidsetsize
= arg1
;
11387 uint32_t *target_grouplist
;
11391 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11392 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
11393 if (!target_grouplist
) {
11394 return -TARGET_EFAULT
;
11396 for(i
= 0;i
< gidsetsize
; i
++)
11397 grouplist
[i
] = tswap32(target_grouplist
[i
]);
11398 unlock_user(target_grouplist
, arg2
, 0);
11399 return get_errno(setgroups(gidsetsize
, grouplist
));
11402 #ifdef TARGET_NR_fchown32
11403 case TARGET_NR_fchown32
:
11404 return get_errno(fchown(arg1
, arg2
, arg3
));
11406 #ifdef TARGET_NR_setresuid32
11407 case TARGET_NR_setresuid32
:
11408 return get_errno(sys_setresuid(arg1
, arg2
, arg3
));
11410 #ifdef TARGET_NR_getresuid32
11411 case TARGET_NR_getresuid32
:
11413 uid_t ruid
, euid
, suid
;
11414 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11415 if (!is_error(ret
)) {
11416 if (put_user_u32(ruid
, arg1
)
11417 || put_user_u32(euid
, arg2
)
11418 || put_user_u32(suid
, arg3
))
11419 return -TARGET_EFAULT
;
11424 #ifdef TARGET_NR_setresgid32
11425 case TARGET_NR_setresgid32
:
11426 return get_errno(sys_setresgid(arg1
, arg2
, arg3
));
11428 #ifdef TARGET_NR_getresgid32
11429 case TARGET_NR_getresgid32
:
11431 gid_t rgid
, egid
, sgid
;
11432 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11433 if (!is_error(ret
)) {
11434 if (put_user_u32(rgid
, arg1
)
11435 || put_user_u32(egid
, arg2
)
11436 || put_user_u32(sgid
, arg3
))
11437 return -TARGET_EFAULT
;
11442 #ifdef TARGET_NR_chown32
11443 case TARGET_NR_chown32
:
11444 if (!(p
= lock_user_string(arg1
)))
11445 return -TARGET_EFAULT
;
11446 ret
= get_errno(chown(p
, arg2
, arg3
));
11447 unlock_user(p
, arg1
, 0);
11450 #ifdef TARGET_NR_setuid32
11451 case TARGET_NR_setuid32
:
11452 return get_errno(sys_setuid(arg1
));
11454 #ifdef TARGET_NR_setgid32
11455 case TARGET_NR_setgid32
:
11456 return get_errno(sys_setgid(arg1
));
11458 #ifdef TARGET_NR_setfsuid32
11459 case TARGET_NR_setfsuid32
:
11460 return get_errno(setfsuid(arg1
));
11462 #ifdef TARGET_NR_setfsgid32
11463 case TARGET_NR_setfsgid32
:
11464 return get_errno(setfsgid(arg1
));
11466 #ifdef TARGET_NR_mincore
11467 case TARGET_NR_mincore
:
11469 void *a
= lock_user(VERIFY_READ
, arg1
, arg2
, 0);
11471 return -TARGET_ENOMEM
;
11473 p
= lock_user_string(arg3
);
11475 ret
= -TARGET_EFAULT
;
11477 ret
= get_errno(mincore(a
, arg2
, p
));
11478 unlock_user(p
, arg3
, ret
);
11480 unlock_user(a
, arg1
, 0);
11484 #ifdef TARGET_NR_arm_fadvise64_64
11485 case TARGET_NR_arm_fadvise64_64
:
11486 /* arm_fadvise64_64 looks like fadvise64_64 but
11487 * with different argument order: fd, advice, offset, len
11488 * rather than the usual fd, offset, len, advice.
11489 * Note that offset and len are both 64-bit so appear as
11490 * pairs of 32-bit registers.
11492 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
11493 target_offset64(arg5
, arg6
), arg2
);
11494 return -host_to_target_errno(ret
);
11497 #if TARGET_ABI_BITS == 32
11499 #ifdef TARGET_NR_fadvise64_64
11500 case TARGET_NR_fadvise64_64
:
11501 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11502 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11510 /* 6 args: fd, offset (high, low), len (high, low), advice */
11511 if (regpairs_aligned(cpu_env
, num
)) {
11512 /* offset is in (3,4), len in (5,6) and advice in 7 */
11520 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
),
11521 target_offset64(arg4
, arg5
), arg6
);
11522 return -host_to_target_errno(ret
);
11525 #ifdef TARGET_NR_fadvise64
11526 case TARGET_NR_fadvise64
:
11527 /* 5 args: fd, offset (high, low), len, advice */
11528 if (regpairs_aligned(cpu_env
, num
)) {
11529 /* offset is in (3,4), len in 5 and advice in 6 */
11535 ret
= posix_fadvise(arg1
, target_offset64(arg2
, arg3
), arg4
, arg5
);
11536 return -host_to_target_errno(ret
);
11539 #else /* not a 32-bit ABI */
11540 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11541 #ifdef TARGET_NR_fadvise64_64
11542 case TARGET_NR_fadvise64_64
:
11544 #ifdef TARGET_NR_fadvise64
11545 case TARGET_NR_fadvise64
:
11547 #ifdef TARGET_S390X
11549 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
11550 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
11551 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
11552 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
11556 return -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
11558 #endif /* end of 64-bit ABI fadvise handling */
11560 #ifdef TARGET_NR_madvise
11561 case TARGET_NR_madvise
:
11562 /* A straight passthrough may not be safe because qemu sometimes
11563 turns private file-backed mappings into anonymous mappings.
11564 This will break MADV_DONTNEED.
11565 This is a hint, so ignoring and returning success is ok. */
11568 #if TARGET_ABI_BITS == 32
11569 case TARGET_NR_fcntl64
:
11573 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
11574 to_flock64_fn
*copyto
= copy_to_user_flock64
;
11577 if (!((CPUARMState
*)cpu_env
)->eabi
) {
11578 copyfrom
= copy_from_user_oabi_flock64
;
11579 copyto
= copy_to_user_oabi_flock64
;
11583 cmd
= target_to_host_fcntl_cmd(arg2
);
11584 if (cmd
== -TARGET_EINVAL
) {
11589 case TARGET_F_GETLK64
:
11590 ret
= copyfrom(&fl
, arg3
);
11594 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11596 ret
= copyto(arg3
, &fl
);
11600 case TARGET_F_SETLK64
:
11601 case TARGET_F_SETLKW64
:
11602 ret
= copyfrom(&fl
, arg3
);
11606 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11609 ret
= do_fcntl(arg1
, arg2
, arg3
);
11615 #ifdef TARGET_NR_cacheflush
11616 case TARGET_NR_cacheflush
:
11617 /* self-modifying code is handled automatically, so nothing needed */
11620 #ifdef TARGET_NR_getpagesize
11621 case TARGET_NR_getpagesize
:
11622 return TARGET_PAGE_SIZE
;
11624 case TARGET_NR_gettid
:
11625 return get_errno(gettid());
11626 #ifdef TARGET_NR_readahead
11627 case TARGET_NR_readahead
:
11628 #if TARGET_ABI_BITS == 32
11629 if (regpairs_aligned(cpu_env
, num
)) {
11634 ret
= get_errno(readahead(arg1
, target_offset64(arg2
, arg3
) , arg4
));
11636 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
11641 #ifdef TARGET_NR_setxattr
11642 case TARGET_NR_listxattr
:
11643 case TARGET_NR_llistxattr
:
11647 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11649 return -TARGET_EFAULT
;
11652 p
= lock_user_string(arg1
);
11654 if (num
== TARGET_NR_listxattr
) {
11655 ret
= get_errno(listxattr(p
, b
, arg3
));
11657 ret
= get_errno(llistxattr(p
, b
, arg3
));
11660 ret
= -TARGET_EFAULT
;
11662 unlock_user(p
, arg1
, 0);
11663 unlock_user(b
, arg2
, arg3
);
11666 case TARGET_NR_flistxattr
:
11670 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11672 return -TARGET_EFAULT
;
11675 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
11676 unlock_user(b
, arg2
, arg3
);
11679 case TARGET_NR_setxattr
:
11680 case TARGET_NR_lsetxattr
:
11682 void *p
, *n
, *v
= 0;
11684 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11686 return -TARGET_EFAULT
;
11689 p
= lock_user_string(arg1
);
11690 n
= lock_user_string(arg2
);
11692 if (num
== TARGET_NR_setxattr
) {
11693 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
11695 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
11698 ret
= -TARGET_EFAULT
;
11700 unlock_user(p
, arg1
, 0);
11701 unlock_user(n
, arg2
, 0);
11702 unlock_user(v
, arg3
, 0);
11705 case TARGET_NR_fsetxattr
:
11709 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11711 return -TARGET_EFAULT
;
11714 n
= lock_user_string(arg2
);
11716 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
11718 ret
= -TARGET_EFAULT
;
11720 unlock_user(n
, arg2
, 0);
11721 unlock_user(v
, arg3
, 0);
11724 case TARGET_NR_getxattr
:
11725 case TARGET_NR_lgetxattr
:
11727 void *p
, *n
, *v
= 0;
11729 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11731 return -TARGET_EFAULT
;
11734 p
= lock_user_string(arg1
);
11735 n
= lock_user_string(arg2
);
11737 if (num
== TARGET_NR_getxattr
) {
11738 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
11740 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
11743 ret
= -TARGET_EFAULT
;
11745 unlock_user(p
, arg1
, 0);
11746 unlock_user(n
, arg2
, 0);
11747 unlock_user(v
, arg3
, arg4
);
11750 case TARGET_NR_fgetxattr
:
11754 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11756 return -TARGET_EFAULT
;
11759 n
= lock_user_string(arg2
);
11761 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
11763 ret
= -TARGET_EFAULT
;
11765 unlock_user(n
, arg2
, 0);
11766 unlock_user(v
, arg3
, arg4
);
11769 case TARGET_NR_removexattr
:
11770 case TARGET_NR_lremovexattr
:
11773 p
= lock_user_string(arg1
);
11774 n
= lock_user_string(arg2
);
11776 if (num
== TARGET_NR_removexattr
) {
11777 ret
= get_errno(removexattr(p
, n
));
11779 ret
= get_errno(lremovexattr(p
, n
));
11782 ret
= -TARGET_EFAULT
;
11784 unlock_user(p
, arg1
, 0);
11785 unlock_user(n
, arg2
, 0);
11788 case TARGET_NR_fremovexattr
:
11791 n
= lock_user_string(arg2
);
11793 ret
= get_errno(fremovexattr(arg1
, n
));
11795 ret
= -TARGET_EFAULT
;
11797 unlock_user(n
, arg2
, 0);
11801 #endif /* CONFIG_ATTR */
11802 #ifdef TARGET_NR_set_thread_area
11803 case TARGET_NR_set_thread_area
:
11804 #if defined(TARGET_MIPS)
11805 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
11807 #elif defined(TARGET_CRIS)
11809 ret
= -TARGET_EINVAL
;
11811 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
11815 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11816 return do_set_thread_area(cpu_env
, arg1
);
11817 #elif defined(TARGET_M68K)
11819 TaskState
*ts
= cpu
->opaque
;
11820 ts
->tp_value
= arg1
;
11824 return -TARGET_ENOSYS
;
11827 #ifdef TARGET_NR_get_thread_area
11828 case TARGET_NR_get_thread_area
:
11829 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11830 return do_get_thread_area(cpu_env
, arg1
);
11831 #elif defined(TARGET_M68K)
11833 TaskState
*ts
= cpu
->opaque
;
11834 return ts
->tp_value
;
11837 return -TARGET_ENOSYS
;
11840 #ifdef TARGET_NR_getdomainname
11841 case TARGET_NR_getdomainname
:
11842 return -TARGET_ENOSYS
;
11845 #ifdef TARGET_NR_clock_settime
11846 case TARGET_NR_clock_settime
:
11848 struct timespec ts
;
11850 ret
= target_to_host_timespec(&ts
, arg2
);
11851 if (!is_error(ret
)) {
11852 ret
= get_errno(clock_settime(arg1
, &ts
));
11857 #ifdef TARGET_NR_clock_gettime
11858 case TARGET_NR_clock_gettime
:
11860 struct timespec ts
;
11861 ret
= get_errno(clock_gettime(arg1
, &ts
));
11862 if (!is_error(ret
)) {
11863 ret
= host_to_target_timespec(arg2
, &ts
);
11868 #ifdef TARGET_NR_clock_getres
11869 case TARGET_NR_clock_getres
:
11871 struct timespec ts
;
11872 ret
= get_errno(clock_getres(arg1
, &ts
));
11873 if (!is_error(ret
)) {
11874 host_to_target_timespec(arg2
, &ts
);
11879 #ifdef TARGET_NR_clock_nanosleep
11880 case TARGET_NR_clock_nanosleep
:
11882 struct timespec ts
;
11883 target_to_host_timespec(&ts
, arg3
);
11884 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
11885 &ts
, arg4
? &ts
: NULL
));
11887 host_to_target_timespec(arg4
, &ts
);
11889 #if defined(TARGET_PPC)
11890 /* clock_nanosleep is odd in that it returns positive errno values.
11891 * On PPC, CR0 bit 3 should be set in such a situation. */
11892 if (ret
&& ret
!= -TARGET_ERESTARTSYS
) {
11893 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
11900 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11901 case TARGET_NR_set_tid_address
:
11902 return get_errno(set_tid_address((int *)g2h(arg1
)));
11905 case TARGET_NR_tkill
:
11906 return get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
11908 case TARGET_NR_tgkill
:
11909 return get_errno(safe_tgkill((int)arg1
, (int)arg2
,
11910 target_to_host_signal(arg3
)));
11912 #ifdef TARGET_NR_set_robust_list
11913 case TARGET_NR_set_robust_list
:
11914 case TARGET_NR_get_robust_list
:
11915 /* The ABI for supporting robust futexes has userspace pass
11916 * the kernel a pointer to a linked list which is updated by
11917 * userspace after the syscall; the list is walked by the kernel
11918 * when the thread exits. Since the linked list in QEMU guest
11919 * memory isn't a valid linked list for the host and we have
11920 * no way to reliably intercept the thread-death event, we can't
11921 * support these. Silently return ENOSYS so that guest userspace
11922 * falls back to a non-robust futex implementation (which should
11923 * be OK except in the corner case of the guest crashing while
11924 * holding a mutex that is shared with another process via
11927 return -TARGET_ENOSYS
;
11930 #if defined(TARGET_NR_utimensat)
11931 case TARGET_NR_utimensat
:
11933 struct timespec
*tsp
, ts
[2];
11937 target_to_host_timespec(ts
, arg3
);
11938 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
11942 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
11944 if (!(p
= lock_user_string(arg2
))) {
11945 return -TARGET_EFAULT
;
11947 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
11948 unlock_user(p
, arg2
, 0);
11953 case TARGET_NR_futex
:
11954 return do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11955 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11956 case TARGET_NR_inotify_init
:
11957 ret
= get_errno(sys_inotify_init());
11959 fd_trans_register(ret
, &target_inotify_trans
);
11963 #ifdef CONFIG_INOTIFY1
11964 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11965 case TARGET_NR_inotify_init1
:
11966 ret
= get_errno(sys_inotify_init1(target_to_host_bitmask(arg1
,
11967 fcntl_flags_tbl
)));
11969 fd_trans_register(ret
, &target_inotify_trans
);
11974 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11975 case TARGET_NR_inotify_add_watch
:
11976 p
= lock_user_string(arg2
);
11977 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
11978 unlock_user(p
, arg2
, 0);
11981 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11982 case TARGET_NR_inotify_rm_watch
:
11983 return get_errno(sys_inotify_rm_watch(arg1
, arg2
));
11986 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11987 case TARGET_NR_mq_open
:
11989 struct mq_attr posix_mq_attr
;
11990 struct mq_attr
*pposix_mq_attr
;
11993 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
11994 pposix_mq_attr
= NULL
;
11996 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
11997 return -TARGET_EFAULT
;
11999 pposix_mq_attr
= &posix_mq_attr
;
12001 p
= lock_user_string(arg1
- 1);
12003 return -TARGET_EFAULT
;
12005 ret
= get_errno(mq_open(p
, host_flags
, arg3
, pposix_mq_attr
));
12006 unlock_user (p
, arg1
, 0);
12010 case TARGET_NR_mq_unlink
:
12011 p
= lock_user_string(arg1
- 1);
12013 return -TARGET_EFAULT
;
12015 ret
= get_errno(mq_unlink(p
));
12016 unlock_user (p
, arg1
, 0);
12019 case TARGET_NR_mq_timedsend
:
12021 struct timespec ts
;
12023 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
12025 target_to_host_timespec(&ts
, arg5
);
12026 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
12027 host_to_target_timespec(arg5
, &ts
);
12029 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
12031 unlock_user (p
, arg2
, arg3
);
12035 case TARGET_NR_mq_timedreceive
:
12037 struct timespec ts
;
12040 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
12042 target_to_host_timespec(&ts
, arg5
);
12043 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12045 host_to_target_timespec(arg5
, &ts
);
12047 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12050 unlock_user (p
, arg2
, arg3
);
12052 put_user_u32(prio
, arg4
);
12056 /* Not implemented for now... */
12057 /* case TARGET_NR_mq_notify: */
12060 case TARGET_NR_mq_getsetattr
:
12062 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
12065 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
12066 ret
= get_errno(mq_setattr(arg1
, &posix_mq_attr_in
,
12067 &posix_mq_attr_out
));
12068 } else if (arg3
!= 0) {
12069 ret
= get_errno(mq_getattr(arg1
, &posix_mq_attr_out
));
12071 if (ret
== 0 && arg3
!= 0) {
12072 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
12078 #ifdef CONFIG_SPLICE
12079 #ifdef TARGET_NR_tee
12080 case TARGET_NR_tee
:
12082 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
12086 #ifdef TARGET_NR_splice
12087 case TARGET_NR_splice
:
12089 loff_t loff_in
, loff_out
;
12090 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
12092 if (get_user_u64(loff_in
, arg2
)) {
12093 return -TARGET_EFAULT
;
12095 ploff_in
= &loff_in
;
12098 if (get_user_u64(loff_out
, arg4
)) {
12099 return -TARGET_EFAULT
;
12101 ploff_out
= &loff_out
;
12103 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
12105 if (put_user_u64(loff_in
, arg2
)) {
12106 return -TARGET_EFAULT
;
12110 if (put_user_u64(loff_out
, arg4
)) {
12111 return -TARGET_EFAULT
;
12117 #ifdef TARGET_NR_vmsplice
12118 case TARGET_NR_vmsplice
:
12120 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
12122 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
12123 unlock_iovec(vec
, arg2
, arg3
, 0);
12125 ret
= -host_to_target_errno(errno
);
12130 #endif /* CONFIG_SPLICE */
12131 #ifdef CONFIG_EVENTFD
12132 #if defined(TARGET_NR_eventfd)
12133 case TARGET_NR_eventfd
:
12134 ret
= get_errno(eventfd(arg1
, 0));
12136 fd_trans_register(ret
, &target_eventfd_trans
);
12140 #if defined(TARGET_NR_eventfd2)
12141 case TARGET_NR_eventfd2
:
12143 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
12144 if (arg2
& TARGET_O_NONBLOCK
) {
12145 host_flags
|= O_NONBLOCK
;
12147 if (arg2
& TARGET_O_CLOEXEC
) {
12148 host_flags
|= O_CLOEXEC
;
12150 ret
= get_errno(eventfd(arg1
, host_flags
));
12152 fd_trans_register(ret
, &target_eventfd_trans
);
12157 #endif /* CONFIG_EVENTFD */
12158 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12159 case TARGET_NR_fallocate
:
12160 #if TARGET_ABI_BITS == 32
12161 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
12162 target_offset64(arg5
, arg6
)));
12164 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
12168 #if defined(CONFIG_SYNC_FILE_RANGE)
12169 #if defined(TARGET_NR_sync_file_range)
12170 case TARGET_NR_sync_file_range
:
12171 #if TARGET_ABI_BITS == 32
12172 #if defined(TARGET_MIPS)
12173 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12174 target_offset64(arg5
, arg6
), arg7
));
12176 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
12177 target_offset64(arg4
, arg5
), arg6
));
12178 #endif /* !TARGET_MIPS */
12180 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
12184 #if defined(TARGET_NR_sync_file_range2)
12185 case TARGET_NR_sync_file_range2
:
12186 /* This is like sync_file_range but the arguments are reordered */
12187 #if TARGET_ABI_BITS == 32
12188 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12189 target_offset64(arg5
, arg6
), arg2
));
12191 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
12196 #if defined(TARGET_NR_signalfd4)
12197 case TARGET_NR_signalfd4
:
12198 return do_signalfd4(arg1
, arg2
, arg4
);
12200 #if defined(TARGET_NR_signalfd)
12201 case TARGET_NR_signalfd
:
12202 return do_signalfd4(arg1
, arg2
, 0);
12204 #if defined(CONFIG_EPOLL)
12205 #if defined(TARGET_NR_epoll_create)
12206 case TARGET_NR_epoll_create
:
12207 return get_errno(epoll_create(arg1
));
12209 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12210 case TARGET_NR_epoll_create1
:
12211 return get_errno(epoll_create1(arg1
));
12213 #if defined(TARGET_NR_epoll_ctl)
12214 case TARGET_NR_epoll_ctl
:
12216 struct epoll_event ep
;
12217 struct epoll_event
*epp
= 0;
12219 struct target_epoll_event
*target_ep
;
12220 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
12221 return -TARGET_EFAULT
;
12223 ep
.events
= tswap32(target_ep
->events
);
12224 /* The epoll_data_t union is just opaque data to the kernel,
12225 * so we transfer all 64 bits across and need not worry what
12226 * actual data type it is.
12228 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
12229 unlock_user_struct(target_ep
, arg4
, 0);
12232 return get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
12236 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12237 #if defined(TARGET_NR_epoll_wait)
12238 case TARGET_NR_epoll_wait
:
12240 #if defined(TARGET_NR_epoll_pwait)
12241 case TARGET_NR_epoll_pwait
:
12244 struct target_epoll_event
*target_ep
;
12245 struct epoll_event
*ep
;
12247 int maxevents
= arg3
;
12248 int timeout
= arg4
;
12250 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
12251 return -TARGET_EINVAL
;
12254 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
12255 maxevents
* sizeof(struct target_epoll_event
), 1);
12257 return -TARGET_EFAULT
;
12260 ep
= g_try_new(struct epoll_event
, maxevents
);
12262 unlock_user(target_ep
, arg2
, 0);
12263 return -TARGET_ENOMEM
;
12267 #if defined(TARGET_NR_epoll_pwait)
12268 case TARGET_NR_epoll_pwait
:
12270 target_sigset_t
*target_set
;
12271 sigset_t _set
, *set
= &_set
;
12274 if (arg6
!= sizeof(target_sigset_t
)) {
12275 ret
= -TARGET_EINVAL
;
12279 target_set
= lock_user(VERIFY_READ
, arg5
,
12280 sizeof(target_sigset_t
), 1);
12282 ret
= -TARGET_EFAULT
;
12285 target_to_host_sigset(set
, target_set
);
12286 unlock_user(target_set
, arg5
, 0);
12291 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12292 set
, SIGSET_T_SIZE
));
12296 #if defined(TARGET_NR_epoll_wait)
12297 case TARGET_NR_epoll_wait
:
12298 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12303 ret
= -TARGET_ENOSYS
;
12305 if (!is_error(ret
)) {
12307 for (i
= 0; i
< ret
; i
++) {
12308 target_ep
[i
].events
= tswap32(ep
[i
].events
);
12309 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
12311 unlock_user(target_ep
, arg2
,
12312 ret
* sizeof(struct target_epoll_event
));
12314 unlock_user(target_ep
, arg2
, 0);
12321 #ifdef TARGET_NR_prlimit64
12322 case TARGET_NR_prlimit64
:
12324 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12325 struct target_rlimit64
*target_rnew
, *target_rold
;
12326 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
12327 int resource
= target_to_host_resource(arg2
);
12329 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
12330 return -TARGET_EFAULT
;
12332 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
12333 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
12334 unlock_user_struct(target_rnew
, arg3
, 0);
12338 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
12339 if (!is_error(ret
) && arg4
) {
12340 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
12341 return -TARGET_EFAULT
;
12343 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
12344 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
12345 unlock_user_struct(target_rold
, arg4
, 1);
12350 #ifdef TARGET_NR_gethostname
12351 case TARGET_NR_gethostname
:
12353 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
12355 ret
= get_errno(gethostname(name
, arg2
));
12356 unlock_user(name
, arg1
, arg2
);
12358 ret
= -TARGET_EFAULT
;
12363 #ifdef TARGET_NR_atomic_cmpxchg_32
12364 case TARGET_NR_atomic_cmpxchg_32
:
12366 /* should use start_exclusive from main.c */
12367 abi_ulong mem_value
;
12368 if (get_user_u32(mem_value
, arg6
)) {
12369 target_siginfo_t info
;
12370 info
.si_signo
= SIGSEGV
;
12372 info
.si_code
= TARGET_SEGV_MAPERR
;
12373 info
._sifields
._sigfault
._addr
= arg6
;
12374 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
12375 QEMU_SI_FAULT
, &info
);
12379 if (mem_value
== arg2
)
12380 put_user_u32(arg1
, arg6
);
12384 #ifdef TARGET_NR_atomic_barrier
12385 case TARGET_NR_atomic_barrier
:
12386 /* Like the kernel implementation and the
12387 qemu arm barrier, no-op this? */
12391 #ifdef TARGET_NR_timer_create
12392 case TARGET_NR_timer_create
:
12394 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12396 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
12399 int timer_index
= next_free_host_timer();
12401 if (timer_index
< 0) {
12402 ret
= -TARGET_EAGAIN
;
12404 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
12407 phost_sevp
= &host_sevp
;
12408 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
12414 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
12418 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
12419 return -TARGET_EFAULT
;
12427 #ifdef TARGET_NR_timer_settime
12428 case TARGET_NR_timer_settime
:
12430 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12431 * struct itimerspec * old_value */
12432 target_timer_t timerid
= get_timer_id(arg1
);
12436 } else if (arg3
== 0) {
12437 ret
= -TARGET_EINVAL
;
12439 timer_t htimer
= g_posix_timers
[timerid
];
12440 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12442 if (target_to_host_itimerspec(&hspec_new
, arg3
)) {
12443 return -TARGET_EFAULT
;
12446 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12447 if (arg4
&& host_to_target_itimerspec(arg4
, &hspec_old
)) {
12448 return -TARGET_EFAULT
;
12455 #ifdef TARGET_NR_timer_gettime
12456 case TARGET_NR_timer_gettime
:
12458 /* args: timer_t timerid, struct itimerspec *curr_value */
12459 target_timer_t timerid
= get_timer_id(arg1
);
12463 } else if (!arg2
) {
12464 ret
= -TARGET_EFAULT
;
12466 timer_t htimer
= g_posix_timers
[timerid
];
12467 struct itimerspec hspec
;
12468 ret
= get_errno(timer_gettime(htimer
, &hspec
));
12470 if (host_to_target_itimerspec(arg2
, &hspec
)) {
12471 ret
= -TARGET_EFAULT
;
12478 #ifdef TARGET_NR_timer_getoverrun
12479 case TARGET_NR_timer_getoverrun
:
12481 /* args: timer_t timerid */
12482 target_timer_t timerid
= get_timer_id(arg1
);
12487 timer_t htimer
= g_posix_timers
[timerid
];
12488 ret
= get_errno(timer_getoverrun(htimer
));
12490 fd_trans_unregister(ret
);
12495 #ifdef TARGET_NR_timer_delete
12496 case TARGET_NR_timer_delete
:
12498 /* args: timer_t timerid */
12499 target_timer_t timerid
= get_timer_id(arg1
);
12504 timer_t htimer
= g_posix_timers
[timerid
];
12505 ret
= get_errno(timer_delete(htimer
));
12506 g_posix_timers
[timerid
] = 0;
12512 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12513 case TARGET_NR_timerfd_create
:
12514 return get_errno(timerfd_create(arg1
,
12515 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
12518 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12519 case TARGET_NR_timerfd_gettime
:
12521 struct itimerspec its_curr
;
12523 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
12525 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
12526 return -TARGET_EFAULT
;
12532 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12533 case TARGET_NR_timerfd_settime
:
12535 struct itimerspec its_new
, its_old
, *p_new
;
12538 if (target_to_host_itimerspec(&its_new
, arg3
)) {
12539 return -TARGET_EFAULT
;
12546 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
12548 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
12549 return -TARGET_EFAULT
;
12555 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12556 case TARGET_NR_ioprio_get
:
12557 return get_errno(ioprio_get(arg1
, arg2
));
12560 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12561 case TARGET_NR_ioprio_set
:
12562 return get_errno(ioprio_set(arg1
, arg2
, arg3
));
12565 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12566 case TARGET_NR_setns
:
12567 return get_errno(setns(arg1
, arg2
));
12569 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12570 case TARGET_NR_unshare
:
12571 return get_errno(unshare(arg1
));
12573 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12574 case TARGET_NR_kcmp
:
12575 return get_errno(kcmp(arg1
, arg2
, arg3
, arg4
, arg5
));
12577 #ifdef TARGET_NR_swapcontext
12578 case TARGET_NR_swapcontext
:
12579 /* PowerPC specific. */
12580 return do_swapcontext(cpu_env
, arg1
, arg2
, arg3
);
12584 qemu_log_mask(LOG_UNIMP
, "Unsupported syscall: %d\n", num
);
12585 return -TARGET_ENOSYS
;
12590 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
12591 abi_long arg2
, abi_long arg3
, abi_long arg4
,
12592 abi_long arg5
, abi_long arg6
, abi_long arg7
,
12595 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
12598 #ifdef DEBUG_ERESTARTSYS
12599 /* Debug-only code for exercising the syscall-restart code paths
12600 * in the per-architecture cpu main loops: restart every syscall
12601 * the guest makes once before letting it through.
12607 return -TARGET_ERESTARTSYS
;
12612 trace_guest_user_syscall(cpu
, num
, arg1
, arg2
, arg3
, arg4
,
12613 arg5
, arg6
, arg7
, arg8
);
12615 if (unlikely(do_strace
)) {
12616 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12617 ret
= do_syscall1(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
,
12618 arg5
, arg6
, arg7
, arg8
);
12619 print_syscall_ret(num
, ret
);
12621 ret
= do_syscall1(cpu_env
, num
, arg1
, arg2
, arg3
, arg4
,
12622 arg5
, arg6
, arg7
, arg8
);
12625 trace_guest_user_syscall_ret(cpu
, num
, ret
);