4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
36 #include <linux/capability.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
43 #include <sys/times.h>
46 #include <sys/statfs.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #include "qemu-common.h"
60 #include <sys/timerfd.h>
66 #include <sys/eventfd.h>
69 #include <sys/epoll.h>
72 #include "qemu/xattr.h"
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
91 #include <linux/mtio.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
107 #include <linux/if_bridge.h>
109 #include <linux/audit.h>
110 #include "linux_loop.h"
116 #define CLONE_IO 0x80000000 /* Clone io context */
/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */
/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
130 #define CLONE_THREAD_FLAGS \
131 (CLONE_VM | CLONE_FS | CLONE_FILES | \
132 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
138 #define CLONE_IGNORED_FLAGS \
139 (CLONE_DETACHED | CLONE_IO)
141 /* Flags for fork which we can implement within QEMU itself */
142 #define CLONE_OPTIONAL_FORK_FLAGS \
143 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
144 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
146 /* Flags for thread creation which we can implement within QEMU itself */
147 #define CLONE_OPTIONAL_THREAD_FLAGS \
148 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
149 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
151 #define CLONE_INVALID_FORK_FLAGS \
152 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
154 #define CLONE_INVALID_THREAD_FLAGS \
155 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
156 CLONE_IGNORED_FLAGS))
/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */
/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
170 //#define DEBUG_ERESTARTSYS
172 //#include <linux/msdos_fs.h>
173 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
174 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
184 #define _syscall0(type,name) \
185 static type name (void) \
187 return syscall(__NR_##name); \
190 #define _syscall1(type,name,type1,arg1) \
191 static type name (type1 arg1) \
193 return syscall(__NR_##name, arg1); \
196 #define _syscall2(type,name,type1,arg1,type2,arg2) \
197 static type name (type1 arg1,type2 arg2) \
199 return syscall(__NR_##name, arg1, arg2); \
202 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
203 static type name (type1 arg1,type2 arg2,type3 arg3) \
205 return syscall(__NR_##name, arg1, arg2, arg3); \
208 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
209 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
211 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
214 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
216 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
218 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
222 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
223 type5,arg5,type6,arg6) \
224 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
227 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
231 #define __NR_sys_uname __NR_uname
232 #define __NR_sys_getcwd1 __NR_getcwd
233 #define __NR_sys_getdents __NR_getdents
234 #define __NR_sys_getdents64 __NR_getdents64
235 #define __NR_sys_getpriority __NR_getpriority
236 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
237 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
238 #define __NR_sys_syslog __NR_syslog
239 #define __NR_sys_futex __NR_futex
240 #define __NR_sys_inotify_init __NR_inotify_init
241 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
242 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
244 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
245 #define __NR__llseek __NR_lseek
248 /* Newer kernel ports have llseek() instead of _llseek() */
249 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
250 #define TARGET_NR__llseek TARGET_NR_llseek
254 _syscall0(int, gettid
)
/* This is a replacement for the host gettid() and must return a host
   errno. */
258 static int gettid(void) {
/* For the 64-bit guest on 32-bit host case we must emulate
 * getdents using getdents64, because otherwise the host
 * might hand us back more dirent records than we can fit
 * into the guest buffer after structure format conversion.
 * Otherwise we emulate getdents with getdents if the host has it.
 */
269 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
270 #define EMULATE_GETDENTS_WITH_GETDENTS
273 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
274 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
276 #if (defined(TARGET_NR_getdents) && \
277 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
278 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
279 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
281 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
282 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
283 loff_t
*, res
, uint
, wh
);
285 _syscall3(int, sys_rt_sigqueueinfo
, pid_t
, pid
, int, sig
, siginfo_t
*, uinfo
)
286 _syscall4(int, sys_rt_tgsigqueueinfo
, pid_t
, pid
, pid_t
, tid
, int, sig
,
288 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
289 #ifdef __NR_exit_group
290 _syscall1(int,exit_group
,int,error_code
)
292 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
293 _syscall1(int,set_tid_address
,int *,tidptr
)
295 #if defined(TARGET_NR_futex) && defined(__NR_futex)
296 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
297 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
299 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
300 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
301 unsigned long *, user_mask_ptr
);
302 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
303 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
304 unsigned long *, user_mask_ptr
);
305 #define __NR_sys_getcpu __NR_getcpu
306 _syscall3(int, sys_getcpu
, unsigned *, cpu
, unsigned *, node
, void *, tcache
);
307 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
309 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
310 struct __user_cap_data_struct
*, data
);
311 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
312 struct __user_cap_data_struct
*, data
);
313 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
314 _syscall2(int, ioprio_get
, int, which
, int, who
)
316 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
317 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
319 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
320 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
323 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
324 _syscall5(int, kcmp
, pid_t
, pid1
, pid_t
, pid2
, int, type
,
325 unsigned long, idx1
, unsigned long, idx2
)
328 static bitmask_transtbl fcntl_flags_tbl
[] = {
329 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
330 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
331 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
332 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
333 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
334 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
335 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
336 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
337 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
338 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
339 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
340 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
341 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
342 #if defined(O_DIRECT)
343 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
345 #if defined(O_NOATIME)
346 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
348 #if defined(O_CLOEXEC)
349 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
352 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
354 #if defined(O_TMPFILE)
355 { TARGET_O_TMPFILE
, TARGET_O_TMPFILE
, O_TMPFILE
, O_TMPFILE
},
357 /* Don't terminate the list prematurely on 64-bit host+guest. */
358 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
359 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
366 QEMU_IFLA_BR_FORWARD_DELAY
,
367 QEMU_IFLA_BR_HELLO_TIME
,
368 QEMU_IFLA_BR_MAX_AGE
,
369 QEMU_IFLA_BR_AGEING_TIME
,
370 QEMU_IFLA_BR_STP_STATE
,
371 QEMU_IFLA_BR_PRIORITY
,
372 QEMU_IFLA_BR_VLAN_FILTERING
,
373 QEMU_IFLA_BR_VLAN_PROTOCOL
,
374 QEMU_IFLA_BR_GROUP_FWD_MASK
,
375 QEMU_IFLA_BR_ROOT_ID
,
376 QEMU_IFLA_BR_BRIDGE_ID
,
377 QEMU_IFLA_BR_ROOT_PORT
,
378 QEMU_IFLA_BR_ROOT_PATH_COST
,
379 QEMU_IFLA_BR_TOPOLOGY_CHANGE
,
380 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED
,
381 QEMU_IFLA_BR_HELLO_TIMER
,
382 QEMU_IFLA_BR_TCN_TIMER
,
383 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER
,
384 QEMU_IFLA_BR_GC_TIMER
,
385 QEMU_IFLA_BR_GROUP_ADDR
,
386 QEMU_IFLA_BR_FDB_FLUSH
,
387 QEMU_IFLA_BR_MCAST_ROUTER
,
388 QEMU_IFLA_BR_MCAST_SNOOPING
,
389 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR
,
390 QEMU_IFLA_BR_MCAST_QUERIER
,
391 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY
,
392 QEMU_IFLA_BR_MCAST_HASH_MAX
,
393 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT
,
394 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT
,
395 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL
,
396 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL
,
397 QEMU_IFLA_BR_MCAST_QUERIER_INTVL
,
398 QEMU_IFLA_BR_MCAST_QUERY_INTVL
,
399 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
,
400 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL
,
401 QEMU_IFLA_BR_NF_CALL_IPTABLES
,
402 QEMU_IFLA_BR_NF_CALL_IP6TABLES
,
403 QEMU_IFLA_BR_NF_CALL_ARPTABLES
,
404 QEMU_IFLA_BR_VLAN_DEFAULT_PVID
,
406 QEMU_IFLA_BR_VLAN_STATS_ENABLED
,
407 QEMU_IFLA_BR_MCAST_STATS_ENABLED
,
408 QEMU_IFLA_BR_MCAST_IGMP_VERSION
,
409 QEMU_IFLA_BR_MCAST_MLD_VERSION
,
433 QEMU_IFLA_NET_NS_PID
,
436 QEMU_IFLA_VFINFO_LIST
,
444 QEMU_IFLA_PROMISCUITY
,
445 QEMU_IFLA_NUM_TX_QUEUES
,
446 QEMU_IFLA_NUM_RX_QUEUES
,
448 QEMU_IFLA_PHYS_PORT_ID
,
449 QEMU_IFLA_CARRIER_CHANGES
,
450 QEMU_IFLA_PHYS_SWITCH_ID
,
451 QEMU_IFLA_LINK_NETNSID
,
452 QEMU_IFLA_PHYS_PORT_NAME
,
453 QEMU_IFLA_PROTO_DOWN
,
454 QEMU_IFLA_GSO_MAX_SEGS
,
455 QEMU_IFLA_GSO_MAX_SIZE
,
459 QEMU_IFLA_NEW_NETNSID
,
460 QEMU_IFLA_IF_NETNSID
,
461 QEMU_IFLA_CARRIER_UP_COUNT
,
462 QEMU_IFLA_CARRIER_DOWN_COUNT
,
463 QEMU_IFLA_NEW_IFINDEX
,
468 QEMU_IFLA_BRPORT_UNSPEC
,
469 QEMU_IFLA_BRPORT_STATE
,
470 QEMU_IFLA_BRPORT_PRIORITY
,
471 QEMU_IFLA_BRPORT_COST
,
472 QEMU_IFLA_BRPORT_MODE
,
473 QEMU_IFLA_BRPORT_GUARD
,
474 QEMU_IFLA_BRPORT_PROTECT
,
475 QEMU_IFLA_BRPORT_FAST_LEAVE
,
476 QEMU_IFLA_BRPORT_LEARNING
,
477 QEMU_IFLA_BRPORT_UNICAST_FLOOD
,
478 QEMU_IFLA_BRPORT_PROXYARP
,
479 QEMU_IFLA_BRPORT_LEARNING_SYNC
,
480 QEMU_IFLA_BRPORT_PROXYARP_WIFI
,
481 QEMU_IFLA_BRPORT_ROOT_ID
,
482 QEMU_IFLA_BRPORT_BRIDGE_ID
,
483 QEMU_IFLA_BRPORT_DESIGNATED_PORT
,
484 QEMU_IFLA_BRPORT_DESIGNATED_COST
,
487 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
,
488 QEMU_IFLA_BRPORT_CONFIG_PENDING
,
489 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER
,
490 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER
,
491 QEMU_IFLA_BRPORT_HOLD_TIMER
,
492 QEMU_IFLA_BRPORT_FLUSH
,
493 QEMU_IFLA_BRPORT_MULTICAST_ROUTER
,
494 QEMU_IFLA_BRPORT_PAD
,
495 QEMU_IFLA_BRPORT_MCAST_FLOOD
,
496 QEMU_IFLA_BRPORT_MCAST_TO_UCAST
,
497 QEMU_IFLA_BRPORT_VLAN_TUNNEL
,
498 QEMU_IFLA_BRPORT_BCAST_FLOOD
,
499 QEMU_IFLA_BRPORT_GROUP_FWD_MASK
,
500 QEMU_IFLA_BRPORT_NEIGH_SUPPRESS
,
501 QEMU___IFLA_BRPORT_MAX
505 QEMU_IFLA_TUN_UNSPEC
,
510 QEMU_IFLA_TUN_VNET_HDR
,
511 QEMU_IFLA_TUN_PERSIST
,
512 QEMU_IFLA_TUN_MULTI_QUEUE
,
513 QEMU_IFLA_TUN_NUM_QUEUES
,
514 QEMU_IFLA_TUN_NUM_DISABLED_QUEUES
,
519 QEMU_IFLA_INFO_UNSPEC
,
522 QEMU_IFLA_INFO_XSTATS
,
523 QEMU_IFLA_INFO_SLAVE_KIND
,
524 QEMU_IFLA_INFO_SLAVE_DATA
,
525 QEMU___IFLA_INFO_MAX
,
529 QEMU_IFLA_INET_UNSPEC
,
531 QEMU___IFLA_INET_MAX
,
535 QEMU_IFLA_INET6_UNSPEC
,
536 QEMU_IFLA_INET6_FLAGS
,
537 QEMU_IFLA_INET6_CONF
,
538 QEMU_IFLA_INET6_STATS
,
539 QEMU_IFLA_INET6_MCAST
,
540 QEMU_IFLA_INET6_CACHEINFO
,
541 QEMU_IFLA_INET6_ICMP6STATS
,
542 QEMU_IFLA_INET6_TOKEN
,
543 QEMU_IFLA_INET6_ADDR_GEN_MODE
,
544 QEMU___IFLA_INET6_MAX
548 QEMU_IFLA_XDP_UNSPEC
,
550 QEMU_IFLA_XDP_ATTACHED
,
552 QEMU_IFLA_XDP_PROG_ID
,
567 QEMU_RTA_PROTOINFO
, /* no longer used */
570 QEMU_RTA_SESSION
, /* no longer used */
571 QEMU_RTA_MP_ALGO
, /* no longer used */
583 QEMU_RTA_TTL_PROPAGATE
,
590 typedef abi_long (*TargetFdDataFunc
)(void *, size_t);
591 typedef abi_long (*TargetFdAddrFunc
)(void *, abi_ulong
, socklen_t
);
592 typedef struct TargetFdTrans
{
593 TargetFdDataFunc host_to_target_data
;
594 TargetFdDataFunc target_to_host_data
;
595 TargetFdAddrFunc target_to_host_addr
;
598 static TargetFdTrans
**target_fd_trans
;
600 static unsigned int target_fd_max
;
602 static TargetFdDataFunc
fd_trans_target_to_host_data(int fd
)
604 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
605 return target_fd_trans
[fd
]->target_to_host_data
;
610 static TargetFdDataFunc
fd_trans_host_to_target_data(int fd
)
612 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
613 return target_fd_trans
[fd
]->host_to_target_data
;
618 static TargetFdAddrFunc
fd_trans_target_to_host_addr(int fd
)
620 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
621 return target_fd_trans
[fd
]->target_to_host_addr
;
626 static void fd_trans_register(int fd
, TargetFdTrans
*trans
)
630 if (fd
>= target_fd_max
) {
631 oldmax
= target_fd_max
;
632 target_fd_max
= ((fd
>> 6) + 1) << 6; /* by slice of 64 entries */
633 target_fd_trans
= g_renew(TargetFdTrans
*,
634 target_fd_trans
, target_fd_max
);
635 memset((void *)(target_fd_trans
+ oldmax
), 0,
636 (target_fd_max
- oldmax
) * sizeof(TargetFdTrans
*));
638 target_fd_trans
[fd
] = trans
;
641 static void fd_trans_unregister(int fd
)
643 if (fd
>= 0 && fd
< target_fd_max
) {
644 target_fd_trans
[fd
] = NULL
;
648 static void fd_trans_dup(int oldfd
, int newfd
)
650 fd_trans_unregister(newfd
);
651 if (oldfd
< target_fd_max
&& target_fd_trans
[oldfd
]) {
652 fd_trans_register(newfd
, target_fd_trans
[oldfd
]);
/*
 * getcwd wrapper following the kernel convention: on success returns the
 * length of the path including the terminating NUL; on failure returns -1
 * (getcwd() has already set errno).
 */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return -1;
    }
    return strlen(buf) + 1;
}
665 #ifdef TARGET_NR_utimensat
666 #if defined(__NR_utimensat)
667 #define __NR_sys_utimensat __NR_utimensat
668 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
669 const struct timespec
*,tsp
,int,flags
)
671 static int sys_utimensat(int dirfd
, const char *pathname
,
672 const struct timespec times
[2], int flags
)
678 #endif /* TARGET_NR_utimensat */
680 #ifdef TARGET_NR_renameat2
681 #if defined(__NR_renameat2)
682 #define __NR_sys_renameat2 __NR_renameat2
683 _syscall5(int, sys_renameat2
, int, oldfd
, const char *, old
, int, newfd
,
684 const char *, new, unsigned int, flags
)
686 static int sys_renameat2(int oldfd
, const char *old
,
687 int newfd
, const char *new, int flags
)
690 return renameat(oldfd
, old
, newfd
, new);
696 #endif /* TARGET_NR_renameat2 */
698 #ifdef CONFIG_INOTIFY
699 #include <sys/inotify.h>
701 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper giving inotify_init() a uniform sys_* name. */
static int sys_inotify_init(void)
{
    return inotify_init();
}
707 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Thin wrapper giving inotify_add_watch() a uniform sys_* name. */
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return inotify_add_watch(fd, pathname, mask);
}
713 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Thin wrapper giving inotify_rm_watch() a uniform sys_* name. */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return inotify_rm_watch(fd, wd);
}
719 #ifdef CONFIG_INOTIFY1
720 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Thin wrapper giving inotify_init1() a uniform sys_* name. */
static int sys_inotify_init1(int flags)
{
    return inotify_init1(flags);
}
728 /* Userspace can usually survive runtime without inotify */
729 #undef TARGET_NR_inotify_init
730 #undef TARGET_NR_inotify_init1
731 #undef TARGET_NR_inotify_add_watch
732 #undef TARGET_NR_inotify_rm_watch
733 #endif /* CONFIG_INOTIFY */
735 #if defined(TARGET_NR_prlimit64)
736 #ifndef __NR_prlimit64
737 # define __NR_prlimit64 -1
739 #define __NR_sys_prlimit64 __NR_prlimit64
740 /* The glibc rlimit structure may not be that used by the underlying syscall */
741 struct host_rlimit64
{
745 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
746 const struct host_rlimit64
*, new_limit
,
747 struct host_rlimit64
*, old_limit
)
751 #if defined(TARGET_NR_timer_create)
752 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
753 static timer_t g_posix_timers
[32] = { 0, } ;
755 static inline int next_free_host_timer(void)
758 /* FIXME: Does finding the next free slot require a lock? */
759 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
760 if (g_posix_timers
[k
] == 0) {
761 g_posix_timers
[k
] = (timer_t
) 1;
/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
771 static inline int regpairs_aligned(void *cpu_env
, int num
)
773 return ((((CPUARMState
*)cpu_env
)->eabi
) == 1) ;
775 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
776 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
777 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
781 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
782 #elif defined(TARGET_SH4)
783 /* SH4 doesn't align register pairs, except for p{read,write}64 */
784 static inline int regpairs_aligned(void *cpu_env
, int num
)
787 case TARGET_NR_pread64
:
788 case TARGET_NR_pwrite64
:
795 #elif defined(TARGET_XTENSA)
796 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
798 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 0; }
801 #define ERRNO_TABLE_SIZE 1200
803 /* target_to_host_errno_table[] is initialized from
804 * host_to_target_errno_table[] in syscall_init(). */
805 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
812 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
813 [EAGAIN
] = TARGET_EAGAIN
,
814 [EIDRM
] = TARGET_EIDRM
,
815 [ECHRNG
] = TARGET_ECHRNG
,
816 [EL2NSYNC
] = TARGET_EL2NSYNC
,
817 [EL3HLT
] = TARGET_EL3HLT
,
818 [EL3RST
] = TARGET_EL3RST
,
819 [ELNRNG
] = TARGET_ELNRNG
,
820 [EUNATCH
] = TARGET_EUNATCH
,
821 [ENOCSI
] = TARGET_ENOCSI
,
822 [EL2HLT
] = TARGET_EL2HLT
,
823 [EDEADLK
] = TARGET_EDEADLK
,
824 [ENOLCK
] = TARGET_ENOLCK
,
825 [EBADE
] = TARGET_EBADE
,
826 [EBADR
] = TARGET_EBADR
,
827 [EXFULL
] = TARGET_EXFULL
,
828 [ENOANO
] = TARGET_ENOANO
,
829 [EBADRQC
] = TARGET_EBADRQC
,
830 [EBADSLT
] = TARGET_EBADSLT
,
831 [EBFONT
] = TARGET_EBFONT
,
832 [ENOSTR
] = TARGET_ENOSTR
,
833 [ENODATA
] = TARGET_ENODATA
,
834 [ETIME
] = TARGET_ETIME
,
835 [ENOSR
] = TARGET_ENOSR
,
836 [ENONET
] = TARGET_ENONET
,
837 [ENOPKG
] = TARGET_ENOPKG
,
838 [EREMOTE
] = TARGET_EREMOTE
,
839 [ENOLINK
] = TARGET_ENOLINK
,
840 [EADV
] = TARGET_EADV
,
841 [ESRMNT
] = TARGET_ESRMNT
,
842 [ECOMM
] = TARGET_ECOMM
,
843 [EPROTO
] = TARGET_EPROTO
,
844 [EDOTDOT
] = TARGET_EDOTDOT
,
845 [EMULTIHOP
] = TARGET_EMULTIHOP
,
846 [EBADMSG
] = TARGET_EBADMSG
,
847 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
848 [EOVERFLOW
] = TARGET_EOVERFLOW
,
849 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
850 [EBADFD
] = TARGET_EBADFD
,
851 [EREMCHG
] = TARGET_EREMCHG
,
852 [ELIBACC
] = TARGET_ELIBACC
,
853 [ELIBBAD
] = TARGET_ELIBBAD
,
854 [ELIBSCN
] = TARGET_ELIBSCN
,
855 [ELIBMAX
] = TARGET_ELIBMAX
,
856 [ELIBEXEC
] = TARGET_ELIBEXEC
,
857 [EILSEQ
] = TARGET_EILSEQ
,
858 [ENOSYS
] = TARGET_ENOSYS
,
859 [ELOOP
] = TARGET_ELOOP
,
860 [ERESTART
] = TARGET_ERESTART
,
861 [ESTRPIPE
] = TARGET_ESTRPIPE
,
862 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
863 [EUSERS
] = TARGET_EUSERS
,
864 [ENOTSOCK
] = TARGET_ENOTSOCK
,
865 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
866 [EMSGSIZE
] = TARGET_EMSGSIZE
,
867 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
868 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
869 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
870 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
871 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
872 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
873 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
874 [EADDRINUSE
] = TARGET_EADDRINUSE
,
875 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
876 [ENETDOWN
] = TARGET_ENETDOWN
,
877 [ENETUNREACH
] = TARGET_ENETUNREACH
,
878 [ENETRESET
] = TARGET_ENETRESET
,
879 [ECONNABORTED
] = TARGET_ECONNABORTED
,
880 [ECONNRESET
] = TARGET_ECONNRESET
,
881 [ENOBUFS
] = TARGET_ENOBUFS
,
882 [EISCONN
] = TARGET_EISCONN
,
883 [ENOTCONN
] = TARGET_ENOTCONN
,
884 [EUCLEAN
] = TARGET_EUCLEAN
,
885 [ENOTNAM
] = TARGET_ENOTNAM
,
886 [ENAVAIL
] = TARGET_ENAVAIL
,
887 [EISNAM
] = TARGET_EISNAM
,
888 [EREMOTEIO
] = TARGET_EREMOTEIO
,
889 [EDQUOT
] = TARGET_EDQUOT
,
890 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
891 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
892 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
893 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
894 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
895 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
896 [EALREADY
] = TARGET_EALREADY
,
897 [EINPROGRESS
] = TARGET_EINPROGRESS
,
898 [ESTALE
] = TARGET_ESTALE
,
899 [ECANCELED
] = TARGET_ECANCELED
,
900 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
901 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
903 [ENOKEY
] = TARGET_ENOKEY
,
906 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
909 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
912 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
915 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
917 #ifdef ENOTRECOVERABLE
918 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
921 [ENOMSG
] = TARGET_ENOMSG
,
924 [ERFKILL
] = TARGET_ERFKILL
,
927 [EHWPOISON
] = TARGET_EHWPOISON
,
931 static inline int host_to_target_errno(int err
)
933 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
934 host_to_target_errno_table
[err
]) {
935 return host_to_target_errno_table
[err
];
940 static inline int target_to_host_errno(int err
)
942 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
943 target_to_host_errno_table
[err
]) {
944 return target_to_host_errno_table
[err
];
949 static inline abi_long
get_errno(abi_long ret
)
952 return -host_to_target_errno(errno
);
957 const char *target_strerror(int err
)
959 if (err
== TARGET_ERESTARTSYS
) {
960 return "To be restarted";
962 if (err
== TARGET_QEMU_ESIGRETURN
) {
963 return "Successful exit from sigreturn";
966 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
969 return strerror(target_to_host_errno(err
));
972 #define safe_syscall0(type, name) \
973 static type safe_##name(void) \
975 return safe_syscall(__NR_##name); \
978 #define safe_syscall1(type, name, type1, arg1) \
979 static type safe_##name(type1 arg1) \
981 return safe_syscall(__NR_##name, arg1); \
984 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
985 static type safe_##name(type1 arg1, type2 arg2) \
987 return safe_syscall(__NR_##name, arg1, arg2); \
990 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
991 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
993 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
996 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
998 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
1000 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
1003 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
1004 type4, arg4, type5, arg5) \
1005 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
1008 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
1011 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
1012 type4, arg4, type5, arg5, type6, arg6) \
1013 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
1014 type5 arg5, type6 arg6) \
1016 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
1019 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
1020 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
1021 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
1022 int, flags
, mode_t
, mode
)
1023 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
1024 struct rusage
*, rusage
)
1025 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
1026 int, options
, struct rusage
*, rusage
)
1027 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
1028 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
1029 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
1030 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
1031 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
1033 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
1034 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
1036 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
1037 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
1038 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
1039 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
1040 safe_syscall2(int, tkill
, int, tid
, int, sig
)
1041 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
1042 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
1043 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
1044 safe_syscall5(ssize_t
, preadv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
1045 unsigned long, pos_l
, unsigned long, pos_h
)
1046 safe_syscall5(ssize_t
, pwritev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
1047 unsigned long, pos_l
, unsigned long, pos_h
)
1048 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
1050 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
1051 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
1052 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
1053 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
1054 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
1055 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
1056 safe_syscall2(int, flock
, int, fd
, int, operation
)
1057 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
1058 const struct timespec
*, uts
, size_t, sigsetsize
)
1059 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
1061 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
1062 struct timespec
*, rem
)
1063 #ifdef TARGET_NR_clock_nanosleep
1064 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
1065 const struct timespec
*, req
, struct timespec
*, rem
)
1068 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
1070 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
1071 long, msgtype
, int, flags
)
1072 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
1073 unsigned, nsops
, const struct timespec
*, timeout
)
/* This host kernel architecture uses a single ipc syscall; fake up
 * wrappers for the sub-operations to hide this implementation detail.
 * Annoyingly we can't include linux/ipc.h to get the constant definitions
 * for the call parameter because some structs in there conflict with the
 * sys/ipc.h ones. So we just define them here, and rely on them being
 * the same for all host architectures.
 */
1082 #define Q_SEMTIMEDOP 4
1085 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
1087 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
1088 void *, ptr
, long, fifth
)
1089 static int safe_msgsnd(int msgid
, const void *msgp
, size_t sz
, int flags
)
1091 return safe_ipc(Q_IPCCALL(0, Q_MSGSND
), msgid
, sz
, flags
, (void *)msgp
, 0);
1093 static int safe_msgrcv(int msgid
, void *msgp
, size_t sz
, long type
, int flags
)
1095 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV
), msgid
, sz
, flags
, msgp
, type
);
1097 static int safe_semtimedop(int semid
, struct sembuf
*tsops
, unsigned nsops
,
1098 const struct timespec
*timeout
)
1100 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP
), semid
, nsops
, 0, tsops
,
1104 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1105 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
1106 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
1107 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
1108 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
1110 /* We do ioctl like this rather than via safe_syscall3 to preserve the
1111 * "third argument might be integer or pointer or not present" behaviour of
1112 * the libc function.
1114 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
1115 /* Similarly for fcntl. Note that callers must always:
1116 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
1117 * use the flock64 struct rather than unsuffixed flock
1118 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
1121 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
1123 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
1126 static inline int host_to_target_sock_type(int host_type
)
1130 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
1132 target_type
= TARGET_SOCK_DGRAM
;
1135 target_type
= TARGET_SOCK_STREAM
;
1138 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
1142 #if defined(SOCK_CLOEXEC)
1143 if (host_type
& SOCK_CLOEXEC
) {
1144 target_type
|= TARGET_SOCK_CLOEXEC
;
1148 #if defined(SOCK_NONBLOCK)
1149 if (host_type
& SOCK_NONBLOCK
) {
1150 target_type
|= TARGET_SOCK_NONBLOCK
;
1157 static abi_ulong target_brk
;
1158 static abi_ulong target_original_brk
;
1159 static abi_ulong brk_page
;
1161 void target_set_brk(abi_ulong new_brk
)
1163 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
1164 brk_page
= HOST_PAGE_ALIGN(target_brk
);
1167 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
1168 #define DEBUGF_BRK(message, args...)
1170 /* do_brk() must return target values and target errnos. */
1171 abi_long
do_brk(abi_ulong new_brk
)
1173 abi_long mapped_addr
;
1174 abi_ulong new_alloc_size
;
1176 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
1179 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
1182 if (new_brk
< target_original_brk
) {
1183 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
1188 /* If the new brk is less than the highest page reserved to the
1189 * target heap allocation, set it and we're almost done... */
1190 if (new_brk
<= brk_page
) {
1191 /* Heap contents are initialized to zero, as for anonymous
1193 if (new_brk
> target_brk
) {
1194 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
1196 target_brk
= new_brk
;
1197 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
1201 /* We need to allocate more memory after the brk... Note that
1202 * we don't use MAP_FIXED because that will map over the top of
1203 * any existing mapping (like the one with the host libc or qemu
1204 * itself); instead we treat "mapped but at wrong address" as
1205 * a failure and unmap again.
1207 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
1208 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
1209 PROT_READ
|PROT_WRITE
,
1210 MAP_ANON
|MAP_PRIVATE
, 0, 0));
1212 if (mapped_addr
== brk_page
) {
1213 /* Heap contents are initialized to zero, as for anonymous
1214 * mapped pages. Technically the new pages are already
1215 * initialized to zero since they *are* anonymous mapped
1216 * pages, however we have to take care with the contents that
1217 * come from the remaining part of the previous page: it may
1218 * contains garbage data due to a previous heap usage (grown
1219 * then shrunken). */
1220 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
1222 target_brk
= new_brk
;
1223 brk_page
= HOST_PAGE_ALIGN(target_brk
);
1224 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
1227 } else if (mapped_addr
!= -1) {
1228 /* Mapped but at wrong address, meaning there wasn't actually
1229 * enough space for this brk.
1231 target_munmap(mapped_addr
, new_alloc_size
);
1233 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
1236 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
1239 #if defined(TARGET_ALPHA)
1240 /* We (partially) emulate OSF/1 on Alpha, which requires we
1241 return a proper errno, not an unchanged brk value. */
1242 return -TARGET_ENOMEM
;
1244 /* For everything else, return the previous break. */
1248 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
1249 abi_ulong target_fds_addr
,
1253 abi_ulong b
, *target_fds
;
1255 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1256 if (!(target_fds
= lock_user(VERIFY_READ
,
1258 sizeof(abi_ulong
) * nw
,
1260 return -TARGET_EFAULT
;
1264 for (i
= 0; i
< nw
; i
++) {
1265 /* grab the abi_ulong */
1266 __get_user(b
, &target_fds
[i
]);
1267 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1268 /* check the bit inside the abi_ulong */
1275 unlock_user(target_fds
, target_fds_addr
, 0);
1280 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
1281 abi_ulong target_fds_addr
,
1284 if (target_fds_addr
) {
1285 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
1286 return -TARGET_EFAULT
;
1294 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
1300 abi_ulong
*target_fds
;
1302 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1303 if (!(target_fds
= lock_user(VERIFY_WRITE
,
1305 sizeof(abi_ulong
) * nw
,
1307 return -TARGET_EFAULT
;
1310 for (i
= 0; i
< nw
; i
++) {
1312 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1313 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
1316 __put_user(v
, &target_fds
[i
]);
1319 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
1324 #if defined(__alpha__)
1325 #define HOST_HZ 1024
1330 static inline abi_long
host_to_target_clock_t(long ticks
)
1332 #if HOST_HZ == TARGET_HZ
1335 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1339 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1340 const struct rusage
*rusage
)
1342 struct target_rusage
*target_rusage
;
1344 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1345 return -TARGET_EFAULT
;
1346 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1347 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1348 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1349 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1350 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1351 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1352 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1353 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1354 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1355 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1356 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1357 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1358 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1359 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1360 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1361 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1362 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1363 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1364 unlock_user_struct(target_rusage
, target_addr
, 1);
1369 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1371 abi_ulong target_rlim_swap
;
1374 target_rlim_swap
= tswapal(target_rlim
);
1375 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1376 return RLIM_INFINITY
;
1378 result
= target_rlim_swap
;
1379 if (target_rlim_swap
!= (rlim_t
)result
)
1380 return RLIM_INFINITY
;
1385 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1387 abi_ulong target_rlim_swap
;
1390 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1391 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1393 target_rlim_swap
= rlim
;
1394 result
= tswapal(target_rlim_swap
);
1399 static inline int target_to_host_resource(int code
)
1402 case TARGET_RLIMIT_AS
:
1404 case TARGET_RLIMIT_CORE
:
1406 case TARGET_RLIMIT_CPU
:
1408 case TARGET_RLIMIT_DATA
:
1410 case TARGET_RLIMIT_FSIZE
:
1411 return RLIMIT_FSIZE
;
1412 case TARGET_RLIMIT_LOCKS
:
1413 return RLIMIT_LOCKS
;
1414 case TARGET_RLIMIT_MEMLOCK
:
1415 return RLIMIT_MEMLOCK
;
1416 case TARGET_RLIMIT_MSGQUEUE
:
1417 return RLIMIT_MSGQUEUE
;
1418 case TARGET_RLIMIT_NICE
:
1420 case TARGET_RLIMIT_NOFILE
:
1421 return RLIMIT_NOFILE
;
1422 case TARGET_RLIMIT_NPROC
:
1423 return RLIMIT_NPROC
;
1424 case TARGET_RLIMIT_RSS
:
1426 case TARGET_RLIMIT_RTPRIO
:
1427 return RLIMIT_RTPRIO
;
1428 case TARGET_RLIMIT_SIGPENDING
:
1429 return RLIMIT_SIGPENDING
;
1430 case TARGET_RLIMIT_STACK
:
1431 return RLIMIT_STACK
;
1437 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1438 abi_ulong target_tv_addr
)
1440 struct target_timeval
*target_tv
;
1442 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
1443 return -TARGET_EFAULT
;
1445 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1446 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1448 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1453 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1454 const struct timeval
*tv
)
1456 struct target_timeval
*target_tv
;
1458 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
1459 return -TARGET_EFAULT
;
1461 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1462 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1464 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1469 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1470 abi_ulong target_tz_addr
)
1472 struct target_timezone
*target_tz
;
1474 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1475 return -TARGET_EFAULT
;
1478 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1479 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1481 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1486 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1489 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1490 abi_ulong target_mq_attr_addr
)
1492 struct target_mq_attr
*target_mq_attr
;
1494 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1495 target_mq_attr_addr
, 1))
1496 return -TARGET_EFAULT
;
1498 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1499 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1500 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1501 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1503 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1508 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1509 const struct mq_attr
*attr
)
1511 struct target_mq_attr
*target_mq_attr
;
1513 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1514 target_mq_attr_addr
, 0))
1515 return -TARGET_EFAULT
;
1517 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1518 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1519 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1520 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1522 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1528 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1529 /* do_select() must return target values and target errnos. */
1530 static abi_long
do_select(int n
,
1531 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1532 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1534 fd_set rfds
, wfds
, efds
;
1535 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1537 struct timespec ts
, *ts_ptr
;
1540 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1544 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1548 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1553 if (target_tv_addr
) {
1554 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1555 return -TARGET_EFAULT
;
1556 ts
.tv_sec
= tv
.tv_sec
;
1557 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1563 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1566 if (!is_error(ret
)) {
1567 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1568 return -TARGET_EFAULT
;
1569 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1570 return -TARGET_EFAULT
;
1571 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1572 return -TARGET_EFAULT
;
1574 if (target_tv_addr
) {
1575 tv
.tv_sec
= ts
.tv_sec
;
1576 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1577 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1578 return -TARGET_EFAULT
;
1586 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1587 static abi_long
do_old_select(abi_ulong arg1
)
1589 struct target_sel_arg_struct
*sel
;
1590 abi_ulong inp
, outp
, exp
, tvp
;
1593 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1)) {
1594 return -TARGET_EFAULT
;
1597 nsel
= tswapal(sel
->n
);
1598 inp
= tswapal(sel
->inp
);
1599 outp
= tswapal(sel
->outp
);
1600 exp
= tswapal(sel
->exp
);
1601 tvp
= tswapal(sel
->tvp
);
1603 unlock_user_struct(sel
, arg1
, 0);
1605 return do_select(nsel
, inp
, outp
, exp
, tvp
);
1610 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1613 return pipe2(host_pipe
, flags
);
1619 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1620 int flags
, int is_pipe2
)
1624 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1627 return get_errno(ret
);
1629 /* Several targets have special calling conventions for the original
1630 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1632 #if defined(TARGET_ALPHA)
1633 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1634 return host_pipe
[0];
1635 #elif defined(TARGET_MIPS)
1636 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1637 return host_pipe
[0];
1638 #elif defined(TARGET_SH4)
1639 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1640 return host_pipe
[0];
1641 #elif defined(TARGET_SPARC)
1642 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1643 return host_pipe
[0];
1647 if (put_user_s32(host_pipe
[0], pipedes
)
1648 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1649 return -TARGET_EFAULT
;
1650 return get_errno(ret
);
1653 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1654 abi_ulong target_addr
,
1657 struct target_ip_mreqn
*target_smreqn
;
1659 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1661 return -TARGET_EFAULT
;
1662 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1663 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1664 if (len
== sizeof(struct target_ip_mreqn
))
1665 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1666 unlock_user(target_smreqn
, target_addr
, 0);
1671 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1672 abi_ulong target_addr
,
1675 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1676 sa_family_t sa_family
;
1677 struct target_sockaddr
*target_saddr
;
1679 if (fd_trans_target_to_host_addr(fd
)) {
1680 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1683 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1685 return -TARGET_EFAULT
;
1687 sa_family
= tswap16(target_saddr
->sa_family
);
1689 /* Oops. The caller might send a incomplete sun_path; sun_path
1690 * must be terminated by \0 (see the manual page), but
1691 * unfortunately it is quite common to specify sockaddr_un
1692 * length as "strlen(x->sun_path)" while it should be
1693 * "strlen(...) + 1". We'll fix that here if needed.
1694 * Linux kernel has a similar feature.
1697 if (sa_family
== AF_UNIX
) {
1698 if (len
< unix_maxlen
&& len
> 0) {
1699 char *cp
= (char*)target_saddr
;
1701 if ( cp
[len
-1] && !cp
[len
] )
1704 if (len
> unix_maxlen
)
1708 memcpy(addr
, target_saddr
, len
);
1709 addr
->sa_family
= sa_family
;
1710 if (sa_family
== AF_NETLINK
) {
1711 struct sockaddr_nl
*nladdr
;
1713 nladdr
= (struct sockaddr_nl
*)addr
;
1714 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1715 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1716 } else if (sa_family
== AF_PACKET
) {
1717 struct target_sockaddr_ll
*lladdr
;
1719 lladdr
= (struct target_sockaddr_ll
*)addr
;
1720 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1721 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1723 unlock_user(target_saddr
, target_addr
, 0);
1728 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1729 struct sockaddr
*addr
,
1732 struct target_sockaddr
*target_saddr
;
1739 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1741 return -TARGET_EFAULT
;
1742 memcpy(target_saddr
, addr
, len
);
1743 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1744 sizeof(target_saddr
->sa_family
)) {
1745 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1747 if (addr
->sa_family
== AF_NETLINK
&& len
>= sizeof(struct sockaddr_nl
)) {
1748 struct sockaddr_nl
*target_nl
= (struct sockaddr_nl
*)target_saddr
;
1749 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1750 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1751 } else if (addr
->sa_family
== AF_PACKET
) {
1752 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1753 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1754 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1755 } else if (addr
->sa_family
== AF_INET6
&&
1756 len
>= sizeof(struct target_sockaddr_in6
)) {
1757 struct target_sockaddr_in6
*target_in6
=
1758 (struct target_sockaddr_in6
*)target_saddr
;
1759 target_in6
->sin6_scope_id
= tswap16(target_in6
->sin6_scope_id
);
1761 unlock_user(target_saddr
, target_addr
, len
);
1766 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1767 struct target_msghdr
*target_msgh
)
1769 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1770 abi_long msg_controllen
;
1771 abi_ulong target_cmsg_addr
;
1772 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1773 socklen_t space
= 0;
1775 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1776 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1778 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1779 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1780 target_cmsg_start
= target_cmsg
;
1782 return -TARGET_EFAULT
;
1784 while (cmsg
&& target_cmsg
) {
1785 void *data
= CMSG_DATA(cmsg
);
1786 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1788 int len
= tswapal(target_cmsg
->cmsg_len
)
1789 - sizeof(struct target_cmsghdr
);
1791 space
+= CMSG_SPACE(len
);
1792 if (space
> msgh
->msg_controllen
) {
1793 space
-= CMSG_SPACE(len
);
1794 /* This is a QEMU bug, since we allocated the payload
1795 * area ourselves (unlike overflow in host-to-target
1796 * conversion, which is just the guest giving us a buffer
1797 * that's too small). It can't happen for the payload types
1798 * we currently support; if it becomes an issue in future
1799 * we would need to improve our allocation strategy to
1800 * something more intelligent than "twice the size of the
1801 * target buffer we're reading from".
1803 gemu_log("Host cmsg overflow\n");
1807 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1808 cmsg
->cmsg_level
= SOL_SOCKET
;
1810 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1812 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1813 cmsg
->cmsg_len
= CMSG_LEN(len
);
1815 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1816 int *fd
= (int *)data
;
1817 int *target_fd
= (int *)target_data
;
1818 int i
, numfds
= len
/ sizeof(int);
1820 for (i
= 0; i
< numfds
; i
++) {
1821 __get_user(fd
[i
], target_fd
+ i
);
1823 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1824 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1825 struct ucred
*cred
= (struct ucred
*)data
;
1826 struct target_ucred
*target_cred
=
1827 (struct target_ucred
*)target_data
;
1829 __get_user(cred
->pid
, &target_cred
->pid
);
1830 __get_user(cred
->uid
, &target_cred
->uid
);
1831 __get_user(cred
->gid
, &target_cred
->gid
);
1833 gemu_log("Unsupported ancillary data: %d/%d\n",
1834 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1835 memcpy(data
, target_data
, len
);
1838 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1839 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1842 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1844 msgh
->msg_controllen
= space
;
1848 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1849 struct msghdr
*msgh
)
1851 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1852 abi_long msg_controllen
;
1853 abi_ulong target_cmsg_addr
;
1854 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1855 socklen_t space
= 0;
1857 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1858 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1860 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1861 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1862 target_cmsg_start
= target_cmsg
;
1864 return -TARGET_EFAULT
;
1866 while (cmsg
&& target_cmsg
) {
1867 void *data
= CMSG_DATA(cmsg
);
1868 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1870 int len
= cmsg
->cmsg_len
- sizeof(struct cmsghdr
);
1871 int tgt_len
, tgt_space
;
1873 /* We never copy a half-header but may copy half-data;
1874 * this is Linux's behaviour in put_cmsg(). Note that
1875 * truncation here is a guest problem (which we report
1876 * to the guest via the CTRUNC bit), unlike truncation
1877 * in target_to_host_cmsg, which is a QEMU bug.
1879 if (msg_controllen
< sizeof(struct target_cmsghdr
)) {
1880 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1884 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1885 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1887 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1889 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1891 /* Payload types which need a different size of payload on
1892 * the target must adjust tgt_len here.
1895 switch (cmsg
->cmsg_level
) {
1897 switch (cmsg
->cmsg_type
) {
1899 tgt_len
= sizeof(struct target_timeval
);
1909 if (msg_controllen
< TARGET_CMSG_LEN(tgt_len
)) {
1910 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1911 tgt_len
= msg_controllen
- sizeof(struct target_cmsghdr
);
1914 /* We must now copy-and-convert len bytes of payload
1915 * into tgt_len bytes of destination space. Bear in mind
1916 * that in both source and destination we may be dealing
1917 * with a truncated value!
1919 switch (cmsg
->cmsg_level
) {
1921 switch (cmsg
->cmsg_type
) {
1924 int *fd
= (int *)data
;
1925 int *target_fd
= (int *)target_data
;
1926 int i
, numfds
= tgt_len
/ sizeof(int);
1928 for (i
= 0; i
< numfds
; i
++) {
1929 __put_user(fd
[i
], target_fd
+ i
);
1935 struct timeval
*tv
= (struct timeval
*)data
;
1936 struct target_timeval
*target_tv
=
1937 (struct target_timeval
*)target_data
;
1939 if (len
!= sizeof(struct timeval
) ||
1940 tgt_len
!= sizeof(struct target_timeval
)) {
1944 /* copy struct timeval to target */
1945 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1946 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1949 case SCM_CREDENTIALS
:
1951 struct ucred
*cred
= (struct ucred
*)data
;
1952 struct target_ucred
*target_cred
=
1953 (struct target_ucred
*)target_data
;
1955 __put_user(cred
->pid
, &target_cred
->pid
);
1956 __put_user(cred
->uid
, &target_cred
->uid
);
1957 __put_user(cred
->gid
, &target_cred
->gid
);
1966 switch (cmsg
->cmsg_type
) {
1969 uint32_t *v
= (uint32_t *)data
;
1970 uint32_t *t_int
= (uint32_t *)target_data
;
1972 if (len
!= sizeof(uint32_t) ||
1973 tgt_len
!= sizeof(uint32_t)) {
1976 __put_user(*v
, t_int
);
1982 struct sock_extended_err ee
;
1983 struct sockaddr_in offender
;
1985 struct errhdr_t
*errh
= (struct errhdr_t
*)data
;
1986 struct errhdr_t
*target_errh
=
1987 (struct errhdr_t
*)target_data
;
1989 if (len
!= sizeof(struct errhdr_t
) ||
1990 tgt_len
!= sizeof(struct errhdr_t
)) {
1993 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1994 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1995 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1996 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1997 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1998 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1999 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
2000 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
2001 (void *) &errh
->offender
, sizeof(errh
->offender
));
2010 switch (cmsg
->cmsg_type
) {
2013 uint32_t *v
= (uint32_t *)data
;
2014 uint32_t *t_int
= (uint32_t *)target_data
;
2016 if (len
!= sizeof(uint32_t) ||
2017 tgt_len
!= sizeof(uint32_t)) {
2020 __put_user(*v
, t_int
);
2026 struct sock_extended_err ee
;
2027 struct sockaddr_in6 offender
;
2029 struct errhdr6_t
*errh
= (struct errhdr6_t
*)data
;
2030 struct errhdr6_t
*target_errh
=
2031 (struct errhdr6_t
*)target_data
;
2033 if (len
!= sizeof(struct errhdr6_t
) ||
2034 tgt_len
!= sizeof(struct errhdr6_t
)) {
2037 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
2038 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
2039 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
2040 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
2041 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
2042 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
2043 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
2044 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
2045 (void *) &errh
->offender
, sizeof(errh
->offender
));
2055 gemu_log("Unsupported ancillary data: %d/%d\n",
2056 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
2057 memcpy(target_data
, data
, MIN(len
, tgt_len
));
2058 if (tgt_len
> len
) {
2059 memset(target_data
+ len
, 0, tgt_len
- len
);
2063 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(tgt_len
));
2064 tgt_space
= TARGET_CMSG_SPACE(tgt_len
);
2065 if (msg_controllen
< tgt_space
) {
2066 tgt_space
= msg_controllen
;
2068 msg_controllen
-= tgt_space
;
2070 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
2071 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
2074 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
2076 target_msgh
->msg_controllen
= tswapal(space
);
/* Byte-swap every field of a netlink message header in place. */
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
    nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
    nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
}
2089 static abi_long
host_to_target_for_each_nlmsg(struct nlmsghdr
*nlh
,
2091 abi_long (*host_to_target_nlmsg
)
2092 (struct nlmsghdr
*))
2097 while (len
> sizeof(struct nlmsghdr
)) {
2099 nlmsg_len
= nlh
->nlmsg_len
;
2100 if (nlmsg_len
< sizeof(struct nlmsghdr
) ||
2105 switch (nlh
->nlmsg_type
) {
2107 tswap_nlmsghdr(nlh
);
2113 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
2114 e
->error
= tswap32(e
->error
);
2115 tswap_nlmsghdr(&e
->msg
);
2116 tswap_nlmsghdr(nlh
);
2120 ret
= host_to_target_nlmsg(nlh
);
2122 tswap_nlmsghdr(nlh
);
2127 tswap_nlmsghdr(nlh
);
2128 len
-= NLMSG_ALIGN(nlmsg_len
);
2129 nlh
= (struct nlmsghdr
*)(((char*)nlh
) + NLMSG_ALIGN(nlmsg_len
));
2134 static abi_long
target_to_host_for_each_nlmsg(struct nlmsghdr
*nlh
,
2136 abi_long (*target_to_host_nlmsg
)
2137 (struct nlmsghdr
*))
2141 while (len
> sizeof(struct nlmsghdr
)) {
2142 if (tswap32(nlh
->nlmsg_len
) < sizeof(struct nlmsghdr
) ||
2143 tswap32(nlh
->nlmsg_len
) > len
) {
2146 tswap_nlmsghdr(nlh
);
2147 switch (nlh
->nlmsg_type
) {
2154 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
2155 e
->error
= tswap32(e
->error
);
2156 tswap_nlmsghdr(&e
->msg
);
2160 ret
= target_to_host_nlmsg(nlh
);
2165 len
-= NLMSG_ALIGN(nlh
->nlmsg_len
);
2166 nlh
= (struct nlmsghdr
*)(((char *)nlh
) + NLMSG_ALIGN(nlh
->nlmsg_len
));
2171 #ifdef CONFIG_RTNETLINK
2172 static abi_long
host_to_target_for_each_nlattr(struct nlattr
*nlattr
,
2173 size_t len
, void *context
,
2174 abi_long (*host_to_target_nlattr
)
2178 unsigned short nla_len
;
2181 while (len
> sizeof(struct nlattr
)) {
2182 nla_len
= nlattr
->nla_len
;
2183 if (nla_len
< sizeof(struct nlattr
) ||
2187 ret
= host_to_target_nlattr(nlattr
, context
);
2188 nlattr
->nla_len
= tswap16(nlattr
->nla_len
);
2189 nlattr
->nla_type
= tswap16(nlattr
->nla_type
);
2193 len
-= NLA_ALIGN(nla_len
);
2194 nlattr
= (struct nlattr
*)(((char *)nlattr
) + NLA_ALIGN(nla_len
));
2199 static abi_long
host_to_target_for_each_rtattr(struct rtattr
*rtattr
,
2201 abi_long (*host_to_target_rtattr
)
2204 unsigned short rta_len
;
2207 while (len
> sizeof(struct rtattr
)) {
2208 rta_len
= rtattr
->rta_len
;
2209 if (rta_len
< sizeof(struct rtattr
) ||
2213 ret
= host_to_target_rtattr(rtattr
);
2214 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
2215 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
2219 len
-= RTA_ALIGN(rta_len
);
2220 rtattr
= (struct rtattr
*)(((char *)rtattr
) + RTA_ALIGN(rta_len
));
2225 #define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
2227 static abi_long
host_to_target_data_bridge_nlattr(struct nlattr
*nlattr
,
2234 switch (nlattr
->nla_type
) {
2236 case QEMU_IFLA_BR_FDB_FLUSH
:
2239 case QEMU_IFLA_BR_GROUP_ADDR
:
2242 case QEMU_IFLA_BR_VLAN_FILTERING
:
2243 case QEMU_IFLA_BR_TOPOLOGY_CHANGE
:
2244 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED
:
2245 case QEMU_IFLA_BR_MCAST_ROUTER
:
2246 case QEMU_IFLA_BR_MCAST_SNOOPING
:
2247 case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR
:
2248 case QEMU_IFLA_BR_MCAST_QUERIER
:
2249 case QEMU_IFLA_BR_NF_CALL_IPTABLES
:
2250 case QEMU_IFLA_BR_NF_CALL_IP6TABLES
:
2251 case QEMU_IFLA_BR_NF_CALL_ARPTABLES
:
2252 case QEMU_IFLA_BR_VLAN_STATS_ENABLED
:
2253 case QEMU_IFLA_BR_MCAST_STATS_ENABLED
:
2254 case QEMU_IFLA_BR_MCAST_IGMP_VERSION
:
2255 case QEMU_IFLA_BR_MCAST_MLD_VERSION
:
2258 case QEMU_IFLA_BR_PRIORITY
:
2259 case QEMU_IFLA_BR_VLAN_PROTOCOL
:
2260 case QEMU_IFLA_BR_GROUP_FWD_MASK
:
2261 case QEMU_IFLA_BR_ROOT_PORT
:
2262 case QEMU_IFLA_BR_VLAN_DEFAULT_PVID
:
2263 u16
= NLA_DATA(nlattr
);
2264 *u16
= tswap16(*u16
);
2267 case QEMU_IFLA_BR_FORWARD_DELAY
:
2268 case QEMU_IFLA_BR_HELLO_TIME
:
2269 case QEMU_IFLA_BR_MAX_AGE
:
2270 case QEMU_IFLA_BR_AGEING_TIME
:
2271 case QEMU_IFLA_BR_STP_STATE
:
2272 case QEMU_IFLA_BR_ROOT_PATH_COST
:
2273 case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY
:
2274 case QEMU_IFLA_BR_MCAST_HASH_MAX
:
2275 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT
:
2276 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT
:
2277 u32
= NLA_DATA(nlattr
);
2278 *u32
= tswap32(*u32
);
2281 case QEMU_IFLA_BR_HELLO_TIMER
:
2282 case QEMU_IFLA_BR_TCN_TIMER
:
2283 case QEMU_IFLA_BR_GC_TIMER
:
2284 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER
:
2285 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL
:
2286 case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL
:
2287 case QEMU_IFLA_BR_MCAST_QUERIER_INTVL
:
2288 case QEMU_IFLA_BR_MCAST_QUERY_INTVL
:
2289 case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
:
2290 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL
:
2291 u64
= NLA_DATA(nlattr
);
2292 *u64
= tswap64(*u64
);
2294 /* ifla_bridge_id: uin8_t[] */
2295 case QEMU_IFLA_BR_ROOT_ID
:
2296 case QEMU_IFLA_BR_BRIDGE_ID
:
2299 gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr
->nla_type
);
2305 static abi_long
host_to_target_slave_data_bridge_nlattr(struct nlattr
*nlattr
,
2312 switch (nlattr
->nla_type
) {
2314 case QEMU_IFLA_BRPORT_STATE
:
2315 case QEMU_IFLA_BRPORT_MODE
:
2316 case QEMU_IFLA_BRPORT_GUARD
:
2317 case QEMU_IFLA_BRPORT_PROTECT
:
2318 case QEMU_IFLA_BRPORT_FAST_LEAVE
:
2319 case QEMU_IFLA_BRPORT_LEARNING
:
2320 case QEMU_IFLA_BRPORT_UNICAST_FLOOD
:
2321 case QEMU_IFLA_BRPORT_PROXYARP
:
2322 case QEMU_IFLA_BRPORT_LEARNING_SYNC
:
2323 case QEMU_IFLA_BRPORT_PROXYARP_WIFI
:
2324 case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
:
2325 case QEMU_IFLA_BRPORT_CONFIG_PENDING
:
2326 case QEMU_IFLA_BRPORT_MULTICAST_ROUTER
:
2327 case QEMU_IFLA_BRPORT_MCAST_FLOOD
:
2328 case QEMU_IFLA_BRPORT_MCAST_TO_UCAST
:
2329 case QEMU_IFLA_BRPORT_VLAN_TUNNEL
:
2330 case QEMU_IFLA_BRPORT_BCAST_FLOOD
:
2331 case QEMU_IFLA_BRPORT_NEIGH_SUPPRESS
:
2334 case QEMU_IFLA_BRPORT_PRIORITY
:
2335 case QEMU_IFLA_BRPORT_DESIGNATED_PORT
:
2336 case QEMU_IFLA_BRPORT_DESIGNATED_COST
:
2337 case QEMU_IFLA_BRPORT_ID
:
2338 case QEMU_IFLA_BRPORT_NO
:
2339 case QEMU_IFLA_BRPORT_GROUP_FWD_MASK
:
2340 u16
= NLA_DATA(nlattr
);
2341 *u16
= tswap16(*u16
);
2344 case QEMU_IFLA_BRPORT_COST
:
2345 u32
= NLA_DATA(nlattr
);
2346 *u32
= tswap32(*u32
);
2349 case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER
:
2350 case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER
:
2351 case QEMU_IFLA_BRPORT_HOLD_TIMER
:
2352 u64
= NLA_DATA(nlattr
);
2353 *u64
= tswap64(*u64
);
2355 /* ifla_bridge_id: uint8_t[] */
2356 case QEMU_IFLA_BRPORT_ROOT_ID
:
2357 case QEMU_IFLA_BRPORT_BRIDGE_ID
:
2360 gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr
->nla_type
);
2366 static abi_long
host_to_target_data_tun_nlattr(struct nlattr
*nlattr
,
2371 switch (nlattr
->nla_type
) {
2373 case QEMU_IFLA_TUN_TYPE
:
2374 case QEMU_IFLA_TUN_PI
:
2375 case QEMU_IFLA_TUN_VNET_HDR
:
2376 case QEMU_IFLA_TUN_PERSIST
:
2377 case QEMU_IFLA_TUN_MULTI_QUEUE
:
2380 case QEMU_IFLA_TUN_NUM_QUEUES
:
2381 case QEMU_IFLA_TUN_NUM_DISABLED_QUEUES
:
2382 case QEMU_IFLA_TUN_OWNER
:
2383 case QEMU_IFLA_TUN_GROUP
:
2384 u32
= NLA_DATA(nlattr
);
2385 *u32
= tswap32(*u32
);
2388 gemu_log("Unknown QEMU_IFLA_TUN type %d\n", nlattr
->nla_type
);
2394 struct linkinfo_context
{
2401 static abi_long
host_to_target_data_linkinfo_nlattr(struct nlattr
*nlattr
,
2404 struct linkinfo_context
*li_context
= context
;
2406 switch (nlattr
->nla_type
) {
2408 case QEMU_IFLA_INFO_KIND
:
2409 li_context
->name
= NLA_DATA(nlattr
);
2410 li_context
->len
= nlattr
->nla_len
- NLA_HDRLEN
;
2412 case QEMU_IFLA_INFO_SLAVE_KIND
:
2413 li_context
->slave_name
= NLA_DATA(nlattr
);
2414 li_context
->slave_len
= nlattr
->nla_len
- NLA_HDRLEN
;
2417 case QEMU_IFLA_INFO_XSTATS
:
2418 /* FIXME: only used by CAN */
2421 case QEMU_IFLA_INFO_DATA
:
2422 if (strncmp(li_context
->name
, "bridge",
2423 li_context
->len
) == 0) {
2424 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
2427 host_to_target_data_bridge_nlattr
);
2428 } else if (strncmp(li_context
->name
, "tun",
2429 li_context
->len
) == 0) {
2430 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
2433 host_to_target_data_tun_nlattr
);
2435 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context
->name
);
2438 case QEMU_IFLA_INFO_SLAVE_DATA
:
2439 if (strncmp(li_context
->slave_name
, "bridge",
2440 li_context
->slave_len
) == 0) {
2441 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
2444 host_to_target_slave_data_bridge_nlattr
);
2446 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2447 li_context
->slave_name
);
2451 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr
->nla_type
);
2458 static abi_long
host_to_target_data_inet_nlattr(struct nlattr
*nlattr
,
2464 switch (nlattr
->nla_type
) {
2465 case QEMU_IFLA_INET_CONF
:
2466 u32
= NLA_DATA(nlattr
);
2467 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u32
);
2469 u32
[i
] = tswap32(u32
[i
]);
2473 gemu_log("Unknown host AF_INET type: %d\n", nlattr
->nla_type
);
2478 static abi_long
host_to_target_data_inet6_nlattr(struct nlattr
*nlattr
,
2483 struct ifla_cacheinfo
*ci
;
2486 switch (nlattr
->nla_type
) {
2488 case QEMU_IFLA_INET6_TOKEN
:
2491 case QEMU_IFLA_INET6_ADDR_GEN_MODE
:
2494 case QEMU_IFLA_INET6_FLAGS
:
2495 u32
= NLA_DATA(nlattr
);
2496 *u32
= tswap32(*u32
);
2499 case QEMU_IFLA_INET6_CONF
:
2500 u32
= NLA_DATA(nlattr
);
2501 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u32
);
2503 u32
[i
] = tswap32(u32
[i
]);
2506 /* ifla_cacheinfo */
2507 case QEMU_IFLA_INET6_CACHEINFO
:
2508 ci
= NLA_DATA(nlattr
);
2509 ci
->max_reasm_len
= tswap32(ci
->max_reasm_len
);
2510 ci
->tstamp
= tswap32(ci
->tstamp
);
2511 ci
->reachable_time
= tswap32(ci
->reachable_time
);
2512 ci
->retrans_time
= tswap32(ci
->retrans_time
);
2515 case QEMU_IFLA_INET6_STATS
:
2516 case QEMU_IFLA_INET6_ICMP6STATS
:
2517 u64
= NLA_DATA(nlattr
);
2518 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u64
);
2520 u64
[i
] = tswap64(u64
[i
]);
2524 gemu_log("Unknown host AF_INET6 type: %d\n", nlattr
->nla_type
);
2529 static abi_long
host_to_target_data_spec_nlattr(struct nlattr
*nlattr
,
2532 switch (nlattr
->nla_type
) {
2534 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2536 host_to_target_data_inet_nlattr
);
2538 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2540 host_to_target_data_inet6_nlattr
);
2542 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr
->nla_type
);
2548 static abi_long
host_to_target_data_xdp_nlattr(struct nlattr
*nlattr
,
2553 switch (nlattr
->nla_type
) {
2555 case QEMU_IFLA_XDP_ATTACHED
:
2558 case QEMU_IFLA_XDP_PROG_ID
:
2559 u32
= NLA_DATA(nlattr
);
2560 *u32
= tswap32(*u32
);
2563 gemu_log("Unknown host XDP type: %d\n", nlattr
->nla_type
);
2569 static abi_long
host_to_target_data_link_rtattr(struct rtattr
*rtattr
)
2572 struct rtnl_link_stats
*st
;
2573 struct rtnl_link_stats64
*st64
;
2574 struct rtnl_link_ifmap
*map
;
2575 struct linkinfo_context li_context
;
2577 switch (rtattr
->rta_type
) {
2579 case QEMU_IFLA_ADDRESS
:
2580 case QEMU_IFLA_BROADCAST
:
2582 case QEMU_IFLA_IFNAME
:
2583 case QEMU_IFLA_QDISC
:
2586 case QEMU_IFLA_OPERSTATE
:
2587 case QEMU_IFLA_LINKMODE
:
2588 case QEMU_IFLA_CARRIER
:
2589 case QEMU_IFLA_PROTO_DOWN
:
2593 case QEMU_IFLA_LINK
:
2594 case QEMU_IFLA_WEIGHT
:
2595 case QEMU_IFLA_TXQLEN
:
2596 case QEMU_IFLA_CARRIER_CHANGES
:
2597 case QEMU_IFLA_NUM_RX_QUEUES
:
2598 case QEMU_IFLA_NUM_TX_QUEUES
:
2599 case QEMU_IFLA_PROMISCUITY
:
2600 case QEMU_IFLA_EXT_MASK
:
2601 case QEMU_IFLA_LINK_NETNSID
:
2602 case QEMU_IFLA_GROUP
:
2603 case QEMU_IFLA_MASTER
:
2604 case QEMU_IFLA_NUM_VF
:
2605 case QEMU_IFLA_GSO_MAX_SEGS
:
2606 case QEMU_IFLA_GSO_MAX_SIZE
:
2607 case QEMU_IFLA_CARRIER_UP_COUNT
:
2608 case QEMU_IFLA_CARRIER_DOWN_COUNT
:
2609 u32
= RTA_DATA(rtattr
);
2610 *u32
= tswap32(*u32
);
2612 /* struct rtnl_link_stats */
2613 case QEMU_IFLA_STATS
:
2614 st
= RTA_DATA(rtattr
);
2615 st
->rx_packets
= tswap32(st
->rx_packets
);
2616 st
->tx_packets
= tswap32(st
->tx_packets
);
2617 st
->rx_bytes
= tswap32(st
->rx_bytes
);
2618 st
->tx_bytes
= tswap32(st
->tx_bytes
);
2619 st
->rx_errors
= tswap32(st
->rx_errors
);
2620 st
->tx_errors
= tswap32(st
->tx_errors
);
2621 st
->rx_dropped
= tswap32(st
->rx_dropped
);
2622 st
->tx_dropped
= tswap32(st
->tx_dropped
);
2623 st
->multicast
= tswap32(st
->multicast
);
2624 st
->collisions
= tswap32(st
->collisions
);
2626 /* detailed rx_errors: */
2627 st
->rx_length_errors
= tswap32(st
->rx_length_errors
);
2628 st
->rx_over_errors
= tswap32(st
->rx_over_errors
);
2629 st
->rx_crc_errors
= tswap32(st
->rx_crc_errors
);
2630 st
->rx_frame_errors
= tswap32(st
->rx_frame_errors
);
2631 st
->rx_fifo_errors
= tswap32(st
->rx_fifo_errors
);
2632 st
->rx_missed_errors
= tswap32(st
->rx_missed_errors
);
2634 /* detailed tx_errors */
2635 st
->tx_aborted_errors
= tswap32(st
->tx_aborted_errors
);
2636 st
->tx_carrier_errors
= tswap32(st
->tx_carrier_errors
);
2637 st
->tx_fifo_errors
= tswap32(st
->tx_fifo_errors
);
2638 st
->tx_heartbeat_errors
= tswap32(st
->tx_heartbeat_errors
);
2639 st
->tx_window_errors
= tswap32(st
->tx_window_errors
);
2642 st
->rx_compressed
= tswap32(st
->rx_compressed
);
2643 st
->tx_compressed
= tswap32(st
->tx_compressed
);
2645 /* struct rtnl_link_stats64 */
2646 case QEMU_IFLA_STATS64
:
2647 st64
= RTA_DATA(rtattr
);
2648 st64
->rx_packets
= tswap64(st64
->rx_packets
);
2649 st64
->tx_packets
= tswap64(st64
->tx_packets
);
2650 st64
->rx_bytes
= tswap64(st64
->rx_bytes
);
2651 st64
->tx_bytes
= tswap64(st64
->tx_bytes
);
2652 st64
->rx_errors
= tswap64(st64
->rx_errors
);
2653 st64
->tx_errors
= tswap64(st64
->tx_errors
);
2654 st64
->rx_dropped
= tswap64(st64
->rx_dropped
);
2655 st64
->tx_dropped
= tswap64(st64
->tx_dropped
);
2656 st64
->multicast
= tswap64(st64
->multicast
);
2657 st64
->collisions
= tswap64(st64
->collisions
);
2659 /* detailed rx_errors: */
2660 st64
->rx_length_errors
= tswap64(st64
->rx_length_errors
);
2661 st64
->rx_over_errors
= tswap64(st64
->rx_over_errors
);
2662 st64
->rx_crc_errors
= tswap64(st64
->rx_crc_errors
);
2663 st64
->rx_frame_errors
= tswap64(st64
->rx_frame_errors
);
2664 st64
->rx_fifo_errors
= tswap64(st64
->rx_fifo_errors
);
2665 st64
->rx_missed_errors
= tswap64(st64
->rx_missed_errors
);
2667 /* detailed tx_errors */
2668 st64
->tx_aborted_errors
= tswap64(st64
->tx_aborted_errors
);
2669 st64
->tx_carrier_errors
= tswap64(st64
->tx_carrier_errors
);
2670 st64
->tx_fifo_errors
= tswap64(st64
->tx_fifo_errors
);
2671 st64
->tx_heartbeat_errors
= tswap64(st64
->tx_heartbeat_errors
);
2672 st64
->tx_window_errors
= tswap64(st64
->tx_window_errors
);
2675 st64
->rx_compressed
= tswap64(st64
->rx_compressed
);
2676 st64
->tx_compressed
= tswap64(st64
->tx_compressed
);
2678 /* struct rtnl_link_ifmap */
2680 map
= RTA_DATA(rtattr
);
2681 map
->mem_start
= tswap64(map
->mem_start
);
2682 map
->mem_end
= tswap64(map
->mem_end
);
2683 map
->base_addr
= tswap64(map
->base_addr
);
2684 map
->irq
= tswap16(map
->irq
);
2687 case QEMU_IFLA_LINKINFO
:
2688 memset(&li_context
, 0, sizeof(li_context
));
2689 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2691 host_to_target_data_linkinfo_nlattr
);
2692 case QEMU_IFLA_AF_SPEC
:
2693 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2695 host_to_target_data_spec_nlattr
);
2697 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2699 host_to_target_data_xdp_nlattr
);
2701 gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr
->rta_type
);
2707 static abi_long
host_to_target_data_addr_rtattr(struct rtattr
*rtattr
)
2710 struct ifa_cacheinfo
*ci
;
2712 switch (rtattr
->rta_type
) {
2713 /* binary: depends on family type */
2723 u32
= RTA_DATA(rtattr
);
2724 *u32
= tswap32(*u32
);
2726 /* struct ifa_cacheinfo */
2728 ci
= RTA_DATA(rtattr
);
2729 ci
->ifa_prefered
= tswap32(ci
->ifa_prefered
);
2730 ci
->ifa_valid
= tswap32(ci
->ifa_valid
);
2731 ci
->cstamp
= tswap32(ci
->cstamp
);
2732 ci
->tstamp
= tswap32(ci
->tstamp
);
2735 gemu_log("Unknown host IFA type: %d\n", rtattr
->rta_type
);
2741 static abi_long
host_to_target_data_route_rtattr(struct rtattr
*rtattr
)
2744 struct rta_cacheinfo
*ci
;
2746 switch (rtattr
->rta_type
) {
2747 /* binary: depends on family type */
2748 case QEMU_RTA_GATEWAY
:
2750 case QEMU_RTA_PREFSRC
:
2756 case QEMU_RTA_PRIORITY
:
2757 case QEMU_RTA_TABLE
:
2759 u32
= RTA_DATA(rtattr
);
2760 *u32
= tswap32(*u32
);
2762 /* struct rta_cacheinfo */
2763 case QEMU_RTA_CACHEINFO
:
2764 ci
= RTA_DATA(rtattr
);
2765 ci
->rta_clntref
= tswap32(ci
->rta_clntref
);
2766 ci
->rta_lastuse
= tswap32(ci
->rta_lastuse
);
2767 ci
->rta_expires
= tswap32(ci
->rta_expires
);
2768 ci
->rta_error
= tswap32(ci
->rta_error
);
2769 ci
->rta_used
= tswap32(ci
->rta_used
);
2770 #if defined(RTNETLINK_HAVE_PEERINFO)
2771 ci
->rta_id
= tswap32(ci
->rta_id
);
2772 ci
->rta_ts
= tswap32(ci
->rta_ts
);
2773 ci
->rta_tsage
= tswap32(ci
->rta_tsage
);
2777 gemu_log("Unknown host RTA type: %d\n", rtattr
->rta_type
);
2783 static abi_long
host_to_target_link_rtattr(struct rtattr
*rtattr
,
2784 uint32_t rtattr_len
)
2786 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2787 host_to_target_data_link_rtattr
);
2790 static abi_long
host_to_target_addr_rtattr(struct rtattr
*rtattr
,
2791 uint32_t rtattr_len
)
2793 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2794 host_to_target_data_addr_rtattr
);
2797 static abi_long
host_to_target_route_rtattr(struct rtattr
*rtattr
,
2798 uint32_t rtattr_len
)
2800 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2801 host_to_target_data_route_rtattr
);
2804 static abi_long
host_to_target_data_route(struct nlmsghdr
*nlh
)
2807 struct ifinfomsg
*ifi
;
2808 struct ifaddrmsg
*ifa
;
2811 nlmsg_len
= nlh
->nlmsg_len
;
2812 switch (nlh
->nlmsg_type
) {
2816 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2817 ifi
= NLMSG_DATA(nlh
);
2818 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2819 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2820 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2821 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2822 host_to_target_link_rtattr(IFLA_RTA(ifi
),
2823 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifi
)));
2829 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2830 ifa
= NLMSG_DATA(nlh
);
2831 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2832 host_to_target_addr_rtattr(IFA_RTA(ifa
),
2833 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifa
)));
2839 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2840 rtm
= NLMSG_DATA(nlh
);
2841 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2842 host_to_target_route_rtattr(RTM_RTA(rtm
),
2843 nlmsg_len
- NLMSG_LENGTH(sizeof(*rtm
)));
2847 return -TARGET_EINVAL
;
2852 static inline abi_long
host_to_target_nlmsg_route(struct nlmsghdr
*nlh
,
2855 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_route
);
2858 static abi_long
target_to_host_for_each_rtattr(struct rtattr
*rtattr
,
2860 abi_long (*target_to_host_rtattr
)
2865 while (len
>= sizeof(struct rtattr
)) {
2866 if (tswap16(rtattr
->rta_len
) < sizeof(struct rtattr
) ||
2867 tswap16(rtattr
->rta_len
) > len
) {
2870 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
2871 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
2872 ret
= target_to_host_rtattr(rtattr
);
2876 len
-= RTA_ALIGN(rtattr
->rta_len
);
2877 rtattr
= (struct rtattr
*)(((char *)rtattr
) +
2878 RTA_ALIGN(rtattr
->rta_len
));
2883 static abi_long
target_to_host_data_link_rtattr(struct rtattr
*rtattr
)
2885 switch (rtattr
->rta_type
) {
2887 gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr
->rta_type
);
2893 static abi_long
target_to_host_data_addr_rtattr(struct rtattr
*rtattr
)
2895 switch (rtattr
->rta_type
) {
2896 /* binary: depends on family type */
2901 gemu_log("Unknown target IFA type: %d\n", rtattr
->rta_type
);
2907 static abi_long
target_to_host_data_route_rtattr(struct rtattr
*rtattr
)
2910 switch (rtattr
->rta_type
) {
2911 /* binary: depends on family type */
2914 case QEMU_RTA_GATEWAY
:
2917 case QEMU_RTA_PRIORITY
:
2919 u32
= RTA_DATA(rtattr
);
2920 *u32
= tswap32(*u32
);
2923 gemu_log("Unknown target RTA type: %d\n", rtattr
->rta_type
);
2929 static void target_to_host_link_rtattr(struct rtattr
*rtattr
,
2930 uint32_t rtattr_len
)
2932 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2933 target_to_host_data_link_rtattr
);
2936 static void target_to_host_addr_rtattr(struct rtattr
*rtattr
,
2937 uint32_t rtattr_len
)
2939 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2940 target_to_host_data_addr_rtattr
);
2943 static void target_to_host_route_rtattr(struct rtattr
*rtattr
,
2944 uint32_t rtattr_len
)
2946 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2947 target_to_host_data_route_rtattr
);
2950 static abi_long
target_to_host_data_route(struct nlmsghdr
*nlh
)
2952 struct ifinfomsg
*ifi
;
2953 struct ifaddrmsg
*ifa
;
2956 switch (nlh
->nlmsg_type
) {
2961 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2962 ifi
= NLMSG_DATA(nlh
);
2963 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2964 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2965 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2966 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2967 target_to_host_link_rtattr(IFLA_RTA(ifi
), nlh
->nlmsg_len
-
2968 NLMSG_LENGTH(sizeof(*ifi
)));
2974 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2975 ifa
= NLMSG_DATA(nlh
);
2976 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2977 target_to_host_addr_rtattr(IFA_RTA(ifa
), nlh
->nlmsg_len
-
2978 NLMSG_LENGTH(sizeof(*ifa
)));
2985 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2986 rtm
= NLMSG_DATA(nlh
);
2987 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2988 target_to_host_route_rtattr(RTM_RTA(rtm
), nlh
->nlmsg_len
-
2989 NLMSG_LENGTH(sizeof(*rtm
)));
2993 return -TARGET_EOPNOTSUPP
;
2998 static abi_long
target_to_host_nlmsg_route(struct nlmsghdr
*nlh
, size_t len
)
3000 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_route
);
3002 #endif /* CONFIG_RTNETLINK */
3004 static abi_long
host_to_target_data_audit(struct nlmsghdr
*nlh
)
3006 switch (nlh
->nlmsg_type
) {
3008 gemu_log("Unknown host audit message type %d\n",
3010 return -TARGET_EINVAL
;
3015 static inline abi_long
host_to_target_nlmsg_audit(struct nlmsghdr
*nlh
,
3018 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_audit
);
3021 static abi_long
target_to_host_data_audit(struct nlmsghdr
*nlh
)
3023 switch (nlh
->nlmsg_type
) {
3025 case AUDIT_FIRST_USER_MSG
... AUDIT_LAST_USER_MSG
:
3026 case AUDIT_FIRST_USER_MSG2
... AUDIT_LAST_USER_MSG2
:
3029 gemu_log("Unknown target audit message type %d\n",
3031 return -TARGET_EINVAL
;
3037 static abi_long
target_to_host_nlmsg_audit(struct nlmsghdr
*nlh
, size_t len
)
3039 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_audit
);
3042 /* do_setsockopt() Must return target values and target errnos. */
3043 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
3044 abi_ulong optval_addr
, socklen_t optlen
)
3048 struct ip_mreqn
*ip_mreq
;
3049 struct ip_mreq_source
*ip_mreq_source
;
3053 /* TCP options all take an 'int' value. */
3054 if (optlen
< sizeof(uint32_t))
3055 return -TARGET_EINVAL
;
3057 if (get_user_u32(val
, optval_addr
))
3058 return -TARGET_EFAULT
;
3059 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
3066 case IP_ROUTER_ALERT
:
3070 case IP_MTU_DISCOVER
:
3077 case IP_MULTICAST_TTL
:
3078 case IP_MULTICAST_LOOP
:
3080 if (optlen
>= sizeof(uint32_t)) {
3081 if (get_user_u32(val
, optval_addr
))
3082 return -TARGET_EFAULT
;
3083 } else if (optlen
>= 1) {
3084 if (get_user_u8(val
, optval_addr
))
3085 return -TARGET_EFAULT
;
3087 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
3089 case IP_ADD_MEMBERSHIP
:
3090 case IP_DROP_MEMBERSHIP
:
3091 if (optlen
< sizeof (struct target_ip_mreq
) ||
3092 optlen
> sizeof (struct target_ip_mreqn
))
3093 return -TARGET_EINVAL
;
3095 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
3096 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
3097 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
3100 case IP_BLOCK_SOURCE
:
3101 case IP_UNBLOCK_SOURCE
:
3102 case IP_ADD_SOURCE_MEMBERSHIP
:
3103 case IP_DROP_SOURCE_MEMBERSHIP
:
3104 if (optlen
!= sizeof (struct target_ip_mreq_source
))
3105 return -TARGET_EINVAL
;
3107 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
3108 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
3109 unlock_user (ip_mreq_source
, optval_addr
, 0);
3118 case IPV6_MTU_DISCOVER
:
3121 case IPV6_RECVPKTINFO
:
3122 case IPV6_UNICAST_HOPS
:
3123 case IPV6_MULTICAST_HOPS
:
3124 case IPV6_MULTICAST_LOOP
:
3126 case IPV6_RECVHOPLIMIT
:
3127 case IPV6_2292HOPLIMIT
:
3130 if (optlen
< sizeof(uint32_t)) {
3131 return -TARGET_EINVAL
;
3133 if (get_user_u32(val
, optval_addr
)) {
3134 return -TARGET_EFAULT
;
3136 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
3137 &val
, sizeof(val
)));
3141 struct in6_pktinfo pki
;
3143 if (optlen
< sizeof(pki
)) {
3144 return -TARGET_EINVAL
;
3147 if (copy_from_user(&pki
, optval_addr
, sizeof(pki
))) {
3148 return -TARGET_EFAULT
;
3151 pki
.ipi6_ifindex
= tswap32(pki
.ipi6_ifindex
);
3153 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
3154 &pki
, sizeof(pki
)));
3165 struct icmp6_filter icmp6f
;
3167 if (optlen
> sizeof(icmp6f
)) {
3168 optlen
= sizeof(icmp6f
);
3171 if (copy_from_user(&icmp6f
, optval_addr
, optlen
)) {
3172 return -TARGET_EFAULT
;
3175 for (val
= 0; val
< 8; val
++) {
3176 icmp6f
.data
[val
] = tswap32(icmp6f
.data
[val
]);
3179 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
3191 /* those take an u32 value */
3192 if (optlen
< sizeof(uint32_t)) {
3193 return -TARGET_EINVAL
;
3196 if (get_user_u32(val
, optval_addr
)) {
3197 return -TARGET_EFAULT
;
3199 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
3200 &val
, sizeof(val
)));
3207 case TARGET_SOL_SOCKET
:
3209 case TARGET_SO_RCVTIMEO
:
3213 optname
= SO_RCVTIMEO
;
3216 if (optlen
!= sizeof(struct target_timeval
)) {
3217 return -TARGET_EINVAL
;
3220 if (copy_from_user_timeval(&tv
, optval_addr
)) {
3221 return -TARGET_EFAULT
;
3224 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
3228 case TARGET_SO_SNDTIMEO
:
3229 optname
= SO_SNDTIMEO
;
3231 case TARGET_SO_ATTACH_FILTER
:
3233 struct target_sock_fprog
*tfprog
;
3234 struct target_sock_filter
*tfilter
;
3235 struct sock_fprog fprog
;
3236 struct sock_filter
*filter
;
3239 if (optlen
!= sizeof(*tfprog
)) {
3240 return -TARGET_EINVAL
;
3242 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
3243 return -TARGET_EFAULT
;
3245 if (!lock_user_struct(VERIFY_READ
, tfilter
,
3246 tswapal(tfprog
->filter
), 0)) {
3247 unlock_user_struct(tfprog
, optval_addr
, 1);
3248 return -TARGET_EFAULT
;
3251 fprog
.len
= tswap16(tfprog
->len
);
3252 filter
= g_try_new(struct sock_filter
, fprog
.len
);
3253 if (filter
== NULL
) {
3254 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
3255 unlock_user_struct(tfprog
, optval_addr
, 1);
3256 return -TARGET_ENOMEM
;
3258 for (i
= 0; i
< fprog
.len
; i
++) {
3259 filter
[i
].code
= tswap16(tfilter
[i
].code
);
3260 filter
[i
].jt
= tfilter
[i
].jt
;
3261 filter
[i
].jf
= tfilter
[i
].jf
;
3262 filter
[i
].k
= tswap32(tfilter
[i
].k
);
3264 fprog
.filter
= filter
;
3266 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
3267 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
3270 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
3271 unlock_user_struct(tfprog
, optval_addr
, 1);
3274 case TARGET_SO_BINDTODEVICE
:
3276 char *dev_ifname
, *addr_ifname
;
3278 if (optlen
> IFNAMSIZ
- 1) {
3279 optlen
= IFNAMSIZ
- 1;
3281 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
3283 return -TARGET_EFAULT
;
3285 optname
= SO_BINDTODEVICE
;
3286 addr_ifname
= alloca(IFNAMSIZ
);
3287 memcpy(addr_ifname
, dev_ifname
, optlen
);
3288 addr_ifname
[optlen
] = 0;
3289 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
3290 addr_ifname
, optlen
));
3291 unlock_user (dev_ifname
, optval_addr
, 0);
3294 /* Options with 'int' argument. */
3295 case TARGET_SO_DEBUG
:
3298 case TARGET_SO_REUSEADDR
:
3299 optname
= SO_REUSEADDR
;
3301 case TARGET_SO_TYPE
:
3304 case TARGET_SO_ERROR
:
3307 case TARGET_SO_DONTROUTE
:
3308 optname
= SO_DONTROUTE
;
3310 case TARGET_SO_BROADCAST
:
3311 optname
= SO_BROADCAST
;
3313 case TARGET_SO_SNDBUF
:
3314 optname
= SO_SNDBUF
;
3316 case TARGET_SO_SNDBUFFORCE
:
3317 optname
= SO_SNDBUFFORCE
;
3319 case TARGET_SO_RCVBUF
:
3320 optname
= SO_RCVBUF
;
3322 case TARGET_SO_RCVBUFFORCE
:
3323 optname
= SO_RCVBUFFORCE
;
3325 case TARGET_SO_KEEPALIVE
:
3326 optname
= SO_KEEPALIVE
;
3328 case TARGET_SO_OOBINLINE
:
3329 optname
= SO_OOBINLINE
;
3331 case TARGET_SO_NO_CHECK
:
3332 optname
= SO_NO_CHECK
;
3334 case TARGET_SO_PRIORITY
:
3335 optname
= SO_PRIORITY
;
3338 case TARGET_SO_BSDCOMPAT
:
3339 optname
= SO_BSDCOMPAT
;
3342 case TARGET_SO_PASSCRED
:
3343 optname
= SO_PASSCRED
;
3345 case TARGET_SO_PASSSEC
:
3346 optname
= SO_PASSSEC
;
3348 case TARGET_SO_TIMESTAMP
:
3349 optname
= SO_TIMESTAMP
;
3351 case TARGET_SO_RCVLOWAT
:
3352 optname
= SO_RCVLOWAT
;
3357 if (optlen
< sizeof(uint32_t))
3358 return -TARGET_EINVAL
;
3360 if (get_user_u32(val
, optval_addr
))
3361 return -TARGET_EFAULT
;
3362 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
3366 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
3367 ret
= -TARGET_ENOPROTOOPT
;
3372 /* do_getsockopt() Must return target values and target errnos. */
3373 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
3374 abi_ulong optval_addr
, abi_ulong optlen
)
3381 case TARGET_SOL_SOCKET
:
3384 /* These don't just return a single integer */
3385 case TARGET_SO_LINGER
:
3386 case TARGET_SO_RCVTIMEO
:
3387 case TARGET_SO_SNDTIMEO
:
3388 case TARGET_SO_PEERNAME
:
3390 case TARGET_SO_PEERCRED
: {
3393 struct target_ucred
*tcr
;
3395 if (get_user_u32(len
, optlen
)) {
3396 return -TARGET_EFAULT
;
3399 return -TARGET_EINVAL
;
3403 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
3411 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
3412 return -TARGET_EFAULT
;
3414 __put_user(cr
.pid
, &tcr
->pid
);
3415 __put_user(cr
.uid
, &tcr
->uid
);
3416 __put_user(cr
.gid
, &tcr
->gid
);
3417 unlock_user_struct(tcr
, optval_addr
, 1);
3418 if (put_user_u32(len
, optlen
)) {
3419 return -TARGET_EFAULT
;
3423 /* Options with 'int' argument. */
3424 case TARGET_SO_DEBUG
:
3427 case TARGET_SO_REUSEADDR
:
3428 optname
= SO_REUSEADDR
;
3430 case TARGET_SO_TYPE
:
3433 case TARGET_SO_ERROR
:
3436 case TARGET_SO_DONTROUTE
:
3437 optname
= SO_DONTROUTE
;
3439 case TARGET_SO_BROADCAST
:
3440 optname
= SO_BROADCAST
;
3442 case TARGET_SO_SNDBUF
:
3443 optname
= SO_SNDBUF
;
3445 case TARGET_SO_RCVBUF
:
3446 optname
= SO_RCVBUF
;
3448 case TARGET_SO_KEEPALIVE
:
3449 optname
= SO_KEEPALIVE
;
3451 case TARGET_SO_OOBINLINE
:
3452 optname
= SO_OOBINLINE
;
3454 case TARGET_SO_NO_CHECK
:
3455 optname
= SO_NO_CHECK
;
3457 case TARGET_SO_PRIORITY
:
3458 optname
= SO_PRIORITY
;
3461 case TARGET_SO_BSDCOMPAT
:
3462 optname
= SO_BSDCOMPAT
;
3465 case TARGET_SO_PASSCRED
:
3466 optname
= SO_PASSCRED
;
3468 case TARGET_SO_TIMESTAMP
:
3469 optname
= SO_TIMESTAMP
;
3471 case TARGET_SO_RCVLOWAT
:
3472 optname
= SO_RCVLOWAT
;
3474 case TARGET_SO_ACCEPTCONN
:
3475 optname
= SO_ACCEPTCONN
;
3482 /* TCP options all take an 'int' value. */
3484 if (get_user_u32(len
, optlen
))
3485 return -TARGET_EFAULT
;
3487 return -TARGET_EINVAL
;
3489 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3492 if (optname
== SO_TYPE
) {
3493 val
= host_to_target_sock_type(val
);
3498 if (put_user_u32(val
, optval_addr
))
3499 return -TARGET_EFAULT
;
3501 if (put_user_u8(val
, optval_addr
))
3502 return -TARGET_EFAULT
;
3504 if (put_user_u32(len
, optlen
))
3505 return -TARGET_EFAULT
;
3512 case IP_ROUTER_ALERT
:
3516 case IP_MTU_DISCOVER
:
3522 case IP_MULTICAST_TTL
:
3523 case IP_MULTICAST_LOOP
:
3524 if (get_user_u32(len
, optlen
))
3525 return -TARGET_EFAULT
;
3527 return -TARGET_EINVAL
;
3529 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3532 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
3534 if (put_user_u32(len
, optlen
)
3535 || put_user_u8(val
, optval_addr
))
3536 return -TARGET_EFAULT
;
3538 if (len
> sizeof(int))
3540 if (put_user_u32(len
, optlen
)
3541 || put_user_u32(val
, optval_addr
))
3542 return -TARGET_EFAULT
;
3546 ret
= -TARGET_ENOPROTOOPT
;
3552 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
3554 ret
= -TARGET_EOPNOTSUPP
;
3560 /* Convert target low/high pair representing file offset into the host
3561 * low/high pair. This function doesn't handle offsets bigger than 64 bits
3562 * as the kernel doesn't handle them either.
3564 static void target_to_host_low_high(abi_ulong tlow
,
3566 unsigned long *hlow
,
3567 unsigned long *hhigh
)
3569 uint64_t off
= tlow
|
3570 ((unsigned long long)thigh
<< TARGET_LONG_BITS
/ 2) <<
3571 TARGET_LONG_BITS
/ 2;
3574 *hhigh
= (off
>> HOST_LONG_BITS
/ 2) >> HOST_LONG_BITS
/ 2;
3577 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
3578 abi_ulong count
, int copy
)
3580 struct target_iovec
*target_vec
;
3582 abi_ulong total_len
, max_len
;
3585 bool bad_address
= false;
3591 if (count
> IOV_MAX
) {
3596 vec
= g_try_new0(struct iovec
, count
);
3602 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3603 count
* sizeof(struct target_iovec
), 1);
3604 if (target_vec
== NULL
) {
3609 /* ??? If host page size > target page size, this will result in a
3610 value larger than what we can actually support. */
3611 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
3614 for (i
= 0; i
< count
; i
++) {
3615 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3616 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3621 } else if (len
== 0) {
3622 /* Zero length pointer is ignored. */
3623 vec
[i
].iov_base
= 0;
3625 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
3626 /* If the first buffer pointer is bad, this is a fault. But
3627 * subsequent bad buffers will result in a partial write; this
3628 * is realized by filling the vector with null pointers and
3630 if (!vec
[i
].iov_base
) {
3641 if (len
> max_len
- total_len
) {
3642 len
= max_len
- total_len
;
3645 vec
[i
].iov_len
= len
;
3649 unlock_user(target_vec
, target_addr
, 0);
3654 if (tswapal(target_vec
[i
].iov_len
) > 0) {
3655 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
3658 unlock_user(target_vec
, target_addr
, 0);
3665 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
3666 abi_ulong count
, int copy
)
3668 struct target_iovec
*target_vec
;
3671 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3672 count
* sizeof(struct target_iovec
), 1);
3674 for (i
= 0; i
< count
; i
++) {
3675 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3676 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3680 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
3682 unlock_user(target_vec
, target_addr
, 0);
3688 static inline int target_to_host_sock_type(int *type
)
3691 int target_type
= *type
;
3693 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
3694 case TARGET_SOCK_DGRAM
:
3695 host_type
= SOCK_DGRAM
;
3697 case TARGET_SOCK_STREAM
:
3698 host_type
= SOCK_STREAM
;
3701 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
3704 if (target_type
& TARGET_SOCK_CLOEXEC
) {
3705 #if defined(SOCK_CLOEXEC)
3706 host_type
|= SOCK_CLOEXEC
;
3708 return -TARGET_EINVAL
;
3711 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3712 #if defined(SOCK_NONBLOCK)
3713 host_type
|= SOCK_NONBLOCK
;
3714 #elif !defined(O_NONBLOCK)
3715 return -TARGET_EINVAL
;
3722 /* Try to emulate socket type flags after socket creation. */
3723 static int sock_flags_fixup(int fd
, int target_type
)
3725 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3726 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3727 int flags
= fcntl(fd
, F_GETFL
);
3728 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
3730 return -TARGET_EINVAL
;
3737 static abi_long
packet_target_to_host_sockaddr(void *host_addr
,
3738 abi_ulong target_addr
,
3741 struct sockaddr
*addr
= host_addr
;
3742 struct target_sockaddr
*target_saddr
;
3744 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
3745 if (!target_saddr
) {
3746 return -TARGET_EFAULT
;
3749 memcpy(addr
, target_saddr
, len
);
3750 addr
->sa_family
= tswap16(target_saddr
->sa_family
);
3751 /* spkt_protocol is big-endian */
3753 unlock_user(target_saddr
, target_addr
, 0);
3757 static TargetFdTrans target_packet_trans
= {
3758 .target_to_host_addr
= packet_target_to_host_sockaddr
,
#ifdef CONFIG_RTNETLINK
/* fd-translation hooks for NETLINK_ROUTE sockets: swap message contents
 * in each direction, returning the buffer length on success. */
static abi_long netlink_route_target_to_host(void *buf, size_t len)
{
    abi_long ret;

    ret = target_to_host_nlmsg_route(buf, len);
    if (ret < 0) {
        return ret;
    }

    return len;
}

static abi_long netlink_route_host_to_target(void *buf, size_t len)
{
    abi_long ret;

    ret = host_to_target_nlmsg_route(buf, len);
    if (ret < 0) {
        return ret;
    }

    return len;
}

static TargetFdTrans target_netlink_route_trans = {
    .target_to_host_data = netlink_route_target_to_host,
    .host_to_target_data = netlink_route_host_to_target,
};
#endif /* CONFIG_RTNETLINK */
3792 static abi_long
netlink_audit_target_to_host(void *buf
, size_t len
)
3796 ret
= target_to_host_nlmsg_audit(buf
, len
);
3804 static abi_long
netlink_audit_host_to_target(void *buf
, size_t len
)
3808 ret
= host_to_target_nlmsg_audit(buf
, len
);
3816 static TargetFdTrans target_netlink_audit_trans
= {
3817 .target_to_host_data
= netlink_audit_target_to_host
,
3818 .host_to_target_data
= netlink_audit_host_to_target
,
3821 /* do_socket() Must return target values and target errnos. */
3822 static abi_long
do_socket(int domain
, int type
, int protocol
)
3824 int target_type
= type
;
3827 ret
= target_to_host_sock_type(&type
);
3832 if (domain
== PF_NETLINK
&& !(
3833 #ifdef CONFIG_RTNETLINK
3834 protocol
== NETLINK_ROUTE
||
3836 protocol
== NETLINK_KOBJECT_UEVENT
||
3837 protocol
== NETLINK_AUDIT
)) {
3838 return -EPFNOSUPPORT
;
3841 if (domain
== AF_PACKET
||
3842 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
3843 protocol
= tswap16(protocol
);
3846 ret
= get_errno(socket(domain
, type
, protocol
));
3848 ret
= sock_flags_fixup(ret
, target_type
);
3849 if (type
== SOCK_PACKET
) {
3850 /* Manage an obsolete case :
3851 * if socket type is SOCK_PACKET, bind by name
3853 fd_trans_register(ret
, &target_packet_trans
);
3854 } else if (domain
== PF_NETLINK
) {
3856 #ifdef CONFIG_RTNETLINK
3858 fd_trans_register(ret
, &target_netlink_route_trans
);
3861 case NETLINK_KOBJECT_UEVENT
:
3862 /* nothing to do: messages are strings */
3865 fd_trans_register(ret
, &target_netlink_audit_trans
);
3868 g_assert_not_reached();
3875 /* do_bind() Must return target values and target errnos. */
3876 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
3882 if ((int)addrlen
< 0) {
3883 return -TARGET_EINVAL
;
3886 addr
= alloca(addrlen
+1);
3888 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3892 return get_errno(bind(sockfd
, addr
, addrlen
));
3895 /* do_connect() Must return target values and target errnos. */
3896 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
3902 if ((int)addrlen
< 0) {
3903 return -TARGET_EINVAL
;
3906 addr
= alloca(addrlen
+1);
3908 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3912 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
3915 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3916 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
3917 int flags
, int send
)
3923 abi_ulong target_vec
;
3925 if (msgp
->msg_name
) {
3926 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
3927 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
3928 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
3929 tswapal(msgp
->msg_name
),
3931 if (ret
== -TARGET_EFAULT
) {
3932 /* For connected sockets msg_name and msg_namelen must
3933 * be ignored, so returning EFAULT immediately is wrong.
3934 * Instead, pass a bad msg_name to the host kernel, and
3935 * let it decide whether to return EFAULT or not.
3937 msg
.msg_name
= (void *)-1;
3942 msg
.msg_name
= NULL
;
3943 msg
.msg_namelen
= 0;
3945 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3946 msg
.msg_control
= alloca(msg
.msg_controllen
);
3947 memset(msg
.msg_control
, 0, msg
.msg_controllen
);
3949 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3951 count
= tswapal(msgp
->msg_iovlen
);
3952 target_vec
= tswapal(msgp
->msg_iov
);
3954 if (count
> IOV_MAX
) {
3955 /* sendrcvmsg returns a different errno for this condition than
3956 * readv/writev, so we must catch it here before lock_iovec() does.
3958 ret
= -TARGET_EMSGSIZE
;
3962 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3963 target_vec
, count
, send
);
3965 ret
= -host_to_target_errno(errno
);
3968 msg
.msg_iovlen
= count
;
3972 if (fd_trans_target_to_host_data(fd
)) {
3975 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3976 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3977 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3978 msg
.msg_iov
->iov_len
);
3980 msg
.msg_iov
->iov_base
= host_msg
;
3981 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3985 ret
= target_to_host_cmsg(&msg
, msgp
);
3987 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3991 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3992 if (!is_error(ret
)) {
3994 if (fd_trans_host_to_target_data(fd
)) {
3995 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3996 MIN(msg
.msg_iov
->iov_len
, len
));
3998 ret
= host_to_target_cmsg(msgp
, &msg
);
4000 if (!is_error(ret
)) {
4001 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
4002 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
4003 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
4004 msg
.msg_name
, msg
.msg_namelen
);
4016 unlock_iovec(vec
, target_vec
, count
, !send
);
4021 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
4022 int flags
, int send
)
4025 struct target_msghdr
*msgp
;
4027 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
4031 return -TARGET_EFAULT
;
4033 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
4034 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
4038 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
4039 * so it might not have this *mmsg-specific flag either.
4041 #ifndef MSG_WAITFORONE
4042 #define MSG_WAITFORONE 0x10000
4045 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
4046 unsigned int vlen
, unsigned int flags
,
4049 struct target_mmsghdr
*mmsgp
;
4053 if (vlen
> UIO_MAXIOV
) {
4057 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
4059 return -TARGET_EFAULT
;
4062 for (i
= 0; i
< vlen
; i
++) {
4063 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
4064 if (is_error(ret
)) {
4067 mmsgp
[i
].msg_len
= tswap32(ret
);
4068 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
4069 if (flags
& MSG_WAITFORONE
) {
4070 flags
|= MSG_DONTWAIT
;
4074 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
4076 /* Return number of datagrams sent if we sent any at all;
4077 * otherwise return the error.
4085 /* do_accept4() Must return target values and target errnos. */
4086 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
4087 abi_ulong target_addrlen_addr
, int flags
)
4094 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
4096 if (target_addr
== 0) {
4097 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
4100 /* linux returns EINVAL if addrlen pointer is invalid */
4101 if (get_user_u32(addrlen
, target_addrlen_addr
))
4102 return -TARGET_EINVAL
;
4104 if ((int)addrlen
< 0) {
4105 return -TARGET_EINVAL
;
4108 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
4109 return -TARGET_EINVAL
;
4111 addr
= alloca(addrlen
);
4113 ret
= get_errno(safe_accept4(fd
, addr
, &addrlen
, host_flags
));
4114 if (!is_error(ret
)) {
4115 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
4116 if (put_user_u32(addrlen
, target_addrlen_addr
))
4117 ret
= -TARGET_EFAULT
;
4122 /* do_getpeername() Must return target values and target errnos. */
4123 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
4124 abi_ulong target_addrlen_addr
)
4130 if (get_user_u32(addrlen
, target_addrlen_addr
))
4131 return -TARGET_EFAULT
;
4133 if ((int)addrlen
< 0) {
4134 return -TARGET_EINVAL
;
4137 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
4138 return -TARGET_EFAULT
;
4140 addr
= alloca(addrlen
);
4142 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
4143 if (!is_error(ret
)) {
4144 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
4145 if (put_user_u32(addrlen
, target_addrlen_addr
))
4146 ret
= -TARGET_EFAULT
;
4151 /* do_getsockname() Must return target values and target errnos. */
4152 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
4153 abi_ulong target_addrlen_addr
)
4159 if (get_user_u32(addrlen
, target_addrlen_addr
))
4160 return -TARGET_EFAULT
;
4162 if ((int)addrlen
< 0) {
4163 return -TARGET_EINVAL
;
4166 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
4167 return -TARGET_EFAULT
;
4169 addr
= alloca(addrlen
);
4171 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
4172 if (!is_error(ret
)) {
4173 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
4174 if (put_user_u32(addrlen
, target_addrlen_addr
))
4175 ret
= -TARGET_EFAULT
;
4180 /* do_socketpair() Must return target values and target errnos. */
4181 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
4182 abi_ulong target_tab_addr
)
4187 target_to_host_sock_type(&type
);
4189 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
4190 if (!is_error(ret
)) {
4191 if (put_user_s32(tab
[0], target_tab_addr
)
4192 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
4193 ret
= -TARGET_EFAULT
;
4198 /* do_sendto() Must return target values and target errnos. */
4199 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
4200 abi_ulong target_addr
, socklen_t addrlen
)
4204 void *copy_msg
= NULL
;
4207 if ((int)addrlen
< 0) {
4208 return -TARGET_EINVAL
;
4211 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
4213 return -TARGET_EFAULT
;
4214 if (fd_trans_target_to_host_data(fd
)) {
4215 copy_msg
= host_msg
;
4216 host_msg
= g_malloc(len
);
4217 memcpy(host_msg
, copy_msg
, len
);
4218 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
4224 addr
= alloca(addrlen
+1);
4225 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
4229 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
4231 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
4236 host_msg
= copy_msg
;
4238 unlock_user(host_msg
, msg
, 0);
4242 /* do_recvfrom() Must return target values and target errnos. */
4243 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
4244 abi_ulong target_addr
,
4245 abi_ulong target_addrlen
)
4252 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
4254 return -TARGET_EFAULT
;
4256 if (get_user_u32(addrlen
, target_addrlen
)) {
4257 ret
= -TARGET_EFAULT
;
4260 if ((int)addrlen
< 0) {
4261 ret
= -TARGET_EINVAL
;
4264 addr
= alloca(addrlen
);
4265 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
4268 addr
= NULL
; /* To keep compiler quiet. */
4269 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
4271 if (!is_error(ret
)) {
4272 if (fd_trans_host_to_target_data(fd
)) {
4274 trans
= fd_trans_host_to_target_data(fd
)(host_msg
, MIN(ret
, len
));
4275 if (is_error(trans
)) {
4281 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
4282 if (put_user_u32(addrlen
, target_addrlen
)) {
4283 ret
= -TARGET_EFAULT
;
4287 unlock_user(host_msg
, msg
, len
);
4290 unlock_user(host_msg
, msg
, 0);
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
4388 #define N_SHM_REGIONS 32
4390 static struct shm_region
{
4394 } shm_regions
[N_SHM_REGIONS
];
4396 #ifndef TARGET_SEMID64_DS
4397 /* asm-generic version of this struct */
4398 struct target_semid64_ds
4400 struct target_ipc_perm sem_perm
;
4401 abi_ulong sem_otime
;
4402 #if TARGET_ABI_BITS == 32
4403 abi_ulong __unused1
;
4405 abi_ulong sem_ctime
;
4406 #if TARGET_ABI_BITS == 32
4407 abi_ulong __unused2
;
4409 abi_ulong sem_nsems
;
4410 abi_ulong __unused3
;
4411 abi_ulong __unused4
;
4415 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
4416 abi_ulong target_addr
)
4418 struct target_ipc_perm
*target_ip
;
4419 struct target_semid64_ds
*target_sd
;
4421 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4422 return -TARGET_EFAULT
;
4423 target_ip
= &(target_sd
->sem_perm
);
4424 host_ip
->__key
= tswap32(target_ip
->__key
);
4425 host_ip
->uid
= tswap32(target_ip
->uid
);
4426 host_ip
->gid
= tswap32(target_ip
->gid
);
4427 host_ip
->cuid
= tswap32(target_ip
->cuid
);
4428 host_ip
->cgid
= tswap32(target_ip
->cgid
);
4429 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4430 host_ip
->mode
= tswap32(target_ip
->mode
);
4432 host_ip
->mode
= tswap16(target_ip
->mode
);
4434 #if defined(TARGET_PPC)
4435 host_ip
->__seq
= tswap32(target_ip
->__seq
);
4437 host_ip
->__seq
= tswap16(target_ip
->__seq
);
4439 unlock_user_struct(target_sd
, target_addr
, 0);
4443 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
4444 struct ipc_perm
*host_ip
)
4446 struct target_ipc_perm
*target_ip
;
4447 struct target_semid64_ds
*target_sd
;
4449 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4450 return -TARGET_EFAULT
;
4451 target_ip
= &(target_sd
->sem_perm
);
4452 target_ip
->__key
= tswap32(host_ip
->__key
);
4453 target_ip
->uid
= tswap32(host_ip
->uid
);
4454 target_ip
->gid
= tswap32(host_ip
->gid
);
4455 target_ip
->cuid
= tswap32(host_ip
->cuid
);
4456 target_ip
->cgid
= tswap32(host_ip
->cgid
);
4457 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4458 target_ip
->mode
= tswap32(host_ip
->mode
);
4460 target_ip
->mode
= tswap16(host_ip
->mode
);
4462 #if defined(TARGET_PPC)
4463 target_ip
->__seq
= tswap32(host_ip
->__seq
);
4465 target_ip
->__seq
= tswap16(host_ip
->__seq
);
4467 unlock_user_struct(target_sd
, target_addr
, 1);
4471 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
4472 abi_ulong target_addr
)
4474 struct target_semid64_ds
*target_sd
;
4476 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4477 return -TARGET_EFAULT
;
4478 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
4479 return -TARGET_EFAULT
;
4480 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
4481 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
4482 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
4483 unlock_user_struct(target_sd
, target_addr
, 0);
4487 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
4488 struct semid_ds
*host_sd
)
4490 struct target_semid64_ds
*target_sd
;
4492 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4493 return -TARGET_EFAULT
;
4494 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
4495 return -TARGET_EFAULT
;
4496 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
4497 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
4498 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
4499 unlock_user_struct(target_sd
, target_addr
, 1);
4503 struct target_seminfo
{
4516 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
4517 struct seminfo
*host_seminfo
)
4519 struct target_seminfo
*target_seminfo
;
4520 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
4521 return -TARGET_EFAULT
;
4522 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
4523 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
4524 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
4525 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
4526 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
4527 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
4528 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
4529 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
4530 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
4531 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
4532 unlock_user_struct(target_seminfo
, target_addr
, 1);
4538 struct semid_ds
*buf
;
4539 unsigned short *array
;
4540 struct seminfo
*__buf
;
4543 union target_semun
{
4550 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
4551 abi_ulong target_addr
)
4554 unsigned short *array
;
4556 struct semid_ds semid_ds
;
4559 semun
.buf
= &semid_ds
;
4561 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4563 return get_errno(ret
);
4565 nsems
= semid_ds
.sem_nsems
;
4567 *host_array
= g_try_new(unsigned short, nsems
);
4569 return -TARGET_ENOMEM
;
4571 array
= lock_user(VERIFY_READ
, target_addr
,
4572 nsems
*sizeof(unsigned short), 1);
4574 g_free(*host_array
);
4575 return -TARGET_EFAULT
;
4578 for(i
=0; i
<nsems
; i
++) {
4579 __get_user((*host_array
)[i
], &array
[i
]);
4581 unlock_user(array
, target_addr
, 0);
4586 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
4587 unsigned short **host_array
)
4590 unsigned short *array
;
4592 struct semid_ds semid_ds
;
4595 semun
.buf
= &semid_ds
;
4597 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4599 return get_errno(ret
);
4601 nsems
= semid_ds
.sem_nsems
;
4603 array
= lock_user(VERIFY_WRITE
, target_addr
,
4604 nsems
*sizeof(unsigned short), 0);
4606 return -TARGET_EFAULT
;
4608 for(i
=0; i
<nsems
; i
++) {
4609 __put_user((*host_array
)[i
], &array
[i
]);
4611 g_free(*host_array
);
4612 unlock_user(array
, target_addr
, 1);
4617 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
4618 abi_ulong target_arg
)
4620 union target_semun target_su
= { .buf
= target_arg
};
4622 struct semid_ds dsarg
;
4623 unsigned short *array
= NULL
;
4624 struct seminfo seminfo
;
4625 abi_long ret
= -TARGET_EINVAL
;
4632 /* In 64 bit cross-endian situations, we will erroneously pick up
4633 * the wrong half of the union for the "val" element. To rectify
4634 * this, the entire 8-byte structure is byteswapped, followed by
4635 * a swap of the 4 byte val field. In other cases, the data is
4636 * already in proper host byte order. */
4637 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
4638 target_su
.buf
= tswapal(target_su
.buf
);
4639 arg
.val
= tswap32(target_su
.val
);
4641 arg
.val
= target_su
.val
;
4643 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4647 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
4651 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4652 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
4659 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
4663 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4664 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
4670 arg
.__buf
= &seminfo
;
4671 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4672 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
4680 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
4687 struct target_sembuf
{
4688 unsigned short sem_num
;
4693 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
4694 abi_ulong target_addr
,
4697 struct target_sembuf
*target_sembuf
;
4700 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
4701 nsops
*sizeof(struct target_sembuf
), 1);
4703 return -TARGET_EFAULT
;
4705 for(i
=0; i
<nsops
; i
++) {
4706 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
4707 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
4708 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
4711 unlock_user(target_sembuf
, target_addr
, 0);
4716 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
4718 struct sembuf sops
[nsops
];
4720 if (target_to_host_sembuf(sops
, ptr
, nsops
))
4721 return -TARGET_EFAULT
;
4723 return get_errno(safe_semtimedop(semid
, sops
, nsops
, NULL
));
4726 struct target_msqid_ds
4728 struct target_ipc_perm msg_perm
;
4729 abi_ulong msg_stime
;
4730 #if TARGET_ABI_BITS == 32
4731 abi_ulong __unused1
;
4733 abi_ulong msg_rtime
;
4734 #if TARGET_ABI_BITS == 32
4735 abi_ulong __unused2
;
4737 abi_ulong msg_ctime
;
4738 #if TARGET_ABI_BITS == 32
4739 abi_ulong __unused3
;
4741 abi_ulong __msg_cbytes
;
4743 abi_ulong msg_qbytes
;
4744 abi_ulong msg_lspid
;
4745 abi_ulong msg_lrpid
;
4746 abi_ulong __unused4
;
4747 abi_ulong __unused5
;
4750 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
4751 abi_ulong target_addr
)
4753 struct target_msqid_ds
*target_md
;
4755 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
4756 return -TARGET_EFAULT
;
4757 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
4758 return -TARGET_EFAULT
;
4759 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
4760 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
4761 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
4762 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
4763 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
4764 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
4765 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
4766 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
4767 unlock_user_struct(target_md
, target_addr
, 0);
4771 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
4772 struct msqid_ds
*host_md
)
4774 struct target_msqid_ds
*target_md
;
4776 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
4777 return -TARGET_EFAULT
;
4778 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
4779 return -TARGET_EFAULT
;
4780 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
4781 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
4782 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
4783 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
4784 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
4785 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
4786 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
4787 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
4788 unlock_user_struct(target_md
, target_addr
, 1);
4792 struct target_msginfo
{
4800 unsigned short int msgseg
;
4803 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
4804 struct msginfo
*host_msginfo
)
4806 struct target_msginfo
*target_msginfo
;
4807 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
4808 return -TARGET_EFAULT
;
4809 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
4810 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
4811 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
4812 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
4813 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
4814 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
4815 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
4816 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
4817 unlock_user_struct(target_msginfo
, target_addr
, 1);
4821 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
4823 struct msqid_ds dsarg
;
4824 struct msginfo msginfo
;
4825 abi_long ret
= -TARGET_EINVAL
;
4833 if (target_to_host_msqid_ds(&dsarg
,ptr
))
4834 return -TARGET_EFAULT
;
4835 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
4836 if (host_to_target_msqid_ds(ptr
,&dsarg
))
4837 return -TARGET_EFAULT
;
4840 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
4844 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
4845 if (host_to_target_msginfo(ptr
, &msginfo
))
4846 return -TARGET_EFAULT
;
4853 struct target_msgbuf
{
4858 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
4859 ssize_t msgsz
, int msgflg
)
4861 struct target_msgbuf
*target_mb
;
4862 struct msgbuf
*host_mb
;
4866 return -TARGET_EINVAL
;
4869 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
4870 return -TARGET_EFAULT
;
4871 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4873 unlock_user_struct(target_mb
, msgp
, 0);
4874 return -TARGET_ENOMEM
;
4876 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
4877 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
4878 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
4880 unlock_user_struct(target_mb
, msgp
, 0);
4885 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
4886 ssize_t msgsz
, abi_long msgtyp
,
4889 struct target_msgbuf
*target_mb
;
4891 struct msgbuf
*host_mb
;
4895 return -TARGET_EINVAL
;
4898 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
4899 return -TARGET_EFAULT
;
4901 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4903 ret
= -TARGET_ENOMEM
;
4906 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
4909 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
4910 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
4911 if (!target_mtext
) {
4912 ret
= -TARGET_EFAULT
;
4915 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
4916 unlock_user(target_mtext
, target_mtext_addr
, ret
);
4919 target_mb
->mtype
= tswapal(host_mb
->mtype
);
4923 unlock_user_struct(target_mb
, msgp
, 1);
4928 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
4929 abi_ulong target_addr
)
4931 struct target_shmid_ds
*target_sd
;
4933 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4934 return -TARGET_EFAULT
;
4935 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
4936 return -TARGET_EFAULT
;
4937 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4938 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4939 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4940 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4941 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4942 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4943 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4944 unlock_user_struct(target_sd
, target_addr
, 0);
4948 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
4949 struct shmid_ds
*host_sd
)
4951 struct target_shmid_ds
*target_sd
;
4953 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4954 return -TARGET_EFAULT
;
4955 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
4956 return -TARGET_EFAULT
;
4957 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4958 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4959 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4960 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4961 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4962 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4963 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4964 unlock_user_struct(target_sd
, target_addr
, 1);
4968 struct target_shminfo
{
4976 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
4977 struct shminfo
*host_shminfo
)
4979 struct target_shminfo
*target_shminfo
;
4980 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
4981 return -TARGET_EFAULT
;
4982 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
4983 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
4984 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
4985 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
4986 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
4987 unlock_user_struct(target_shminfo
, target_addr
, 1);
4991 struct target_shm_info
{
4996 abi_ulong swap_attempts
;
4997 abi_ulong swap_successes
;
5000 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
5001 struct shm_info
*host_shm_info
)
5003 struct target_shm_info
*target_shm_info
;
5004 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
5005 return -TARGET_EFAULT
;
5006 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
5007 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
5008 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
5009 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
5010 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
5011 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
5012 unlock_user_struct(target_shm_info
, target_addr
, 1);
5016 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
5018 struct shmid_ds dsarg
;
5019 struct shminfo shminfo
;
5020 struct shm_info shm_info
;
5021 abi_long ret
= -TARGET_EINVAL
;
5029 if (target_to_host_shmid_ds(&dsarg
, buf
))
5030 return -TARGET_EFAULT
;
5031 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
5032 if (host_to_target_shmid_ds(buf
, &dsarg
))
5033 return -TARGET_EFAULT
;
5036 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
5037 if (host_to_target_shminfo(buf
, &shminfo
))
5038 return -TARGET_EFAULT
;
5041 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
5042 if (host_to_target_shm_info(buf
, &shm_info
))
5043 return -TARGET_EFAULT
;
5048 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
5055 #ifndef TARGET_FORCE_SHMLBA
5056 /* For most architectures, SHMLBA is the same as the page size;
5057 * some architectures have larger values, in which case they should
5058 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
5059 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
5060 * and defining its own value for SHMLBA.
5062 * The kernel also permits SHMLBA to be set by the architecture to a
5063 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
5064 * this means that addresses are rounded to the large size if
5065 * SHM_RND is set but addresses not aligned to that size are not rejected
5066 * as long as they are at least page-aligned. Since the only architecture
5067 * which uses this is ia64 this code doesn't provide for that oddity.
5069 static inline abi_ulong
target_shmlba(CPUArchState
*cpu_env
)
5071 return TARGET_PAGE_SIZE
;
5075 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
5076 int shmid
, abi_ulong shmaddr
, int shmflg
)
5080 struct shmid_ds shm_info
;
5084 /* find out the length of the shared memory segment */
5085 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
5086 if (is_error(ret
)) {
5087 /* can't get length, bail out */
5091 shmlba
= target_shmlba(cpu_env
);
5093 if (shmaddr
& (shmlba
- 1)) {
5094 if (shmflg
& SHM_RND
) {
5095 shmaddr
&= ~(shmlba
- 1);
5097 return -TARGET_EINVAL
;
5100 if (!guest_range_valid(shmaddr
, shm_info
.shm_segsz
)) {
5101 return -TARGET_EINVAL
;
5107 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
5109 abi_ulong mmap_start
;
5111 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
5113 if (mmap_start
== -1) {
5115 host_raddr
= (void *)-1;
5117 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
5120 if (host_raddr
== (void *)-1) {
5122 return get_errno((long)host_raddr
);
5124 raddr
=h2g((unsigned long)host_raddr
);
5126 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
5127 PAGE_VALID
| PAGE_READ
|
5128 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
5130 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
5131 if (!shm_regions
[i
].in_use
) {
5132 shm_regions
[i
].in_use
= true;
5133 shm_regions
[i
].start
= raddr
;
5134 shm_regions
[i
].size
= shm_info
.shm_segsz
;
5144 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
5151 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
5152 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
5153 shm_regions
[i
].in_use
= false;
5154 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
5158 rv
= get_errno(shmdt(g2h(shmaddr
)));
5165 #ifdef TARGET_NR_ipc
5166 /* ??? This only works with linear mappings. */
5167 /* do_ipc() must return target values and target errnos. */
5168 static abi_long
do_ipc(CPUArchState
*cpu_env
,
5169 unsigned int call
, abi_long first
,
5170 abi_long second
, abi_long third
,
5171 abi_long ptr
, abi_long fifth
)
5176 version
= call
>> 16;
5181 ret
= do_semop(first
, ptr
, second
);
5185 ret
= get_errno(semget(first
, second
, third
));
5188 case IPCOP_semctl
: {
5189 /* The semun argument to semctl is passed by value, so dereference the
5192 get_user_ual(atptr
, ptr
);
5193 ret
= do_semctl(first
, second
, third
, atptr
);
5198 ret
= get_errno(msgget(first
, second
));
5202 ret
= do_msgsnd(first
, ptr
, second
, third
);
5206 ret
= do_msgctl(first
, second
, ptr
);
5213 struct target_ipc_kludge
{
5218 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
5219 ret
= -TARGET_EFAULT
;
5223 ret
= do_msgrcv(first
, tswapal(tmp
->msgp
), second
, tswapal(tmp
->msgtyp
), third
);
5225 unlock_user_struct(tmp
, ptr
, 0);
5229 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
5238 raddr
= do_shmat(cpu_env
, first
, ptr
, second
);
5239 if (is_error(raddr
))
5240 return get_errno(raddr
);
5241 if (put_user_ual(raddr
, third
))
5242 return -TARGET_EFAULT
;
5246 ret
= -TARGET_EINVAL
;
5251 ret
= do_shmdt(ptr
);
5255 /* IPC_* flag values are the same on all linux platforms */
5256 ret
= get_errno(shmget(first
, second
, third
));
5259 /* IPC_* and SHM_* command values are the same on all linux platforms */
5261 ret
= do_shmctl(first
, second
, ptr
);
5264 gemu_log("Unsupported ipc call: %d (version %d)\n", call
, version
);
5265 ret
= -TARGET_ENOSYS
;
5272 /* kernel structure types definitions */
5274 #define STRUCT(name, ...) STRUCT_ ## name,
5275 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
5277 #include "syscall_types.h"
5281 #undef STRUCT_SPECIAL
5283 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
5284 #define STRUCT_SPECIAL(name)
5285 #include "syscall_types.h"
5287 #undef STRUCT_SPECIAL
5289 typedef struct IOCTLEntry IOCTLEntry
;
5291 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5292 int fd
, int cmd
, abi_long arg
);
5296 unsigned int host_cmd
;
5299 do_ioctl_fn
*do_ioctl
;
5300 const argtype arg_type
[5];
5303 #define IOC_R 0x0001
5304 #define IOC_W 0x0002
5305 #define IOC_RW (IOC_R | IOC_W)
5307 #define MAX_STRUCT_SIZE 4096
5309 #ifdef CONFIG_FIEMAP
5310 /* So fiemap access checks don't overflow on 32 bit systems.
5311 * This is very slightly smaller than the limit imposed by
5312 * the underlying kernel.
5314 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
5315 / sizeof(struct fiemap_extent))
5317 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5318 int fd
, int cmd
, abi_long arg
)
5320 /* The parameter for this ioctl is a struct fiemap followed
5321 * by an array of struct fiemap_extent whose size is set
5322 * in fiemap->fm_extent_count. The array is filled in by the
5325 int target_size_in
, target_size_out
;
5327 const argtype
*arg_type
= ie
->arg_type
;
5328 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
5331 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
5335 assert(arg_type
[0] == TYPE_PTR
);
5336 assert(ie
->access
== IOC_RW
);
5338 target_size_in
= thunk_type_size(arg_type
, 0);
5339 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
5341 return -TARGET_EFAULT
;
5343 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5344 unlock_user(argptr
, arg
, 0);
5345 fm
= (struct fiemap
*)buf_temp
;
5346 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
5347 return -TARGET_EINVAL
;
5350 outbufsz
= sizeof (*fm
) +
5351 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
5353 if (outbufsz
> MAX_STRUCT_SIZE
) {
5354 /* We can't fit all the extents into the fixed size buffer.
5355 * Allocate one that is large enough and use it instead.
5357 fm
= g_try_malloc(outbufsz
);
5359 return -TARGET_ENOMEM
;
5361 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
5364 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, fm
));
5365 if (!is_error(ret
)) {
5366 target_size_out
= target_size_in
;
5367 /* An extent_count of 0 means we were only counting the extents
5368 * so there are no structs to copy
5370 if (fm
->fm_extent_count
!= 0) {
5371 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
5373 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
5375 ret
= -TARGET_EFAULT
;
5377 /* Convert the struct fiemap */
5378 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
5379 if (fm
->fm_extent_count
!= 0) {
5380 p
= argptr
+ target_size_in
;
5381 /* ...and then all the struct fiemap_extents */
5382 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
5383 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
5388 unlock_user(argptr
, arg
, target_size_out
);
5398 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5399 int fd
, int cmd
, abi_long arg
)
5401 const argtype
*arg_type
= ie
->arg_type
;
5405 struct ifconf
*host_ifconf
;
5407 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
5408 int target_ifreq_size
;
5413 abi_long target_ifc_buf
;
5417 assert(arg_type
[0] == TYPE_PTR
);
5418 assert(ie
->access
== IOC_RW
);
5421 target_size
= thunk_type_size(arg_type
, 0);
5423 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5425 return -TARGET_EFAULT
;
5426 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5427 unlock_user(argptr
, arg
, 0);
5429 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
5430 target_ifc_len
= host_ifconf
->ifc_len
;
5431 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
5433 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
5434 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
5435 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
5437 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
5438 if (outbufsz
> MAX_STRUCT_SIZE
) {
5439 /* We can't fit all the extents into the fixed size buffer.
5440 * Allocate one that is large enough and use it instead.
5442 host_ifconf
= malloc(outbufsz
);
5444 return -TARGET_ENOMEM
;
5446 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
5449 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
5451 host_ifconf
->ifc_len
= host_ifc_len
;
5452 host_ifconf
->ifc_buf
= host_ifc_buf
;
5454 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
5455 if (!is_error(ret
)) {
5456 /* convert host ifc_len to target ifc_len */
5458 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
5459 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
5460 host_ifconf
->ifc_len
= target_ifc_len
;
5462 /* restore target ifc_buf */
5464 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
5466 /* copy struct ifconf to target user */
5468 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5470 return -TARGET_EFAULT
;
5471 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
5472 unlock_user(argptr
, arg
, target_size
);
5474 /* copy ifreq[] to target user */
5476 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
5477 for (i
= 0; i
< nb_ifreq
; i
++) {
5478 thunk_convert(argptr
+ i
* target_ifreq_size
,
5479 host_ifc_buf
+ i
* sizeof(struct ifreq
),
5480 ifreq_arg_type
, THUNK_TARGET
);
5482 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
5492 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5493 int cmd
, abi_long arg
)
5496 struct dm_ioctl
*host_dm
;
5497 abi_long guest_data
;
5498 uint32_t guest_data_size
;
5500 const argtype
*arg_type
= ie
->arg_type
;
5502 void *big_buf
= NULL
;
5506 target_size
= thunk_type_size(arg_type
, 0);
5507 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5509 ret
= -TARGET_EFAULT
;
5512 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5513 unlock_user(argptr
, arg
, 0);
5515 /* buf_temp is too small, so fetch things into a bigger buffer */
5516 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
5517 memcpy(big_buf
, buf_temp
, target_size
);
5521 guest_data
= arg
+ host_dm
->data_start
;
5522 if ((guest_data
- arg
) < 0) {
5523 ret
= -TARGET_EINVAL
;
5526 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5527 host_data
= (char*)host_dm
+ host_dm
->data_start
;
5529 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
5531 ret
= -TARGET_EFAULT
;
5535 switch (ie
->host_cmd
) {
5537 case DM_LIST_DEVICES
:
5540 case DM_DEV_SUSPEND
:
5543 case DM_TABLE_STATUS
:
5544 case DM_TABLE_CLEAR
:
5546 case DM_LIST_VERSIONS
:
5550 case DM_DEV_SET_GEOMETRY
:
5551 /* data contains only strings */
5552 memcpy(host_data
, argptr
, guest_data_size
);
5555 memcpy(host_data
, argptr
, guest_data_size
);
5556 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
5560 void *gspec
= argptr
;
5561 void *cur_data
= host_data
;
5562 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5563 int spec_size
= thunk_type_size(arg_type
, 0);
5566 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5567 struct dm_target_spec
*spec
= cur_data
;
5571 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
5572 slen
= strlen((char*)gspec
+ spec_size
) + 1;
5574 spec
->next
= sizeof(*spec
) + slen
;
5575 strcpy((char*)&spec
[1], gspec
+ spec_size
);
5577 cur_data
+= spec
->next
;
5582 ret
= -TARGET_EINVAL
;
5583 unlock_user(argptr
, guest_data
, 0);
5586 unlock_user(argptr
, guest_data
, 0);
5588 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5589 if (!is_error(ret
)) {
5590 guest_data
= arg
+ host_dm
->data_start
;
5591 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5592 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
5593 switch (ie
->host_cmd
) {
5598 case DM_DEV_SUSPEND
:
5601 case DM_TABLE_CLEAR
:
5603 case DM_DEV_SET_GEOMETRY
:
5604 /* no return data */
5606 case DM_LIST_DEVICES
:
5608 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
5609 uint32_t remaining_data
= guest_data_size
;
5610 void *cur_data
= argptr
;
5611 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
5612 int nl_size
= 12; /* can't use thunk_size due to alignment */
5615 uint32_t next
= nl
->next
;
5617 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
5619 if (remaining_data
< nl
->next
) {
5620 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5623 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
5624 strcpy(cur_data
+ nl_size
, nl
->name
);
5625 cur_data
+= nl
->next
;
5626 remaining_data
-= nl
->next
;
5630 nl
= (void*)nl
+ next
;
5635 case DM_TABLE_STATUS
:
5637 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
5638 void *cur_data
= argptr
;
5639 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5640 int spec_size
= thunk_type_size(arg_type
, 0);
5643 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5644 uint32_t next
= spec
->next
;
5645 int slen
= strlen((char*)&spec
[1]) + 1;
5646 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
5647 if (guest_data_size
< spec
->next
) {
5648 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5651 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
5652 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
5653 cur_data
= argptr
+ spec
->next
;
5654 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
5660 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
5661 int count
= *(uint32_t*)hdata
;
5662 uint64_t *hdev
= hdata
+ 8;
5663 uint64_t *gdev
= argptr
+ 8;
5666 *(uint32_t*)argptr
= tswap32(count
);
5667 for (i
= 0; i
< count
; i
++) {
5668 *gdev
= tswap64(*hdev
);
5674 case DM_LIST_VERSIONS
:
5676 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
5677 uint32_t remaining_data
= guest_data_size
;
5678 void *cur_data
= argptr
;
5679 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
5680 int vers_size
= thunk_type_size(arg_type
, 0);
5683 uint32_t next
= vers
->next
;
5685 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
5687 if (remaining_data
< vers
->next
) {
5688 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5691 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
5692 strcpy(cur_data
+ vers_size
, vers
->name
);
5693 cur_data
+= vers
->next
;
5694 remaining_data
-= vers
->next
;
5698 vers
= (void*)vers
+ next
;
5703 unlock_user(argptr
, guest_data
, 0);
5704 ret
= -TARGET_EINVAL
;
5707 unlock_user(argptr
, guest_data
, guest_data_size
);
5709 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5711 ret
= -TARGET_EFAULT
;
5714 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5715 unlock_user(argptr
, arg
, target_size
);
5722 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5723 int cmd
, abi_long arg
)
5727 const argtype
*arg_type
= ie
->arg_type
;
5728 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
5731 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
5732 struct blkpg_partition host_part
;
5734 /* Read and convert blkpg */
5736 target_size
= thunk_type_size(arg_type
, 0);
5737 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5739 ret
= -TARGET_EFAULT
;
5742 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5743 unlock_user(argptr
, arg
, 0);
5745 switch (host_blkpg
->op
) {
5746 case BLKPG_ADD_PARTITION
:
5747 case BLKPG_DEL_PARTITION
:
5748 /* payload is struct blkpg_partition */
5751 /* Unknown opcode */
5752 ret
= -TARGET_EINVAL
;
5756 /* Read and convert blkpg->data */
5757 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
5758 target_size
= thunk_type_size(part_arg_type
, 0);
5759 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5761 ret
= -TARGET_EFAULT
;
5764 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
5765 unlock_user(argptr
, arg
, 0);
5767 /* Swizzle the data pointer to our local copy and call! */
5768 host_blkpg
->data
= &host_part
;
5769 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
5775 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5776 int fd
, int cmd
, abi_long arg
)
5778 const argtype
*arg_type
= ie
->arg_type
;
5779 const StructEntry
*se
;
5780 const argtype
*field_types
;
5781 const int *dst_offsets
, *src_offsets
;
5784 abi_ulong
*target_rt_dev_ptr
;
5785 unsigned long *host_rt_dev_ptr
;
5789 assert(ie
->access
== IOC_W
);
5790 assert(*arg_type
== TYPE_PTR
);
5792 assert(*arg_type
== TYPE_STRUCT
);
5793 target_size
= thunk_type_size(arg_type
, 0);
5794 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5796 return -TARGET_EFAULT
;
5799 assert(*arg_type
== (int)STRUCT_rtentry
);
5800 se
= struct_entries
+ *arg_type
++;
5801 assert(se
->convert
[0] == NULL
);
5802 /* convert struct here to be able to catch rt_dev string */
5803 field_types
= se
->field_types
;
5804 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
5805 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
5806 for (i
= 0; i
< se
->nb_fields
; i
++) {
5807 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
5808 assert(*field_types
== TYPE_PTRVOID
);
5809 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
5810 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
5811 if (*target_rt_dev_ptr
!= 0) {
5812 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
5813 tswapal(*target_rt_dev_ptr
));
5814 if (!*host_rt_dev_ptr
) {
5815 unlock_user(argptr
, arg
, 0);
5816 return -TARGET_EFAULT
;
5819 *host_rt_dev_ptr
= 0;
5824 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
5825 argptr
+ src_offsets
[i
],
5826 field_types
, THUNK_HOST
);
5828 unlock_user(argptr
, arg
, 0);
5830 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5831 if (*host_rt_dev_ptr
!= 0) {
5832 unlock_user((void *)*host_rt_dev_ptr
,
5833 *target_rt_dev_ptr
, 0);
5838 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5839 int fd
, int cmd
, abi_long arg
)
5841 int sig
= target_to_host_signal(arg
);
5842 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
5846 static abi_long
do_ioctl_tiocgptpeer(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5847 int fd
, int cmd
, abi_long arg
)
5849 int flags
= target_to_host_bitmask(arg
, fcntl_flags_tbl
);
5850 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, flags
));
5854 static IOCTLEntry ioctl_entries
[] = {
5855 #define IOCTL(cmd, access, ...) \
5856 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5857 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5858 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5859 #define IOCTL_IGNORE(cmd) \
5860 { TARGET_ ## cmd, 0, #cmd },
5865 /* ??? Implement proper locking for ioctls. */
5866 /* do_ioctl() Must return target values and target errnos. */
5867 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
5869 const IOCTLEntry
*ie
;
5870 const argtype
*arg_type
;
5872 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
5878 if (ie
->target_cmd
== 0) {
5879 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
5880 return -TARGET_ENOSYS
;
5882 if (ie
->target_cmd
== cmd
)
5886 arg_type
= ie
->arg_type
;
5888 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
5891 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
5892 } else if (!ie
->host_cmd
) {
5893 /* Some architectures define BSD ioctls in their headers
5894 that are not implemented in Linux. */
5895 return -TARGET_ENOSYS
;
5898 switch(arg_type
[0]) {
5901 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
5905 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
5909 target_size
= thunk_type_size(arg_type
, 0);
5910 switch(ie
->access
) {
5912 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5913 if (!is_error(ret
)) {
5914 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5916 return -TARGET_EFAULT
;
5917 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5918 unlock_user(argptr
, arg
, target_size
);
5922 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5924 return -TARGET_EFAULT
;
5925 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5926 unlock_user(argptr
, arg
, 0);
5927 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5931 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5933 return -TARGET_EFAULT
;
5934 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5935 unlock_user(argptr
, arg
, 0);
5936 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5937 if (!is_error(ret
)) {
5938 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5940 return -TARGET_EFAULT
;
5941 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5942 unlock_user(argptr
, arg
, target_size
);
5948 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5949 (long)cmd
, arg_type
[0]);
5950 ret
= -TARGET_ENOSYS
;
5956 static const bitmask_transtbl iflag_tbl
[] = {
5957 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
5958 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
5959 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
5960 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
5961 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
5962 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
5963 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
5964 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
5965 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
5966 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
5967 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
5968 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
5969 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
5970 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
5974 static const bitmask_transtbl oflag_tbl
[] = {
5975 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
5976 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
5977 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
5978 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
5979 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
5980 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
5981 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
5982 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
5983 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
5984 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
5985 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
5986 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
5987 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
5988 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
5989 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
5990 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
5991 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
5992 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
5993 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
5994 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
5995 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
5996 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
5997 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
5998 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
6002 static const bitmask_transtbl cflag_tbl
[] = {
6003 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
6004 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
6005 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
6006 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
6007 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
6008 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
6009 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
6010 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
6011 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
6012 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
6013 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
6014 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
6015 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
6016 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
6017 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
6018 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
6019 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
6020 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
6021 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
6022 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
6023 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
6024 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
6025 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
6026 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
6027 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
6028 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
6029 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
6030 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
6031 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
6032 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
6033 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
6037 static const bitmask_transtbl lflag_tbl
[] = {
6038 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
6039 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
6040 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
6041 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
6042 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
6043 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
6044 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
6045 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
6046 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
6047 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
6048 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
6049 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
6050 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
6051 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
6052 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
6056 static void target_to_host_termios (void *dst
, const void *src
)
6058 struct host_termios
*host
= dst
;
6059 const struct target_termios
*target
= src
;
6062 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
6064 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
6066 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
6068 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
6069 host
->c_line
= target
->c_line
;
6071 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
6072 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
6073 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
6074 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
6075 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
6076 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
6077 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
6078 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
6079 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
6080 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
6081 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
6082 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
6083 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
6084 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
6085 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
6086 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
6087 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
6088 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
6091 static void host_to_target_termios (void *dst
, const void *src
)
6093 struct target_termios
*target
= dst
;
6094 const struct host_termios
*host
= src
;
6097 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
6099 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
6101 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
6103 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
6104 target
->c_line
= host
->c_line
;
6106 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
6107 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
6108 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
6109 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
6110 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
6111 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
6112 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
6113 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
6114 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
6115 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
6116 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
6117 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
6118 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
6119 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
6120 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
6121 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
6122 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
6123 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
6126 static const StructEntry struct_termios_def
= {
6127 .convert
= { host_to_target_termios
, target_to_host_termios
},
6128 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
6129 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
6132 static bitmask_transtbl mmap_flags_tbl
[] = {
6133 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
6134 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
6135 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
6136 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
,
6137 MAP_ANONYMOUS
, MAP_ANONYMOUS
},
6138 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
,
6139 MAP_GROWSDOWN
, MAP_GROWSDOWN
},
6140 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
,
6141 MAP_DENYWRITE
, MAP_DENYWRITE
},
6142 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
,
6143 MAP_EXECUTABLE
, MAP_EXECUTABLE
},
6144 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
6145 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
,
6146 MAP_NORESERVE
, MAP_NORESERVE
},
6147 { TARGET_MAP_HUGETLB
, TARGET_MAP_HUGETLB
, MAP_HUGETLB
, MAP_HUGETLB
},
6148 /* MAP_STACK had been ignored by the kernel for quite some time.
6149 Recognize it for the target insofar as we do not want to pass
6150 it through to the host. */
6151 { TARGET_MAP_STACK
, TARGET_MAP_STACK
, 0, 0 },
6155 #if defined(TARGET_I386)
6157 /* NOTE: there is really one LDT for all the threads */
6158 static uint8_t *ldt_table
;
6160 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
6167 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
6168 if (size
> bytecount
)
6170 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
6172 return -TARGET_EFAULT
;
6173 /* ??? Should this by byteswapped? */
6174 memcpy(p
, ldt_table
, size
);
6175 unlock_user(p
, ptr
, size
);
6179 /* XXX: add locking support */
6180 static abi_long
write_ldt(CPUX86State
*env
,
6181 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
6183 struct target_modify_ldt_ldt_s ldt_info
;
6184 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6185 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
6186 int seg_not_present
, useable
, lm
;
6187 uint32_t *lp
, entry_1
, entry_2
;
6189 if (bytecount
!= sizeof(ldt_info
))
6190 return -TARGET_EINVAL
;
6191 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
6192 return -TARGET_EFAULT
;
6193 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
6194 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
6195 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
6196 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
6197 unlock_user_struct(target_ldt_info
, ptr
, 0);
6199 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
6200 return -TARGET_EINVAL
;
6201 seg_32bit
= ldt_info
.flags
& 1;
6202 contents
= (ldt_info
.flags
>> 1) & 3;
6203 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
6204 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
6205 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
6206 useable
= (ldt_info
.flags
>> 6) & 1;
6210 lm
= (ldt_info
.flags
>> 7) & 1;
6212 if (contents
== 3) {
6214 return -TARGET_EINVAL
;
6215 if (seg_not_present
== 0)
6216 return -TARGET_EINVAL
;
6218 /* allocate the LDT */
6220 env
->ldt
.base
= target_mmap(0,
6221 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
6222 PROT_READ
|PROT_WRITE
,
6223 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
6224 if (env
->ldt
.base
== -1)
6225 return -TARGET_ENOMEM
;
6226 memset(g2h(env
->ldt
.base
), 0,
6227 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
6228 env
->ldt
.limit
= 0xffff;
6229 ldt_table
= g2h(env
->ldt
.base
);
6232 /* NOTE: same code as Linux kernel */
6233 /* Allow LDTs to be cleared by the user. */
6234 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
6237 read_exec_only
== 1 &&
6239 limit_in_pages
== 0 &&
6240 seg_not_present
== 1 &&
6248 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
6249 (ldt_info
.limit
& 0x0ffff);
6250 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
6251 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
6252 (ldt_info
.limit
& 0xf0000) |
6253 ((read_exec_only
^ 1) << 9) |
6255 ((seg_not_present
^ 1) << 15) |
6257 (limit_in_pages
<< 23) |
6261 entry_2
|= (useable
<< 20);
6263 /* Install the new entry ... */
6265 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
6266 lp
[0] = tswap32(entry_1
);
6267 lp
[1] = tswap32(entry_2
);
6271 /* specific and weird i386 syscalls */
6272 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
6273 unsigned long bytecount
)
6279 ret
= read_ldt(ptr
, bytecount
);
6282 ret
= write_ldt(env
, ptr
, bytecount
, 1);
6285 ret
= write_ldt(env
, ptr
, bytecount
, 0);
6288 ret
= -TARGET_ENOSYS
;
6294 #if defined(TARGET_I386) && defined(TARGET_ABI32)
6295 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6297 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
6298 struct target_modify_ldt_ldt_s ldt_info
;
6299 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6300 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
6301 int seg_not_present
, useable
, lm
;
6302 uint32_t *lp
, entry_1
, entry_2
;
6305 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6306 if (!target_ldt_info
)
6307 return -TARGET_EFAULT
;
6308 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
6309 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
6310 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
6311 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
6312 if (ldt_info
.entry_number
== -1) {
6313 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
6314 if (gdt_table
[i
] == 0) {
6315 ldt_info
.entry_number
= i
;
6316 target_ldt_info
->entry_number
= tswap32(i
);
6321 unlock_user_struct(target_ldt_info
, ptr
, 1);
6323 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
6324 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
6325 return -TARGET_EINVAL
;
6326 seg_32bit
= ldt_info
.flags
& 1;
6327 contents
= (ldt_info
.flags
>> 1) & 3;
6328 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
6329 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
6330 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
6331 useable
= (ldt_info
.flags
>> 6) & 1;
6335 lm
= (ldt_info
.flags
>> 7) & 1;
6338 if (contents
== 3) {
6339 if (seg_not_present
== 0)
6340 return -TARGET_EINVAL
;
6343 /* NOTE: same code as Linux kernel */
6344 /* Allow LDTs to be cleared by the user. */
6345 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
6346 if ((contents
== 0 &&
6347 read_exec_only
== 1 &&
6349 limit_in_pages
== 0 &&
6350 seg_not_present
== 1 &&
6358 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
6359 (ldt_info
.limit
& 0x0ffff);
6360 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
6361 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
6362 (ldt_info
.limit
& 0xf0000) |
6363 ((read_exec_only
^ 1) << 9) |
6365 ((seg_not_present
^ 1) << 15) |
6367 (limit_in_pages
<< 23) |
6372 /* Install the new entry ... */
6374 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
6375 lp
[0] = tswap32(entry_1
);
6376 lp
[1] = tswap32(entry_2
);
6380 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6382 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6383 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
6384 uint32_t base_addr
, limit
, flags
;
6385 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
6386 int seg_not_present
, useable
, lm
;
6387 uint32_t *lp
, entry_1
, entry_2
;
6389 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6390 if (!target_ldt_info
)
6391 return -TARGET_EFAULT
;
6392 idx
= tswap32(target_ldt_info
->entry_number
);
6393 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
6394 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
6395 unlock_user_struct(target_ldt_info
, ptr
, 1);
6396 return -TARGET_EINVAL
;
6398 lp
= (uint32_t *)(gdt_table
+ idx
);
6399 entry_1
= tswap32(lp
[0]);
6400 entry_2
= tswap32(lp
[1]);
6402 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
6403 contents
= (entry_2
>> 10) & 3;
6404 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
6405 seg_32bit
= (entry_2
>> 22) & 1;
6406 limit_in_pages
= (entry_2
>> 23) & 1;
6407 useable
= (entry_2
>> 20) & 1;
6411 lm
= (entry_2
>> 21) & 1;
6413 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
6414 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
6415 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
6416 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
6417 base_addr
= (entry_1
>> 16) |
6418 (entry_2
& 0xff000000) |
6419 ((entry_2
& 0xff) << 16);
6420 target_ldt_info
->base_addr
= tswapal(base_addr
);
6421 target_ldt_info
->limit
= tswap32(limit
);
6422 target_ldt_info
->flags
= tswap32(flags
);
6423 unlock_user_struct(target_ldt_info
, ptr
, 1);
6426 #endif /* TARGET_I386 && TARGET_ABI32 */
6428 #ifndef TARGET_ABI32
6429 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
6436 case TARGET_ARCH_SET_GS
:
6437 case TARGET_ARCH_SET_FS
:
6438 if (code
== TARGET_ARCH_SET_GS
)
6442 cpu_x86_load_seg(env
, idx
, 0);
6443 env
->segs
[idx
].base
= addr
;
6445 case TARGET_ARCH_GET_GS
:
6446 case TARGET_ARCH_GET_FS
:
6447 if (code
== TARGET_ARCH_GET_GS
)
6451 val
= env
->segs
[idx
].base
;
6452 if (put_user(val
, addr
, abi_ulong
))
6453 ret
= -TARGET_EFAULT
;
6456 ret
= -TARGET_EINVAL
;
6463 #endif /* defined(TARGET_I386) */
6465 #define NEW_STACK_SIZE 0x40000
6468 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
6471 pthread_mutex_t mutex
;
6472 pthread_cond_t cond
;
6475 abi_ulong child_tidptr
;
6476 abi_ulong parent_tidptr
;
6480 static void *clone_func(void *arg
)
6482 new_thread_info
*info
= arg
;
6487 rcu_register_thread();
6488 tcg_register_thread();
6490 cpu
= ENV_GET_CPU(env
);
6492 ts
= (TaskState
*)cpu
->opaque
;
6493 info
->tid
= gettid();
6495 if (info
->child_tidptr
)
6496 put_user_u32(info
->tid
, info
->child_tidptr
);
6497 if (info
->parent_tidptr
)
6498 put_user_u32(info
->tid
, info
->parent_tidptr
);
6499 /* Enable signals. */
6500 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
6501 /* Signal to the parent that we're ready. */
6502 pthread_mutex_lock(&info
->mutex
);
6503 pthread_cond_broadcast(&info
->cond
);
6504 pthread_mutex_unlock(&info
->mutex
);
6505 /* Wait until the parent has finished initializing the tls state. */
6506 pthread_mutex_lock(&clone_lock
);
6507 pthread_mutex_unlock(&clone_lock
);
6513 /* do_fork() Must return host values and target errnos (unlike most
6514 do_*() functions). */
6515 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
6516 abi_ulong parent_tidptr
, target_ulong newtls
,
6517 abi_ulong child_tidptr
)
6519 CPUState
*cpu
= ENV_GET_CPU(env
);
6523 CPUArchState
*new_env
;
6526 flags
&= ~CLONE_IGNORED_FLAGS
;
6528 /* Emulate vfork() with fork() */
6529 if (flags
& CLONE_VFORK
)
6530 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
6532 if (flags
& CLONE_VM
) {
6533 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
6534 new_thread_info info
;
6535 pthread_attr_t attr
;
6537 if (((flags
& CLONE_THREAD_FLAGS
) != CLONE_THREAD_FLAGS
) ||
6538 (flags
& CLONE_INVALID_THREAD_FLAGS
)) {
6539 return -TARGET_EINVAL
;
6542 ts
= g_new0(TaskState
, 1);
6543 init_task_state(ts
);
6545 /* Grab a mutex so that thread setup appears atomic. */
6546 pthread_mutex_lock(&clone_lock
);
6548 /* we create a new CPU instance. */
6549 new_env
= cpu_copy(env
);
6550 /* Init regs that differ from the parent. */
6551 cpu_clone_regs(new_env
, newsp
);
6552 new_cpu
= ENV_GET_CPU(new_env
);
6553 new_cpu
->opaque
= ts
;
6554 ts
->bprm
= parent_ts
->bprm
;
6555 ts
->info
= parent_ts
->info
;
6556 ts
->signal_mask
= parent_ts
->signal_mask
;
6558 if (flags
& CLONE_CHILD_CLEARTID
) {
6559 ts
->child_tidptr
= child_tidptr
;
6562 if (flags
& CLONE_SETTLS
) {
6563 cpu_set_tls (new_env
, newtls
);
6566 memset(&info
, 0, sizeof(info
));
6567 pthread_mutex_init(&info
.mutex
, NULL
);
6568 pthread_mutex_lock(&info
.mutex
);
6569 pthread_cond_init(&info
.cond
, NULL
);
6571 if (flags
& CLONE_CHILD_SETTID
) {
6572 info
.child_tidptr
= child_tidptr
;
6574 if (flags
& CLONE_PARENT_SETTID
) {
6575 info
.parent_tidptr
= parent_tidptr
;
6578 ret
= pthread_attr_init(&attr
);
6579 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
6580 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
6581 /* It is not safe to deliver signals until the child has finished
6582 initializing, so temporarily block all signals. */
6583 sigfillset(&sigmask
);
6584 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
6586 /* If this is our first additional thread, we need to ensure we
6587 * generate code for parallel execution and flush old translations.
6589 if (!parallel_cpus
) {
6590 parallel_cpus
= true;
6594 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
6595 /* TODO: Free new CPU state if thread creation failed. */
6597 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
6598 pthread_attr_destroy(&attr
);
6600 /* Wait for the child to initialize. */
6601 pthread_cond_wait(&info
.cond
, &info
.mutex
);
6606 pthread_mutex_unlock(&info
.mutex
);
6607 pthread_cond_destroy(&info
.cond
);
6608 pthread_mutex_destroy(&info
.mutex
);
6609 pthread_mutex_unlock(&clone_lock
);
6611 /* if no CLONE_VM, we consider it is a fork */
6612 if (flags
& CLONE_INVALID_FORK_FLAGS
) {
6613 return -TARGET_EINVAL
;
6616 /* We can't support custom termination signals */
6617 if ((flags
& CSIGNAL
) != TARGET_SIGCHLD
) {
6618 return -TARGET_EINVAL
;
6621 if (block_signals()) {
6622 return -TARGET_ERESTARTSYS
;
6628 /* Child Process. */
6629 cpu_clone_regs(env
, newsp
);
6631 /* There is a race condition here. The parent process could
6632 theoretically read the TID in the child process before the child
6633 tid is set. This would require using either ptrace
6634 (not implemented) or having *_tidptr to point at a shared memory
6635 mapping. We can't repeat the spinlock hack used above because
6636 the child process gets its own copy of the lock. */
6637 if (flags
& CLONE_CHILD_SETTID
)
6638 put_user_u32(gettid(), child_tidptr
);
6639 if (flags
& CLONE_PARENT_SETTID
)
6640 put_user_u32(gettid(), parent_tidptr
);
6641 ts
= (TaskState
*)cpu
->opaque
;
6642 if (flags
& CLONE_SETTLS
)
6643 cpu_set_tls (env
, newtls
);
6644 if (flags
& CLONE_CHILD_CLEARTID
)
6645 ts
->child_tidptr
= child_tidptr
;
6653 /* warning : doesn't handle linux specific flags... */
6654 static int target_to_host_fcntl_cmd(int cmd
)
6659 case TARGET_F_DUPFD
:
6660 case TARGET_F_GETFD
:
6661 case TARGET_F_SETFD
:
6662 case TARGET_F_GETFL
:
6663 case TARGET_F_SETFL
:
6666 case TARGET_F_GETLK
:
6669 case TARGET_F_SETLK
:
6672 case TARGET_F_SETLKW
:
6675 case TARGET_F_GETOWN
:
6678 case TARGET_F_SETOWN
:
6681 case TARGET_F_GETSIG
:
6684 case TARGET_F_SETSIG
:
6687 #if TARGET_ABI_BITS == 32
6688 case TARGET_F_GETLK64
:
6691 case TARGET_F_SETLK64
:
6694 case TARGET_F_SETLKW64
:
6698 case TARGET_F_SETLEASE
:
6701 case TARGET_F_GETLEASE
:
6704 #ifdef F_DUPFD_CLOEXEC
6705 case TARGET_F_DUPFD_CLOEXEC
:
6706 ret
= F_DUPFD_CLOEXEC
;
6709 case TARGET_F_NOTIFY
:
6713 case TARGET_F_GETOWN_EX
:
6718 case TARGET_F_SETOWN_EX
:
6723 case TARGET_F_SETPIPE_SZ
:
6726 case TARGET_F_GETPIPE_SZ
:
6731 ret
= -TARGET_EINVAL
;
6735 #if defined(__powerpc64__)
6736 /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6737 * is not supported by kernel. The glibc fcntl call actually adjusts
6738 * them to 5, 6 and 7 before making the syscall(). Since we make the
6739 * syscall directly, adjust to what is supported by the kernel.
6741 if (ret
>= F_GETLK64
&& ret
<= F_SETLKW64
) {
6742 ret
-= F_GETLK64
- 5;
6749 #define FLOCK_TRANSTBL \
6751 TRANSTBL_CONVERT(F_RDLCK); \
6752 TRANSTBL_CONVERT(F_WRLCK); \
6753 TRANSTBL_CONVERT(F_UNLCK); \
6754 TRANSTBL_CONVERT(F_EXLCK); \
6755 TRANSTBL_CONVERT(F_SHLCK); \
6758 static int target_to_host_flock(int type
)
6760 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6762 #undef TRANSTBL_CONVERT
6763 return -TARGET_EINVAL
;
6766 static int host_to_target_flock(int type
)
6768 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6770 #undef TRANSTBL_CONVERT
6771 /* if we don't know how to convert the value coming
6772 * from the host we copy to the target field as-is
6777 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
6778 abi_ulong target_flock_addr
)
6780 struct target_flock
*target_fl
;
6783 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6784 return -TARGET_EFAULT
;
6787 __get_user(l_type
, &target_fl
->l_type
);
6788 l_type
= target_to_host_flock(l_type
);
6792 fl
->l_type
= l_type
;
6793 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6794 __get_user(fl
->l_start
, &target_fl
->l_start
);
6795 __get_user(fl
->l_len
, &target_fl
->l_len
);
6796 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6797 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6801 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
6802 const struct flock64
*fl
)
6804 struct target_flock
*target_fl
;
6807 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6808 return -TARGET_EFAULT
;
6811 l_type
= host_to_target_flock(fl
->l_type
);
6812 __put_user(l_type
, &target_fl
->l_type
);
6813 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6814 __put_user(fl
->l_start
, &target_fl
->l_start
);
6815 __put_user(fl
->l_len
, &target_fl
->l_len
);
6816 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6817 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6821 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
6822 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
6824 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6825 static inline abi_long
copy_from_user_oabi_flock64(struct flock64
*fl
,
6826 abi_ulong target_flock_addr
)
6828 struct target_oabi_flock64
*target_fl
;
6831 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6832 return -TARGET_EFAULT
;
6835 __get_user(l_type
, &target_fl
->l_type
);
6836 l_type
= target_to_host_flock(l_type
);
6840 fl
->l_type
= l_type
;
6841 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6842 __get_user(fl
->l_start
, &target_fl
->l_start
);
6843 __get_user(fl
->l_len
, &target_fl
->l_len
);
6844 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6845 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6849 static inline abi_long
copy_to_user_oabi_flock64(abi_ulong target_flock_addr
,
6850 const struct flock64
*fl
)
6852 struct target_oabi_flock64
*target_fl
;
6855 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6856 return -TARGET_EFAULT
;
6859 l_type
= host_to_target_flock(fl
->l_type
);
6860 __put_user(l_type
, &target_fl
->l_type
);
6861 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6862 __put_user(fl
->l_start
, &target_fl
->l_start
);
6863 __put_user(fl
->l_len
, &target_fl
->l_len
);
6864 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6865 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6870 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
6871 abi_ulong target_flock_addr
)
6873 struct target_flock64
*target_fl
;
6876 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6877 return -TARGET_EFAULT
;
6880 __get_user(l_type
, &target_fl
->l_type
);
6881 l_type
= target_to_host_flock(l_type
);
6885 fl
->l_type
= l_type
;
6886 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6887 __get_user(fl
->l_start
, &target_fl
->l_start
);
6888 __get_user(fl
->l_len
, &target_fl
->l_len
);
6889 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6890 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6894 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
6895 const struct flock64
*fl
)
6897 struct target_flock64
*target_fl
;
6900 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6901 return -TARGET_EFAULT
;
6904 l_type
= host_to_target_flock(fl
->l_type
);
6905 __put_user(l_type
, &target_fl
->l_type
);
6906 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6907 __put_user(fl
->l_start
, &target_fl
->l_start
);
6908 __put_user(fl
->l_len
, &target_fl
->l_len
);
6909 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6910 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6914 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
6916 struct flock64 fl64
;
6918 struct f_owner_ex fox
;
6919 struct target_f_owner_ex
*target_fox
;
6922 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
6924 if (host_cmd
== -TARGET_EINVAL
)
6928 case TARGET_F_GETLK
:
6929 ret
= copy_from_user_flock(&fl64
, arg
);
6933 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6935 ret
= copy_to_user_flock(arg
, &fl64
);
6939 case TARGET_F_SETLK
:
6940 case TARGET_F_SETLKW
:
6941 ret
= copy_from_user_flock(&fl64
, arg
);
6945 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6948 case TARGET_F_GETLK64
:
6949 ret
= copy_from_user_flock64(&fl64
, arg
);
6953 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6955 ret
= copy_to_user_flock64(arg
, &fl64
);
6958 case TARGET_F_SETLK64
:
6959 case TARGET_F_SETLKW64
:
6960 ret
= copy_from_user_flock64(&fl64
, arg
);
6964 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6967 case TARGET_F_GETFL
:
6968 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6970 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
6974 case TARGET_F_SETFL
:
6975 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
6976 target_to_host_bitmask(arg
,
6981 case TARGET_F_GETOWN_EX
:
6982 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6984 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
6985 return -TARGET_EFAULT
;
6986 target_fox
->type
= tswap32(fox
.type
);
6987 target_fox
->pid
= tswap32(fox
.pid
);
6988 unlock_user_struct(target_fox
, arg
, 1);
6994 case TARGET_F_SETOWN_EX
:
6995 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
6996 return -TARGET_EFAULT
;
6997 fox
.type
= tswap32(target_fox
->type
);
6998 fox
.pid
= tswap32(target_fox
->pid
);
6999 unlock_user_struct(target_fox
, arg
, 0);
7000 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
7004 case TARGET_F_SETOWN
:
7005 case TARGET_F_GETOWN
:
7006 case TARGET_F_SETSIG
:
7007 case TARGET_F_GETSIG
:
7008 case TARGET_F_SETLEASE
:
7009 case TARGET_F_GETLEASE
:
7010 case TARGET_F_SETPIPE_SZ
:
7011 case TARGET_F_GETPIPE_SZ
:
7012 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
7016 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
7024 static inline int high2lowuid(int uid
)
7032 static inline int high2lowgid(int gid
)
7040 static inline int low2highuid(int uid
)
7042 if ((int16_t)uid
== -1)
7048 static inline int low2highgid(int gid
)
7050 if ((int16_t)gid
== -1)
7055 static inline int tswapid(int id
)
7060 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7062 #else /* !USE_UID16 */
7063 static inline int high2lowuid(int uid
)
7067 static inline int high2lowgid(int gid
)
7071 static inline int low2highuid(int uid
)
7075 static inline int low2highgid(int gid
)
7079 static inline int tswapid(int id
)
7084 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7086 #endif /* USE_UID16 */
7088 /* We must do direct syscalls for setting UID/GID, because we want to
7089 * implement the Linux system call semantics of "change only for this thread",
7090 * not the libc/POSIX semantics of "change for all threads in process".
7091 * (See http://ewontfix.com/17/ for more details.)
7092 * We use the 32-bit version of the syscalls if present; if it is not
7093 * then either the host architecture supports 32-bit UIDs natively with
7094 * the standard syscall, or the 16-bit UID is the best we can do.
7096 #ifdef __NR_setuid32
7097 #define __NR_sys_setuid __NR_setuid32
7099 #define __NR_sys_setuid __NR_setuid
7101 #ifdef __NR_setgid32
7102 #define __NR_sys_setgid __NR_setgid32
7104 #define __NR_sys_setgid __NR_setgid
7106 #ifdef __NR_setresuid32
7107 #define __NR_sys_setresuid __NR_setresuid32
7109 #define __NR_sys_setresuid __NR_setresuid
7111 #ifdef __NR_setresgid32
7112 #define __NR_sys_setresgid __NR_setresgid32
7114 #define __NR_sys_setresgid __NR_setresgid
7117 _syscall1(int, sys_setuid
, uid_t
, uid
)
7118 _syscall1(int, sys_setgid
, gid_t
, gid
)
7119 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
7120 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
7122 void syscall_init(void)
7125 const argtype
*arg_type
;
7129 thunk_init(STRUCT_MAX
);
7131 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7132 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7133 #include "syscall_types.h"
7135 #undef STRUCT_SPECIAL
7137 /* Build target_to_host_errno_table[] table from
7138 * host_to_target_errno_table[]. */
7139 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
7140 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
7143 /* we patch the ioctl size if necessary. We rely on the fact that
7144 no ioctl has all the bits at '1' in the size field */
7146 while (ie
->target_cmd
!= 0) {
7147 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
7148 TARGET_IOC_SIZEMASK
) {
7149 arg_type
= ie
->arg_type
;
7150 if (arg_type
[0] != TYPE_PTR
) {
7151 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
7156 size
= thunk_type_size(arg_type
, 0);
7157 ie
->target_cmd
= (ie
->target_cmd
&
7158 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
7159 (size
<< TARGET_IOC_SIZESHIFT
);
7162 /* automatic consistency check if same arch */
7163 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7164 (defined(__x86_64__) && defined(TARGET_X86_64))
7165 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
7166 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7167 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
7174 #if TARGET_ABI_BITS == 32
/*
 * Reassemble a 64-bit offset from the two 32-bit halves a 32-bit guest
 * ABI passes in register pairs; which half is the high word depends on
 * the target's endianness.
 */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    const uint64_t hi = word0, lo = word1;
#else
    const uint64_t hi = word1, lo = word0;
#endif
    return (hi << 32) | lo;
}
7183 #else /* TARGET_ABI_BITS == 32 */
7184 static inline uint64_t target_offset64(uint64_t word0
, uint64_t word1
)
7188 #endif /* TARGET_ABI_BITS != 32 */
7190 #ifdef TARGET_NR_truncate64
7191 static inline abi_long
target_truncate64(void *cpu_env
, const char *arg1
,
7196 if (regpairs_aligned(cpu_env
, TARGET_NR_truncate64
)) {
7200 return get_errno(truncate64(arg1
, target_offset64(arg2
, arg3
)));
7204 #ifdef TARGET_NR_ftruncate64
7205 static inline abi_long
target_ftruncate64(void *cpu_env
, abi_long arg1
,
7210 if (regpairs_aligned(cpu_env
, TARGET_NR_ftruncate64
)) {
7214 return get_errno(ftruncate64(arg1
, target_offset64(arg2
, arg3
)));
7218 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
7219 abi_ulong target_addr
)
7221 struct target_timespec
*target_ts
;
7223 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
7224 return -TARGET_EFAULT
;
7225 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
7226 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
7227 unlock_user_struct(target_ts
, target_addr
, 0);
7231 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
7232 struct timespec
*host_ts
)
7234 struct target_timespec
*target_ts
;
7236 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
7237 return -TARGET_EFAULT
;
7238 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
7239 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
7240 unlock_user_struct(target_ts
, target_addr
, 1);
7244 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
7245 abi_ulong target_addr
)
7247 struct target_itimerspec
*target_itspec
;
7249 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
7250 return -TARGET_EFAULT
;
7253 host_itspec
->it_interval
.tv_sec
=
7254 tswapal(target_itspec
->it_interval
.tv_sec
);
7255 host_itspec
->it_interval
.tv_nsec
=
7256 tswapal(target_itspec
->it_interval
.tv_nsec
);
7257 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
7258 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
7260 unlock_user_struct(target_itspec
, target_addr
, 1);
7264 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
7265 struct itimerspec
*host_its
)
7267 struct target_itimerspec
*target_itspec
;
7269 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
7270 return -TARGET_EFAULT
;
7273 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
7274 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
7276 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
7277 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
7279 unlock_user_struct(target_itspec
, target_addr
, 0);
7283 static inline abi_long
target_to_host_timex(struct timex
*host_tx
,
7284 abi_long target_addr
)
7286 struct target_timex
*target_tx
;
7288 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
7289 return -TARGET_EFAULT
;
7292 __get_user(host_tx
->modes
, &target_tx
->modes
);
7293 __get_user(host_tx
->offset
, &target_tx
->offset
);
7294 __get_user(host_tx
->freq
, &target_tx
->freq
);
7295 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7296 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
7297 __get_user(host_tx
->status
, &target_tx
->status
);
7298 __get_user(host_tx
->constant
, &target_tx
->constant
);
7299 __get_user(host_tx
->precision
, &target_tx
->precision
);
7300 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7301 __get_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
7302 __get_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
7303 __get_user(host_tx
->tick
, &target_tx
->tick
);
7304 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7305 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
7306 __get_user(host_tx
->shift
, &target_tx
->shift
);
7307 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
7308 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7309 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7310 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7311 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7312 __get_user(host_tx
->tai
, &target_tx
->tai
);
7314 unlock_user_struct(target_tx
, target_addr
, 0);
7318 static inline abi_long
host_to_target_timex(abi_long target_addr
,
7319 struct timex
*host_tx
)
7321 struct target_timex
*target_tx
;
7323 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
7324 return -TARGET_EFAULT
;
7327 __put_user(host_tx
->modes
, &target_tx
->modes
);
7328 __put_user(host_tx
->offset
, &target_tx
->offset
);
7329 __put_user(host_tx
->freq
, &target_tx
->freq
);
7330 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7331 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
7332 __put_user(host_tx
->status
, &target_tx
->status
);
7333 __put_user(host_tx
->constant
, &target_tx
->constant
);
7334 __put_user(host_tx
->precision
, &target_tx
->precision
);
7335 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7336 __put_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
7337 __put_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
7338 __put_user(host_tx
->tick
, &target_tx
->tick
);
7339 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7340 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
7341 __put_user(host_tx
->shift
, &target_tx
->shift
);
7342 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
7343 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7344 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7345 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7346 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7347 __put_user(host_tx
->tai
, &target_tx
->tai
);
7349 unlock_user_struct(target_tx
, target_addr
, 1);
7354 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
7355 abi_ulong target_addr
)
7357 struct target_sigevent
*target_sevp
;
7359 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
7360 return -TARGET_EFAULT
;
7363 /* This union is awkward on 64 bit systems because it has a 32 bit
7364 * integer and a pointer in it; we follow the conversion approach
7365 * used for handling sigval types in signal.c so the guest should get
7366 * the correct value back even if we did a 64 bit byteswap and it's
7367 * using the 32 bit integer.
7369 host_sevp
->sigev_value
.sival_ptr
=
7370 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
7371 host_sevp
->sigev_signo
=
7372 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
7373 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
7374 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
7376 unlock_user_struct(target_sevp
, target_addr
, 1);
7380 #if defined(TARGET_NR_mlockall)
7381 static inline int target_to_host_mlockall_arg(int arg
)
7385 if (arg
& TARGET_MLOCKALL_MCL_CURRENT
) {
7386 result
|= MCL_CURRENT
;
7388 if (arg
& TARGET_MLOCKALL_MCL_FUTURE
) {
7389 result
|= MCL_FUTURE
;
7395 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) || \
7396 defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) || \
7397 defined(TARGET_NR_newfstatat))
7398 static inline abi_long
host_to_target_stat64(void *cpu_env
,
7399 abi_ulong target_addr
,
7400 struct stat
*host_st
)
7402 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7403 if (((CPUARMState
*)cpu_env
)->eabi
) {
7404 struct target_eabi_stat64
*target_st
;
7406 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
7407 return -TARGET_EFAULT
;
7408 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
7409 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
7410 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
7411 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7412 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
7414 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
7415 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
7416 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
7417 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
7418 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
7419 __put_user(host_st
->st_size
, &target_st
->st_size
);
7420 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
7421 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
7422 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
7423 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
7424 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
7425 unlock_user_struct(target_st
, target_addr
, 1);
7429 #if defined(TARGET_HAS_STRUCT_STAT64)
7430 struct target_stat64
*target_st
;
7432 struct target_stat
*target_st
;
7435 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
7436 return -TARGET_EFAULT
;
7437 memset(target_st
, 0, sizeof(*target_st
));
7438 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
7439 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
7440 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7441 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
7443 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
7444 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
7445 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
7446 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
7447 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
7448 /* XXX: better use of kernel struct */
7449 __put_user(host_st
->st_size
, &target_st
->st_size
);
7450 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
7451 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
7452 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
7453 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
7454 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
7455 unlock_user_struct(target_st
, target_addr
, 1);
7462 /* ??? Using host futex calls even when target atomic operations
7463 are not really atomic probably breaks things. However implementing
7464 futexes locally would make futexes shared between multiple processes
7465 tricky. However they're probably useless because guest atomic
7466 operations won't work either. */
7467 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
7468 target_ulong uaddr2
, int val3
)
7470 struct timespec ts
, *pts
;
7473 /* ??? We assume FUTEX_* constants are the same on both host
7475 #ifdef FUTEX_CMD_MASK
7476 base_op
= op
& FUTEX_CMD_MASK
;
7482 case FUTEX_WAIT_BITSET
:
7485 target_to_host_timespec(pts
, timeout
);
7489 return get_errno(safe_futex(g2h(uaddr
), op
, tswap32(val
),
7492 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
7494 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
7496 case FUTEX_CMP_REQUEUE
:
7498 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7499 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7500 But the prototype takes a `struct timespec *'; insert casts
7501 to satisfy the compiler. We do not need to tswap TIMEOUT
7502 since it's not compared to guest memory. */
7503 pts
= (struct timespec
*)(uintptr_t) timeout
;
7504 return get_errno(safe_futex(g2h(uaddr
), op
, val
, pts
,
7506 (base_op
== FUTEX_CMP_REQUEUE
7510 return -TARGET_ENOSYS
;
7513 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7514 static abi_long
do_name_to_handle_at(abi_long dirfd
, abi_long pathname
,
7515 abi_long handle
, abi_long mount_id
,
7518 struct file_handle
*target_fh
;
7519 struct file_handle
*fh
;
7523 unsigned int size
, total_size
;
7525 if (get_user_s32(size
, handle
)) {
7526 return -TARGET_EFAULT
;
7529 name
= lock_user_string(pathname
);
7531 return -TARGET_EFAULT
;
7534 total_size
= sizeof(struct file_handle
) + size
;
7535 target_fh
= lock_user(VERIFY_WRITE
, handle
, total_size
, 0);
7537 unlock_user(name
, pathname
, 0);
7538 return -TARGET_EFAULT
;
7541 fh
= g_malloc0(total_size
);
7542 fh
->handle_bytes
= size
;
7544 ret
= get_errno(name_to_handle_at(dirfd
, path(name
), fh
, &mid
, flags
));
7545 unlock_user(name
, pathname
, 0);
7547 /* man name_to_handle_at(2):
7548 * Other than the use of the handle_bytes field, the caller should treat
7549 * the file_handle structure as an opaque data type
7552 memcpy(target_fh
, fh
, total_size
);
7553 target_fh
->handle_bytes
= tswap32(fh
->handle_bytes
);
7554 target_fh
->handle_type
= tswap32(fh
->handle_type
);
7556 unlock_user(target_fh
, handle
, total_size
);
7558 if (put_user_s32(mid
, mount_id
)) {
7559 return -TARGET_EFAULT
;
7567 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7568 static abi_long
do_open_by_handle_at(abi_long mount_fd
, abi_long handle
,
7571 struct file_handle
*target_fh
;
7572 struct file_handle
*fh
;
7573 unsigned int size
, total_size
;
7576 if (get_user_s32(size
, handle
)) {
7577 return -TARGET_EFAULT
;
7580 total_size
= sizeof(struct file_handle
) + size
;
7581 target_fh
= lock_user(VERIFY_READ
, handle
, total_size
, 1);
7583 return -TARGET_EFAULT
;
7586 fh
= g_memdup(target_fh
, total_size
);
7587 fh
->handle_bytes
= size
;
7588 fh
->handle_type
= tswap32(target_fh
->handle_type
);
7590 ret
= get_errno(open_by_handle_at(mount_fd
, fh
,
7591 target_to_host_bitmask(flags
, fcntl_flags_tbl
)));
7595 unlock_user(target_fh
, handle
, total_size
);
7601 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
/* signalfd siginfo conversion */

/* Convert a host signalfd_siginfo into guest byte order in *tinfo.
 * May be called with tinfo == info (in-place conversion); fields are
 * therefore read from info before being written to tinfo.
 */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    if (tinfo->ssi_signo == SIGBUS &&
        (tinfo->ssi_code == BUS_MCEERR_AR ||
         tinfo->ssi_code == BUS_MCEERR_AO)) {
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    /* Fix: read errno from the source struct, not the destination;
     * the old code only worked because callers convert in place.
     */
    tinfo->ssi_errno = tswap32(info->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
7643 static abi_long
host_to_target_data_signalfd(void *buf
, size_t len
)
7647 for (i
= 0; i
< len
; i
+= sizeof(struct signalfd_siginfo
)) {
7648 host_to_target_signalfd_siginfo(buf
+ i
, buf
+ i
);
7654 static TargetFdTrans target_signalfd_trans
= {
7655 .host_to_target_data
= host_to_target_data_signalfd
,
7658 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
7661 target_sigset_t
*target_mask
;
7665 if (flags
& ~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
)) {
7666 return -TARGET_EINVAL
;
7668 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
7669 return -TARGET_EFAULT
;
7672 target_to_host_sigset(&host_mask
, target_mask
);
7674 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
7676 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
7678 fd_trans_register(ret
, &target_signalfd_trans
);
7681 unlock_user_struct(target_mask
, mask
, 0);
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* terminating signal lives in the low 7 bits */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* stop signal lives in bits 8..15 */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
7701 static int open_self_cmdline(void *cpu_env
, int fd
)
7703 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7704 struct linux_binprm
*bprm
= ((TaskState
*)cpu
->opaque
)->bprm
;
7707 for (i
= 0; i
< bprm
->argc
; i
++) {
7708 size_t len
= strlen(bprm
->argv
[i
]) + 1;
7710 if (write(fd
, bprm
->argv
[i
], len
) != len
) {
7718 static int open_self_maps(void *cpu_env
, int fd
)
7720 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7721 TaskState
*ts
= cpu
->opaque
;
7727 fp
= fopen("/proc/self/maps", "r");
7732 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7733 int fields
, dev_maj
, dev_min
, inode
;
7734 uint64_t min
, max
, offset
;
7735 char flag_r
, flag_w
, flag_x
, flag_p
;
7736 char path
[512] = "";
7737 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
7738 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
7739 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
7741 if ((fields
< 10) || (fields
> 11)) {
7744 if (h2g_valid(min
)) {
7745 int flags
= page_get_flags(h2g(min
));
7746 max
= h2g_valid(max
- 1) ? max
: (uintptr_t)g2h(GUEST_ADDR_MAX
) + 1;
7747 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
7750 if (h2g(min
) == ts
->info
->stack_limit
) {
7751 pstrcpy(path
, sizeof(path
), " [stack]");
7753 dprintf(fd
, TARGET_ABI_FMT_ptr
"-" TARGET_ABI_FMT_ptr
7754 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
7755 h2g(min
), h2g(max
- 1) + 1, flag_r
, flag_w
,
7756 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
7757 path
[0] ? " " : "", path
);
7767 static int open_self_stat(void *cpu_env
, int fd
)
7769 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7770 TaskState
*ts
= cpu
->opaque
;
7771 abi_ulong start_stack
= ts
->info
->start_stack
;
7774 for (i
= 0; i
< 44; i
++) {
7782 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
7783 } else if (i
== 1) {
7785 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
7786 } else if (i
== 27) {
7789 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
7791 /* for the rest, there is MasterCard */
7792 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
7796 if (write(fd
, buf
, len
) != len
) {
7804 static int open_self_auxv(void *cpu_env
, int fd
)
7806 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7807 TaskState
*ts
= cpu
->opaque
;
7808 abi_ulong auxv
= ts
->info
->saved_auxv
;
7809 abi_ulong len
= ts
->info
->auxv_len
;
7813 * Auxiliary vector is stored in target process stack.
7814 * read in whole auxv vector and copy it to file
7816 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
7820 r
= write(fd
, ptr
, len
);
7827 lseek(fd
, 0, SEEK_SET
);
7828 unlock_user(ptr
, auxv
, len
);
/* Return 1 if filename refers to this process's own proc entry named
 * `entry` — i.e. "/proc/self/<entry>" or "/proc/<getpid()>/<entry>" —
 * and 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}
7858 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-match comparator used by the fake-/proc table for absolute
 * paths like "/proc/net/route".
 */
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}
7864 static int open_net_route(void *cpu_env
, int fd
)
7871 fp
= fopen("/proc/net/route", "r");
7878 read
= getline(&line
, &len
, fp
);
7879 dprintf(fd
, "%s", line
);
7883 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7885 uint32_t dest
, gw
, mask
;
7886 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
7887 sscanf(line
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7888 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
7889 &mask
, &mtu
, &window
, &irtt
);
7890 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7891 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
7892 metric
, tswap32(mask
), mtu
, window
, irtt
);
7902 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
7905 const char *filename
;
7906 int (*fill
)(void *cpu_env
, int fd
);
7907 int (*cmp
)(const char *s1
, const char *s2
);
7909 const struct fake_open
*fake_open
;
7910 static const struct fake_open fakes
[] = {
7911 { "maps", open_self_maps
, is_proc_myself
},
7912 { "stat", open_self_stat
, is_proc_myself
},
7913 { "auxv", open_self_auxv
, is_proc_myself
},
7914 { "cmdline", open_self_cmdline
, is_proc_myself
},
7915 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7916 { "/proc/net/route", open_net_route
, is_proc
},
7918 { NULL
, NULL
, NULL
}
7921 if (is_proc_myself(pathname
, "exe")) {
7922 int execfd
= qemu_getauxval(AT_EXECFD
);
7923 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
7926 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
7927 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
7932 if (fake_open
->filename
) {
7934 char filename
[PATH_MAX
];
7937 /* create temporary file to map stat to */
7938 tmpdir
= getenv("TMPDIR");
7941 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
7942 fd
= mkstemp(filename
);
7948 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
7954 lseek(fd
, 0, SEEK_SET
);
7959 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
7962 #define TIMER_MAGIC 0x0caf0000
7963 #define TIMER_MAGIC_MASK 0xffff0000
7965 /* Convert QEMU provided timer ID back to internal 16bit index format */
7966 static target_timer_t
get_timer_id(abi_long arg
)
7968 target_timer_t timerid
= arg
;
7970 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
7971 return -TARGET_EINVAL
;
7976 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
7977 return -TARGET_EINVAL
;
7983 static abi_long
swap_data_eventfd(void *buf
, size_t len
)
7985 uint64_t *counter
= buf
;
7988 if (len
< sizeof(uint64_t)) {
7992 for (i
= 0; i
< len
; i
+= sizeof(uint64_t)) {
7993 *counter
= tswap64(*counter
);
8000 static TargetFdTrans target_eventfd_trans
= {
8001 .host_to_target_data
= swap_data_eventfd
,
8002 .target_to_host_data
= swap_data_eventfd
,
8005 #if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
8006 (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
8007 defined(__NR_inotify_init1))
8008 static abi_long
host_to_target_data_inotify(void *buf
, size_t len
)
8010 struct inotify_event
*ev
;
8014 for (i
= 0; i
< len
; i
+= sizeof(struct inotify_event
) + name_len
) {
8015 ev
= (struct inotify_event
*)((char *)buf
+ i
);
8018 ev
->wd
= tswap32(ev
->wd
);
8019 ev
->mask
= tswap32(ev
->mask
);
8020 ev
->cookie
= tswap32(ev
->cookie
);
8021 ev
->len
= tswap32(name_len
);
8027 static TargetFdTrans target_inotify_trans
= {
8028 .host_to_target_data
= host_to_target_data_inotify
,
8032 static int target_to_host_cpu_mask(unsigned long *host_mask
,
8034 abi_ulong target_addr
,
8037 unsigned target_bits
= sizeof(abi_ulong
) * 8;
8038 unsigned host_bits
= sizeof(*host_mask
) * 8;
8039 abi_ulong
*target_mask
;
8042 assert(host_size
>= target_size
);
8044 target_mask
= lock_user(VERIFY_READ
, target_addr
, target_size
, 1);
8046 return -TARGET_EFAULT
;
8048 memset(host_mask
, 0, host_size
);
8050 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
8051 unsigned bit
= i
* target_bits
;
8054 __get_user(val
, &target_mask
[i
]);
8055 for (j
= 0; j
< target_bits
; j
++, bit
++) {
8056 if (val
& (1UL << j
)) {
8057 host_mask
[bit
/ host_bits
] |= 1UL << (bit
% host_bits
);
8062 unlock_user(target_mask
, target_addr
, 0);
8066 static int host_to_target_cpu_mask(const unsigned long *host_mask
,
8068 abi_ulong target_addr
,
8071 unsigned target_bits
= sizeof(abi_ulong
) * 8;
8072 unsigned host_bits
= sizeof(*host_mask
) * 8;
8073 abi_ulong
*target_mask
;
8076 assert(host_size
>= target_size
);
8078 target_mask
= lock_user(VERIFY_WRITE
, target_addr
, target_size
, 0);
8080 return -TARGET_EFAULT
;
8083 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
8084 unsigned bit
= i
* target_bits
;
8087 for (j
= 0; j
< target_bits
; j
++, bit
++) {
8088 if (host_mask
[bit
/ host_bits
] & (1UL << (bit
% host_bits
))) {
8092 __put_user(val
, &target_mask
[i
]);
8095 unlock_user(target_mask
, target_addr
, target_size
);
8099 /* do_syscall() should always have a single exit point at the end so
8100 that actions, such as logging of syscall results, can be performed.
8101 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
8102 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
8103 abi_long arg2
, abi_long arg3
, abi_long arg4
,
8104 abi_long arg5
, abi_long arg6
, abi_long arg7
,
8107 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
8109 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8110 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8111 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64)
8114 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8115 || defined(TARGET_NR_fstatfs)
8120 #if defined(DEBUG_ERESTARTSYS)
8121 /* Debug-only code for exercising the syscall-restart code paths
8122 * in the per-architecture cpu main loops: restart every syscall
8123 * the guest makes once before letting it through.
8130 return -TARGET_ERESTARTSYS
;
8136 gemu_log("syscall %d", num
);
8138 trace_guest_user_syscall(cpu
, num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
8140 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8143 case TARGET_NR_exit
:
8144 /* In old applications this may be used to implement _exit(2).
8145 However in threaded applictions it is used for thread termination,
8146 and _exit_group is used for application termination.
8147 Do thread termination if we have more then one thread. */
8149 if (block_signals()) {
8150 ret
= -TARGET_ERESTARTSYS
;
8156 if (CPU_NEXT(first_cpu
)) {
8159 /* Remove the CPU from the list. */
8160 QTAILQ_REMOVE(&cpus
, cpu
, node
);
8165 if (ts
->child_tidptr
) {
8166 put_user_u32(0, ts
->child_tidptr
);
8167 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
8171 object_unref(OBJECT(cpu
));
8173 rcu_unregister_thread();
8178 preexit_cleanup(cpu_env
, arg1
);
8180 ret
= 0; /* avoid warning */
8182 case TARGET_NR_read
:
8186 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
8188 ret
= get_errno(safe_read(arg1
, p
, arg3
));
8190 fd_trans_host_to_target_data(arg1
)) {
8191 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
8193 unlock_user(p
, arg2
, ret
);
8196 case TARGET_NR_write
:
8197 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
8199 if (fd_trans_target_to_host_data(arg1
)) {
8200 void *copy
= g_malloc(arg3
);
8201 memcpy(copy
, p
, arg3
);
8202 ret
= fd_trans_target_to_host_data(arg1
)(copy
, arg3
);
8204 ret
= get_errno(safe_write(arg1
, copy
, ret
));
8208 ret
= get_errno(safe_write(arg1
, p
, arg3
));
8210 unlock_user(p
, arg2
, 0);
8212 #ifdef TARGET_NR_open
8213 case TARGET_NR_open
:
8214 if (!(p
= lock_user_string(arg1
)))
8216 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
8217 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
8219 fd_trans_unregister(ret
);
8220 unlock_user(p
, arg1
, 0);
8223 case TARGET_NR_openat
:
8224 if (!(p
= lock_user_string(arg2
)))
8226 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
8227 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
8229 fd_trans_unregister(ret
);
8230 unlock_user(p
, arg2
, 0);
8232 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8233 case TARGET_NR_name_to_handle_at
:
8234 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
8237 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8238 case TARGET_NR_open_by_handle_at
:
8239 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
8240 fd_trans_unregister(ret
);
8243 case TARGET_NR_close
:
8244 fd_trans_unregister(arg1
);
8245 ret
= get_errno(close(arg1
));
8250 #ifdef TARGET_NR_fork
8251 case TARGET_NR_fork
:
8252 ret
= get_errno(do_fork(cpu_env
, TARGET_SIGCHLD
, 0, 0, 0, 0));
8255 #ifdef TARGET_NR_waitpid
8256 case TARGET_NR_waitpid
:
8259 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
8260 if (!is_error(ret
) && arg2
&& ret
8261 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
8266 #ifdef TARGET_NR_waitid
8267 case TARGET_NR_waitid
:
8271 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
8272 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
8273 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
8275 host_to_target_siginfo(p
, &info
);
8276 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
8281 #ifdef TARGET_NR_creat /* not on alpha */
8282 case TARGET_NR_creat
:
8283 if (!(p
= lock_user_string(arg1
)))
8285 ret
= get_errno(creat(p
, arg2
));
8286 fd_trans_unregister(ret
);
8287 unlock_user(p
, arg1
, 0);
8290 #ifdef TARGET_NR_link
8291 case TARGET_NR_link
:
8294 p
= lock_user_string(arg1
);
8295 p2
= lock_user_string(arg2
);
8297 ret
= -TARGET_EFAULT
;
8299 ret
= get_errno(link(p
, p2
));
8300 unlock_user(p2
, arg2
, 0);
8301 unlock_user(p
, arg1
, 0);
8305 #if defined(TARGET_NR_linkat)
8306 case TARGET_NR_linkat
:
8311 p
= lock_user_string(arg2
);
8312 p2
= lock_user_string(arg4
);
8314 ret
= -TARGET_EFAULT
;
8316 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
8317 unlock_user(p
, arg2
, 0);
8318 unlock_user(p2
, arg4
, 0);
8322 #ifdef TARGET_NR_unlink
8323 case TARGET_NR_unlink
:
8324 if (!(p
= lock_user_string(arg1
)))
8326 ret
= get_errno(unlink(p
));
8327 unlock_user(p
, arg1
, 0);
8330 #if defined(TARGET_NR_unlinkat)
8331 case TARGET_NR_unlinkat
:
8332 if (!(p
= lock_user_string(arg2
)))
8334 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
8335 unlock_user(p
, arg2
, 0);
8338 case TARGET_NR_execve
:
8340 char **argp
, **envp
;
8343 abi_ulong guest_argp
;
8344 abi_ulong guest_envp
;
8351 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
8352 if (get_user_ual(addr
, gp
))
8360 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
8361 if (get_user_ual(addr
, gp
))
8368 argp
= g_new0(char *, argc
+ 1);
8369 envp
= g_new0(char *, envc
+ 1);
8371 for (gp
= guest_argp
, q
= argp
; gp
;
8372 gp
+= sizeof(abi_ulong
), q
++) {
8373 if (get_user_ual(addr
, gp
))
8377 if (!(*q
= lock_user_string(addr
)))
8379 total_size
+= strlen(*q
) + 1;
8383 for (gp
= guest_envp
, q
= envp
; gp
;
8384 gp
+= sizeof(abi_ulong
), q
++) {
8385 if (get_user_ual(addr
, gp
))
8389 if (!(*q
= lock_user_string(addr
)))
8391 total_size
+= strlen(*q
) + 1;
8395 if (!(p
= lock_user_string(arg1
)))
8397 /* Although execve() is not an interruptible syscall it is
8398 * a special case where we must use the safe_syscall wrapper:
8399 * if we allow a signal to happen before we make the host
8400 * syscall then we will 'lose' it, because at the point of
8401 * execve the process leaves QEMU's control. So we use the
8402 * safe syscall wrapper to ensure that we either take the
8403 * signal as a guest signal, or else it does not happen
8404 * before the execve completes and makes it the other
8405 * program's problem.
8407 ret
= get_errno(safe_execve(p
, argp
, envp
));
8408 unlock_user(p
, arg1
, 0);
8413 ret
= -TARGET_EFAULT
;
8416 for (gp
= guest_argp
, q
= argp
; *q
;
8417 gp
+= sizeof(abi_ulong
), q
++) {
8418 if (get_user_ual(addr
, gp
)
8421 unlock_user(*q
, addr
, 0);
8423 for (gp
= guest_envp
, q
= envp
; *q
;
8424 gp
+= sizeof(abi_ulong
), q
++) {
8425 if (get_user_ual(addr
, gp
)
8428 unlock_user(*q
, addr
, 0);
8435 case TARGET_NR_chdir
:
8436 if (!(p
= lock_user_string(arg1
)))
8438 ret
= get_errno(chdir(p
));
8439 unlock_user(p
, arg1
, 0);
8441 #ifdef TARGET_NR_time
8442 case TARGET_NR_time
:
8445 ret
= get_errno(time(&host_time
));
8448 && put_user_sal(host_time
, arg1
))
8453 #ifdef TARGET_NR_mknod
8454 case TARGET_NR_mknod
:
8455 if (!(p
= lock_user_string(arg1
)))
8457 ret
= get_errno(mknod(p
, arg2
, arg3
));
8458 unlock_user(p
, arg1
, 0);
8461 #if defined(TARGET_NR_mknodat)
8462 case TARGET_NR_mknodat
:
8463 if (!(p
= lock_user_string(arg2
)))
8465 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
8466 unlock_user(p
, arg2
, 0);
8469 #ifdef TARGET_NR_chmod
8470 case TARGET_NR_chmod
:
8471 if (!(p
= lock_user_string(arg1
)))
8473 ret
= get_errno(chmod(p
, arg2
));
8474 unlock_user(p
, arg1
, 0);
8477 #ifdef TARGET_NR_break
8478 case TARGET_NR_break
:
8481 #ifdef TARGET_NR_oldstat
8482 case TARGET_NR_oldstat
:
8485 #ifdef TARGET_NR_lseek
8486 case TARGET_NR_lseek
:
8487 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
8490 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8491 /* Alpha specific */
8492 case TARGET_NR_getxpid
:
8493 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
8494 ret
= get_errno(getpid());
8497 #ifdef TARGET_NR_getpid
8498 case TARGET_NR_getpid
:
8499 ret
= get_errno(getpid());
8502 case TARGET_NR_mount
:
8504 /* need to look at the data field */
8508 p
= lock_user_string(arg1
);
8516 p2
= lock_user_string(arg2
);
8519 unlock_user(p
, arg1
, 0);
8525 p3
= lock_user_string(arg3
);
8528 unlock_user(p
, arg1
, 0);
8530 unlock_user(p2
, arg2
, 0);
8537 /* FIXME - arg5 should be locked, but it isn't clear how to
8538 * do that since it's not guaranteed to be a NULL-terminated
8542 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
8544 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
8546 ret
= get_errno(ret
);
8549 unlock_user(p
, arg1
, 0);
8551 unlock_user(p2
, arg2
, 0);
8553 unlock_user(p3
, arg3
, 0);
8557 #ifdef TARGET_NR_umount
8558 case TARGET_NR_umount
:
8559 if (!(p
= lock_user_string(arg1
)))
8561 ret
= get_errno(umount(p
));
8562 unlock_user(p
, arg1
, 0);
8565 #ifdef TARGET_NR_stime /* not on alpha */
8566 case TARGET_NR_stime
:
8569 if (get_user_sal(host_time
, arg1
))
8571 ret
= get_errno(stime(&host_time
));
8575 case TARGET_NR_ptrace
:
8577 #ifdef TARGET_NR_alarm /* not on alpha */
8578 case TARGET_NR_alarm
:
8582 #ifdef TARGET_NR_oldfstat
8583 case TARGET_NR_oldfstat
:
8586 #ifdef TARGET_NR_pause /* not on alpha */
8587 case TARGET_NR_pause
:
8588 if (!block_signals()) {
8589 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
8591 ret
= -TARGET_EINTR
;
8594 #ifdef TARGET_NR_utime
8595 case TARGET_NR_utime
:
8597 struct utimbuf tbuf
, *host_tbuf
;
8598 struct target_utimbuf
*target_tbuf
;
8600 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
8602 tbuf
.actime
= tswapal(target_tbuf
->actime
);
8603 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
8604 unlock_user_struct(target_tbuf
, arg2
, 0);
8609 if (!(p
= lock_user_string(arg1
)))
8611 ret
= get_errno(utime(p
, host_tbuf
));
8612 unlock_user(p
, arg1
, 0);
8616 #ifdef TARGET_NR_utimes
8617 case TARGET_NR_utimes
:
8619 struct timeval
*tvp
, tv
[2];
8621 if (copy_from_user_timeval(&tv
[0], arg2
)
8622 || copy_from_user_timeval(&tv
[1],
8623 arg2
+ sizeof(struct target_timeval
)))
8629 if (!(p
= lock_user_string(arg1
)))
8631 ret
= get_errno(utimes(p
, tvp
));
8632 unlock_user(p
, arg1
, 0);
8636 #if defined(TARGET_NR_futimesat)
8637 case TARGET_NR_futimesat
:
8639 struct timeval
*tvp
, tv
[2];
8641 if (copy_from_user_timeval(&tv
[0], arg3
)
8642 || copy_from_user_timeval(&tv
[1],
8643 arg3
+ sizeof(struct target_timeval
)))
8649 if (!(p
= lock_user_string(arg2
)))
8651 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
8652 unlock_user(p
, arg2
, 0);
8656 #ifdef TARGET_NR_stty
8657 case TARGET_NR_stty
:
8660 #ifdef TARGET_NR_gtty
8661 case TARGET_NR_gtty
:
8664 #ifdef TARGET_NR_access
8665 case TARGET_NR_access
:
8666 if (!(p
= lock_user_string(arg1
)))
8668 ret
= get_errno(access(path(p
), arg2
));
8669 unlock_user(p
, arg1
, 0);
8672 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8673 case TARGET_NR_faccessat
:
8674 if (!(p
= lock_user_string(arg2
)))
8676 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
8677 unlock_user(p
, arg2
, 0);
8680 #ifdef TARGET_NR_nice /* not on alpha */
8681 case TARGET_NR_nice
:
8682 ret
= get_errno(nice(arg1
));
8685 #ifdef TARGET_NR_ftime
8686 case TARGET_NR_ftime
:
8689 case TARGET_NR_sync
:
8693 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8694 case TARGET_NR_syncfs
:
8695 ret
= get_errno(syncfs(arg1
));
8698 case TARGET_NR_kill
:
8699 ret
= get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
8701 #ifdef TARGET_NR_rename
8702 case TARGET_NR_rename
:
8705 p
= lock_user_string(arg1
);
8706 p2
= lock_user_string(arg2
);
8708 ret
= -TARGET_EFAULT
;
8710 ret
= get_errno(rename(p
, p2
));
8711 unlock_user(p2
, arg2
, 0);
8712 unlock_user(p
, arg1
, 0);
8716 #if defined(TARGET_NR_renameat)
8717 case TARGET_NR_renameat
:
8720 p
= lock_user_string(arg2
);
8721 p2
= lock_user_string(arg4
);
8723 ret
= -TARGET_EFAULT
;
8725 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
8726 unlock_user(p2
, arg4
, 0);
8727 unlock_user(p
, arg2
, 0);
8731 #if defined(TARGET_NR_renameat2)
8732 case TARGET_NR_renameat2
:
8735 p
= lock_user_string(arg2
);
8736 p2
= lock_user_string(arg4
);
8738 ret
= -TARGET_EFAULT
;
8740 ret
= get_errno(sys_renameat2(arg1
, p
, arg3
, p2
, arg5
));
8742 unlock_user(p2
, arg4
, 0);
8743 unlock_user(p
, arg2
, 0);
8747 #ifdef TARGET_NR_mkdir
8748 case TARGET_NR_mkdir
:
8749 if (!(p
= lock_user_string(arg1
)))
8751 ret
= get_errno(mkdir(p
, arg2
));
8752 unlock_user(p
, arg1
, 0);
8755 #if defined(TARGET_NR_mkdirat)
8756 case TARGET_NR_mkdirat
:
8757 if (!(p
= lock_user_string(arg2
)))
8759 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
8760 unlock_user(p
, arg2
, 0);
8763 #ifdef TARGET_NR_rmdir
8764 case TARGET_NR_rmdir
:
8765 if (!(p
= lock_user_string(arg1
)))
8767 ret
= get_errno(rmdir(p
));
8768 unlock_user(p
, arg1
, 0);
8772 ret
= get_errno(dup(arg1
));
8774 fd_trans_dup(arg1
, ret
);
8777 #ifdef TARGET_NR_pipe
8778 case TARGET_NR_pipe
:
8779 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
8782 #ifdef TARGET_NR_pipe2
8783 case TARGET_NR_pipe2
:
8784 ret
= do_pipe(cpu_env
, arg1
,
8785 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
8788 case TARGET_NR_times
:
8790 struct target_tms
*tmsp
;
8792 ret
= get_errno(times(&tms
));
8794 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
8797 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
8798 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
8799 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
8800 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
8803 ret
= host_to_target_clock_t(ret
);
8806 #ifdef TARGET_NR_prof
8807 case TARGET_NR_prof
:
8810 #ifdef TARGET_NR_signal
8811 case TARGET_NR_signal
:
8814 case TARGET_NR_acct
:
8816 ret
= get_errno(acct(NULL
));
8818 if (!(p
= lock_user_string(arg1
)))
8820 ret
= get_errno(acct(path(p
)));
8821 unlock_user(p
, arg1
, 0);
8824 #ifdef TARGET_NR_umount2
8825 case TARGET_NR_umount2
:
8826 if (!(p
= lock_user_string(arg1
)))
8828 ret
= get_errno(umount2(p
, arg2
));
8829 unlock_user(p
, arg1
, 0);
8832 #ifdef TARGET_NR_lock
8833 case TARGET_NR_lock
:
8836 case TARGET_NR_ioctl
:
8837 ret
= do_ioctl(arg1
, arg2
, arg3
);
8839 #ifdef TARGET_NR_fcntl
8840 case TARGET_NR_fcntl
:
8841 ret
= do_fcntl(arg1
, arg2
, arg3
);
8844 #ifdef TARGET_NR_mpx
8848 case TARGET_NR_setpgid
:
8849 ret
= get_errno(setpgid(arg1
, arg2
));
8851 #ifdef TARGET_NR_ulimit
8852 case TARGET_NR_ulimit
:
8855 #ifdef TARGET_NR_oldolduname
8856 case TARGET_NR_oldolduname
:
8859 case TARGET_NR_umask
:
8860 ret
= get_errno(umask(arg1
));
8862 case TARGET_NR_chroot
:
8863 if (!(p
= lock_user_string(arg1
)))
8865 ret
= get_errno(chroot(p
));
8866 unlock_user(p
, arg1
, 0);
8868 #ifdef TARGET_NR_ustat
8869 case TARGET_NR_ustat
:
8872 #ifdef TARGET_NR_dup2
8873 case TARGET_NR_dup2
:
8874 ret
= get_errno(dup2(arg1
, arg2
));
8876 fd_trans_dup(arg1
, arg2
);
8880 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8881 case TARGET_NR_dup3
:
8885 if ((arg3
& ~TARGET_O_CLOEXEC
) != 0) {
8888 host_flags
= target_to_host_bitmask(arg3
, fcntl_flags_tbl
);
8889 ret
= get_errno(dup3(arg1
, arg2
, host_flags
));
8891 fd_trans_dup(arg1
, arg2
);
8896 #ifdef TARGET_NR_getppid /* not on alpha */
8897 case TARGET_NR_getppid
:
8898 ret
= get_errno(getppid());
8901 #ifdef TARGET_NR_getpgrp
8902 case TARGET_NR_getpgrp
:
8903 ret
= get_errno(getpgrp());
8906 case TARGET_NR_setsid
:
8907 ret
= get_errno(setsid());
8909 #ifdef TARGET_NR_sigaction
8910 case TARGET_NR_sigaction
:
8912 #if defined(TARGET_ALPHA)
8913 struct target_sigaction act
, oact
, *pact
= 0;
8914 struct target_old_sigaction
*old_act
;
8916 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8918 act
._sa_handler
= old_act
->_sa_handler
;
8919 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8920 act
.sa_flags
= old_act
->sa_flags
;
8921 act
.sa_restorer
= 0;
8922 unlock_user_struct(old_act
, arg2
, 0);
8925 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8926 if (!is_error(ret
) && arg3
) {
8927 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8929 old_act
->_sa_handler
= oact
._sa_handler
;
8930 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8931 old_act
->sa_flags
= oact
.sa_flags
;
8932 unlock_user_struct(old_act
, arg3
, 1);
8934 #elif defined(TARGET_MIPS)
8935 struct target_sigaction act
, oact
, *pact
, *old_act
;
8938 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8940 act
._sa_handler
= old_act
->_sa_handler
;
8941 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
8942 act
.sa_flags
= old_act
->sa_flags
;
8943 unlock_user_struct(old_act
, arg2
, 0);
8949 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8951 if (!is_error(ret
) && arg3
) {
8952 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8954 old_act
->_sa_handler
= oact
._sa_handler
;
8955 old_act
->sa_flags
= oact
.sa_flags
;
8956 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
8957 old_act
->sa_mask
.sig
[1] = 0;
8958 old_act
->sa_mask
.sig
[2] = 0;
8959 old_act
->sa_mask
.sig
[3] = 0;
8960 unlock_user_struct(old_act
, arg3
, 1);
8963 struct target_old_sigaction
*old_act
;
8964 struct target_sigaction act
, oact
, *pact
;
8966 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8968 act
._sa_handler
= old_act
->_sa_handler
;
8969 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8970 act
.sa_flags
= old_act
->sa_flags
;
8971 act
.sa_restorer
= old_act
->sa_restorer
;
8972 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8973 act
.ka_restorer
= 0;
8975 unlock_user_struct(old_act
, arg2
, 0);
8980 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8981 if (!is_error(ret
) && arg3
) {
8982 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8984 old_act
->_sa_handler
= oact
._sa_handler
;
8985 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8986 old_act
->sa_flags
= oact
.sa_flags
;
8987 old_act
->sa_restorer
= oact
.sa_restorer
;
8988 unlock_user_struct(old_act
, arg3
, 1);
8994 case TARGET_NR_rt_sigaction
:
8996 #if defined(TARGET_ALPHA)
8997 /* For Alpha and SPARC this is a 5 argument syscall, with
8998 * a 'restorer' parameter which must be copied into the
8999 * sa_restorer field of the sigaction struct.
9000 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9001 * and arg5 is the sigsetsize.
9002 * Alpha also has a separate rt_sigaction struct that it uses
9003 * here; SPARC uses the usual sigaction struct.
9005 struct target_rt_sigaction
*rt_act
;
9006 struct target_sigaction act
, oact
, *pact
= 0;
9008 if (arg4
!= sizeof(target_sigset_t
)) {
9009 ret
= -TARGET_EINVAL
;
9013 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
9015 act
._sa_handler
= rt_act
->_sa_handler
;
9016 act
.sa_mask
= rt_act
->sa_mask
;
9017 act
.sa_flags
= rt_act
->sa_flags
;
9018 act
.sa_restorer
= arg5
;
9019 unlock_user_struct(rt_act
, arg2
, 0);
9022 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
9023 if (!is_error(ret
) && arg3
) {
9024 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
9026 rt_act
->_sa_handler
= oact
._sa_handler
;
9027 rt_act
->sa_mask
= oact
.sa_mask
;
9028 rt_act
->sa_flags
= oact
.sa_flags
;
9029 unlock_user_struct(rt_act
, arg3
, 1);
9033 target_ulong restorer
= arg4
;
9034 target_ulong sigsetsize
= arg5
;
9036 target_ulong sigsetsize
= arg4
;
9038 struct target_sigaction
*act
;
9039 struct target_sigaction
*oact
;
9041 if (sigsetsize
!= sizeof(target_sigset_t
)) {
9042 ret
= -TARGET_EINVAL
;
9046 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1)) {
9049 #ifdef TARGET_ARCH_HAS_KA_RESTORER
9050 act
->ka_restorer
= restorer
;
9056 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
9057 ret
= -TARGET_EFAULT
;
9058 goto rt_sigaction_fail
;
9062 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
9065 unlock_user_struct(act
, arg2
, 0);
9067 unlock_user_struct(oact
, arg3
, 1);
9071 #ifdef TARGET_NR_sgetmask /* not on alpha */
9072 case TARGET_NR_sgetmask
:
9075 abi_ulong target_set
;
9076 ret
= do_sigprocmask(0, NULL
, &cur_set
);
9078 host_to_target_old_sigset(&target_set
, &cur_set
);
9084 #ifdef TARGET_NR_ssetmask /* not on alpha */
9085 case TARGET_NR_ssetmask
:
9088 abi_ulong target_set
= arg1
;
9089 target_to_host_old_sigset(&set
, &target_set
);
9090 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
9092 host_to_target_old_sigset(&target_set
, &oset
);
9098 #ifdef TARGET_NR_sigprocmask
9099 case TARGET_NR_sigprocmask
:
9101 #if defined(TARGET_ALPHA)
9102 sigset_t set
, oldset
;
9107 case TARGET_SIG_BLOCK
:
9110 case TARGET_SIG_UNBLOCK
:
9113 case TARGET_SIG_SETMASK
:
9117 ret
= -TARGET_EINVAL
;
9121 target_to_host_old_sigset(&set
, &mask
);
9123 ret
= do_sigprocmask(how
, &set
, &oldset
);
9124 if (!is_error(ret
)) {
9125 host_to_target_old_sigset(&mask
, &oldset
);
9127 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
9130 sigset_t set
, oldset
, *set_ptr
;
9135 case TARGET_SIG_BLOCK
:
9138 case TARGET_SIG_UNBLOCK
:
9141 case TARGET_SIG_SETMASK
:
9145 ret
= -TARGET_EINVAL
;
9148 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
9150 target_to_host_old_sigset(&set
, p
);
9151 unlock_user(p
, arg2
, 0);
9157 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
9158 if (!is_error(ret
) && arg3
) {
9159 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
9161 host_to_target_old_sigset(p
, &oldset
);
9162 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
9168 case TARGET_NR_rt_sigprocmask
:
9171 sigset_t set
, oldset
, *set_ptr
;
9173 if (arg4
!= sizeof(target_sigset_t
)) {
9174 ret
= -TARGET_EINVAL
;
9180 case TARGET_SIG_BLOCK
:
9183 case TARGET_SIG_UNBLOCK
:
9186 case TARGET_SIG_SETMASK
:
9190 ret
= -TARGET_EINVAL
;
9193 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
9195 target_to_host_sigset(&set
, p
);
9196 unlock_user(p
, arg2
, 0);
9202 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
9203 if (!is_error(ret
) && arg3
) {
9204 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
9206 host_to_target_sigset(p
, &oldset
);
9207 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
9211 #ifdef TARGET_NR_sigpending
9212 case TARGET_NR_sigpending
:
9215 ret
= get_errno(sigpending(&set
));
9216 if (!is_error(ret
)) {
9217 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
9219 host_to_target_old_sigset(p
, &set
);
9220 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
9225 case TARGET_NR_rt_sigpending
:
9229 /* Yes, this check is >, not != like most. We follow the kernel's
9230 * logic and it does it like this because it implements
9231 * NR_sigpending through the same code path, and in that case
9232 * the old_sigset_t is smaller in size.
9234 if (arg2
> sizeof(target_sigset_t
)) {
9235 ret
= -TARGET_EINVAL
;
9239 ret
= get_errno(sigpending(&set
));
9240 if (!is_error(ret
)) {
9241 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
9243 host_to_target_sigset(p
, &set
);
9244 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
9248 #ifdef TARGET_NR_sigsuspend
9249 case TARGET_NR_sigsuspend
:
9251 TaskState
*ts
= cpu
->opaque
;
9252 #if defined(TARGET_ALPHA)
9253 abi_ulong mask
= arg1
;
9254 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
9256 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
9258 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
9259 unlock_user(p
, arg1
, 0);
9261 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
9263 if (ret
!= -TARGET_ERESTARTSYS
) {
9264 ts
->in_sigsuspend
= 1;
9269 case TARGET_NR_rt_sigsuspend
:
9271 TaskState
*ts
= cpu
->opaque
;
9273 if (arg2
!= sizeof(target_sigset_t
)) {
9274 ret
= -TARGET_EINVAL
;
9277 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
9279 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
9280 unlock_user(p
, arg1
, 0);
9281 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
9283 if (ret
!= -TARGET_ERESTARTSYS
) {
9284 ts
->in_sigsuspend
= 1;
9288 case TARGET_NR_rt_sigtimedwait
:
9291 struct timespec uts
, *puts
;
9294 if (arg4
!= sizeof(target_sigset_t
)) {
9295 ret
= -TARGET_EINVAL
;
9299 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
9301 target_to_host_sigset(&set
, p
);
9302 unlock_user(p
, arg1
, 0);
9305 target_to_host_timespec(puts
, arg3
);
9309 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
9311 if (!is_error(ret
)) {
9313 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
9318 host_to_target_siginfo(p
, &uinfo
);
9319 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
9321 ret
= host_to_target_signal(ret
);
9325 case TARGET_NR_rt_sigqueueinfo
:
9329 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
9333 target_to_host_siginfo(&uinfo
, p
);
9334 unlock_user(p
, arg3
, 0);
9335 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
9338 case TARGET_NR_rt_tgsigqueueinfo
:
9342 p
= lock_user(VERIFY_READ
, arg4
, sizeof(target_siginfo_t
), 1);
9346 target_to_host_siginfo(&uinfo
, p
);
9347 unlock_user(p
, arg4
, 0);
9348 ret
= get_errno(sys_rt_tgsigqueueinfo(arg1
, arg2
, arg3
, &uinfo
));
9351 #ifdef TARGET_NR_sigreturn
9352 case TARGET_NR_sigreturn
:
9353 if (block_signals()) {
9354 ret
= -TARGET_ERESTARTSYS
;
9356 ret
= do_sigreturn(cpu_env
);
9360 case TARGET_NR_rt_sigreturn
:
9361 if (block_signals()) {
9362 ret
= -TARGET_ERESTARTSYS
;
9364 ret
= do_rt_sigreturn(cpu_env
);
9367 case TARGET_NR_sethostname
:
9368 if (!(p
= lock_user_string(arg1
)))
9370 ret
= get_errno(sethostname(p
, arg2
));
9371 unlock_user(p
, arg1
, 0);
9373 #ifdef TARGET_NR_setrlimit
9374 case TARGET_NR_setrlimit
:
9376 int resource
= target_to_host_resource(arg1
);
9377 struct target_rlimit
*target_rlim
;
9379 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
9381 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
9382 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
9383 unlock_user_struct(target_rlim
, arg2
, 0);
9384 ret
= get_errno(setrlimit(resource
, &rlim
));
9388 #ifdef TARGET_NR_getrlimit
9389 case TARGET_NR_getrlimit
:
9391 int resource
= target_to_host_resource(arg1
);
9392 struct target_rlimit
*target_rlim
;
9395 ret
= get_errno(getrlimit(resource
, &rlim
));
9396 if (!is_error(ret
)) {
9397 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
9399 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
9400 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
9401 unlock_user_struct(target_rlim
, arg2
, 1);
9406 case TARGET_NR_getrusage
:
9408 struct rusage rusage
;
9409 ret
= get_errno(getrusage(arg1
, &rusage
));
9410 if (!is_error(ret
)) {
9411 ret
= host_to_target_rusage(arg2
, &rusage
);
9415 case TARGET_NR_gettimeofday
:
9418 ret
= get_errno(gettimeofday(&tv
, NULL
));
9419 if (!is_error(ret
)) {
9420 if (copy_to_user_timeval(arg1
, &tv
))
9425 case TARGET_NR_settimeofday
:
9427 struct timeval tv
, *ptv
= NULL
;
9428 struct timezone tz
, *ptz
= NULL
;
9431 if (copy_from_user_timeval(&tv
, arg1
)) {
9438 if (copy_from_user_timezone(&tz
, arg2
)) {
9444 ret
= get_errno(settimeofday(ptv
, ptz
));
9447 #if defined(TARGET_NR_select)
9448 case TARGET_NR_select
:
9449 #if defined(TARGET_WANT_NI_OLD_SELECT)
9450 /* some architectures used to have old_select here
9451 * but now ENOSYS it.
9453 ret
= -TARGET_ENOSYS
;
9454 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9455 ret
= do_old_select(arg1
);
9457 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9461 #ifdef TARGET_NR_pselect6
9462 case TARGET_NR_pselect6
:
9464 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
9465 fd_set rfds
, wfds
, efds
;
9466 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
9467 struct timespec ts
, *ts_ptr
;
9470 * The 6th arg is actually two args smashed together,
9471 * so we cannot use the C library.
9479 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
9480 target_sigset_t
*target_sigset
;
9488 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
9492 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
9496 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
9502 * This takes a timespec, and not a timeval, so we cannot
9503 * use the do_select() helper ...
9506 if (target_to_host_timespec(&ts
, ts_addr
)) {
9514 /* Extract the two packed args for the sigset */
9517 sig
.size
= SIGSET_T_SIZE
;
9519 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
9523 arg_sigset
= tswapal(arg7
[0]);
9524 arg_sigsize
= tswapal(arg7
[1]);
9525 unlock_user(arg7
, arg6
, 0);
9529 if (arg_sigsize
!= sizeof(*target_sigset
)) {
9530 /* Like the kernel, we enforce correct size sigsets */
9531 ret
= -TARGET_EINVAL
;
9534 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
9535 sizeof(*target_sigset
), 1);
9536 if (!target_sigset
) {
9539 target_to_host_sigset(&set
, target_sigset
);
9540 unlock_user(target_sigset
, arg_sigset
, 0);
9548 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
9551 if (!is_error(ret
)) {
9552 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
9554 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
9556 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
9559 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
9565 #ifdef TARGET_NR_symlink
9566 case TARGET_NR_symlink
:
9569 p
= lock_user_string(arg1
);
9570 p2
= lock_user_string(arg2
);
9572 ret
= -TARGET_EFAULT
;
9574 ret
= get_errno(symlink(p
, p2
));
9575 unlock_user(p2
, arg2
, 0);
9576 unlock_user(p
, arg1
, 0);
9580 #if defined(TARGET_NR_symlinkat)
9581 case TARGET_NR_symlinkat
:
9584 p
= lock_user_string(arg1
);
9585 p2
= lock_user_string(arg3
);
9587 ret
= -TARGET_EFAULT
;
9589 ret
= get_errno(symlinkat(p
, arg2
, p2
));
9590 unlock_user(p2
, arg3
, 0);
9591 unlock_user(p
, arg1
, 0);
9595 #ifdef TARGET_NR_oldlstat
9596 case TARGET_NR_oldlstat
:
9599 #ifdef TARGET_NR_readlink
9600 case TARGET_NR_readlink
:
9603 p
= lock_user_string(arg1
);
9604 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9606 ret
= -TARGET_EFAULT
;
9608 /* Short circuit this for the magic exe check. */
9609 ret
= -TARGET_EINVAL
;
9610 } else if (is_proc_myself((const char *)p
, "exe")) {
9611 char real
[PATH_MAX
], *temp
;
9612 temp
= realpath(exec_path
, real
);
9613 /* Return value is # of bytes that we wrote to the buffer. */
9615 ret
= get_errno(-1);
9617 /* Don't worry about sign mismatch as earlier mapping
9618 * logic would have thrown a bad address error. */
9619 ret
= MIN(strlen(real
), arg3
);
9620 /* We cannot NUL terminate the string. */
9621 memcpy(p2
, real
, ret
);
9624 ret
= get_errno(readlink(path(p
), p2
, arg3
));
9626 unlock_user(p2
, arg2
, ret
);
9627 unlock_user(p
, arg1
, 0);
9631 #if defined(TARGET_NR_readlinkat)
9632 case TARGET_NR_readlinkat
:
9635 p
= lock_user_string(arg2
);
9636 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
9638 ret
= -TARGET_EFAULT
;
9639 } else if (is_proc_myself((const char *)p
, "exe")) {
9640 char real
[PATH_MAX
], *temp
;
9641 temp
= realpath(exec_path
, real
);
9642 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
9643 snprintf((char *)p2
, arg4
, "%s", real
);
9645 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
9647 unlock_user(p2
, arg3
, ret
);
9648 unlock_user(p
, arg2
, 0);
9652 #ifdef TARGET_NR_uselib
9653 case TARGET_NR_uselib
:
9656 #ifdef TARGET_NR_swapon
9657 case TARGET_NR_swapon
:
9658 if (!(p
= lock_user_string(arg1
)))
9660 ret
= get_errno(swapon(p
, arg2
));
9661 unlock_user(p
, arg1
, 0);
9664 case TARGET_NR_reboot
:
9665 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
9666 /* arg4 must be ignored in all other cases */
9667 p
= lock_user_string(arg4
);
9671 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
9672 unlock_user(p
, arg4
, 0);
9674 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
9677 #ifdef TARGET_NR_readdir
9678 case TARGET_NR_readdir
:
9681 #ifdef TARGET_NR_mmap
9682 case TARGET_NR_mmap
:
9683 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9684 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9685 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9686 || defined(TARGET_S390X)
9689 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
9690 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
9698 unlock_user(v
, arg1
, 0);
9699 ret
= get_errno(target_mmap(v1
, v2
, v3
,
9700 target_to_host_bitmask(v4
, mmap_flags_tbl
),
9704 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9705 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9711 #ifdef TARGET_NR_mmap2
9712 case TARGET_NR_mmap2
:
9714 #define MMAP_SHIFT 12
9716 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9717 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9719 arg6
<< MMAP_SHIFT
));
9722 case TARGET_NR_munmap
:
9723 ret
= get_errno(target_munmap(arg1
, arg2
));
9725 case TARGET_NR_mprotect
:
9727 TaskState
*ts
= cpu
->opaque
;
9728 /* Special hack to detect libc making the stack executable. */
9729 if ((arg3
& PROT_GROWSDOWN
)
9730 && arg1
>= ts
->info
->stack_limit
9731 && arg1
<= ts
->info
->start_stack
) {
9732 arg3
&= ~PROT_GROWSDOWN
;
9733 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
9734 arg1
= ts
->info
->stack_limit
;
9737 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
9739 #ifdef TARGET_NR_mremap
9740 case TARGET_NR_mremap
:
9741 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
9744 /* ??? msync/mlock/munlock are broken for softmmu. */
9745 #ifdef TARGET_NR_msync
9746 case TARGET_NR_msync
:
9747 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
9750 #ifdef TARGET_NR_mlock
9751 case TARGET_NR_mlock
:
9752 ret
= get_errno(mlock(g2h(arg1
), arg2
));
9755 #ifdef TARGET_NR_munlock
9756 case TARGET_NR_munlock
:
9757 ret
= get_errno(munlock(g2h(arg1
), arg2
));
9760 #ifdef TARGET_NR_mlockall
9761 case TARGET_NR_mlockall
:
9762 ret
= get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
9765 #ifdef TARGET_NR_munlockall
9766 case TARGET_NR_munlockall
:
9767 ret
= get_errno(munlockall());
9770 #ifdef TARGET_NR_truncate
9771 case TARGET_NR_truncate
:
9772 if (!(p
= lock_user_string(arg1
)))
9774 ret
= get_errno(truncate(p
, arg2
));
9775 unlock_user(p
, arg1
, 0);
9778 #ifdef TARGET_NR_ftruncate
9779 case TARGET_NR_ftruncate
:
9780 ret
= get_errno(ftruncate(arg1
, arg2
));
9783 case TARGET_NR_fchmod
:
9784 ret
= get_errno(fchmod(arg1
, arg2
));
9786 #if defined(TARGET_NR_fchmodat)
9787 case TARGET_NR_fchmodat
:
9788 if (!(p
= lock_user_string(arg2
)))
9790 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
9791 unlock_user(p
, arg2
, 0);
9794 case TARGET_NR_getpriority
:
9795 /* Note that negative values are valid for getpriority, so we must
9796 differentiate based on errno settings. */
9798 ret
= getpriority(arg1
, arg2
);
9799 if (ret
== -1 && errno
!= 0) {
9800 ret
= -host_to_target_errno(errno
);
9804 /* Return value is the unbiased priority. Signal no error. */
9805 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
9807 /* Return value is a biased priority to avoid negative numbers. */
9811 case TARGET_NR_setpriority
:
9812 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
9814 #ifdef TARGET_NR_profil
9815 case TARGET_NR_profil
:
9818 #ifdef TARGET_NR_statfs
9819 case TARGET_NR_statfs
:
9820 if (!(p
= lock_user_string(arg1
)))
9822 ret
= get_errno(statfs(path(p
), &stfs
));
9823 unlock_user(p
, arg1
, 0);
9825 if (!is_error(ret
)) {
9826 struct target_statfs
*target_stfs
;
9828 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
9830 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9831 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9832 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9833 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9834 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9835 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9836 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9837 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9838 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9839 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9840 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9841 #ifdef _STATFS_F_FLAGS
9842 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
9844 __put_user(0, &target_stfs
->f_flags
);
9846 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9847 unlock_user_struct(target_stfs
, arg2
, 1);
9851 #ifdef TARGET_NR_fstatfs
9852 case TARGET_NR_fstatfs
:
9853 ret
= get_errno(fstatfs(arg1
, &stfs
));
9854 goto convert_statfs
;
9856 #ifdef TARGET_NR_statfs64
9857 case TARGET_NR_statfs64
:
9858 if (!(p
= lock_user_string(arg1
)))
9860 ret
= get_errno(statfs(path(p
), &stfs
));
9861 unlock_user(p
, arg1
, 0);
9863 if (!is_error(ret
)) {
9864 struct target_statfs64
*target_stfs
;
9866 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
9868 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9869 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9870 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9871 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9872 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9873 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9874 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9875 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9876 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9877 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9878 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9879 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9880 unlock_user_struct(target_stfs
, arg3
, 1);
9883 case TARGET_NR_fstatfs64
:
9884 ret
= get_errno(fstatfs(arg1
, &stfs
));
9885 goto convert_statfs64
;
9887 #ifdef TARGET_NR_ioperm
9888 case TARGET_NR_ioperm
:
9891 #ifdef TARGET_NR_socketcall
9892 case TARGET_NR_socketcall
:
9893 ret
= do_socketcall(arg1
, arg2
);
9896 #ifdef TARGET_NR_accept
9897 case TARGET_NR_accept
:
9898 ret
= do_accept4(arg1
, arg2
, arg3
, 0);
9901 #ifdef TARGET_NR_accept4
9902 case TARGET_NR_accept4
:
9903 ret
= do_accept4(arg1
, arg2
, arg3
, arg4
);
9906 #ifdef TARGET_NR_bind
9907 case TARGET_NR_bind
:
9908 ret
= do_bind(arg1
, arg2
, arg3
);
9911 #ifdef TARGET_NR_connect
9912 case TARGET_NR_connect
:
9913 ret
= do_connect(arg1
, arg2
, arg3
);
9916 #ifdef TARGET_NR_getpeername
9917 case TARGET_NR_getpeername
:
9918 ret
= do_getpeername(arg1
, arg2
, arg3
);
9921 #ifdef TARGET_NR_getsockname
9922 case TARGET_NR_getsockname
:
9923 ret
= do_getsockname(arg1
, arg2
, arg3
);
9926 #ifdef TARGET_NR_getsockopt
9927 case TARGET_NR_getsockopt
:
9928 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
9931 #ifdef TARGET_NR_listen
9932 case TARGET_NR_listen
:
9933 ret
= get_errno(listen(arg1
, arg2
));
9936 #ifdef TARGET_NR_recv
9937 case TARGET_NR_recv
:
9938 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
9941 #ifdef TARGET_NR_recvfrom
9942 case TARGET_NR_recvfrom
:
9943 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9946 #ifdef TARGET_NR_recvmsg
9947 case TARGET_NR_recvmsg
:
9948 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
9951 #ifdef TARGET_NR_send
9952 case TARGET_NR_send
:
9953 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
9956 #ifdef TARGET_NR_sendmsg
9957 case TARGET_NR_sendmsg
:
9958 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
9961 #ifdef TARGET_NR_sendmmsg
9962 case TARGET_NR_sendmmsg
:
9963 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
9965 case TARGET_NR_recvmmsg
:
9966 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
9969 #ifdef TARGET_NR_sendto
9970 case TARGET_NR_sendto
:
9971 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9974 #ifdef TARGET_NR_shutdown
9975 case TARGET_NR_shutdown
:
9976 ret
= get_errno(shutdown(arg1
, arg2
));
9979 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9980 case TARGET_NR_getrandom
:
9981 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
9985 ret
= get_errno(getrandom(p
, arg2
, arg3
));
9986 unlock_user(p
, arg1
, ret
);
9989 #ifdef TARGET_NR_socket
9990 case TARGET_NR_socket
:
9991 ret
= do_socket(arg1
, arg2
, arg3
);
9994 #ifdef TARGET_NR_socketpair
9995 case TARGET_NR_socketpair
:
9996 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
9999 #ifdef TARGET_NR_setsockopt
10000 case TARGET_NR_setsockopt
:
10001 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
10004 #if defined(TARGET_NR_syslog)
10005 case TARGET_NR_syslog
:
10010 case TARGET_SYSLOG_ACTION_CLOSE
: /* Close log */
10011 case TARGET_SYSLOG_ACTION_OPEN
: /* Open log */
10012 case TARGET_SYSLOG_ACTION_CLEAR
: /* Clear ring buffer */
10013 case TARGET_SYSLOG_ACTION_CONSOLE_OFF
: /* Disable logging */
10014 case TARGET_SYSLOG_ACTION_CONSOLE_ON
: /* Enable logging */
10015 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL
: /* Set messages level */
10016 case TARGET_SYSLOG_ACTION_SIZE_UNREAD
: /* Number of chars */
10017 case TARGET_SYSLOG_ACTION_SIZE_BUFFER
: /* Size of the buffer */
10019 ret
= get_errno(sys_syslog((int)arg1
, NULL
, (int)arg3
));
10022 case TARGET_SYSLOG_ACTION_READ
: /* Read from log */
10023 case TARGET_SYSLOG_ACTION_READ_CLEAR
: /* Read/clear msgs */
10024 case TARGET_SYSLOG_ACTION_READ_ALL
: /* Read last messages */
10026 ret
= -TARGET_EINVAL
;
10034 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
10036 ret
= -TARGET_EFAULT
;
10039 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
10040 unlock_user(p
, arg2
, arg3
);
10050 case TARGET_NR_setitimer
:
10052 struct itimerval value
, ovalue
, *pvalue
;
10056 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
10057 || copy_from_user_timeval(&pvalue
->it_value
,
10058 arg2
+ sizeof(struct target_timeval
)))
10063 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
10064 if (!is_error(ret
) && arg3
) {
10065 if (copy_to_user_timeval(arg3
,
10066 &ovalue
.it_interval
)
10067 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
10073 case TARGET_NR_getitimer
:
10075 struct itimerval value
;
10077 ret
= get_errno(getitimer(arg1
, &value
));
10078 if (!is_error(ret
) && arg2
) {
10079 if (copy_to_user_timeval(arg2
,
10080 &value
.it_interval
)
10081 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
10087 #ifdef TARGET_NR_stat
10088 case TARGET_NR_stat
:
10089 if (!(p
= lock_user_string(arg1
)))
10091 ret
= get_errno(stat(path(p
), &st
));
10092 unlock_user(p
, arg1
, 0);
10095 #ifdef TARGET_NR_lstat
10096 case TARGET_NR_lstat
:
10097 if (!(p
= lock_user_string(arg1
)))
10099 ret
= get_errno(lstat(path(p
), &st
));
10100 unlock_user(p
, arg1
, 0);
10103 #ifdef TARGET_NR_fstat
10104 case TARGET_NR_fstat
:
10106 ret
= get_errno(fstat(arg1
, &st
));
10107 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10110 if (!is_error(ret
)) {
10111 struct target_stat
*target_st
;
10113 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
10115 memset(target_st
, 0, sizeof(*target_st
));
10116 __put_user(st
.st_dev
, &target_st
->st_dev
);
10117 __put_user(st
.st_ino
, &target_st
->st_ino
);
10118 __put_user(st
.st_mode
, &target_st
->st_mode
);
10119 __put_user(st
.st_uid
, &target_st
->st_uid
);
10120 __put_user(st
.st_gid
, &target_st
->st_gid
);
10121 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
10122 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
10123 __put_user(st
.st_size
, &target_st
->st_size
);
10124 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
10125 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
10126 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
10127 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
10128 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
10129 unlock_user_struct(target_st
, arg2
, 1);
10134 #ifdef TARGET_NR_olduname
10135 case TARGET_NR_olduname
:
10136 goto unimplemented
;
10138 #ifdef TARGET_NR_iopl
10139 case TARGET_NR_iopl
:
10140 goto unimplemented
;
10142 case TARGET_NR_vhangup
:
10143 ret
= get_errno(vhangup());
10145 #ifdef TARGET_NR_idle
10146 case TARGET_NR_idle
:
10147 goto unimplemented
;
10149 #ifdef TARGET_NR_syscall
10150 case TARGET_NR_syscall
:
10151 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
10152 arg6
, arg7
, arg8
, 0);
10155 case TARGET_NR_wait4
:
10158 abi_long status_ptr
= arg2
;
10159 struct rusage rusage
, *rusage_ptr
;
10160 abi_ulong target_rusage
= arg4
;
10161 abi_long rusage_err
;
10163 rusage_ptr
= &rusage
;
10166 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
10167 if (!is_error(ret
)) {
10168 if (status_ptr
&& ret
) {
10169 status
= host_to_target_waitstatus(status
);
10170 if (put_user_s32(status
, status_ptr
))
10173 if (target_rusage
) {
10174 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
10182 #ifdef TARGET_NR_swapoff
10183 case TARGET_NR_swapoff
:
10184 if (!(p
= lock_user_string(arg1
)))
10186 ret
= get_errno(swapoff(p
));
10187 unlock_user(p
, arg1
, 0);
10190 case TARGET_NR_sysinfo
:
10192 struct target_sysinfo
*target_value
;
10193 struct sysinfo value
;
10194 ret
= get_errno(sysinfo(&value
));
10195 if (!is_error(ret
) && arg1
)
10197 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
10199 __put_user(value
.uptime
, &target_value
->uptime
);
10200 __put_user(value
.loads
[0], &target_value
->loads
[0]);
10201 __put_user(value
.loads
[1], &target_value
->loads
[1]);
10202 __put_user(value
.loads
[2], &target_value
->loads
[2]);
10203 __put_user(value
.totalram
, &target_value
->totalram
);
10204 __put_user(value
.freeram
, &target_value
->freeram
);
10205 __put_user(value
.sharedram
, &target_value
->sharedram
);
10206 __put_user(value
.bufferram
, &target_value
->bufferram
);
10207 __put_user(value
.totalswap
, &target_value
->totalswap
);
10208 __put_user(value
.freeswap
, &target_value
->freeswap
);
10209 __put_user(value
.procs
, &target_value
->procs
);
10210 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
10211 __put_user(value
.freehigh
, &target_value
->freehigh
);
10212 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
10213 unlock_user_struct(target_value
, arg1
, 1);
10217 #ifdef TARGET_NR_ipc
10218 case TARGET_NR_ipc
:
10219 ret
= do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
10222 #ifdef TARGET_NR_semget
10223 case TARGET_NR_semget
:
10224 ret
= get_errno(semget(arg1
, arg2
, arg3
));
10227 #ifdef TARGET_NR_semop
10228 case TARGET_NR_semop
:
10229 ret
= do_semop(arg1
, arg2
, arg3
);
10232 #ifdef TARGET_NR_semctl
10233 case TARGET_NR_semctl
:
10234 ret
= do_semctl(arg1
, arg2
, arg3
, arg4
);
10237 #ifdef TARGET_NR_msgctl
10238 case TARGET_NR_msgctl
:
10239 ret
= do_msgctl(arg1
, arg2
, arg3
);
10242 #ifdef TARGET_NR_msgget
10243 case TARGET_NR_msgget
:
10244 ret
= get_errno(msgget(arg1
, arg2
));
10247 #ifdef TARGET_NR_msgrcv
10248 case TARGET_NR_msgrcv
:
10249 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
10252 #ifdef TARGET_NR_msgsnd
10253 case TARGET_NR_msgsnd
:
10254 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
10257 #ifdef TARGET_NR_shmget
10258 case TARGET_NR_shmget
:
10259 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
10262 #ifdef TARGET_NR_shmctl
10263 case TARGET_NR_shmctl
:
10264 ret
= do_shmctl(arg1
, arg2
, arg3
);
10267 #ifdef TARGET_NR_shmat
10268 case TARGET_NR_shmat
:
10269 ret
= do_shmat(cpu_env
, arg1
, arg2
, arg3
);
10272 #ifdef TARGET_NR_shmdt
10273 case TARGET_NR_shmdt
:
10274 ret
= do_shmdt(arg1
);
10277 case TARGET_NR_fsync
:
10278 ret
= get_errno(fsync(arg1
));
10280 case TARGET_NR_clone
:
10281 /* Linux manages to have three different orderings for its
10282 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10283 * match the kernel's CONFIG_CLONE_* settings.
10284 * Microblaze is further special in that it uses a sixth
10285 * implicit argument to clone for the TLS pointer.
10287 #if defined(TARGET_MICROBLAZE)
10288 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
10289 #elif defined(TARGET_CLONE_BACKWARDS)
10290 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
10291 #elif defined(TARGET_CLONE_BACKWARDS2)
10292 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
10294 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
10297 #ifdef __NR_exit_group
10298 /* new thread calls */
10299 case TARGET_NR_exit_group
:
10300 preexit_cleanup(cpu_env
, arg1
);
10301 ret
= get_errno(exit_group(arg1
));
10304 case TARGET_NR_setdomainname
:
10305 if (!(p
= lock_user_string(arg1
)))
10307 ret
= get_errno(setdomainname(p
, arg2
));
10308 unlock_user(p
, arg1
, 0);
10310 case TARGET_NR_uname
:
10311 /* no need to transcode because we use the linux syscall */
10313 struct new_utsname
* buf
;
10315 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
10317 ret
= get_errno(sys_uname(buf
));
10318 if (!is_error(ret
)) {
10319 /* Overwrite the native machine name with whatever is being
10321 g_strlcpy(buf
->machine
, cpu_to_uname_machine(cpu_env
),
10322 sizeof(buf
->machine
));
10323 /* Allow the user to override the reported release. */
10324 if (qemu_uname_release
&& *qemu_uname_release
) {
10325 g_strlcpy(buf
->release
, qemu_uname_release
,
10326 sizeof(buf
->release
));
10329 unlock_user_struct(buf
, arg1
, 1);
10333 case TARGET_NR_modify_ldt
:
10334 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
10336 #if !defined(TARGET_X86_64)
10337 case TARGET_NR_vm86old
:
10338 goto unimplemented
;
10339 case TARGET_NR_vm86
:
10340 ret
= do_vm86(cpu_env
, arg1
, arg2
);
10344 case TARGET_NR_adjtimex
:
10346 struct timex host_buf
;
10348 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
10351 ret
= get_errno(adjtimex(&host_buf
));
10352 if (!is_error(ret
)) {
10353 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
10359 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10360 case TARGET_NR_clock_adjtime
:
10362 struct timex htx
, *phtx
= &htx
;
10364 if (target_to_host_timex(phtx
, arg2
) != 0) {
10367 ret
= get_errno(clock_adjtime(arg1
, phtx
));
10368 if (!is_error(ret
) && phtx
) {
10369 if (host_to_target_timex(arg2
, phtx
) != 0) {
10376 #ifdef TARGET_NR_create_module
10377 case TARGET_NR_create_module
:
10379 case TARGET_NR_init_module
:
10380 case TARGET_NR_delete_module
:
10381 #ifdef TARGET_NR_get_kernel_syms
10382 case TARGET_NR_get_kernel_syms
:
10384 goto unimplemented
;
10385 case TARGET_NR_quotactl
:
10386 goto unimplemented
;
10387 case TARGET_NR_getpgid
:
10388 ret
= get_errno(getpgid(arg1
));
10390 case TARGET_NR_fchdir
:
10391 ret
= get_errno(fchdir(arg1
));
10393 #ifdef TARGET_NR_bdflush /* not on x86_64 */
10394 case TARGET_NR_bdflush
:
10395 goto unimplemented
;
10397 #ifdef TARGET_NR_sysfs
10398 case TARGET_NR_sysfs
:
10399 goto unimplemented
;
10401 case TARGET_NR_personality
:
10402 ret
= get_errno(personality(arg1
));
10404 #ifdef TARGET_NR_afs_syscall
10405 case TARGET_NR_afs_syscall
:
10406 goto unimplemented
;
10408 #ifdef TARGET_NR__llseek /* Not on alpha */
10409 case TARGET_NR__llseek
:
10412 #if !defined(__NR_llseek)
10413 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
10415 ret
= get_errno(res
);
10420 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
10422 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
10428 #ifdef TARGET_NR_getdents
10429 case TARGET_NR_getdents
:
10430 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10431 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10433 struct target_dirent
*target_dirp
;
10434 struct linux_dirent
*dirp
;
10435 abi_long count
= arg3
;
10437 dirp
= g_try_malloc(count
);
10439 ret
= -TARGET_ENOMEM
;
10443 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
10444 if (!is_error(ret
)) {
10445 struct linux_dirent
*de
;
10446 struct target_dirent
*tde
;
10448 int reclen
, treclen
;
10449 int count1
, tnamelen
;
10453 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10457 reclen
= de
->d_reclen
;
10458 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
10459 assert(tnamelen
>= 0);
10460 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
10461 assert(count1
+ treclen
<= count
);
10462 tde
->d_reclen
= tswap16(treclen
);
10463 tde
->d_ino
= tswapal(de
->d_ino
);
10464 tde
->d_off
= tswapal(de
->d_off
);
10465 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
10466 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
10468 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
10472 unlock_user(target_dirp
, arg2
, ret
);
10478 struct linux_dirent
*dirp
;
10479 abi_long count
= arg3
;
10481 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10483 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
10484 if (!is_error(ret
)) {
10485 struct linux_dirent
*de
;
10490 reclen
= de
->d_reclen
;
10493 de
->d_reclen
= tswap16(reclen
);
10494 tswapls(&de
->d_ino
);
10495 tswapls(&de
->d_off
);
10496 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
10500 unlock_user(dirp
, arg2
, ret
);
10504 /* Implement getdents in terms of getdents64 */
10506 struct linux_dirent64
*dirp
;
10507 abi_long count
= arg3
;
10509 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
10513 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
10514 if (!is_error(ret
)) {
10515 /* Convert the dirent64 structs to target dirent. We do this
10516 * in-place, since we can guarantee that a target_dirent is no
10517 * larger than a dirent64; however this means we have to be
10518 * careful to read everything before writing in the new format.
10520 struct linux_dirent64
*de
;
10521 struct target_dirent
*tde
;
10526 tde
= (struct target_dirent
*)dirp
;
10528 int namelen
, treclen
;
10529 int reclen
= de
->d_reclen
;
10530 uint64_t ino
= de
->d_ino
;
10531 int64_t off
= de
->d_off
;
10532 uint8_t type
= de
->d_type
;
10534 namelen
= strlen(de
->d_name
);
10535 treclen
= offsetof(struct target_dirent
, d_name
)
10537 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
10539 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
10540 tde
->d_ino
= tswapal(ino
);
10541 tde
->d_off
= tswapal(off
);
10542 tde
->d_reclen
= tswap16(treclen
);
10543 /* The target_dirent type is in what was formerly a padding
10544 * byte at the end of the structure:
10546 *(((char *)tde
) + treclen
- 1) = type
;
10548 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
10549 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
10555 unlock_user(dirp
, arg2
, ret
);
10559 #endif /* TARGET_NR_getdents */
10560 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10561 case TARGET_NR_getdents64
:
10563 struct linux_dirent64
*dirp
;
10564 abi_long count
= arg3
;
10565 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10567 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
10568 if (!is_error(ret
)) {
10569 struct linux_dirent64
*de
;
10574 reclen
= de
->d_reclen
;
10577 de
->d_reclen
= tswap16(reclen
);
10578 tswap64s((uint64_t *)&de
->d_ino
);
10579 tswap64s((uint64_t *)&de
->d_off
);
10580 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
10584 unlock_user(dirp
, arg2
, ret
);
10587 #endif /* TARGET_NR_getdents64 */
10588 #if defined(TARGET_NR__newselect)
10589 case TARGET_NR__newselect
:
10590 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
10593 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10594 # ifdef TARGET_NR_poll
10595 case TARGET_NR_poll
:
10597 # ifdef TARGET_NR_ppoll
10598 case TARGET_NR_ppoll
:
10601 struct target_pollfd
*target_pfd
;
10602 unsigned int nfds
= arg2
;
10603 struct pollfd
*pfd
;
10609 if (nfds
> (INT_MAX
/ sizeof(struct target_pollfd
))) {
10610 ret
= -TARGET_EINVAL
;
10614 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
10615 sizeof(struct target_pollfd
) * nfds
, 1);
10620 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
10621 for (i
= 0; i
< nfds
; i
++) {
10622 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
10623 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
10628 # ifdef TARGET_NR_ppoll
10629 case TARGET_NR_ppoll
:
10631 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
10632 target_sigset_t
*target_set
;
10633 sigset_t _set
, *set
= &_set
;
10636 if (target_to_host_timespec(timeout_ts
, arg3
)) {
10637 unlock_user(target_pfd
, arg1
, 0);
10645 if (arg5
!= sizeof(target_sigset_t
)) {
10646 unlock_user(target_pfd
, arg1
, 0);
10647 ret
= -TARGET_EINVAL
;
10651 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
10653 unlock_user(target_pfd
, arg1
, 0);
10656 target_to_host_sigset(set
, target_set
);
10661 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
10662 set
, SIGSET_T_SIZE
));
10664 if (!is_error(ret
) && arg3
) {
10665 host_to_target_timespec(arg3
, timeout_ts
);
10668 unlock_user(target_set
, arg4
, 0);
10673 # ifdef TARGET_NR_poll
10674 case TARGET_NR_poll
:
10676 struct timespec ts
, *pts
;
10679 /* Convert ms to secs, ns */
10680 ts
.tv_sec
= arg3
/ 1000;
10681 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
10684 /* -ve poll() timeout means "infinite" */
10687 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
10692 g_assert_not_reached();
10695 if (!is_error(ret
)) {
10696 for(i
= 0; i
< nfds
; i
++) {
10697 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
10700 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
10704 case TARGET_NR_flock
:
10705 /* NOTE: the flock constant seems to be the same for every
10707 ret
= get_errno(safe_flock(arg1
, arg2
));
10709 case TARGET_NR_readv
:
10711 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10713 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
10714 unlock_iovec(vec
, arg2
, arg3
, 1);
10716 ret
= -host_to_target_errno(errno
);
10720 case TARGET_NR_writev
:
10722 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10724 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
10725 unlock_iovec(vec
, arg2
, arg3
, 0);
10727 ret
= -host_to_target_errno(errno
);
10731 #if defined(TARGET_NR_preadv)
10732 case TARGET_NR_preadv
:
10734 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10736 unsigned long low
, high
;
10738 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10739 ret
= get_errno(safe_preadv(arg1
, vec
, arg3
, low
, high
));
10740 unlock_iovec(vec
, arg2
, arg3
, 1);
10742 ret
= -host_to_target_errno(errno
);
10747 #if defined(TARGET_NR_pwritev)
10748 case TARGET_NR_pwritev
:
10750 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10752 unsigned long low
, high
;
10754 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10755 ret
= get_errno(safe_pwritev(arg1
, vec
, arg3
, low
, high
));
10756 unlock_iovec(vec
, arg2
, arg3
, 0);
10758 ret
= -host_to_target_errno(errno
);
10763 case TARGET_NR_getsid
:
10764 ret
= get_errno(getsid(arg1
));
10766 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10767 case TARGET_NR_fdatasync
:
10768 ret
= get_errno(fdatasync(arg1
));
10771 #ifdef TARGET_NR__sysctl
10772 case TARGET_NR__sysctl
:
10773 /* We don't implement this, but ENOTDIR is always a safe
10775 ret
= -TARGET_ENOTDIR
;
10778 case TARGET_NR_sched_getaffinity
:
10780 unsigned int mask_size
;
10781 unsigned long *mask
;
10784 * sched_getaffinity needs multiples of ulong, so need to take
10785 * care of mismatches between target ulong and host ulong sizes.
10787 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10788 ret
= -TARGET_EINVAL
;
10791 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10793 mask
= alloca(mask_size
);
10794 memset(mask
, 0, mask_size
);
10795 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
10797 if (!is_error(ret
)) {
10799 /* More data returned than the caller's buffer will fit.
10800 * This only happens if sizeof(abi_long) < sizeof(long)
10801 * and the caller passed us a buffer holding an odd number
10802 * of abi_longs. If the host kernel is actually using the
10803 * extra 4 bytes then fail EINVAL; otherwise we can just
10804 * ignore them and only copy the interesting part.
10806 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
10807 if (numcpus
> arg2
* 8) {
10808 ret
= -TARGET_EINVAL
;
10814 if (host_to_target_cpu_mask(mask
, mask_size
, arg3
, ret
)) {
10820 case TARGET_NR_sched_setaffinity
:
10822 unsigned int mask_size
;
10823 unsigned long *mask
;
10826 * sched_setaffinity needs multiples of ulong, so need to take
10827 * care of mismatches between target ulong and host ulong sizes.
10829 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10830 ret
= -TARGET_EINVAL
;
10833 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10834 mask
= alloca(mask_size
);
10836 ret
= target_to_host_cpu_mask(mask
, mask_size
, arg3
, arg2
);
10841 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
10844 case TARGET_NR_getcpu
:
10846 unsigned cpu
, node
;
10847 ret
= get_errno(sys_getcpu(arg1
? &cpu
: NULL
,
10848 arg2
? &node
: NULL
,
10850 if (is_error(ret
)) {
10853 if (arg1
&& put_user_u32(cpu
, arg1
)) {
10856 if (arg2
&& put_user_u32(node
, arg2
)) {
10861 case TARGET_NR_sched_setparam
:
10863 struct sched_param
*target_schp
;
10864 struct sched_param schp
;
10867 return -TARGET_EINVAL
;
10869 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
10871 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10872 unlock_user_struct(target_schp
, arg2
, 0);
10873 ret
= get_errno(sched_setparam(arg1
, &schp
));
10876 case TARGET_NR_sched_getparam
:
10878 struct sched_param
*target_schp
;
10879 struct sched_param schp
;
10882 return -TARGET_EINVAL
;
10884 ret
= get_errno(sched_getparam(arg1
, &schp
));
10885 if (!is_error(ret
)) {
10886 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
10888 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
10889 unlock_user_struct(target_schp
, arg2
, 1);
10893 case TARGET_NR_sched_setscheduler
:
10895 struct sched_param
*target_schp
;
10896 struct sched_param schp
;
10898 return -TARGET_EINVAL
;
10900 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
10902 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10903 unlock_user_struct(target_schp
, arg3
, 0);
10904 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
10907 case TARGET_NR_sched_getscheduler
:
10908 ret
= get_errno(sched_getscheduler(arg1
));
10910 case TARGET_NR_sched_yield
:
10911 ret
= get_errno(sched_yield());
10913 case TARGET_NR_sched_get_priority_max
:
10914 ret
= get_errno(sched_get_priority_max(arg1
));
10916 case TARGET_NR_sched_get_priority_min
:
10917 ret
= get_errno(sched_get_priority_min(arg1
));
10919 case TARGET_NR_sched_rr_get_interval
:
10921 struct timespec ts
;
10922 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10923 if (!is_error(ret
)) {
10924 ret
= host_to_target_timespec(arg2
, &ts
);
10928 case TARGET_NR_nanosleep
:
10930 struct timespec req
, rem
;
10931 target_to_host_timespec(&req
, arg1
);
10932 ret
= get_errno(safe_nanosleep(&req
, &rem
));
10933 if (is_error(ret
) && arg2
) {
10934 host_to_target_timespec(arg2
, &rem
);
10938 #ifdef TARGET_NR_query_module
10939 case TARGET_NR_query_module
:
10940 goto unimplemented
;
10942 #ifdef TARGET_NR_nfsservctl
10943 case TARGET_NR_nfsservctl
:
10944 goto unimplemented
;
10946 case TARGET_NR_prctl
:
10948 case PR_GET_PDEATHSIG
:
10951 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
10952 if (!is_error(ret
) && arg2
10953 && put_user_ual(deathsig
, arg2
)) {
10961 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
10965 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10966 arg3
, arg4
, arg5
));
10967 unlock_user(name
, arg2
, 16);
10972 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
10976 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10977 arg3
, arg4
, arg5
));
10978 unlock_user(name
, arg2
, 0);
10982 #ifdef TARGET_AARCH64
10983 case TARGET_PR_SVE_SET_VL
:
10985 * We cannot support either PR_SVE_SET_VL_ONEXEC or
10986 * PR_SVE_VL_INHERIT. Note the kernel definition
10987 * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10988 * even though the current architectural maximum is VQ=16.
10990 ret
= -TARGET_EINVAL
;
10991 if (arm_feature(cpu_env
, ARM_FEATURE_SVE
)
10992 && arg2
>= 0 && arg2
<= 512 * 16 && !(arg2
& 15)) {
10993 CPUARMState
*env
= cpu_env
;
10994 ARMCPU
*cpu
= arm_env_get_cpu(env
);
10995 uint32_t vq
, old_vq
;
10997 old_vq
= (env
->vfp
.zcr_el
[1] & 0xf) + 1;
10998 vq
= MAX(arg2
/ 16, 1);
10999 vq
= MIN(vq
, cpu
->sve_max_vq
);
11002 aarch64_sve_narrow_vq(env
, vq
);
11004 env
->vfp
.zcr_el
[1] = vq
- 1;
11008 case TARGET_PR_SVE_GET_VL
:
11009 ret
= -TARGET_EINVAL
;
11010 if (arm_feature(cpu_env
, ARM_FEATURE_SVE
)) {
11011 CPUARMState
*env
= cpu_env
;
11012 ret
= ((env
->vfp
.zcr_el
[1] & 0xf) + 1) * 16;
11015 #endif /* AARCH64 */
11016 case PR_GET_SECCOMP
:
11017 case PR_SET_SECCOMP
:
11018 /* Disable seccomp to prevent the target disabling syscalls we
11020 ret
= -TARGET_EINVAL
;
11023 /* Most prctl options have no pointer arguments */
11024 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
11028 #ifdef TARGET_NR_arch_prctl
11029 case TARGET_NR_arch_prctl
:
11030 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
11031 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
11034 goto unimplemented
;
11037 #ifdef TARGET_NR_pread64
11038 case TARGET_NR_pread64
:
11039 if (regpairs_aligned(cpu_env
, num
)) {
11043 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
11045 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
11046 unlock_user(p
, arg2
, ret
);
11048 case TARGET_NR_pwrite64
:
11049 if (regpairs_aligned(cpu_env
, num
)) {
11053 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
11055 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
11056 unlock_user(p
, arg2
, 0);
11059 case TARGET_NR_getcwd
:
11060 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
11062 ret
= get_errno(sys_getcwd1(p
, arg2
));
11063 unlock_user(p
, arg1
, ret
);
11065 case TARGET_NR_capget
:
11066 case TARGET_NR_capset
:
11068 struct target_user_cap_header
*target_header
;
11069 struct target_user_cap_data
*target_data
= NULL
;
11070 struct __user_cap_header_struct header
;
11071 struct __user_cap_data_struct data
[2];
11072 struct __user_cap_data_struct
*dataptr
= NULL
;
11073 int i
, target_datalen
;
11074 int data_items
= 1;
11076 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
11079 header
.version
= tswap32(target_header
->version
);
11080 header
.pid
= tswap32(target_header
->pid
);
11082 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
11083 /* Version 2 and up takes pointer to two user_data structs */
11087 target_datalen
= sizeof(*target_data
) * data_items
;
11090 if (num
== TARGET_NR_capget
) {
11091 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
11093 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
11095 if (!target_data
) {
11096 unlock_user_struct(target_header
, arg1
, 0);
11100 if (num
== TARGET_NR_capset
) {
11101 for (i
= 0; i
< data_items
; i
++) {
11102 data
[i
].effective
= tswap32(target_data
[i
].effective
);
11103 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
11104 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
11111 if (num
== TARGET_NR_capget
) {
11112 ret
= get_errno(capget(&header
, dataptr
));
11114 ret
= get_errno(capset(&header
, dataptr
));
11117 /* The kernel always updates version for both capget and capset */
11118 target_header
->version
= tswap32(header
.version
);
11119 unlock_user_struct(target_header
, arg1
, 1);
11122 if (num
== TARGET_NR_capget
) {
11123 for (i
= 0; i
< data_items
; i
++) {
11124 target_data
[i
].effective
= tswap32(data
[i
].effective
);
11125 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
11126 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
11128 unlock_user(target_data
, arg2
, target_datalen
);
11130 unlock_user(target_data
, arg2
, 0);
11135 case TARGET_NR_sigaltstack
:
11136 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
11139 #ifdef CONFIG_SENDFILE
11140 #ifdef TARGET_NR_sendfile
11141 case TARGET_NR_sendfile
:
11143 off_t
*offp
= NULL
;
11146 ret
= get_user_sal(off
, arg3
);
11147 if (is_error(ret
)) {
11152 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
11153 if (!is_error(ret
) && arg3
) {
11154 abi_long ret2
= put_user_sal(off
, arg3
);
11155 if (is_error(ret2
)) {
11162 #ifdef TARGET_NR_sendfile64
11163 case TARGET_NR_sendfile64
:
11165 off_t
*offp
= NULL
;
11168 ret
= get_user_s64(off
, arg3
);
11169 if (is_error(ret
)) {
11174 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
11175 if (!is_error(ret
) && arg3
) {
11176 abi_long ret2
= put_user_s64(off
, arg3
);
11177 if (is_error(ret2
)) {
11185 case TARGET_NR_sendfile
:
11186 #ifdef TARGET_NR_sendfile64
11187 case TARGET_NR_sendfile64
:
11189 goto unimplemented
;
11192 #ifdef TARGET_NR_getpmsg
11193 case TARGET_NR_getpmsg
:
11194 goto unimplemented
;
11196 #ifdef TARGET_NR_putpmsg
11197 case TARGET_NR_putpmsg
:
11198 goto unimplemented
;
11200 #ifdef TARGET_NR_vfork
11201 case TARGET_NR_vfork
:
11202 ret
= get_errno(do_fork(cpu_env
,
11203 CLONE_VFORK
| CLONE_VM
| TARGET_SIGCHLD
,
11207 #ifdef TARGET_NR_ugetrlimit
11208 case TARGET_NR_ugetrlimit
:
11210 struct rlimit rlim
;
11211 int resource
= target_to_host_resource(arg1
);
11212 ret
= get_errno(getrlimit(resource
, &rlim
));
11213 if (!is_error(ret
)) {
11214 struct target_rlimit
*target_rlim
;
11215 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
11217 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
11218 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
11219 unlock_user_struct(target_rlim
, arg2
, 1);
11224 #ifdef TARGET_NR_truncate64
11225 case TARGET_NR_truncate64
:
11226 if (!(p
= lock_user_string(arg1
)))
11228 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
11229 unlock_user(p
, arg1
, 0);
11232 #ifdef TARGET_NR_ftruncate64
11233 case TARGET_NR_ftruncate64
:
11234 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
11237 #ifdef TARGET_NR_stat64
11238 case TARGET_NR_stat64
:
11239 if (!(p
= lock_user_string(arg1
)))
11241 ret
= get_errno(stat(path(p
), &st
));
11242 unlock_user(p
, arg1
, 0);
11243 if (!is_error(ret
))
11244 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11247 #ifdef TARGET_NR_lstat64
11248 case TARGET_NR_lstat64
:
11249 if (!(p
= lock_user_string(arg1
)))
11251 ret
= get_errno(lstat(path(p
), &st
));
11252 unlock_user(p
, arg1
, 0);
11253 if (!is_error(ret
))
11254 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11257 #ifdef TARGET_NR_fstat64
11258 case TARGET_NR_fstat64
:
11259 ret
= get_errno(fstat(arg1
, &st
));
11260 if (!is_error(ret
))
11261 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11264 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11265 #ifdef TARGET_NR_fstatat64
11266 case TARGET_NR_fstatat64
:
11268 #ifdef TARGET_NR_newfstatat
11269 case TARGET_NR_newfstatat
:
11271 if (!(p
= lock_user_string(arg2
)))
11273 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
11274 if (!is_error(ret
))
11275 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
11278 #ifdef TARGET_NR_lchown
11279 case TARGET_NR_lchown
:
11280 if (!(p
= lock_user_string(arg1
)))
11282 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
11283 unlock_user(p
, arg1
, 0);
11286 #ifdef TARGET_NR_getuid
11287 case TARGET_NR_getuid
:
11288 ret
= get_errno(high2lowuid(getuid()));
11291 #ifdef TARGET_NR_getgid
11292 case TARGET_NR_getgid
:
11293 ret
= get_errno(high2lowgid(getgid()));
11296 #ifdef TARGET_NR_geteuid
11297 case TARGET_NR_geteuid
:
11298 ret
= get_errno(high2lowuid(geteuid()));
11301 #ifdef TARGET_NR_getegid
11302 case TARGET_NR_getegid
:
11303 ret
= get_errno(high2lowgid(getegid()));
11306 case TARGET_NR_setreuid
:
11307 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
11309 case TARGET_NR_setregid
:
11310 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
11312 case TARGET_NR_getgroups
:
11314 int gidsetsize
= arg1
;
11315 target_id
*target_grouplist
;
11319 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11320 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11321 if (gidsetsize
== 0)
11323 if (!is_error(ret
)) {
11324 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
11325 if (!target_grouplist
)
11327 for(i
= 0;i
< ret
; i
++)
11328 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
11329 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
11333 case TARGET_NR_setgroups
:
11335 int gidsetsize
= arg1
;
11336 target_id
*target_grouplist
;
11337 gid_t
*grouplist
= NULL
;
11340 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11341 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
11342 if (!target_grouplist
) {
11343 ret
= -TARGET_EFAULT
;
11346 for (i
= 0; i
< gidsetsize
; i
++) {
11347 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
11349 unlock_user(target_grouplist
, arg2
, 0);
11351 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
11354 case TARGET_NR_fchown
:
11355 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
11357 #if defined(TARGET_NR_fchownat)
11358 case TARGET_NR_fchownat
:
11359 if (!(p
= lock_user_string(arg2
)))
11361 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
11362 low2highgid(arg4
), arg5
));
11363 unlock_user(p
, arg2
, 0);
11366 #ifdef TARGET_NR_setresuid
11367 case TARGET_NR_setresuid
:
11368 ret
= get_errno(sys_setresuid(low2highuid(arg1
),
11370 low2highuid(arg3
)));
11373 #ifdef TARGET_NR_getresuid
11374 case TARGET_NR_getresuid
:
11376 uid_t ruid
, euid
, suid
;
11377 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11378 if (!is_error(ret
)) {
11379 if (put_user_id(high2lowuid(ruid
), arg1
)
11380 || put_user_id(high2lowuid(euid
), arg2
)
11381 || put_user_id(high2lowuid(suid
), arg3
))
11387 #ifdef TARGET_NR_getresgid
11388 case TARGET_NR_setresgid
:
11389 ret
= get_errno(sys_setresgid(low2highgid(arg1
),
11391 low2highgid(arg3
)));
11394 #ifdef TARGET_NR_getresgid
11395 case TARGET_NR_getresgid
:
11397 gid_t rgid
, egid
, sgid
;
11398 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11399 if (!is_error(ret
)) {
11400 if (put_user_id(high2lowgid(rgid
), arg1
)
11401 || put_user_id(high2lowgid(egid
), arg2
)
11402 || put_user_id(high2lowgid(sgid
), arg3
))
11408 #ifdef TARGET_NR_chown
11409 case TARGET_NR_chown
:
11410 if (!(p
= lock_user_string(arg1
)))
11412 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
11413 unlock_user(p
, arg1
, 0);
11416 case TARGET_NR_setuid
:
11417 ret
= get_errno(sys_setuid(low2highuid(arg1
)));
11419 case TARGET_NR_setgid
:
11420 ret
= get_errno(sys_setgid(low2highgid(arg1
)));
11422 case TARGET_NR_setfsuid
:
11423 ret
= get_errno(setfsuid(arg1
));
11425 case TARGET_NR_setfsgid
:
11426 ret
= get_errno(setfsgid(arg1
));
11429 #ifdef TARGET_NR_lchown32
11430 case TARGET_NR_lchown32
:
11431 if (!(p
= lock_user_string(arg1
)))
11433 ret
= get_errno(lchown(p
, arg2
, arg3
));
11434 unlock_user(p
, arg1
, 0);
11437 #ifdef TARGET_NR_getuid32
11438 case TARGET_NR_getuid32
:
11439 ret
= get_errno(getuid());
11443 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11444 /* Alpha specific */
11445 case TARGET_NR_getxuid
:
11449 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
11451 ret
= get_errno(getuid());
11454 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11455 /* Alpha specific */
11456 case TARGET_NR_getxgid
:
11460 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
11462 ret
= get_errno(getgid());
11465 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11466 /* Alpha specific */
11467 case TARGET_NR_osf_getsysinfo
:
11468 ret
= -TARGET_EOPNOTSUPP
;
11470 case TARGET_GSI_IEEE_FP_CONTROL
:
11472 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
11474 /* Copied from linux ieee_fpcr_to_swcr. */
11475 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
11476 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
11477 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
11478 | SWCR_TRAP_ENABLE_DZE
11479 | SWCR_TRAP_ENABLE_OVF
);
11480 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
11481 | SWCR_TRAP_ENABLE_INE
);
11482 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
11483 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
11485 if (put_user_u64 (swcr
, arg2
))
11491 /* case GSI_IEEE_STATE_AT_SIGNAL:
11492 -- Not implemented in linux kernel.
11494 -- Retrieves current unaligned access state; not much used.
11495 case GSI_PROC_TYPE:
11496 -- Retrieves implver information; surely not used.
11497 case GSI_GET_HWRPB:
11498 -- Grabs a copy of the HWRPB; surely not used.
11503 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11504 /* Alpha specific */
11505 case TARGET_NR_osf_setsysinfo
:
11506 ret
= -TARGET_EOPNOTSUPP
;
11508 case TARGET_SSI_IEEE_FP_CONTROL
:
11510 uint64_t swcr
, fpcr
, orig_fpcr
;
11512 if (get_user_u64 (swcr
, arg2
)) {
11515 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11516 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
11518 /* Copied from linux ieee_swcr_to_fpcr. */
11519 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
11520 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
11521 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
11522 | SWCR_TRAP_ENABLE_DZE
11523 | SWCR_TRAP_ENABLE_OVF
)) << 48;
11524 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
11525 | SWCR_TRAP_ENABLE_INE
)) << 57;
11526 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
11527 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
11529 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11534 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
11536 uint64_t exc
, fpcr
, orig_fpcr
;
11539 if (get_user_u64(exc
, arg2
)) {
11543 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11545 /* We only add to the exception status here. */
11546 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
11548 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11551 /* Old exceptions are not signaled. */
11552 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
11554 /* If any exceptions set by this call,
11555 and are unmasked, send a signal. */
11557 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
11558 si_code
= TARGET_FPE_FLTRES
;
11560 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
11561 si_code
= TARGET_FPE_FLTUND
;
11563 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
11564 si_code
= TARGET_FPE_FLTOVF
;
11566 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
11567 si_code
= TARGET_FPE_FLTDIV
;
11569 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
11570 si_code
= TARGET_FPE_FLTINV
;
11572 if (si_code
!= 0) {
11573 target_siginfo_t info
;
11574 info
.si_signo
= SIGFPE
;
11576 info
.si_code
= si_code
;
11577 info
._sifields
._sigfault
._addr
11578 = ((CPUArchState
*)cpu_env
)->pc
;
11579 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
11580 QEMU_SI_FAULT
, &info
);
11585 /* case SSI_NVPAIRS:
11586 -- Used with SSIN_UACPROC to enable unaligned accesses.
11587 case SSI_IEEE_STATE_AT_SIGNAL:
11588 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11589 -- Not implemented in linux kernel
11594 #ifdef TARGET_NR_osf_sigprocmask
11595 /* Alpha specific. */
11596 case TARGET_NR_osf_sigprocmask
:
11600 sigset_t set
, oldset
;
11603 case TARGET_SIG_BLOCK
:
11606 case TARGET_SIG_UNBLOCK
:
11609 case TARGET_SIG_SETMASK
:
11613 ret
= -TARGET_EINVAL
;
11617 target_to_host_old_sigset(&set
, &mask
);
11618 ret
= do_sigprocmask(how
, &set
, &oldset
);
11620 host_to_target_old_sigset(&mask
, &oldset
);
11627 #ifdef TARGET_NR_getgid32
11628 case TARGET_NR_getgid32
:
11629 ret
= get_errno(getgid());
11632 #ifdef TARGET_NR_geteuid32
11633 case TARGET_NR_geteuid32
:
11634 ret
= get_errno(geteuid());
11637 #ifdef TARGET_NR_getegid32
11638 case TARGET_NR_getegid32
:
11639 ret
= get_errno(getegid());
11642 #ifdef TARGET_NR_setreuid32
11643 case TARGET_NR_setreuid32
:
11644 ret
= get_errno(setreuid(arg1
, arg2
));
11647 #ifdef TARGET_NR_setregid32
11648 case TARGET_NR_setregid32
:
11649 ret
= get_errno(setregid(arg1
, arg2
));
11652 #ifdef TARGET_NR_getgroups32
11653 case TARGET_NR_getgroups32
:
11655 int gidsetsize
= arg1
;
11656 uint32_t *target_grouplist
;
11660 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11661 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11662 if (gidsetsize
== 0)
11664 if (!is_error(ret
)) {
11665 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
11666 if (!target_grouplist
) {
11667 ret
= -TARGET_EFAULT
;
11670 for(i
= 0;i
< ret
; i
++)
11671 target_grouplist
[i
] = tswap32(grouplist
[i
]);
11672 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
11677 #ifdef TARGET_NR_setgroups32
11678 case TARGET_NR_setgroups32
:
11680 int gidsetsize
= arg1
;
11681 uint32_t *target_grouplist
;
11685 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11686 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
11687 if (!target_grouplist
) {
11688 ret
= -TARGET_EFAULT
;
11691 for(i
= 0;i
< gidsetsize
; i
++)
11692 grouplist
[i
] = tswap32(target_grouplist
[i
]);
11693 unlock_user(target_grouplist
, arg2
, 0);
11694 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
11698 #ifdef TARGET_NR_fchown32
11699 case TARGET_NR_fchown32
:
11700 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
11703 #ifdef TARGET_NR_setresuid32
11704 case TARGET_NR_setresuid32
:
11705 ret
= get_errno(sys_setresuid(arg1
, arg2
, arg3
));
11708 #ifdef TARGET_NR_getresuid32
11709 case TARGET_NR_getresuid32
:
11711 uid_t ruid
, euid
, suid
;
11712 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11713 if (!is_error(ret
)) {
11714 if (put_user_u32(ruid
, arg1
)
11715 || put_user_u32(euid
, arg2
)
11716 || put_user_u32(suid
, arg3
))
11722 #ifdef TARGET_NR_setresgid32
11723 case TARGET_NR_setresgid32
:
11724 ret
= get_errno(sys_setresgid(arg1
, arg2
, arg3
));
11727 #ifdef TARGET_NR_getresgid32
11728 case TARGET_NR_getresgid32
:
11730 gid_t rgid
, egid
, sgid
;
11731 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11732 if (!is_error(ret
)) {
11733 if (put_user_u32(rgid
, arg1
)
11734 || put_user_u32(egid
, arg2
)
11735 || put_user_u32(sgid
, arg3
))
11741 #ifdef TARGET_NR_chown32
11742 case TARGET_NR_chown32
:
11743 if (!(p
= lock_user_string(arg1
)))
11745 ret
= get_errno(chown(p
, arg2
, arg3
));
11746 unlock_user(p
, arg1
, 0);
11749 #ifdef TARGET_NR_setuid32
11750 case TARGET_NR_setuid32
:
11751 ret
= get_errno(sys_setuid(arg1
));
11754 #ifdef TARGET_NR_setgid32
11755 case TARGET_NR_setgid32
:
11756 ret
= get_errno(sys_setgid(arg1
));
11759 #ifdef TARGET_NR_setfsuid32
11760 case TARGET_NR_setfsuid32
:
11761 ret
= get_errno(setfsuid(arg1
));
11764 #ifdef TARGET_NR_setfsgid32
11765 case TARGET_NR_setfsgid32
:
11766 ret
= get_errno(setfsgid(arg1
));
11770 case TARGET_NR_pivot_root
:
11771 goto unimplemented
;
11772 #ifdef TARGET_NR_mincore
11773 case TARGET_NR_mincore
:
11776 ret
= -TARGET_ENOMEM
;
11777 a
= lock_user(VERIFY_READ
, arg1
, arg2
, 0);
11781 ret
= -TARGET_EFAULT
;
11782 p
= lock_user_string(arg3
);
11786 ret
= get_errno(mincore(a
, arg2
, p
));
11787 unlock_user(p
, arg3
, ret
);
11789 unlock_user(a
, arg1
, 0);
11793 #ifdef TARGET_NR_arm_fadvise64_64
11794 case TARGET_NR_arm_fadvise64_64
:
11795 /* arm_fadvise64_64 looks like fadvise64_64 but
11796 * with different argument order: fd, advice, offset, len
11797 * rather than the usual fd, offset, len, advice.
11798 * Note that offset and len are both 64-bit so appear as
11799 * pairs of 32-bit registers.
11801 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
11802 target_offset64(arg5
, arg6
), arg2
);
11803 ret
= -host_to_target_errno(ret
);
11807 #if TARGET_ABI_BITS == 32
11809 #ifdef TARGET_NR_fadvise64_64
11810 case TARGET_NR_fadvise64_64
:
11811 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11812 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11820 /* 6 args: fd, offset (high, low), len (high, low), advice */
11821 if (regpairs_aligned(cpu_env
, num
)) {
11822 /* offset is in (3,4), len in (5,6) and advice in 7 */
11830 ret
= -host_to_target_errno(posix_fadvise(arg1
,
11831 target_offset64(arg2
, arg3
),
11832 target_offset64(arg4
, arg5
),
11837 #ifdef TARGET_NR_fadvise64
11838 case TARGET_NR_fadvise64
:
11839 /* 5 args: fd, offset (high, low), len, advice */
11840 if (regpairs_aligned(cpu_env
, num
)) {
11841 /* offset is in (3,4), len in 5 and advice in 6 */
11847 ret
= -host_to_target_errno(posix_fadvise(arg1
,
11848 target_offset64(arg2
, arg3
),
11853 #else /* not a 32-bit ABI */
11854 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11855 #ifdef TARGET_NR_fadvise64_64
11856 case TARGET_NR_fadvise64_64
:
11858 #ifdef TARGET_NR_fadvise64
11859 case TARGET_NR_fadvise64
:
11861 #ifdef TARGET_S390X
11863 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
11864 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
11865 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
11866 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
11870 ret
= -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
11873 #endif /* end of 64-bit ABI fadvise handling */
11875 #ifdef TARGET_NR_madvise
11876 case TARGET_NR_madvise
:
11877 /* A straight passthrough may not be safe because qemu sometimes
11878 turns private file-backed mappings into anonymous mappings.
11879 This will break MADV_DONTNEED.
11880 This is a hint, so ignoring and returning success is ok. */
11881 ret
= get_errno(0);
11884 #if TARGET_ABI_BITS == 32
11885 case TARGET_NR_fcntl64
:
11889 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
11890 to_flock64_fn
*copyto
= copy_to_user_flock64
;
11893 if (!((CPUARMState
*)cpu_env
)->eabi
) {
11894 copyfrom
= copy_from_user_oabi_flock64
;
11895 copyto
= copy_to_user_oabi_flock64
;
11899 cmd
= target_to_host_fcntl_cmd(arg2
);
11900 if (cmd
== -TARGET_EINVAL
) {
11906 case TARGET_F_GETLK64
:
11907 ret
= copyfrom(&fl
, arg3
);
11911 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11913 ret
= copyto(arg3
, &fl
);
11917 case TARGET_F_SETLK64
:
11918 case TARGET_F_SETLKW64
:
11919 ret
= copyfrom(&fl
, arg3
);
11923 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11926 ret
= do_fcntl(arg1
, arg2
, arg3
);
11932 #ifdef TARGET_NR_cacheflush
11933 case TARGET_NR_cacheflush
:
11934 /* self-modifying code is handled automatically, so nothing needed */
11938 #ifdef TARGET_NR_security
11939 case TARGET_NR_security
:
11940 goto unimplemented
;
11942 #ifdef TARGET_NR_getpagesize
11943 case TARGET_NR_getpagesize
:
11944 ret
= TARGET_PAGE_SIZE
;
11947 case TARGET_NR_gettid
:
11948 ret
= get_errno(gettid());
11950 #ifdef TARGET_NR_readahead
11951 case TARGET_NR_readahead
:
11952 #if TARGET_ABI_BITS == 32
11953 if (regpairs_aligned(cpu_env
, num
)) {
11958 ret
= get_errno(readahead(arg1
, target_offset64(arg2
, arg3
) , arg4
));
11960 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
11965 #ifdef TARGET_NR_setxattr
11966 case TARGET_NR_listxattr
:
11967 case TARGET_NR_llistxattr
:
11971 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11973 ret
= -TARGET_EFAULT
;
11977 p
= lock_user_string(arg1
);
11979 if (num
== TARGET_NR_listxattr
) {
11980 ret
= get_errno(listxattr(p
, b
, arg3
));
11982 ret
= get_errno(llistxattr(p
, b
, arg3
));
11985 ret
= -TARGET_EFAULT
;
11987 unlock_user(p
, arg1
, 0);
11988 unlock_user(b
, arg2
, arg3
);
11991 case TARGET_NR_flistxattr
:
11995 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11997 ret
= -TARGET_EFAULT
;
12001 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
12002 unlock_user(b
, arg2
, arg3
);
12005 case TARGET_NR_setxattr
:
12006 case TARGET_NR_lsetxattr
:
12008 void *p
, *n
, *v
= 0;
12010 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
12012 ret
= -TARGET_EFAULT
;
12016 p
= lock_user_string(arg1
);
12017 n
= lock_user_string(arg2
);
12019 if (num
== TARGET_NR_setxattr
) {
12020 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
12022 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
12025 ret
= -TARGET_EFAULT
;
12027 unlock_user(p
, arg1
, 0);
12028 unlock_user(n
, arg2
, 0);
12029 unlock_user(v
, arg3
, 0);
12032 case TARGET_NR_fsetxattr
:
12036 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
12038 ret
= -TARGET_EFAULT
;
12042 n
= lock_user_string(arg2
);
12044 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
12046 ret
= -TARGET_EFAULT
;
12048 unlock_user(n
, arg2
, 0);
12049 unlock_user(v
, arg3
, 0);
12052 case TARGET_NR_getxattr
:
12053 case TARGET_NR_lgetxattr
:
12055 void *p
, *n
, *v
= 0;
12057 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
12059 ret
= -TARGET_EFAULT
;
12063 p
= lock_user_string(arg1
);
12064 n
= lock_user_string(arg2
);
12066 if (num
== TARGET_NR_getxattr
) {
12067 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
12069 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
12072 ret
= -TARGET_EFAULT
;
12074 unlock_user(p
, arg1
, 0);
12075 unlock_user(n
, arg2
, 0);
12076 unlock_user(v
, arg3
, arg4
);
12079 case TARGET_NR_fgetxattr
:
12083 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
12085 ret
= -TARGET_EFAULT
;
12089 n
= lock_user_string(arg2
);
12091 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
12093 ret
= -TARGET_EFAULT
;
12095 unlock_user(n
, arg2
, 0);
12096 unlock_user(v
, arg3
, arg4
);
12099 case TARGET_NR_removexattr
:
12100 case TARGET_NR_lremovexattr
:
12103 p
= lock_user_string(arg1
);
12104 n
= lock_user_string(arg2
);
12106 if (num
== TARGET_NR_removexattr
) {
12107 ret
= get_errno(removexattr(p
, n
));
12109 ret
= get_errno(lremovexattr(p
, n
));
12112 ret
= -TARGET_EFAULT
;
12114 unlock_user(p
, arg1
, 0);
12115 unlock_user(n
, arg2
, 0);
12118 case TARGET_NR_fremovexattr
:
12121 n
= lock_user_string(arg2
);
12123 ret
= get_errno(fremovexattr(arg1
, n
));
12125 ret
= -TARGET_EFAULT
;
12127 unlock_user(n
, arg2
, 0);
12131 #endif /* CONFIG_ATTR */
12132 #ifdef TARGET_NR_set_thread_area
12133 case TARGET_NR_set_thread_area
:
12134 #if defined(TARGET_MIPS)
12135 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
12138 #elif defined(TARGET_CRIS)
12140 ret
= -TARGET_EINVAL
;
12142 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
12146 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12147 ret
= do_set_thread_area(cpu_env
, arg1
);
12149 #elif defined(TARGET_M68K)
12151 TaskState
*ts
= cpu
->opaque
;
12152 ts
->tp_value
= arg1
;
12157 goto unimplemented_nowarn
;
12160 #ifdef TARGET_NR_get_thread_area
12161 case TARGET_NR_get_thread_area
:
12162 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12163 ret
= do_get_thread_area(cpu_env
, arg1
);
12165 #elif defined(TARGET_M68K)
12167 TaskState
*ts
= cpu
->opaque
;
12168 ret
= ts
->tp_value
;
12172 goto unimplemented_nowarn
;
12175 #ifdef TARGET_NR_getdomainname
12176 case TARGET_NR_getdomainname
:
12177 goto unimplemented_nowarn
;
12180 #ifdef TARGET_NR_clock_settime
12181 case TARGET_NR_clock_settime
:
12183 struct timespec ts
;
12185 ret
= target_to_host_timespec(&ts
, arg2
);
12186 if (!is_error(ret
)) {
12187 ret
= get_errno(clock_settime(arg1
, &ts
));
12192 #ifdef TARGET_NR_clock_gettime
12193 case TARGET_NR_clock_gettime
:
12195 struct timespec ts
;
12196 ret
= get_errno(clock_gettime(arg1
, &ts
));
12197 if (!is_error(ret
)) {
12198 ret
= host_to_target_timespec(arg2
, &ts
);
12203 #ifdef TARGET_NR_clock_getres
12204 case TARGET_NR_clock_getres
:
12206 struct timespec ts
;
12207 ret
= get_errno(clock_getres(arg1
, &ts
));
12208 if (!is_error(ret
)) {
12209 host_to_target_timespec(arg2
, &ts
);
12214 #ifdef TARGET_NR_clock_nanosleep
12215 case TARGET_NR_clock_nanosleep
:
12217 struct timespec ts
;
12218 target_to_host_timespec(&ts
, arg3
);
12219 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
12220 &ts
, arg4
? &ts
: NULL
));
12222 host_to_target_timespec(arg4
, &ts
);
12224 #if defined(TARGET_PPC)
12225 /* clock_nanosleep is odd in that it returns positive errno values.
12226 * On PPC, CR0 bit 3 should be set in such a situation. */
12227 if (ret
&& ret
!= -TARGET_ERESTARTSYS
) {
12228 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
12235 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12236 case TARGET_NR_set_tid_address
:
12237 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
12241 case TARGET_NR_tkill
:
12242 ret
= get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
12245 case TARGET_NR_tgkill
:
12246 ret
= get_errno(safe_tgkill((int)arg1
, (int)arg2
,
12247 target_to_host_signal(arg3
)));
12250 #ifdef TARGET_NR_set_robust_list
12251 case TARGET_NR_set_robust_list
:
12252 case TARGET_NR_get_robust_list
:
12253 /* The ABI for supporting robust futexes has userspace pass
12254 * the kernel a pointer to a linked list which is updated by
12255 * userspace after the syscall; the list is walked by the kernel
12256 * when the thread exits. Since the linked list in QEMU guest
12257 * memory isn't a valid linked list for the host and we have
12258 * no way to reliably intercept the thread-death event, we can't
12259 * support these. Silently return ENOSYS so that guest userspace
12260 * falls back to a non-robust futex implementation (which should
12261 * be OK except in the corner case of the guest crashing while
12262 * holding a mutex that is shared with another process via
12265 goto unimplemented_nowarn
;
12268 #if defined(TARGET_NR_utimensat)
12269 case TARGET_NR_utimensat
:
12271 struct timespec
*tsp
, ts
[2];
12275 target_to_host_timespec(ts
, arg3
);
12276 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
12280 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
12282 if (!(p
= lock_user_string(arg2
))) {
12283 ret
= -TARGET_EFAULT
;
12286 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
12287 unlock_user(p
, arg2
, 0);
12292 case TARGET_NR_futex
:
12293 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12295 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12296 case TARGET_NR_inotify_init
:
12297 ret
= get_errno(sys_inotify_init());
12299 fd_trans_register(ret
, &target_inotify_trans
);
12303 #ifdef CONFIG_INOTIFY1
12304 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12305 case TARGET_NR_inotify_init1
:
12306 ret
= get_errno(sys_inotify_init1(target_to_host_bitmask(arg1
,
12307 fcntl_flags_tbl
)));
12309 fd_trans_register(ret
, &target_inotify_trans
);
12314 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12315 case TARGET_NR_inotify_add_watch
:
12316 p
= lock_user_string(arg2
);
12317 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
12318 unlock_user(p
, arg2
, 0);
12321 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12322 case TARGET_NR_inotify_rm_watch
:
12323 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
12327 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12328 case TARGET_NR_mq_open
:
12330 struct mq_attr posix_mq_attr
;
12331 struct mq_attr
*pposix_mq_attr
;
12334 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
12335 pposix_mq_attr
= NULL
;
12337 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
12340 pposix_mq_attr
= &posix_mq_attr
;
12342 p
= lock_user_string(arg1
- 1);
12346 ret
= get_errno(mq_open(p
, host_flags
, arg3
, pposix_mq_attr
));
12347 unlock_user (p
, arg1
, 0);
12351 case TARGET_NR_mq_unlink
:
12352 p
= lock_user_string(arg1
- 1);
12354 ret
= -TARGET_EFAULT
;
12357 ret
= get_errno(mq_unlink(p
));
12358 unlock_user (p
, arg1
, 0);
12361 case TARGET_NR_mq_timedsend
:
12363 struct timespec ts
;
12365 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
12367 target_to_host_timespec(&ts
, arg5
);
12368 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
12369 host_to_target_timespec(arg5
, &ts
);
12371 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
12373 unlock_user (p
, arg2
, arg3
);
12377 case TARGET_NR_mq_timedreceive
:
12379 struct timespec ts
;
12382 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
12384 target_to_host_timespec(&ts
, arg5
);
12385 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12387 host_to_target_timespec(arg5
, &ts
);
12389 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12392 unlock_user (p
, arg2
, arg3
);
12394 put_user_u32(prio
, arg4
);
12398 /* Not implemented for now... */
12399 /* case TARGET_NR_mq_notify: */
12402 case TARGET_NR_mq_getsetattr
:
12404 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
12407 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
12408 ret
= get_errno(mq_setattr(arg1
, &posix_mq_attr_in
,
12409 &posix_mq_attr_out
));
12410 } else if (arg3
!= 0) {
12411 ret
= get_errno(mq_getattr(arg1
, &posix_mq_attr_out
));
12413 if (ret
== 0 && arg3
!= 0) {
12414 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
12420 #ifdef CONFIG_SPLICE
12421 #ifdef TARGET_NR_tee
12422 case TARGET_NR_tee
:
12424 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
12428 #ifdef TARGET_NR_splice
12429 case TARGET_NR_splice
:
12431 loff_t loff_in
, loff_out
;
12432 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
12434 if (get_user_u64(loff_in
, arg2
)) {
12437 ploff_in
= &loff_in
;
12440 if (get_user_u64(loff_out
, arg4
)) {
12443 ploff_out
= &loff_out
;
12445 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
12447 if (put_user_u64(loff_in
, arg2
)) {
12452 if (put_user_u64(loff_out
, arg4
)) {
12459 #ifdef TARGET_NR_vmsplice
12460 case TARGET_NR_vmsplice
:
12462 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
12464 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
12465 unlock_iovec(vec
, arg2
, arg3
, 0);
12467 ret
= -host_to_target_errno(errno
);
12472 #endif /* CONFIG_SPLICE */
12473 #ifdef CONFIG_EVENTFD
12474 #if defined(TARGET_NR_eventfd)
12475 case TARGET_NR_eventfd
:
12476 ret
= get_errno(eventfd(arg1
, 0));
12478 fd_trans_register(ret
, &target_eventfd_trans
);
12482 #if defined(TARGET_NR_eventfd2)
12483 case TARGET_NR_eventfd2
:
12485 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
12486 if (arg2
& TARGET_O_NONBLOCK
) {
12487 host_flags
|= O_NONBLOCK
;
12489 if (arg2
& TARGET_O_CLOEXEC
) {
12490 host_flags
|= O_CLOEXEC
;
12492 ret
= get_errno(eventfd(arg1
, host_flags
));
12494 fd_trans_register(ret
, &target_eventfd_trans
);
12499 #endif /* CONFIG_EVENTFD */
12500 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12501 case TARGET_NR_fallocate
:
12502 #if TARGET_ABI_BITS == 32
12503 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
12504 target_offset64(arg5
, arg6
)));
12506 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
12510 #if defined(CONFIG_SYNC_FILE_RANGE)
12511 #if defined(TARGET_NR_sync_file_range)
12512 case TARGET_NR_sync_file_range
:
12513 #if TARGET_ABI_BITS == 32
12514 #if defined(TARGET_MIPS)
12515 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12516 target_offset64(arg5
, arg6
), arg7
));
12518 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
12519 target_offset64(arg4
, arg5
), arg6
));
12520 #endif /* !TARGET_MIPS */
12522 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
12526 #if defined(TARGET_NR_sync_file_range2)
12527 case TARGET_NR_sync_file_range2
:
12528 /* This is like sync_file_range but the arguments are reordered */
12529 #if TARGET_ABI_BITS == 32
12530 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12531 target_offset64(arg5
, arg6
), arg2
));
12533 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
12538 #if defined(TARGET_NR_signalfd4)
12539 case TARGET_NR_signalfd4
:
12540 ret
= do_signalfd4(arg1
, arg2
, arg4
);
12543 #if defined(TARGET_NR_signalfd)
12544 case TARGET_NR_signalfd
:
12545 ret
= do_signalfd4(arg1
, arg2
, 0);
12548 #if defined(CONFIG_EPOLL)
12549 #if defined(TARGET_NR_epoll_create)
12550 case TARGET_NR_epoll_create
:
12551 ret
= get_errno(epoll_create(arg1
));
12554 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12555 case TARGET_NR_epoll_create1
:
12556 ret
= get_errno(epoll_create1(arg1
));
12559 #if defined(TARGET_NR_epoll_ctl)
12560 case TARGET_NR_epoll_ctl
:
12562 struct epoll_event ep
;
12563 struct epoll_event
*epp
= 0;
12565 struct target_epoll_event
*target_ep
;
12566 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
12569 ep
.events
= tswap32(target_ep
->events
);
12570 /* The epoll_data_t union is just opaque data to the kernel,
12571 * so we transfer all 64 bits across and need not worry what
12572 * actual data type it is.
12574 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
12575 unlock_user_struct(target_ep
, arg4
, 0);
12578 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
12583 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12584 #if defined(TARGET_NR_epoll_wait)
12585 case TARGET_NR_epoll_wait
:
12587 #if defined(TARGET_NR_epoll_pwait)
12588 case TARGET_NR_epoll_pwait
:
12591 struct target_epoll_event
*target_ep
;
12592 struct epoll_event
*ep
;
12594 int maxevents
= arg3
;
12595 int timeout
= arg4
;
12597 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
12598 ret
= -TARGET_EINVAL
;
12602 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
12603 maxevents
* sizeof(struct target_epoll_event
), 1);
12608 ep
= g_try_new(struct epoll_event
, maxevents
);
12610 unlock_user(target_ep
, arg2
, 0);
12611 ret
= -TARGET_ENOMEM
;
12616 #if defined(TARGET_NR_epoll_pwait)
12617 case TARGET_NR_epoll_pwait
:
12619 target_sigset_t
*target_set
;
12620 sigset_t _set
, *set
= &_set
;
12623 if (arg6
!= sizeof(target_sigset_t
)) {
12624 ret
= -TARGET_EINVAL
;
12628 target_set
= lock_user(VERIFY_READ
, arg5
,
12629 sizeof(target_sigset_t
), 1);
12631 ret
= -TARGET_EFAULT
;
12634 target_to_host_sigset(set
, target_set
);
12635 unlock_user(target_set
, arg5
, 0);
12640 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12641 set
, SIGSET_T_SIZE
));
12645 #if defined(TARGET_NR_epoll_wait)
12646 case TARGET_NR_epoll_wait
:
12647 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12652 ret
= -TARGET_ENOSYS
;
12654 if (!is_error(ret
)) {
12656 for (i
= 0; i
< ret
; i
++) {
12657 target_ep
[i
].events
= tswap32(ep
[i
].events
);
12658 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
12660 unlock_user(target_ep
, arg2
,
12661 ret
* sizeof(struct target_epoll_event
));
12663 unlock_user(target_ep
, arg2
, 0);
12670 #ifdef TARGET_NR_prlimit64
12671 case TARGET_NR_prlimit64
:
12673 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12674 struct target_rlimit64
*target_rnew
, *target_rold
;
12675 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
12676 int resource
= target_to_host_resource(arg2
);
12678 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
12681 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
12682 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
12683 unlock_user_struct(target_rnew
, arg3
, 0);
12687 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
12688 if (!is_error(ret
) && arg4
) {
12689 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
12692 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
12693 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
12694 unlock_user_struct(target_rold
, arg4
, 1);
12699 #ifdef TARGET_NR_gethostname
12700 case TARGET_NR_gethostname
:
12702 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
12704 ret
= get_errno(gethostname(name
, arg2
));
12705 unlock_user(name
, arg1
, arg2
);
12707 ret
= -TARGET_EFAULT
;
12712 #ifdef TARGET_NR_atomic_cmpxchg_32
12713 case TARGET_NR_atomic_cmpxchg_32
:
12715 /* should use start_exclusive from main.c */
12716 abi_ulong mem_value
;
12717 if (get_user_u32(mem_value
, arg6
)) {
12718 target_siginfo_t info
;
12719 info
.si_signo
= SIGSEGV
;
12721 info
.si_code
= TARGET_SEGV_MAPERR
;
12722 info
._sifields
._sigfault
._addr
= arg6
;
12723 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
12724 QEMU_SI_FAULT
, &info
);
12728 if (mem_value
== arg2
)
12729 put_user_u32(arg1
, arg6
);
12734 #ifdef TARGET_NR_atomic_barrier
12735 case TARGET_NR_atomic_barrier
:
12737 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
12743 #ifdef TARGET_NR_timer_create
12744 case TARGET_NR_timer_create
:
12746 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12748 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
12751 int timer_index
= next_free_host_timer();
12753 if (timer_index
< 0) {
12754 ret
= -TARGET_EAGAIN
;
12756 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
12759 phost_sevp
= &host_sevp
;
12760 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
12766 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
12770 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
12779 #ifdef TARGET_NR_timer_settime
12780 case TARGET_NR_timer_settime
:
12782 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12783 * struct itimerspec * old_value */
12784 target_timer_t timerid
= get_timer_id(arg1
);
12788 } else if (arg3
== 0) {
12789 ret
= -TARGET_EINVAL
;
12791 timer_t htimer
= g_posix_timers
[timerid
];
12792 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12794 if (target_to_host_itimerspec(&hspec_new
, arg3
)) {
12798 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12799 if (arg4
&& host_to_target_itimerspec(arg4
, &hspec_old
)) {
12807 #ifdef TARGET_NR_timer_gettime
12808 case TARGET_NR_timer_gettime
:
12810 /* args: timer_t timerid, struct itimerspec *curr_value */
12811 target_timer_t timerid
= get_timer_id(arg1
);
12815 } else if (!arg2
) {
12816 ret
= -TARGET_EFAULT
;
12818 timer_t htimer
= g_posix_timers
[timerid
];
12819 struct itimerspec hspec
;
12820 ret
= get_errno(timer_gettime(htimer
, &hspec
));
12822 if (host_to_target_itimerspec(arg2
, &hspec
)) {
12823 ret
= -TARGET_EFAULT
;
12830 #ifdef TARGET_NR_timer_getoverrun
12831 case TARGET_NR_timer_getoverrun
:
12833 /* args: timer_t timerid */
12834 target_timer_t timerid
= get_timer_id(arg1
);
12839 timer_t htimer
= g_posix_timers
[timerid
];
12840 ret
= get_errno(timer_getoverrun(htimer
));
12842 fd_trans_unregister(ret
);
12847 #ifdef TARGET_NR_timer_delete
12848 case TARGET_NR_timer_delete
:
12850 /* args: timer_t timerid */
12851 target_timer_t timerid
= get_timer_id(arg1
);
12856 timer_t htimer
= g_posix_timers
[timerid
];
12857 ret
= get_errno(timer_delete(htimer
));
12858 g_posix_timers
[timerid
] = 0;
12864 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12865 case TARGET_NR_timerfd_create
:
12866 ret
= get_errno(timerfd_create(arg1
,
12867 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
12871 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12872 case TARGET_NR_timerfd_gettime
:
12874 struct itimerspec its_curr
;
12876 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
12878 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
12885 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12886 case TARGET_NR_timerfd_settime
:
12888 struct itimerspec its_new
, its_old
, *p_new
;
12891 if (target_to_host_itimerspec(&its_new
, arg3
)) {
12899 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
12901 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
12908 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12909 case TARGET_NR_ioprio_get
:
12910 ret
= get_errno(ioprio_get(arg1
, arg2
));
12914 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12915 case TARGET_NR_ioprio_set
:
12916 ret
= get_errno(ioprio_set(arg1
, arg2
, arg3
));
12920 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12921 case TARGET_NR_setns
:
12922 ret
= get_errno(setns(arg1
, arg2
));
12925 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12926 case TARGET_NR_unshare
:
12927 ret
= get_errno(unshare(arg1
));
12930 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12931 case TARGET_NR_kcmp
:
12932 ret
= get_errno(kcmp(arg1
, arg2
, arg3
, arg4
, arg5
));
12935 #ifdef TARGET_NR_swapcontext
12936 case TARGET_NR_swapcontext
:
12937 /* PowerPC specific. */
12938 ret
= do_swapcontext(cpu_env
, arg1
, arg2
, arg3
);
12944 qemu_log_mask(LOG_UNIMP
, "Unsupported syscall: %d\n", num
);
12945 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
12946 unimplemented_nowarn
:
12948 ret
= -TARGET_ENOSYS
;
12953 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
12956 print_syscall_ret(num
, ret
);
12957 trace_guest_user_syscall_ret(cpu
, num
, ret
);
12960 ret
= -TARGET_EFAULT
;