4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
36 #include <linux/capability.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
43 #include <sys/times.h>
46 #include <sys/statfs.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #include "qemu-common.h"
60 #include <sys/timerfd.h>
66 #include <sys/eventfd.h>
69 #include <sys/epoll.h>
72 #include "qemu/xattr.h"
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
91 #include <linux/mtio.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
107 #include <linux/if_bridge.h>
109 #include <linux/audit.h>
110 #include "linux_loop.h"
116 #define CLONE_IO 0x80000000 /* Clone io context */
/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */

/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                             \
    (CLONE_DETACHED | CLONE_IO)

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS                       \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |               \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                     \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |               \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

/* Any fork flag outside exit-signal, optional and ignored sets is rejected. */
#define CLONE_INVALID_FORK_FLAGS                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

/* Any thread flag outside the mandatory, optional and ignored sets is
 * rejected. */
#define CLONE_INVALID_THREAD_FLAGS                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
       CLONE_IGNORED_FLAGS))

/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */

/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS
/* Duplicated from linux/msdos_fs.h (see the commented-out include below),
 * which cannot be included directly here. */
//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
184 #define _syscall0(type,name) \
185 static type name (void) \
187 return syscall(__NR_##name); \
190 #define _syscall1(type,name,type1,arg1) \
191 static type name (type1 arg1) \
193 return syscall(__NR_##name, arg1); \
196 #define _syscall2(type,name,type1,arg1,type2,arg2) \
197 static type name (type1 arg1,type2 arg2) \
199 return syscall(__NR_##name, arg1, arg2); \
202 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
203 static type name (type1 arg1,type2 arg2,type3 arg3) \
205 return syscall(__NR_##name, arg1, arg2, arg3); \
208 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
209 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
211 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
214 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
216 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
218 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
222 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
223 type5,arg5,type6,arg6) \
224 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
227 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
/* Alias the sys_xxx names used by the _syscallN wrappers below onto the
 * host syscall numbers, so the wrappers invoke the raw syscalls directly
 * rather than any libc equivalents. */
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
244 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
245 #define __NR__llseek __NR_lseek
248 /* Newer kernel ports have llseek() instead of _llseek() */
249 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
250 #define TARGET_NR__llseek TARGET_NR_llseek
254 _syscall0(int, gettid
)
256 /* This is a replacement for the host gettid() and must return a host
258 static int gettid(void) {
263 /* For the 64-bit guest on 32-bit host case we must emulate
264 * getdents using getdents64, because otherwise the host
265 * might hand us back more dirent records than we can fit
266 * into the guest buffer after structure format conversion.
267 * Otherwise we emulate getdents with getdents if the host has it.
269 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
270 #define EMULATE_GETDENTS_WITH_GETDENTS
273 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
274 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
276 #if (defined(TARGET_NR_getdents) && \
277 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
278 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
279 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
281 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
282 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
283 loff_t
*, res
, uint
, wh
);
285 _syscall3(int, sys_rt_sigqueueinfo
, pid_t
, pid
, int, sig
, siginfo_t
*, uinfo
)
286 _syscall4(int, sys_rt_tgsigqueueinfo
, pid_t
, pid
, pid_t
, tid
, int, sig
,
288 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
289 #ifdef __NR_exit_group
290 _syscall1(int,exit_group
,int,error_code
)
292 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
293 _syscall1(int,set_tid_address
,int *,tidptr
)
295 #if defined(TARGET_NR_futex) && defined(__NR_futex)
296 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
297 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
/* Raw host sched_getaffinity(2); bypasses the libc wrapper. */
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
/* Raw host sched_setaffinity(2); bypasses the libc wrapper. */
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_getcpu __NR_getcpu
/* Raw host getcpu(2). */
_syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
307 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
/* Raw capget/capset syscalls, declared with the kernel's
 * __user_cap_* structures (from linux/capability.h). */
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
313 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
314 _syscall2(int, ioprio_get
, int, which
, int, who
)
316 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
317 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
319 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
320 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
323 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
324 _syscall5(int, kcmp
, pid_t
, pid1
, pid_t
, pid2
, int, type
,
325 unsigned long, idx1
, unsigned long, idx2
)
328 static bitmask_transtbl fcntl_flags_tbl
[] = {
329 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
330 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
331 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
332 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
333 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
334 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
335 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
336 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
337 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
338 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
339 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
340 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
341 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
342 #if defined(O_DIRECT)
343 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
345 #if defined(O_NOATIME)
346 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
348 #if defined(O_CLOEXEC)
349 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
352 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
354 #if defined(O_TMPFILE)
355 { TARGET_O_TMPFILE
, TARGET_O_TMPFILE
, O_TMPFILE
, O_TMPFILE
},
357 /* Don't terminate the list prematurely on 64-bit host+guest. */
358 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
359 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
366 QEMU_IFLA_BR_FORWARD_DELAY
,
367 QEMU_IFLA_BR_HELLO_TIME
,
368 QEMU_IFLA_BR_MAX_AGE
,
369 QEMU_IFLA_BR_AGEING_TIME
,
370 QEMU_IFLA_BR_STP_STATE
,
371 QEMU_IFLA_BR_PRIORITY
,
372 QEMU_IFLA_BR_VLAN_FILTERING
,
373 QEMU_IFLA_BR_VLAN_PROTOCOL
,
374 QEMU_IFLA_BR_GROUP_FWD_MASK
,
375 QEMU_IFLA_BR_ROOT_ID
,
376 QEMU_IFLA_BR_BRIDGE_ID
,
377 QEMU_IFLA_BR_ROOT_PORT
,
378 QEMU_IFLA_BR_ROOT_PATH_COST
,
379 QEMU_IFLA_BR_TOPOLOGY_CHANGE
,
380 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED
,
381 QEMU_IFLA_BR_HELLO_TIMER
,
382 QEMU_IFLA_BR_TCN_TIMER
,
383 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER
,
384 QEMU_IFLA_BR_GC_TIMER
,
385 QEMU_IFLA_BR_GROUP_ADDR
,
386 QEMU_IFLA_BR_FDB_FLUSH
,
387 QEMU_IFLA_BR_MCAST_ROUTER
,
388 QEMU_IFLA_BR_MCAST_SNOOPING
,
389 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR
,
390 QEMU_IFLA_BR_MCAST_QUERIER
,
391 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY
,
392 QEMU_IFLA_BR_MCAST_HASH_MAX
,
393 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT
,
394 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT
,
395 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL
,
396 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL
,
397 QEMU_IFLA_BR_MCAST_QUERIER_INTVL
,
398 QEMU_IFLA_BR_MCAST_QUERY_INTVL
,
399 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
,
400 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL
,
401 QEMU_IFLA_BR_NF_CALL_IPTABLES
,
402 QEMU_IFLA_BR_NF_CALL_IP6TABLES
,
403 QEMU_IFLA_BR_NF_CALL_ARPTABLES
,
404 QEMU_IFLA_BR_VLAN_DEFAULT_PVID
,
406 QEMU_IFLA_BR_VLAN_STATS_ENABLED
,
407 QEMU_IFLA_BR_MCAST_STATS_ENABLED
,
408 QEMU_IFLA_BR_MCAST_IGMP_VERSION
,
409 QEMU_IFLA_BR_MCAST_MLD_VERSION
,
433 QEMU_IFLA_NET_NS_PID
,
436 QEMU_IFLA_VFINFO_LIST
,
444 QEMU_IFLA_PROMISCUITY
,
445 QEMU_IFLA_NUM_TX_QUEUES
,
446 QEMU_IFLA_NUM_RX_QUEUES
,
448 QEMU_IFLA_PHYS_PORT_ID
,
449 QEMU_IFLA_CARRIER_CHANGES
,
450 QEMU_IFLA_PHYS_SWITCH_ID
,
451 QEMU_IFLA_LINK_NETNSID
,
452 QEMU_IFLA_PHYS_PORT_NAME
,
453 QEMU_IFLA_PROTO_DOWN
,
454 QEMU_IFLA_GSO_MAX_SEGS
,
455 QEMU_IFLA_GSO_MAX_SIZE
,
459 QEMU_IFLA_NEW_NETNSID
,
460 QEMU_IFLA_IF_NETNSID
,
461 QEMU_IFLA_CARRIER_UP_COUNT
,
462 QEMU_IFLA_CARRIER_DOWN_COUNT
,
463 QEMU_IFLA_NEW_IFINDEX
,
468 QEMU_IFLA_BRPORT_UNSPEC
,
469 QEMU_IFLA_BRPORT_STATE
,
470 QEMU_IFLA_BRPORT_PRIORITY
,
471 QEMU_IFLA_BRPORT_COST
,
472 QEMU_IFLA_BRPORT_MODE
,
473 QEMU_IFLA_BRPORT_GUARD
,
474 QEMU_IFLA_BRPORT_PROTECT
,
475 QEMU_IFLA_BRPORT_FAST_LEAVE
,
476 QEMU_IFLA_BRPORT_LEARNING
,
477 QEMU_IFLA_BRPORT_UNICAST_FLOOD
,
478 QEMU_IFLA_BRPORT_PROXYARP
,
479 QEMU_IFLA_BRPORT_LEARNING_SYNC
,
480 QEMU_IFLA_BRPORT_PROXYARP_WIFI
,
481 QEMU_IFLA_BRPORT_ROOT_ID
,
482 QEMU_IFLA_BRPORT_BRIDGE_ID
,
483 QEMU_IFLA_BRPORT_DESIGNATED_PORT
,
484 QEMU_IFLA_BRPORT_DESIGNATED_COST
,
487 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
,
488 QEMU_IFLA_BRPORT_CONFIG_PENDING
,
489 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER
,
490 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER
,
491 QEMU_IFLA_BRPORT_HOLD_TIMER
,
492 QEMU_IFLA_BRPORT_FLUSH
,
493 QEMU_IFLA_BRPORT_MULTICAST_ROUTER
,
494 QEMU_IFLA_BRPORT_PAD
,
495 QEMU_IFLA_BRPORT_MCAST_FLOOD
,
496 QEMU_IFLA_BRPORT_MCAST_TO_UCAST
,
497 QEMU_IFLA_BRPORT_VLAN_TUNNEL
,
498 QEMU_IFLA_BRPORT_BCAST_FLOOD
,
499 QEMU_IFLA_BRPORT_GROUP_FWD_MASK
,
500 QEMU_IFLA_BRPORT_NEIGH_SUPPRESS
,
501 QEMU___IFLA_BRPORT_MAX
505 QEMU_IFLA_INFO_UNSPEC
,
508 QEMU_IFLA_INFO_XSTATS
,
509 QEMU_IFLA_INFO_SLAVE_KIND
,
510 QEMU_IFLA_INFO_SLAVE_DATA
,
511 QEMU___IFLA_INFO_MAX
,
515 QEMU_IFLA_INET_UNSPEC
,
517 QEMU___IFLA_INET_MAX
,
521 QEMU_IFLA_INET6_UNSPEC
,
522 QEMU_IFLA_INET6_FLAGS
,
523 QEMU_IFLA_INET6_CONF
,
524 QEMU_IFLA_INET6_STATS
,
525 QEMU_IFLA_INET6_MCAST
,
526 QEMU_IFLA_INET6_CACHEINFO
,
527 QEMU_IFLA_INET6_ICMP6STATS
,
528 QEMU_IFLA_INET6_TOKEN
,
529 QEMU_IFLA_INET6_ADDR_GEN_MODE
,
530 QEMU___IFLA_INET6_MAX
534 QEMU_IFLA_XDP_UNSPEC
,
536 QEMU_IFLA_XDP_ATTACHED
,
538 QEMU_IFLA_XDP_PROG_ID
,
542 typedef abi_long (*TargetFdDataFunc
)(void *, size_t);
543 typedef abi_long (*TargetFdAddrFunc
)(void *, abi_ulong
, socklen_t
);
544 typedef struct TargetFdTrans
{
545 TargetFdDataFunc host_to_target_data
;
546 TargetFdDataFunc target_to_host_data
;
547 TargetFdAddrFunc target_to_host_addr
;
550 static TargetFdTrans
**target_fd_trans
;
552 static unsigned int target_fd_max
;
554 static TargetFdDataFunc
fd_trans_target_to_host_data(int fd
)
556 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
557 return target_fd_trans
[fd
]->target_to_host_data
;
562 static TargetFdDataFunc
fd_trans_host_to_target_data(int fd
)
564 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
565 return target_fd_trans
[fd
]->host_to_target_data
;
570 static TargetFdAddrFunc
fd_trans_target_to_host_addr(int fd
)
572 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
573 return target_fd_trans
[fd
]->target_to_host_addr
;
578 static void fd_trans_register(int fd
, TargetFdTrans
*trans
)
582 if (fd
>= target_fd_max
) {
583 oldmax
= target_fd_max
;
584 target_fd_max
= ((fd
>> 6) + 1) << 6; /* by slice of 64 entries */
585 target_fd_trans
= g_renew(TargetFdTrans
*,
586 target_fd_trans
, target_fd_max
);
587 memset((void *)(target_fd_trans
+ oldmax
), 0,
588 (target_fd_max
- oldmax
) * sizeof(TargetFdTrans
*));
590 target_fd_trans
[fd
] = trans
;
593 static void fd_trans_unregister(int fd
)
595 if (fd
>= 0 && fd
< target_fd_max
) {
596 target_fd_trans
[fd
] = NULL
;
600 static void fd_trans_dup(int oldfd
, int newfd
)
602 fd_trans_unregister(newfd
);
603 if (oldfd
< target_fd_max
&& target_fd_trans
[oldfd
]) {
604 fd_trans_register(newfd
, target_fd_trans
[oldfd
]);
608 static int sys_getcwd1(char *buf
, size_t size
)
610 if (getcwd(buf
, size
) == NULL
) {
611 /* getcwd() sets errno */
614 return strlen(buf
)+1;
617 #ifdef TARGET_NR_utimensat
618 #if defined(__NR_utimensat)
619 #define __NR_sys_utimensat __NR_utimensat
620 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
621 const struct timespec
*,tsp
,int,flags
)
623 static int sys_utimensat(int dirfd
, const char *pathname
,
624 const struct timespec times
[2], int flags
)
630 #endif /* TARGET_NR_utimensat */
632 #ifdef TARGET_NR_renameat2
633 #if defined(__NR_renameat2)
634 #define __NR_sys_renameat2 __NR_renameat2
635 _syscall5(int, sys_renameat2
, int, oldfd
, const char *, old
, int, newfd
,
636 const char *, new, unsigned int, flags
)
638 static int sys_renameat2(int oldfd
, const char *old
,
639 int newfd
, const char *new, int flags
)
642 return renameat(oldfd
, old
, newfd
, new);
648 #endif /* TARGET_NR_renameat2 */
650 #ifdef CONFIG_INOTIFY
651 #include <sys/inotify.h>
653 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
654 static int sys_inotify_init(void)
656 return (inotify_init());
659 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
660 static int sys_inotify_add_watch(int fd
,const char *pathname
, int32_t mask
)
662 return (inotify_add_watch(fd
, pathname
, mask
));
665 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
666 static int sys_inotify_rm_watch(int fd
, int32_t wd
)
668 return (inotify_rm_watch(fd
, wd
));
671 #ifdef CONFIG_INOTIFY1
672 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
673 static int sys_inotify_init1(int flags
)
675 return (inotify_init1(flags
));
680 /* Userspace can usually survive runtime without inotify */
681 #undef TARGET_NR_inotify_init
682 #undef TARGET_NR_inotify_init1
683 #undef TARGET_NR_inotify_add_watch
684 #undef TARGET_NR_inotify_rm_watch
685 #endif /* CONFIG_INOTIFY */
687 #if defined(TARGET_NR_prlimit64)
688 #ifndef __NR_prlimit64
689 # define __NR_prlimit64 -1
691 #define __NR_sys_prlimit64 __NR_prlimit64
692 /* The glibc rlimit structure may not be that used by the underlying syscall */
693 struct host_rlimit64
{
697 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
698 const struct host_rlimit64
*, new_limit
,
699 struct host_rlimit64
*, old_limit
)
703 #if defined(TARGET_NR_timer_create)
704 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
705 static timer_t g_posix_timers
[32] = { 0, } ;
707 static inline int next_free_host_timer(void)
710 /* FIXME: Does finding the next free slot require a lock? */
711 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
712 if (g_posix_timers
[k
] == 0) {
713 g_posix_timers
[k
] = (timer_t
) 1;
721 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
723 static inline int regpairs_aligned(void *cpu_env
, int num
)
725 return ((((CPUARMState
*)cpu_env
)->eabi
) == 1) ;
727 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
728 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
729 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
730 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
731 * of registers which translates to the same as ARM/MIPS, because we start with
733 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
734 #elif defined(TARGET_SH4)
735 /* SH4 doesn't align register pairs, except for p{read,write}64 */
736 static inline int regpairs_aligned(void *cpu_env
, int num
)
739 case TARGET_NR_pread64
:
740 case TARGET_NR_pwrite64
:
747 #elif defined(TARGET_XTENSA)
748 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
750 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 0; }
753 #define ERRNO_TABLE_SIZE 1200
755 /* target_to_host_errno_table[] is initialized from
756 * host_to_target_errno_table[] in syscall_init(). */
757 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
761 * This list is the union of errno values overridden in asm-<arch>/errno.h
762 * minus the errnos that are not actually generic to all archs.
764 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
765 [EAGAIN
] = TARGET_EAGAIN
,
766 [EIDRM
] = TARGET_EIDRM
,
767 [ECHRNG
] = TARGET_ECHRNG
,
768 [EL2NSYNC
] = TARGET_EL2NSYNC
,
769 [EL3HLT
] = TARGET_EL3HLT
,
770 [EL3RST
] = TARGET_EL3RST
,
771 [ELNRNG
] = TARGET_ELNRNG
,
772 [EUNATCH
] = TARGET_EUNATCH
,
773 [ENOCSI
] = TARGET_ENOCSI
,
774 [EL2HLT
] = TARGET_EL2HLT
,
775 [EDEADLK
] = TARGET_EDEADLK
,
776 [ENOLCK
] = TARGET_ENOLCK
,
777 [EBADE
] = TARGET_EBADE
,
778 [EBADR
] = TARGET_EBADR
,
779 [EXFULL
] = TARGET_EXFULL
,
780 [ENOANO
] = TARGET_ENOANO
,
781 [EBADRQC
] = TARGET_EBADRQC
,
782 [EBADSLT
] = TARGET_EBADSLT
,
783 [EBFONT
] = TARGET_EBFONT
,
784 [ENOSTR
] = TARGET_ENOSTR
,
785 [ENODATA
] = TARGET_ENODATA
,
786 [ETIME
] = TARGET_ETIME
,
787 [ENOSR
] = TARGET_ENOSR
,
788 [ENONET
] = TARGET_ENONET
,
789 [ENOPKG
] = TARGET_ENOPKG
,
790 [EREMOTE
] = TARGET_EREMOTE
,
791 [ENOLINK
] = TARGET_ENOLINK
,
792 [EADV
] = TARGET_EADV
,
793 [ESRMNT
] = TARGET_ESRMNT
,
794 [ECOMM
] = TARGET_ECOMM
,
795 [EPROTO
] = TARGET_EPROTO
,
796 [EDOTDOT
] = TARGET_EDOTDOT
,
797 [EMULTIHOP
] = TARGET_EMULTIHOP
,
798 [EBADMSG
] = TARGET_EBADMSG
,
799 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
800 [EOVERFLOW
] = TARGET_EOVERFLOW
,
801 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
802 [EBADFD
] = TARGET_EBADFD
,
803 [EREMCHG
] = TARGET_EREMCHG
,
804 [ELIBACC
] = TARGET_ELIBACC
,
805 [ELIBBAD
] = TARGET_ELIBBAD
,
806 [ELIBSCN
] = TARGET_ELIBSCN
,
807 [ELIBMAX
] = TARGET_ELIBMAX
,
808 [ELIBEXEC
] = TARGET_ELIBEXEC
,
809 [EILSEQ
] = TARGET_EILSEQ
,
810 [ENOSYS
] = TARGET_ENOSYS
,
811 [ELOOP
] = TARGET_ELOOP
,
812 [ERESTART
] = TARGET_ERESTART
,
813 [ESTRPIPE
] = TARGET_ESTRPIPE
,
814 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
815 [EUSERS
] = TARGET_EUSERS
,
816 [ENOTSOCK
] = TARGET_ENOTSOCK
,
817 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
818 [EMSGSIZE
] = TARGET_EMSGSIZE
,
819 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
820 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
821 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
822 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
823 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
824 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
825 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
826 [EADDRINUSE
] = TARGET_EADDRINUSE
,
827 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
828 [ENETDOWN
] = TARGET_ENETDOWN
,
829 [ENETUNREACH
] = TARGET_ENETUNREACH
,
830 [ENETRESET
] = TARGET_ENETRESET
,
831 [ECONNABORTED
] = TARGET_ECONNABORTED
,
832 [ECONNRESET
] = TARGET_ECONNRESET
,
833 [ENOBUFS
] = TARGET_ENOBUFS
,
834 [EISCONN
] = TARGET_EISCONN
,
835 [ENOTCONN
] = TARGET_ENOTCONN
,
836 [EUCLEAN
] = TARGET_EUCLEAN
,
837 [ENOTNAM
] = TARGET_ENOTNAM
,
838 [ENAVAIL
] = TARGET_ENAVAIL
,
839 [EISNAM
] = TARGET_EISNAM
,
840 [EREMOTEIO
] = TARGET_EREMOTEIO
,
841 [EDQUOT
] = TARGET_EDQUOT
,
842 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
843 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
844 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
845 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
846 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
847 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
848 [EALREADY
] = TARGET_EALREADY
,
849 [EINPROGRESS
] = TARGET_EINPROGRESS
,
850 [ESTALE
] = TARGET_ESTALE
,
851 [ECANCELED
] = TARGET_ECANCELED
,
852 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
853 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
855 [ENOKEY
] = TARGET_ENOKEY
,
858 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
861 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
864 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
867 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
869 #ifdef ENOTRECOVERABLE
870 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
873 [ENOMSG
] = TARGET_ENOMSG
,
876 [ERFKILL
] = TARGET_ERFKILL
,
879 [EHWPOISON
] = TARGET_EHWPOISON
,
883 static inline int host_to_target_errno(int err
)
885 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
886 host_to_target_errno_table
[err
]) {
887 return host_to_target_errno_table
[err
];
892 static inline int target_to_host_errno(int err
)
894 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
895 target_to_host_errno_table
[err
]) {
896 return target_to_host_errno_table
[err
];
901 static inline abi_long
get_errno(abi_long ret
)
904 return -host_to_target_errno(errno
);
909 const char *target_strerror(int err
)
911 if (err
== TARGET_ERESTARTSYS
) {
912 return "To be restarted";
914 if (err
== TARGET_QEMU_ESIGRETURN
) {
915 return "Successful exit from sigreturn";
918 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
921 return strerror(target_to_host_errno(err
));
924 #define safe_syscall0(type, name) \
925 static type safe_##name(void) \
927 return safe_syscall(__NR_##name); \
930 #define safe_syscall1(type, name, type1, arg1) \
931 static type safe_##name(type1 arg1) \
933 return safe_syscall(__NR_##name, arg1); \
936 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
937 static type safe_##name(type1 arg1, type2 arg2) \
939 return safe_syscall(__NR_##name, arg1, arg2); \
942 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
943 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
945 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
948 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
950 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
952 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
955 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
956 type4, arg4, type5, arg5) \
957 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
960 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
963 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
964 type4, arg4, type5, arg5, type6, arg6) \
965 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
966 type5 arg5, type6 arg6) \
968 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
/* Blocking syscalls routed through the safe_syscall machinery above so
 * that guest signal delivery can interrupt/restart them correctly. */
safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
973 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
974 int, flags
, mode_t
, mode
)
975 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
976 struct rusage
*, rusage
)
977 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
978 int, options
, struct rusage
*, rusage
)
/* Signal-safe wrappers for execve and pselect6 (see safe_syscallN above). */
safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
              fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
982 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
983 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
985 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
986 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
988 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
989 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
/* Signal-safe wrappers for signal-delivery and vectored-I/O syscalls. */
safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
safe_syscall2(int, kill, pid_t, pid, int, sig)
safe_syscall2(int, tkill, int, tid, int, sig)
safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
              unsigned long, pos_l, unsigned long, pos_h)
1000 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
1002 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
1003 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
1004 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
1005 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
/* Signal-safe wrappers for message I/O, flock and sigtimedwait. */
safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
safe_syscall2(int, flock, int, fd, int, operation)
safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
              const struct timespec *, uts, size_t, sigsetsize)
1011 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
1013 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
1014 struct timespec
*, rem
)
1015 #ifdef TARGET_NR_clock_nanosleep
1016 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
1017 const struct timespec
*, req
, struct timespec
*, rem
)
1020 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
1022 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
1023 long, msgtype
, int, flags
)
1024 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
1025 unsigned, nsops
, const struct timespec
*, timeout
)
1027 /* This host kernel architecture uses a single ipc syscall; fake up
1028 * wrappers for the sub-operations to hide this implementation detail.
1029 * Annoyingly we can't include linux/ipc.h to get the constant definitions
1030 * for the call parameter because some structs in there conflict with the
1031 * sys/ipc.h ones. So we just define them here, and rely on them being
1032 * the same for all host architectures.
1034 #define Q_SEMTIMEDOP 4
1037 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
1039 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
1040 void *, ptr
, long, fifth
)
1041 static int safe_msgsnd(int msgid
, const void *msgp
, size_t sz
, int flags
)
1043 return safe_ipc(Q_IPCCALL(0, Q_MSGSND
), msgid
, sz
, flags
, (void *)msgp
, 0);
1045 static int safe_msgrcv(int msgid
, void *msgp
, size_t sz
, long type
, int flags
)
1047 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV
), msgid
, sz
, flags
, msgp
, type
);
1049 static int safe_semtimedop(int semid
, struct sembuf
*tsops
, unsigned nsops
,
1050 const struct timespec
*timeout
)
1052 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP
), semid
, nsops
, 0, tsops
,
1056 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1057 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
1058 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
1059 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
1060 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
1067 /* Similarly for fcntl. Note that callers must always:
1068 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
1069 * use the flock64 struct rather than unsuffixed flock
1070 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
1073 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
1075 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
/* Convert a host socket type value (SOCK_STREAM etc plus the
 * SOCK_CLOEXEC/SOCK_NONBLOCK flag bits) into the target's encoding.
 */
static inline int host_to_target_sock_type(int host_type)
{
    int target_type;

    switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
    case SOCK_DGRAM:
        target_type = TARGET_SOCK_DGRAM;
        break;
    case SOCK_STREAM:
        target_type = TARGET_SOCK_STREAM;
        break;
    default:
        /* Other types have the same numbering on host and target. */
        target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
        break;
    }

#if defined(SOCK_CLOEXEC)
    if (host_type & SOCK_CLOEXEC) {
        target_type |= TARGET_SOCK_CLOEXEC;
    }
#endif

#if defined(SOCK_NONBLOCK)
    if (host_type & SOCK_NONBLOCK) {
        target_type |= TARGET_SOCK_NONBLOCK;
    }
#endif

    return target_type;
}
/* Emulated program-break state, managed by do_brk() below. */
static abi_ulong target_brk;           /* current break */
static abi_ulong target_original_brk;  /* break value at process start */
static abi_ulong brk_page;             /* first host page above the break */

/* Record the initial break value; called once while loading the binary. */
void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)
/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    /* brk(0) is a query: report the current break. */
    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    /* Shrinking below the original break is refused (break unchanged). */
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done... */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contains garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    } else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
/* Read an fd_set of n descriptors from guest memory, converting from the
 * target's abi_ulong bitmap layout into the host fd_set representation.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}
/* As copy_from_user_fdset(), but additionally set *fds_ptr to fds or to
 * NULL when the guest passed a NULL fd_set pointer (target_fds_addr == 0),
 * preserving select()'s NULL-set semantics.
 */
static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}
/* Write a host fd_set of n descriptors back to guest memory in the
 * target's abi_ulong bitmap layout.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
/* Tick frequency of the host kernel's clock_t values (Alpha kernels
 * historically use 1024 Hz, everything else 100 Hz).
 */
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

/* Rescale a host clock_t tick count into target ticks. */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    /* Widen to 64 bits so the intermediate multiply cannot overflow. */
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
/* Copy a host struct rusage into guest memory at target_addr,
 * byte-swapping every field for the target's endianness.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
/* Convert a target rlimit value to the host's rlim_t.  The target's
 * "infinity" sentinel, and any value that does not survive a round-trip
 * through rlim_t (i.e. would be truncated), map to host RLIM_INFINITY.
 */
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    /* Guard against truncation when rlim_t is narrower than abi_ulong. */
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}
/* Convert a host rlim_t to the target representation.  Host infinity,
 * and any value too large to represent in the target's signed type,
 * become TARGET_RLIM_INFINITY.
 */
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
/* Map a target RLIMIT_* resource code to the host's; unknown codes are
 * passed through unchanged.
 */
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
/* Read a struct timeval from guest memory, byte-swapping its fields.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
/* Write a struct timeval to guest memory, byte-swapping its fields.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
/* Read a struct timezone from guest memory, byte-swapping its fields.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
/* Read a struct mq_attr from guest memory, byte-swapping its fields.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}
/* Write a struct mq_attr to guest memory, byte-swapping its fields.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos.
 *
 * Implemented on top of pselect6 (via the safe-syscall machinery so that
 * guest signals can interrupt it); the timeval timeout is converted to and
 * from a timespec around the call, and the fd sets and remaining timeout
 * are copied back to the guest on success, as Linux select() does.
 */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
#if defined(TARGET_WANT_OLD_SYS_SELECT)
/* Handle the "old" select ABI, where a single guest pointer refers to a
 * struct holding all five select arguments; unpack it and forward to
 * do_select().
 */
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
/* Thin wrapper over the host pipe2(); returns -ENOSYS when the host
 * lacks pipe2 support.
 */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}
/* Common implementation of the pipe and pipe2 syscalls.  cpu_env is needed
 * because some targets return the second fd in a CPU register rather than
 * via the user-memory array for the original pipe syscall.
 */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
/* Convert a target ip_mreq/ip_mreqn from guest memory into a host
 * struct ip_mreqn.  The addresses stay in network byte order; only the
 * ifindex (present only in the larger mreqn form) needs swapping.
 */
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
/* Convert a target sockaddr in guest memory into a host struct sockaddr,
 * handling per-family quirks (AF_UNIX path termination, AF_NETLINK and
 * AF_PACKET field byte-swaps).  fd may have a per-fd address translator
 * registered, in which case that takes over entirely.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
/* Copy a host struct sockaddr into guest memory, byte-swapping the family
 * and any per-family fields (netlink pid/groups, packet ifindex/hatype,
 * IPv6 scope id).  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    /* NOTE(review): upstream guards here against len == 0 / NULL addr;
     * those lines are not visible in this extract - confirm against the
     * full source.
     */
    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    /* Only touch sa_family if the (possibly truncated) buffer holds it. */
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
        struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
            (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
/* Convert the ancillary-data (control message) chain of a guest msghdr
 * into host form, writing into the host buffer already attached to msgh.
 * SCM_RIGHTS fd arrays and SCM_CREDENTIALS are converted field by field;
 * anything else is logged and copied verbatim.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            gemu_log("Host cmsg overflow\n");
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
/* Convert the ancillary-data chain of a host msghdr back into the guest's
 * control buffer.  Payloads whose target layout differs from the host one
 * (timestamps, error queue entries, fd arrays, credentials) are converted
 * field by field; unknown payloads are logged and copied raw.  Truncation
 * against the guest buffer is reported via MSG_CTRUNC, mirroring the
 * kernel's put_cmsg() behaviour.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        tgt_len = len;
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                struct errhdr_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                struct errhdr6_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type, &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
/* Byte-swap every field of a netlink message header in place.  The swaps
 * are symmetric, so this works in both host-to-target and target-to-host
 * direction.
 */
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    nlh->nlmsg_pid   = tswap32(nlh->nlmsg_pid);
    nlh->nlmsg_seq   = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
    nlh->nlmsg_type  = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_len   = tswap32(nlh->nlmsg_len);
}
/* Walk a buffer of host-byte-order netlink messages, invoking the
 * supplied payload converter on each and swapping headers to target
 * order.  Note the header is read (nlmsg_len) *before* it is swapped.
 * Stops at NLMSG_DONE/NLMSG_ERROR or on a malformed length.
 */
static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*host_to_target_nlmsg)
                                                       (struct nlmsghdr *))
{
    uint32_t nlmsg_len;
    abi_long ret;

    while (len > sizeof(struct nlmsghdr)) {

        nlmsg_len = nlh->nlmsg_len;
        if (nlmsg_len < sizeof(struct nlmsghdr) ||
            nlmsg_len > len) {
            break;
        }

        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            tswap_nlmsghdr(nlh);
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            tswap_nlmsghdr(&e->msg);
            tswap_nlmsghdr(nlh);
            return 0;
        }
        default:
            ret = host_to_target_nlmsg(nlh);
            if (ret < 0) {
                tswap_nlmsghdr(nlh);
                return ret;
            }
            break;
        }
        tswap_nlmsghdr(nlh);
        len -= NLMSG_ALIGN(nlmsg_len);
        nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
    }
    return 0;
}
/* Walk a buffer of target-byte-order netlink messages: validate the
 * (still-swapped) length, swap each header to host order, then hand the
 * payload to the supplied converter.  Stops at NLMSG_DONE/NLMSG_ERROR
 * or on a malformed length.
 */
static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*target_to_host_nlmsg)
                                                       (struct nlmsghdr *))
{
    int ret;

    while (len > sizeof(struct nlmsghdr)) {
        if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
            tswap32(nlh->nlmsg_len) > len) {
            break;
        }
        tswap_nlmsghdr(nlh);
        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            tswap_nlmsghdr(&e->msg);
            return 0;
        }
        default:
            ret = target_to_host_nlmsg(nlh);
            if (ret < 0) {
                return ret;
            }
        }
        len -= NLMSG_ALIGN(nlh->nlmsg_len);
        nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
    }
    return 0;
}
2123 #ifdef CONFIG_RTNETLINK
/* Walk a host-byte-order netlink attribute list, calling the supplied
 * converter on each attribute's payload and then swapping its header to
 * target order.  'context' is passed through to the converter.
 */
static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
                                               size_t len, void *context,
                                               abi_long (*host_to_target_nlattr)
                                                        (struct nlattr *,
                                                         void *context))
{
    unsigned short nla_len;
    abi_long ret;

    while (len > sizeof(struct nlattr)) {
        nla_len = nlattr->nla_len;
        if (nla_len < sizeof(struct nlattr) ||
            nla_len > len) {
            break;
        }
        ret = host_to_target_nlattr(nlattr, context);
        /* Swap the header only after the payload converter has run,
         * since the converter needs host-order nla_type/nla_len.
         */
        nlattr->nla_len = tswap16(nlattr->nla_len);
        nlattr->nla_type = tswap16(nlattr->nla_type);
        if (ret < 0) {
            return ret;
        }
        len -= NLA_ALIGN(nla_len);
        nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
    }
    return 0;
}
/* As host_to_target_for_each_nlattr(), but for rtattr route attributes
 * (same walk, no context pointer).
 */
static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
                                               size_t len,
                                               abi_long (*host_to_target_rtattr)
                                                        (struct rtattr *))
{
    unsigned short rta_len;
    abi_long ret;

    while (len > sizeof(struct rtattr)) {
        rta_len = rtattr->rta_len;
        if (rta_len < sizeof(struct rtattr) ||
            rta_len > len) {
            break;
        }
        ret = host_to_target_rtattr(rtattr);
        /* Swap the header after the converter, which needs host order. */
        rtattr->rta_len = tswap16(rtattr->rta_len);
        rtattr->rta_type = tswap16(rtattr->rta_type);
        if (ret < 0) {
            return ret;
        }
        len -= RTA_ALIGN(rta_len);
        rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
    }
    return 0;
}
/* Pointer to the payload that follows a netlink attribute header.
 * The addition is done on the char * (inside the cast) rather than on the
 * void * result: arithmetic on void * is a GNU extension, not standard C.
 * The computed address is identical.
 */
#define NLA_DATA(nla) ((void *)((char *)(nla) + NLA_HDRLEN))
/* Byte-swap the payload of an IFLA_BR_* bridge attribute in place,
 * according to each attribute's known width.  uint8_t and raw-byte
 * payloads need no swap; unknown types are logged and left untouched.
 */
static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
                                                  void *context)
{
    uint16_t *u16;
    uint32_t *u32;
    uint64_t *u64;

    switch (nlattr->nla_type) {
    /* no data */
    case QEMU_IFLA_BR_FDB_FLUSH:
        break;
    /* binary */
    case QEMU_IFLA_BR_GROUP_ADDR:
        break;
    /* uint8_t */
    case QEMU_IFLA_BR_VLAN_FILTERING:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
    case QEMU_IFLA_BR_MCAST_ROUTER:
    case QEMU_IFLA_BR_MCAST_SNOOPING:
    case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
    case QEMU_IFLA_BR_MCAST_QUERIER:
    case QEMU_IFLA_BR_NF_CALL_IPTABLES:
    case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
    case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
    case QEMU_IFLA_BR_VLAN_STATS_ENABLED:
    case QEMU_IFLA_BR_MCAST_STATS_ENABLED:
    case QEMU_IFLA_BR_MCAST_IGMP_VERSION:
    case QEMU_IFLA_BR_MCAST_MLD_VERSION:
        break;
    /* uint16_t */
    case QEMU_IFLA_BR_PRIORITY:
    case QEMU_IFLA_BR_VLAN_PROTOCOL:
    case QEMU_IFLA_BR_GROUP_FWD_MASK:
    case QEMU_IFLA_BR_ROOT_PORT:
    case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
        u16 = NLA_DATA(nlattr);
        *u16 = tswap16(*u16);
        break;
    /* uint32_t */
    case QEMU_IFLA_BR_FORWARD_DELAY:
    case QEMU_IFLA_BR_HELLO_TIME:
    case QEMU_IFLA_BR_MAX_AGE:
    case QEMU_IFLA_BR_AGEING_TIME:
    case QEMU_IFLA_BR_STP_STATE:
    case QEMU_IFLA_BR_ROOT_PATH_COST:
    case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
    case QEMU_IFLA_BR_MCAST_HASH_MAX:
    case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
    case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint64_t */
    case QEMU_IFLA_BR_HELLO_TIMER:
    case QEMU_IFLA_BR_TCN_TIMER:
    case QEMU_IFLA_BR_GC_TIMER:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
    case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
    case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
    case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
        u64 = NLA_DATA(nlattr);
        *u64 = tswap64(*u64);
        break;
    /* ifla_bridge_id: uin8_t[] */
    case QEMU_IFLA_BR_ROOT_ID:
    case QEMU_IFLA_BR_BRIDGE_ID:
        break;
    default:
        gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}
/* Byte-swap the payload of an IFLA_BRPORT_* bridge-port attribute in
 * place, according to each attribute's known width.  uint8_t and raw-byte
 * payloads need no swap; unknown types are logged and left untouched.
 */
static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
                                                        void *context)
{
    uint16_t *u16;
    uint32_t *u32;
    uint64_t *u64;

    switch (nlattr->nla_type) {
    /* uint8_t */
    case QEMU_IFLA_BRPORT_STATE:
    case QEMU_IFLA_BRPORT_MODE:
    case QEMU_IFLA_BRPORT_GUARD:
    case QEMU_IFLA_BRPORT_PROTECT:
    case QEMU_IFLA_BRPORT_FAST_LEAVE:
    case QEMU_IFLA_BRPORT_LEARNING:
    case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
    case QEMU_IFLA_BRPORT_PROXYARP:
    case QEMU_IFLA_BRPORT_LEARNING_SYNC:
    case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
    case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
    case QEMU_IFLA_BRPORT_CONFIG_PENDING:
    case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
    case QEMU_IFLA_BRPORT_MCAST_FLOOD:
    case QEMU_IFLA_BRPORT_MCAST_TO_UCAST:
    case QEMU_IFLA_BRPORT_VLAN_TUNNEL:
    case QEMU_IFLA_BRPORT_BCAST_FLOOD:
    case QEMU_IFLA_BRPORT_NEIGH_SUPPRESS:
        break;
    /* uint16_t */
    case QEMU_IFLA_BRPORT_PRIORITY:
    case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
    case QEMU_IFLA_BRPORT_DESIGNATED_COST:
    case QEMU_IFLA_BRPORT_ID:
    case QEMU_IFLA_BRPORT_NO:
    case QEMU_IFLA_BRPORT_GROUP_FWD_MASK:
        u16 = NLA_DATA(nlattr);
        *u16 = tswap16(*u16);
        break;
    /* uint32_t */
    case QEMU_IFLA_BRPORT_COST:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint64_t */
    case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
    case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
    case QEMU_IFLA_BRPORT_HOLD_TIMER:
        u64 = NLA_DATA(nlattr);
        *u64 = tswap64(*u64);
        break;
    /* ifla_bridge_id: uint8_t[] */
    case QEMU_IFLA_BRPORT_ROOT_ID:
    case QEMU_IFLA_BRPORT_BRIDGE_ID:
        break;
    default:
        gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}
2318 struct linkinfo_context
{
2325 static abi_long
host_to_target_data_linkinfo_nlattr(struct nlattr
*nlattr
,
2328 struct linkinfo_context
*li_context
= context
;
2330 switch (nlattr
->nla_type
) {
2332 case QEMU_IFLA_INFO_KIND
:
2333 li_context
->name
= NLA_DATA(nlattr
);
2334 li_context
->len
= nlattr
->nla_len
- NLA_HDRLEN
;
2336 case QEMU_IFLA_INFO_SLAVE_KIND
:
2337 li_context
->slave_name
= NLA_DATA(nlattr
);
2338 li_context
->slave_len
= nlattr
->nla_len
- NLA_HDRLEN
;
2341 case QEMU_IFLA_INFO_XSTATS
:
2342 /* FIXME: only used by CAN */
2345 case QEMU_IFLA_INFO_DATA
:
2346 if (strncmp(li_context
->name
, "bridge",
2347 li_context
->len
) == 0) {
2348 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
2351 host_to_target_data_bridge_nlattr
);
2353 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context
->name
);
2356 case QEMU_IFLA_INFO_SLAVE_DATA
:
2357 if (strncmp(li_context
->slave_name
, "bridge",
2358 li_context
->slave_len
) == 0) {
2359 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
2362 host_to_target_slave_data_bridge_nlattr
);
2364 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2365 li_context
->slave_name
);
2369 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr
->nla_type
);
2376 static abi_long
host_to_target_data_inet_nlattr(struct nlattr
*nlattr
,
2382 switch (nlattr
->nla_type
) {
2383 case QEMU_IFLA_INET_CONF
:
2384 u32
= NLA_DATA(nlattr
);
2385 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u32
);
2387 u32
[i
] = tswap32(u32
[i
]);
2391 gemu_log("Unknown host AF_INET type: %d\n", nlattr
->nla_type
);
2396 static abi_long
host_to_target_data_inet6_nlattr(struct nlattr
*nlattr
,
2401 struct ifla_cacheinfo
*ci
;
2404 switch (nlattr
->nla_type
) {
2406 case QEMU_IFLA_INET6_TOKEN
:
2409 case QEMU_IFLA_INET6_ADDR_GEN_MODE
:
2412 case QEMU_IFLA_INET6_FLAGS
:
2413 u32
= NLA_DATA(nlattr
);
2414 *u32
= tswap32(*u32
);
2417 case QEMU_IFLA_INET6_CONF
:
2418 u32
= NLA_DATA(nlattr
);
2419 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u32
);
2421 u32
[i
] = tswap32(u32
[i
]);
2424 /* ifla_cacheinfo */
2425 case QEMU_IFLA_INET6_CACHEINFO
:
2426 ci
= NLA_DATA(nlattr
);
2427 ci
->max_reasm_len
= tswap32(ci
->max_reasm_len
);
2428 ci
->tstamp
= tswap32(ci
->tstamp
);
2429 ci
->reachable_time
= tswap32(ci
->reachable_time
);
2430 ci
->retrans_time
= tswap32(ci
->retrans_time
);
2433 case QEMU_IFLA_INET6_STATS
:
2434 case QEMU_IFLA_INET6_ICMP6STATS
:
2435 u64
= NLA_DATA(nlattr
);
2436 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u64
);
2438 u64
[i
] = tswap64(u64
[i
]);
2442 gemu_log("Unknown host AF_INET6 type: %d\n", nlattr
->nla_type
);
2447 static abi_long
host_to_target_data_spec_nlattr(struct nlattr
*nlattr
,
2450 switch (nlattr
->nla_type
) {
2452 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2454 host_to_target_data_inet_nlattr
);
2456 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2458 host_to_target_data_inet6_nlattr
);
2460 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr
->nla_type
);
2466 static abi_long
host_to_target_data_xdp_nlattr(struct nlattr
*nlattr
,
2471 switch (nlattr
->nla_type
) {
2473 case QEMU_IFLA_XDP_ATTACHED
:
2476 case QEMU_IFLA_XDP_PROG_ID
:
2477 u32
= NLA_DATA(nlattr
);
2478 *u32
= tswap32(*u32
);
2481 gemu_log("Unknown host XDP type: %d\n", nlattr
->nla_type
);
2487 static abi_long
host_to_target_data_link_rtattr(struct rtattr
*rtattr
)
2490 struct rtnl_link_stats
*st
;
2491 struct rtnl_link_stats64
*st64
;
2492 struct rtnl_link_ifmap
*map
;
2493 struct linkinfo_context li_context
;
2495 switch (rtattr
->rta_type
) {
2497 case QEMU_IFLA_ADDRESS
:
2498 case QEMU_IFLA_BROADCAST
:
2500 case QEMU_IFLA_IFNAME
:
2501 case QEMU_IFLA_QDISC
:
2504 case QEMU_IFLA_OPERSTATE
:
2505 case QEMU_IFLA_LINKMODE
:
2506 case QEMU_IFLA_CARRIER
:
2507 case QEMU_IFLA_PROTO_DOWN
:
2511 case QEMU_IFLA_LINK
:
2512 case QEMU_IFLA_WEIGHT
:
2513 case QEMU_IFLA_TXQLEN
:
2514 case QEMU_IFLA_CARRIER_CHANGES
:
2515 case QEMU_IFLA_NUM_RX_QUEUES
:
2516 case QEMU_IFLA_NUM_TX_QUEUES
:
2517 case QEMU_IFLA_PROMISCUITY
:
2518 case QEMU_IFLA_EXT_MASK
:
2519 case QEMU_IFLA_LINK_NETNSID
:
2520 case QEMU_IFLA_GROUP
:
2521 case QEMU_IFLA_MASTER
:
2522 case QEMU_IFLA_NUM_VF
:
2523 case QEMU_IFLA_GSO_MAX_SEGS
:
2524 case QEMU_IFLA_GSO_MAX_SIZE
:
2525 u32
= RTA_DATA(rtattr
);
2526 *u32
= tswap32(*u32
);
2528 /* struct rtnl_link_stats */
2529 case QEMU_IFLA_STATS
:
2530 st
= RTA_DATA(rtattr
);
2531 st
->rx_packets
= tswap32(st
->rx_packets
);
2532 st
->tx_packets
= tswap32(st
->tx_packets
);
2533 st
->rx_bytes
= tswap32(st
->rx_bytes
);
2534 st
->tx_bytes
= tswap32(st
->tx_bytes
);
2535 st
->rx_errors
= tswap32(st
->rx_errors
);
2536 st
->tx_errors
= tswap32(st
->tx_errors
);
2537 st
->rx_dropped
= tswap32(st
->rx_dropped
);
2538 st
->tx_dropped
= tswap32(st
->tx_dropped
);
2539 st
->multicast
= tswap32(st
->multicast
);
2540 st
->collisions
= tswap32(st
->collisions
);
2542 /* detailed rx_errors: */
2543 st
->rx_length_errors
= tswap32(st
->rx_length_errors
);
2544 st
->rx_over_errors
= tswap32(st
->rx_over_errors
);
2545 st
->rx_crc_errors
= tswap32(st
->rx_crc_errors
);
2546 st
->rx_frame_errors
= tswap32(st
->rx_frame_errors
);
2547 st
->rx_fifo_errors
= tswap32(st
->rx_fifo_errors
);
2548 st
->rx_missed_errors
= tswap32(st
->rx_missed_errors
);
2550 /* detailed tx_errors */
2551 st
->tx_aborted_errors
= tswap32(st
->tx_aborted_errors
);
2552 st
->tx_carrier_errors
= tswap32(st
->tx_carrier_errors
);
2553 st
->tx_fifo_errors
= tswap32(st
->tx_fifo_errors
);
2554 st
->tx_heartbeat_errors
= tswap32(st
->tx_heartbeat_errors
);
2555 st
->tx_window_errors
= tswap32(st
->tx_window_errors
);
2558 st
->rx_compressed
= tswap32(st
->rx_compressed
);
2559 st
->tx_compressed
= tswap32(st
->tx_compressed
);
2561 /* struct rtnl_link_stats64 */
2562 case QEMU_IFLA_STATS64
:
2563 st64
= RTA_DATA(rtattr
);
2564 st64
->rx_packets
= tswap64(st64
->rx_packets
);
2565 st64
->tx_packets
= tswap64(st64
->tx_packets
);
2566 st64
->rx_bytes
= tswap64(st64
->rx_bytes
);
2567 st64
->tx_bytes
= tswap64(st64
->tx_bytes
);
2568 st64
->rx_errors
= tswap64(st64
->rx_errors
);
2569 st64
->tx_errors
= tswap64(st64
->tx_errors
);
2570 st64
->rx_dropped
= tswap64(st64
->rx_dropped
);
2571 st64
->tx_dropped
= tswap64(st64
->tx_dropped
);
2572 st64
->multicast
= tswap64(st64
->multicast
);
2573 st64
->collisions
= tswap64(st64
->collisions
);
2575 /* detailed rx_errors: */
2576 st64
->rx_length_errors
= tswap64(st64
->rx_length_errors
);
2577 st64
->rx_over_errors
= tswap64(st64
->rx_over_errors
);
2578 st64
->rx_crc_errors
= tswap64(st64
->rx_crc_errors
);
2579 st64
->rx_frame_errors
= tswap64(st64
->rx_frame_errors
);
2580 st64
->rx_fifo_errors
= tswap64(st64
->rx_fifo_errors
);
2581 st64
->rx_missed_errors
= tswap64(st64
->rx_missed_errors
);
2583 /* detailed tx_errors */
2584 st64
->tx_aborted_errors
= tswap64(st64
->tx_aborted_errors
);
2585 st64
->tx_carrier_errors
= tswap64(st64
->tx_carrier_errors
);
2586 st64
->tx_fifo_errors
= tswap64(st64
->tx_fifo_errors
);
2587 st64
->tx_heartbeat_errors
= tswap64(st64
->tx_heartbeat_errors
);
2588 st64
->tx_window_errors
= tswap64(st64
->tx_window_errors
);
2591 st64
->rx_compressed
= tswap64(st64
->rx_compressed
);
2592 st64
->tx_compressed
= tswap64(st64
->tx_compressed
);
2594 /* struct rtnl_link_ifmap */
2596 map
= RTA_DATA(rtattr
);
2597 map
->mem_start
= tswap64(map
->mem_start
);
2598 map
->mem_end
= tswap64(map
->mem_end
);
2599 map
->base_addr
= tswap64(map
->base_addr
);
2600 map
->irq
= tswap16(map
->irq
);
2603 case QEMU_IFLA_LINKINFO
:
2604 memset(&li_context
, 0, sizeof(li_context
));
2605 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2607 host_to_target_data_linkinfo_nlattr
);
2608 case QEMU_IFLA_AF_SPEC
:
2609 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2611 host_to_target_data_spec_nlattr
);
2613 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2615 host_to_target_data_xdp_nlattr
);
2617 gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr
->rta_type
);
2623 static abi_long
host_to_target_data_addr_rtattr(struct rtattr
*rtattr
)
2626 struct ifa_cacheinfo
*ci
;
2628 switch (rtattr
->rta_type
) {
2629 /* binary: depends on family type */
2639 u32
= RTA_DATA(rtattr
);
2640 *u32
= tswap32(*u32
);
2642 /* struct ifa_cacheinfo */
2644 ci
= RTA_DATA(rtattr
);
2645 ci
->ifa_prefered
= tswap32(ci
->ifa_prefered
);
2646 ci
->ifa_valid
= tswap32(ci
->ifa_valid
);
2647 ci
->cstamp
= tswap32(ci
->cstamp
);
2648 ci
->tstamp
= tswap32(ci
->tstamp
);
2651 gemu_log("Unknown host IFA type: %d\n", rtattr
->rta_type
);
2657 static abi_long
host_to_target_data_route_rtattr(struct rtattr
*rtattr
)
2660 switch (rtattr
->rta_type
) {
2661 /* binary: depends on family type */
2670 u32
= RTA_DATA(rtattr
);
2671 *u32
= tswap32(*u32
);
2674 gemu_log("Unknown host RTA type: %d\n", rtattr
->rta_type
);
2680 static abi_long
host_to_target_link_rtattr(struct rtattr
*rtattr
,
2681 uint32_t rtattr_len
)
2683 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2684 host_to_target_data_link_rtattr
);
2687 static abi_long
host_to_target_addr_rtattr(struct rtattr
*rtattr
,
2688 uint32_t rtattr_len
)
2690 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2691 host_to_target_data_addr_rtattr
);
2694 static abi_long
host_to_target_route_rtattr(struct rtattr
*rtattr
,
2695 uint32_t rtattr_len
)
2697 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2698 host_to_target_data_route_rtattr
);
2701 static abi_long
host_to_target_data_route(struct nlmsghdr
*nlh
)
2704 struct ifinfomsg
*ifi
;
2705 struct ifaddrmsg
*ifa
;
2708 nlmsg_len
= nlh
->nlmsg_len
;
2709 switch (nlh
->nlmsg_type
) {
2713 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2714 ifi
= NLMSG_DATA(nlh
);
2715 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2716 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2717 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2718 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2719 host_to_target_link_rtattr(IFLA_RTA(ifi
),
2720 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifi
)));
2726 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2727 ifa
= NLMSG_DATA(nlh
);
2728 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2729 host_to_target_addr_rtattr(IFA_RTA(ifa
),
2730 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifa
)));
2736 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2737 rtm
= NLMSG_DATA(nlh
);
2738 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2739 host_to_target_route_rtattr(RTM_RTA(rtm
),
2740 nlmsg_len
- NLMSG_LENGTH(sizeof(*rtm
)));
2744 return -TARGET_EINVAL
;
2749 static inline abi_long
host_to_target_nlmsg_route(struct nlmsghdr
*nlh
,
2752 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_route
);
2755 static abi_long
target_to_host_for_each_rtattr(struct rtattr
*rtattr
,
2757 abi_long (*target_to_host_rtattr
)
2762 while (len
>= sizeof(struct rtattr
)) {
2763 if (tswap16(rtattr
->rta_len
) < sizeof(struct rtattr
) ||
2764 tswap16(rtattr
->rta_len
) > len
) {
2767 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
2768 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
2769 ret
= target_to_host_rtattr(rtattr
);
2773 len
-= RTA_ALIGN(rtattr
->rta_len
);
2774 rtattr
= (struct rtattr
*)(((char *)rtattr
) +
2775 RTA_ALIGN(rtattr
->rta_len
));
2780 static abi_long
target_to_host_data_link_rtattr(struct rtattr
*rtattr
)
2782 switch (rtattr
->rta_type
) {
2784 gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr
->rta_type
);
2790 static abi_long
target_to_host_data_addr_rtattr(struct rtattr
*rtattr
)
2792 switch (rtattr
->rta_type
) {
2793 /* binary: depends on family type */
2798 gemu_log("Unknown target IFA type: %d\n", rtattr
->rta_type
);
2804 static abi_long
target_to_host_data_route_rtattr(struct rtattr
*rtattr
)
2807 switch (rtattr
->rta_type
) {
2808 /* binary: depends on family type */
2816 u32
= RTA_DATA(rtattr
);
2817 *u32
= tswap32(*u32
);
2820 gemu_log("Unknown target RTA type: %d\n", rtattr
->rta_type
);
2826 static void target_to_host_link_rtattr(struct rtattr
*rtattr
,
2827 uint32_t rtattr_len
)
2829 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2830 target_to_host_data_link_rtattr
);
2833 static void target_to_host_addr_rtattr(struct rtattr
*rtattr
,
2834 uint32_t rtattr_len
)
2836 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2837 target_to_host_data_addr_rtattr
);
2840 static void target_to_host_route_rtattr(struct rtattr
*rtattr
,
2841 uint32_t rtattr_len
)
2843 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2844 target_to_host_data_route_rtattr
);
2847 static abi_long
target_to_host_data_route(struct nlmsghdr
*nlh
)
2849 struct ifinfomsg
*ifi
;
2850 struct ifaddrmsg
*ifa
;
2853 switch (nlh
->nlmsg_type
) {
2858 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2859 ifi
= NLMSG_DATA(nlh
);
2860 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2861 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2862 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2863 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2864 target_to_host_link_rtattr(IFLA_RTA(ifi
), nlh
->nlmsg_len
-
2865 NLMSG_LENGTH(sizeof(*ifi
)));
2871 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2872 ifa
= NLMSG_DATA(nlh
);
2873 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2874 target_to_host_addr_rtattr(IFA_RTA(ifa
), nlh
->nlmsg_len
-
2875 NLMSG_LENGTH(sizeof(*ifa
)));
2882 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2883 rtm
= NLMSG_DATA(nlh
);
2884 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2885 target_to_host_route_rtattr(RTM_RTA(rtm
), nlh
->nlmsg_len
-
2886 NLMSG_LENGTH(sizeof(*rtm
)));
2890 return -TARGET_EOPNOTSUPP
;
2895 static abi_long
target_to_host_nlmsg_route(struct nlmsghdr
*nlh
, size_t len
)
2897 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_route
);
2899 #endif /* CONFIG_RTNETLINK */
2901 static abi_long
host_to_target_data_audit(struct nlmsghdr
*nlh
)
2903 switch (nlh
->nlmsg_type
) {
2905 gemu_log("Unknown host audit message type %d\n",
2907 return -TARGET_EINVAL
;
2912 static inline abi_long
host_to_target_nlmsg_audit(struct nlmsghdr
*nlh
,
2915 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_audit
);
2918 static abi_long
target_to_host_data_audit(struct nlmsghdr
*nlh
)
2920 switch (nlh
->nlmsg_type
) {
2922 case AUDIT_FIRST_USER_MSG
... AUDIT_LAST_USER_MSG
:
2923 case AUDIT_FIRST_USER_MSG2
... AUDIT_LAST_USER_MSG2
:
2926 gemu_log("Unknown target audit message type %d\n",
2928 return -TARGET_EINVAL
;
2934 static abi_long
target_to_host_nlmsg_audit(struct nlmsghdr
*nlh
, size_t len
)
2936 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_audit
);
2939 /* do_setsockopt() Must return target values and target errnos. */
2940 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
2941 abi_ulong optval_addr
, socklen_t optlen
)
2945 struct ip_mreqn
*ip_mreq
;
2946 struct ip_mreq_source
*ip_mreq_source
;
2950 /* TCP options all take an 'int' value. */
2951 if (optlen
< sizeof(uint32_t))
2952 return -TARGET_EINVAL
;
2954 if (get_user_u32(val
, optval_addr
))
2955 return -TARGET_EFAULT
;
2956 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2963 case IP_ROUTER_ALERT
:
2967 case IP_MTU_DISCOVER
:
2974 case IP_MULTICAST_TTL
:
2975 case IP_MULTICAST_LOOP
:
2977 if (optlen
>= sizeof(uint32_t)) {
2978 if (get_user_u32(val
, optval_addr
))
2979 return -TARGET_EFAULT
;
2980 } else if (optlen
>= 1) {
2981 if (get_user_u8(val
, optval_addr
))
2982 return -TARGET_EFAULT
;
2984 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2986 case IP_ADD_MEMBERSHIP
:
2987 case IP_DROP_MEMBERSHIP
:
2988 if (optlen
< sizeof (struct target_ip_mreq
) ||
2989 optlen
> sizeof (struct target_ip_mreqn
))
2990 return -TARGET_EINVAL
;
2992 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
2993 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
2994 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
2997 case IP_BLOCK_SOURCE
:
2998 case IP_UNBLOCK_SOURCE
:
2999 case IP_ADD_SOURCE_MEMBERSHIP
:
3000 case IP_DROP_SOURCE_MEMBERSHIP
:
3001 if (optlen
!= sizeof (struct target_ip_mreq_source
))
3002 return -TARGET_EINVAL
;
3004 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
3005 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
3006 unlock_user (ip_mreq_source
, optval_addr
, 0);
3015 case IPV6_MTU_DISCOVER
:
3018 case IPV6_RECVPKTINFO
:
3019 case IPV6_UNICAST_HOPS
:
3021 case IPV6_RECVHOPLIMIT
:
3022 case IPV6_2292HOPLIMIT
:
3025 if (optlen
< sizeof(uint32_t)) {
3026 return -TARGET_EINVAL
;
3028 if (get_user_u32(val
, optval_addr
)) {
3029 return -TARGET_EFAULT
;
3031 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
3032 &val
, sizeof(val
)));
3036 struct in6_pktinfo pki
;
3038 if (optlen
< sizeof(pki
)) {
3039 return -TARGET_EINVAL
;
3042 if (copy_from_user(&pki
, optval_addr
, sizeof(pki
))) {
3043 return -TARGET_EFAULT
;
3046 pki
.ipi6_ifindex
= tswap32(pki
.ipi6_ifindex
);
3048 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
3049 &pki
, sizeof(pki
)));
3060 struct icmp6_filter icmp6f
;
3062 if (optlen
> sizeof(icmp6f
)) {
3063 optlen
= sizeof(icmp6f
);
3066 if (copy_from_user(&icmp6f
, optval_addr
, optlen
)) {
3067 return -TARGET_EFAULT
;
3070 for (val
= 0; val
< 8; val
++) {
3071 icmp6f
.data
[val
] = tswap32(icmp6f
.data
[val
]);
3074 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
3086 /* those take an u32 value */
3087 if (optlen
< sizeof(uint32_t)) {
3088 return -TARGET_EINVAL
;
3091 if (get_user_u32(val
, optval_addr
)) {
3092 return -TARGET_EFAULT
;
3094 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
3095 &val
, sizeof(val
)));
3102 case TARGET_SOL_SOCKET
:
3104 case TARGET_SO_RCVTIMEO
:
3108 optname
= SO_RCVTIMEO
;
3111 if (optlen
!= sizeof(struct target_timeval
)) {
3112 return -TARGET_EINVAL
;
3115 if (copy_from_user_timeval(&tv
, optval_addr
)) {
3116 return -TARGET_EFAULT
;
3119 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
3123 case TARGET_SO_SNDTIMEO
:
3124 optname
= SO_SNDTIMEO
;
3126 case TARGET_SO_ATTACH_FILTER
:
3128 struct target_sock_fprog
*tfprog
;
3129 struct target_sock_filter
*tfilter
;
3130 struct sock_fprog fprog
;
3131 struct sock_filter
*filter
;
3134 if (optlen
!= sizeof(*tfprog
)) {
3135 return -TARGET_EINVAL
;
3137 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
3138 return -TARGET_EFAULT
;
3140 if (!lock_user_struct(VERIFY_READ
, tfilter
,
3141 tswapal(tfprog
->filter
), 0)) {
3142 unlock_user_struct(tfprog
, optval_addr
, 1);
3143 return -TARGET_EFAULT
;
3146 fprog
.len
= tswap16(tfprog
->len
);
3147 filter
= g_try_new(struct sock_filter
, fprog
.len
);
3148 if (filter
== NULL
) {
3149 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
3150 unlock_user_struct(tfprog
, optval_addr
, 1);
3151 return -TARGET_ENOMEM
;
3153 for (i
= 0; i
< fprog
.len
; i
++) {
3154 filter
[i
].code
= tswap16(tfilter
[i
].code
);
3155 filter
[i
].jt
= tfilter
[i
].jt
;
3156 filter
[i
].jf
= tfilter
[i
].jf
;
3157 filter
[i
].k
= tswap32(tfilter
[i
].k
);
3159 fprog
.filter
= filter
;
3161 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
3162 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
3165 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
3166 unlock_user_struct(tfprog
, optval_addr
, 1);
3169 case TARGET_SO_BINDTODEVICE
:
3171 char *dev_ifname
, *addr_ifname
;
3173 if (optlen
> IFNAMSIZ
- 1) {
3174 optlen
= IFNAMSIZ
- 1;
3176 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
3178 return -TARGET_EFAULT
;
3180 optname
= SO_BINDTODEVICE
;
3181 addr_ifname
= alloca(IFNAMSIZ
);
3182 memcpy(addr_ifname
, dev_ifname
, optlen
);
3183 addr_ifname
[optlen
] = 0;
3184 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
3185 addr_ifname
, optlen
));
3186 unlock_user (dev_ifname
, optval_addr
, 0);
3189 /* Options with 'int' argument. */
3190 case TARGET_SO_DEBUG
:
3193 case TARGET_SO_REUSEADDR
:
3194 optname
= SO_REUSEADDR
;
3196 case TARGET_SO_TYPE
:
3199 case TARGET_SO_ERROR
:
3202 case TARGET_SO_DONTROUTE
:
3203 optname
= SO_DONTROUTE
;
3205 case TARGET_SO_BROADCAST
:
3206 optname
= SO_BROADCAST
;
3208 case TARGET_SO_SNDBUF
:
3209 optname
= SO_SNDBUF
;
3211 case TARGET_SO_SNDBUFFORCE
:
3212 optname
= SO_SNDBUFFORCE
;
3214 case TARGET_SO_RCVBUF
:
3215 optname
= SO_RCVBUF
;
3217 case TARGET_SO_RCVBUFFORCE
:
3218 optname
= SO_RCVBUFFORCE
;
3220 case TARGET_SO_KEEPALIVE
:
3221 optname
= SO_KEEPALIVE
;
3223 case TARGET_SO_OOBINLINE
:
3224 optname
= SO_OOBINLINE
;
3226 case TARGET_SO_NO_CHECK
:
3227 optname
= SO_NO_CHECK
;
3229 case TARGET_SO_PRIORITY
:
3230 optname
= SO_PRIORITY
;
3233 case TARGET_SO_BSDCOMPAT
:
3234 optname
= SO_BSDCOMPAT
;
3237 case TARGET_SO_PASSCRED
:
3238 optname
= SO_PASSCRED
;
3240 case TARGET_SO_PASSSEC
:
3241 optname
= SO_PASSSEC
;
3243 case TARGET_SO_TIMESTAMP
:
3244 optname
= SO_TIMESTAMP
;
3246 case TARGET_SO_RCVLOWAT
:
3247 optname
= SO_RCVLOWAT
;
3252 if (optlen
< sizeof(uint32_t))
3253 return -TARGET_EINVAL
;
3255 if (get_user_u32(val
, optval_addr
))
3256 return -TARGET_EFAULT
;
3257 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
3261 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
3262 ret
= -TARGET_ENOPROTOOPT
;
3267 /* do_getsockopt() Must return target values and target errnos. */
3268 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
3269 abi_ulong optval_addr
, abi_ulong optlen
)
3276 case TARGET_SOL_SOCKET
:
3279 /* These don't just return a single integer */
3280 case TARGET_SO_LINGER
:
3281 case TARGET_SO_RCVTIMEO
:
3282 case TARGET_SO_SNDTIMEO
:
3283 case TARGET_SO_PEERNAME
:
3285 case TARGET_SO_PEERCRED
: {
3288 struct target_ucred
*tcr
;
3290 if (get_user_u32(len
, optlen
)) {
3291 return -TARGET_EFAULT
;
3294 return -TARGET_EINVAL
;
3298 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
3306 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
3307 return -TARGET_EFAULT
;
3309 __put_user(cr
.pid
, &tcr
->pid
);
3310 __put_user(cr
.uid
, &tcr
->uid
);
3311 __put_user(cr
.gid
, &tcr
->gid
);
3312 unlock_user_struct(tcr
, optval_addr
, 1);
3313 if (put_user_u32(len
, optlen
)) {
3314 return -TARGET_EFAULT
;
3318 /* Options with 'int' argument. */
3319 case TARGET_SO_DEBUG
:
3322 case TARGET_SO_REUSEADDR
:
3323 optname
= SO_REUSEADDR
;
3325 case TARGET_SO_TYPE
:
3328 case TARGET_SO_ERROR
:
3331 case TARGET_SO_DONTROUTE
:
3332 optname
= SO_DONTROUTE
;
3334 case TARGET_SO_BROADCAST
:
3335 optname
= SO_BROADCAST
;
3337 case TARGET_SO_SNDBUF
:
3338 optname
= SO_SNDBUF
;
3340 case TARGET_SO_RCVBUF
:
3341 optname
= SO_RCVBUF
;
3343 case TARGET_SO_KEEPALIVE
:
3344 optname
= SO_KEEPALIVE
;
3346 case TARGET_SO_OOBINLINE
:
3347 optname
= SO_OOBINLINE
;
3349 case TARGET_SO_NO_CHECK
:
3350 optname
= SO_NO_CHECK
;
3352 case TARGET_SO_PRIORITY
:
3353 optname
= SO_PRIORITY
;
3356 case TARGET_SO_BSDCOMPAT
:
3357 optname
= SO_BSDCOMPAT
;
3360 case TARGET_SO_PASSCRED
:
3361 optname
= SO_PASSCRED
;
3363 case TARGET_SO_TIMESTAMP
:
3364 optname
= SO_TIMESTAMP
;
3366 case TARGET_SO_RCVLOWAT
:
3367 optname
= SO_RCVLOWAT
;
3369 case TARGET_SO_ACCEPTCONN
:
3370 optname
= SO_ACCEPTCONN
;
3377 /* TCP options all take an 'int' value. */
3379 if (get_user_u32(len
, optlen
))
3380 return -TARGET_EFAULT
;
3382 return -TARGET_EINVAL
;
3384 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3387 if (optname
== SO_TYPE
) {
3388 val
= host_to_target_sock_type(val
);
3393 if (put_user_u32(val
, optval_addr
))
3394 return -TARGET_EFAULT
;
3396 if (put_user_u8(val
, optval_addr
))
3397 return -TARGET_EFAULT
;
3399 if (put_user_u32(len
, optlen
))
3400 return -TARGET_EFAULT
;
3407 case IP_ROUTER_ALERT
:
3411 case IP_MTU_DISCOVER
:
3417 case IP_MULTICAST_TTL
:
3418 case IP_MULTICAST_LOOP
:
3419 if (get_user_u32(len
, optlen
))
3420 return -TARGET_EFAULT
;
3422 return -TARGET_EINVAL
;
3424 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3427 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
3429 if (put_user_u32(len
, optlen
)
3430 || put_user_u8(val
, optval_addr
))
3431 return -TARGET_EFAULT
;
3433 if (len
> sizeof(int))
3435 if (put_user_u32(len
, optlen
)
3436 || put_user_u32(val
, optval_addr
))
3437 return -TARGET_EFAULT
;
3441 ret
= -TARGET_ENOPROTOOPT
;
3447 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
3449 ret
= -TARGET_EOPNOTSUPP
;
3455 /* Convert target low/high pair representing file offset into the host
3456 * low/high pair. This function doesn't handle offsets bigger than 64 bits
3457 * as the kernel doesn't handle them either.
3459 static void target_to_host_low_high(abi_ulong tlow
,
3461 unsigned long *hlow
,
3462 unsigned long *hhigh
)
3464 uint64_t off
= tlow
|
3465 ((unsigned long long)thigh
<< TARGET_LONG_BITS
/ 2) <<
3466 TARGET_LONG_BITS
/ 2;
3469 *hhigh
= (off
>> HOST_LONG_BITS
/ 2) >> HOST_LONG_BITS
/ 2;
3472 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
3473 abi_ulong count
, int copy
)
3475 struct target_iovec
*target_vec
;
3477 abi_ulong total_len
, max_len
;
3480 bool bad_address
= false;
3486 if (count
> IOV_MAX
) {
3491 vec
= g_try_new0(struct iovec
, count
);
3497 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3498 count
* sizeof(struct target_iovec
), 1);
3499 if (target_vec
== NULL
) {
3504 /* ??? If host page size > target page size, this will result in a
3505 value larger than what we can actually support. */
3506 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
3509 for (i
= 0; i
< count
; i
++) {
3510 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3511 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3516 } else if (len
== 0) {
3517 /* Zero length pointer is ignored. */
3518 vec
[i
].iov_base
= 0;
3520 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
3521 /* If the first buffer pointer is bad, this is a fault. But
3522 * subsequent bad buffers will result in a partial write; this
3523 * is realized by filling the vector with null pointers and
3525 if (!vec
[i
].iov_base
) {
3536 if (len
> max_len
- total_len
) {
3537 len
= max_len
- total_len
;
3540 vec
[i
].iov_len
= len
;
3544 unlock_user(target_vec
, target_addr
, 0);
3549 if (tswapal(target_vec
[i
].iov_len
) > 0) {
3550 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
3553 unlock_user(target_vec
, target_addr
, 0);
3560 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
3561 abi_ulong count
, int copy
)
3563 struct target_iovec
*target_vec
;
3566 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3567 count
* sizeof(struct target_iovec
), 1);
3569 for (i
= 0; i
< count
; i
++) {
3570 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3571 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3575 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
3577 unlock_user(target_vec
, target_addr
, 0);
3583 static inline int target_to_host_sock_type(int *type
)
3586 int target_type
= *type
;
3588 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
3589 case TARGET_SOCK_DGRAM
:
3590 host_type
= SOCK_DGRAM
;
3592 case TARGET_SOCK_STREAM
:
3593 host_type
= SOCK_STREAM
;
3596 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
3599 if (target_type
& TARGET_SOCK_CLOEXEC
) {
3600 #if defined(SOCK_CLOEXEC)
3601 host_type
|= SOCK_CLOEXEC
;
3603 return -TARGET_EINVAL
;
3606 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3607 #if defined(SOCK_NONBLOCK)
3608 host_type
|= SOCK_NONBLOCK
;
3609 #elif !defined(O_NONBLOCK)
3610 return -TARGET_EINVAL
;
3617 /* Try to emulate socket type flags after socket creation. */
3618 static int sock_flags_fixup(int fd
, int target_type
)
3620 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3621 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3622 int flags
= fcntl(fd
, F_GETFL
);
3623 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
3625 return -TARGET_EINVAL
;
3632 static abi_long
packet_target_to_host_sockaddr(void *host_addr
,
3633 abi_ulong target_addr
,
3636 struct sockaddr
*addr
= host_addr
;
3637 struct target_sockaddr
*target_saddr
;
3639 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
3640 if (!target_saddr
) {
3641 return -TARGET_EFAULT
;
3644 memcpy(addr
, target_saddr
, len
);
3645 addr
->sa_family
= tswap16(target_saddr
->sa_family
);
3646 /* spkt_protocol is big-endian */
3648 unlock_user(target_saddr
, target_addr
, 0);
3652 static TargetFdTrans target_packet_trans
= {
3653 .target_to_host_addr
= packet_target_to_host_sockaddr
,
3656 #ifdef CONFIG_RTNETLINK
3657 static abi_long
netlink_route_target_to_host(void *buf
, size_t len
)
3661 ret
= target_to_host_nlmsg_route(buf
, len
);
3669 static abi_long
netlink_route_host_to_target(void *buf
, size_t len
)
3673 ret
= host_to_target_nlmsg_route(buf
, len
);
3681 static TargetFdTrans target_netlink_route_trans
= {
3682 .target_to_host_data
= netlink_route_target_to_host
,
3683 .host_to_target_data
= netlink_route_host_to_target
,
3685 #endif /* CONFIG_RTNETLINK */
3687 static abi_long
netlink_audit_target_to_host(void *buf
, size_t len
)
3691 ret
= target_to_host_nlmsg_audit(buf
, len
);
3699 static abi_long
netlink_audit_host_to_target(void *buf
, size_t len
)
3703 ret
= host_to_target_nlmsg_audit(buf
, len
);
3711 static TargetFdTrans target_netlink_audit_trans
= {
3712 .target_to_host_data
= netlink_audit_target_to_host
,
3713 .host_to_target_data
= netlink_audit_host_to_target
,
3716 /* do_socket() Must return target values and target errnos. */
3717 static abi_long
do_socket(int domain
, int type
, int protocol
)
3719 int target_type
= type
;
3722 ret
= target_to_host_sock_type(&type
);
3727 if (domain
== PF_NETLINK
&& !(
3728 #ifdef CONFIG_RTNETLINK
3729 protocol
== NETLINK_ROUTE
||
3731 protocol
== NETLINK_KOBJECT_UEVENT
||
3732 protocol
== NETLINK_AUDIT
)) {
3733 return -EPFNOSUPPORT
;
3736 if (domain
== AF_PACKET
||
3737 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
3738 protocol
= tswap16(protocol
);
3741 ret
= get_errno(socket(domain
, type
, protocol
));
3743 ret
= sock_flags_fixup(ret
, target_type
);
3744 if (type
== SOCK_PACKET
) {
3745 /* Manage an obsolete case :
3746 * if socket type is SOCK_PACKET, bind by name
3748 fd_trans_register(ret
, &target_packet_trans
);
3749 } else if (domain
== PF_NETLINK
) {
3751 #ifdef CONFIG_RTNETLINK
3753 fd_trans_register(ret
, &target_netlink_route_trans
);
3756 case NETLINK_KOBJECT_UEVENT
:
3757 /* nothing to do: messages are strings */
3760 fd_trans_register(ret
, &target_netlink_audit_trans
);
3763 g_assert_not_reached();
3770 /* do_bind() Must return target values and target errnos. */
3771 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
3777 if ((int)addrlen
< 0) {
3778 return -TARGET_EINVAL
;
3781 addr
= alloca(addrlen
+1);
3783 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3787 return get_errno(bind(sockfd
, addr
, addrlen
));
3790 /* do_connect() Must return target values and target errnos. */
3791 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
3797 if ((int)addrlen
< 0) {
3798 return -TARGET_EINVAL
;
3801 addr
= alloca(addrlen
+1);
3803 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3807 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
3810 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3811 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
3812 int flags
, int send
)
3818 abi_ulong target_vec
;
3820 if (msgp
->msg_name
) {
3821 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
3822 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
3823 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
3824 tswapal(msgp
->msg_name
),
3826 if (ret
== -TARGET_EFAULT
) {
3827 /* For connected sockets msg_name and msg_namelen must
3828 * be ignored, so returning EFAULT immediately is wrong.
3829 * Instead, pass a bad msg_name to the host kernel, and
3830 * let it decide whether to return EFAULT or not.
3832 msg
.msg_name
= (void *)-1;
3837 msg
.msg_name
= NULL
;
3838 msg
.msg_namelen
= 0;
3840 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3841 msg
.msg_control
= alloca(msg
.msg_controllen
);
3842 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3844 count
= tswapal(msgp
->msg_iovlen
);
3845 target_vec
= tswapal(msgp
->msg_iov
);
3847 if (count
> IOV_MAX
) {
3848 /* sendrcvmsg returns a different errno for this condition than
3849 * readv/writev, so we must catch it here before lock_iovec() does.
3851 ret
= -TARGET_EMSGSIZE
;
3855 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3856 target_vec
, count
, send
);
3858 ret
= -host_to_target_errno(errno
);
3861 msg
.msg_iovlen
= count
;
3865 if (fd_trans_target_to_host_data(fd
)) {
3868 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3869 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3870 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3871 msg
.msg_iov
->iov_len
);
3873 msg
.msg_iov
->iov_base
= host_msg
;
3874 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3878 ret
= target_to_host_cmsg(&msg
, msgp
);
3880 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3884 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3885 if (!is_error(ret
)) {
3887 if (fd_trans_host_to_target_data(fd
)) {
3888 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3891 ret
= host_to_target_cmsg(msgp
, &msg
);
3893 if (!is_error(ret
)) {
3894 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3895 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
3896 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3897 msg
.msg_name
, msg
.msg_namelen
);
3909 unlock_iovec(vec
, target_vec
, count
, !send
);
3914 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3915 int flags
, int send
)
3918 struct target_msghdr
*msgp
;
3920 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3924 return -TARGET_EFAULT
;
3926 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3927 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3931 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3932 * so it might not have this *mmsg-specific flag either.
3934 #ifndef MSG_WAITFORONE
3935 #define MSG_WAITFORONE 0x10000
3938 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3939 unsigned int vlen
, unsigned int flags
,
3942 struct target_mmsghdr
*mmsgp
;
3946 if (vlen
> UIO_MAXIOV
) {
3950 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3952 return -TARGET_EFAULT
;
3955 for (i
= 0; i
< vlen
; i
++) {
3956 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3957 if (is_error(ret
)) {
3960 mmsgp
[i
].msg_len
= tswap32(ret
);
3961 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3962 if (flags
& MSG_WAITFORONE
) {
3963 flags
|= MSG_DONTWAIT
;
3967 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3969 /* Return number of datagrams sent if we sent any at all;
3970 * otherwise return the error.
3978 /* do_accept4() Must return target values and target errnos. */
3979 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3980 abi_ulong target_addrlen_addr
, int flags
)
3987 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3989 if (target_addr
== 0) {
3990 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3993 /* linux returns EINVAL if addrlen pointer is invalid */
3994 if (get_user_u32(addrlen
, target_addrlen_addr
))
3995 return -TARGET_EINVAL
;
3997 if ((int)addrlen
< 0) {
3998 return -TARGET_EINVAL
;
4001 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
4002 return -TARGET_EINVAL
;
4004 addr
= alloca(addrlen
);
4006 ret
= get_errno(safe_accept4(fd
, addr
, &addrlen
, host_flags
));
4007 if (!is_error(ret
)) {
4008 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
4009 if (put_user_u32(addrlen
, target_addrlen_addr
))
4010 ret
= -TARGET_EFAULT
;
4015 /* do_getpeername() Must return target values and target errnos. */
4016 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
4017 abi_ulong target_addrlen_addr
)
4023 if (get_user_u32(addrlen
, target_addrlen_addr
))
4024 return -TARGET_EFAULT
;
4026 if ((int)addrlen
< 0) {
4027 return -TARGET_EINVAL
;
4030 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
4031 return -TARGET_EFAULT
;
4033 addr
= alloca(addrlen
);
4035 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
4036 if (!is_error(ret
)) {
4037 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
4038 if (put_user_u32(addrlen
, target_addrlen_addr
))
4039 ret
= -TARGET_EFAULT
;
4044 /* do_getsockname() Must return target values and target errnos. */
4045 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
4046 abi_ulong target_addrlen_addr
)
4052 if (get_user_u32(addrlen
, target_addrlen_addr
))
4053 return -TARGET_EFAULT
;
4055 if ((int)addrlen
< 0) {
4056 return -TARGET_EINVAL
;
4059 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
4060 return -TARGET_EFAULT
;
4062 addr
= alloca(addrlen
);
4064 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
4065 if (!is_error(ret
)) {
4066 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
4067 if (put_user_u32(addrlen
, target_addrlen_addr
))
4068 ret
= -TARGET_EFAULT
;
4073 /* do_socketpair() Must return target values and target errnos. */
4074 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
4075 abi_ulong target_tab_addr
)
4080 target_to_host_sock_type(&type
);
4082 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
4083 if (!is_error(ret
)) {
4084 if (put_user_s32(tab
[0], target_tab_addr
)
4085 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
4086 ret
= -TARGET_EFAULT
;
4091 /* do_sendto() Must return target values and target errnos. */
4092 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
4093 abi_ulong target_addr
, socklen_t addrlen
)
4097 void *copy_msg
= NULL
;
4100 if ((int)addrlen
< 0) {
4101 return -TARGET_EINVAL
;
4104 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
4106 return -TARGET_EFAULT
;
4107 if (fd_trans_target_to_host_data(fd
)) {
4108 copy_msg
= host_msg
;
4109 host_msg
= g_malloc(len
);
4110 memcpy(host_msg
, copy_msg
, len
);
4111 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
4117 addr
= alloca(addrlen
+1);
4118 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
4122 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
4124 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
4129 host_msg
= copy_msg
;
4131 unlock_user(host_msg
, msg
, 0);
4135 /* do_recvfrom() Must return target values and target errnos. */
4136 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
4137 abi_ulong target_addr
,
4138 abi_ulong target_addrlen
)
4145 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
4147 return -TARGET_EFAULT
;
4149 if (get_user_u32(addrlen
, target_addrlen
)) {
4150 ret
= -TARGET_EFAULT
;
4153 if ((int)addrlen
< 0) {
4154 ret
= -TARGET_EINVAL
;
4157 addr
= alloca(addrlen
);
4158 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
4161 addr
= NULL
; /* To keep compiler quiet. */
4162 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
4164 if (!is_error(ret
)) {
4165 if (fd_trans_host_to_target_data(fd
)) {
4166 ret
= fd_trans_host_to_target_data(fd
)(host_msg
, ret
);
4169 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
4170 if (put_user_u32(addrlen
, target_addrlen
)) {
4171 ret
= -TARGET_EFAULT
;
4175 unlock_user(host_msg
, msg
, len
);
4178 unlock_user(host_msg
, msg
, 0);
4183 #ifdef TARGET_NR_socketcall
4184 /* do_socketcall() must return target values and target errnos. */
4185 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
4187 static const unsigned nargs
[] = { /* number of arguments per operation */
4188 [TARGET_SYS_SOCKET
] = 3, /* domain, type, protocol */
4189 [TARGET_SYS_BIND
] = 3, /* fd, addr, addrlen */
4190 [TARGET_SYS_CONNECT
] = 3, /* fd, addr, addrlen */
4191 [TARGET_SYS_LISTEN
] = 2, /* fd, backlog */
4192 [TARGET_SYS_ACCEPT
] = 3, /* fd, addr, addrlen */
4193 [TARGET_SYS_GETSOCKNAME
] = 3, /* fd, addr, addrlen */
4194 [TARGET_SYS_GETPEERNAME
] = 3, /* fd, addr, addrlen */
4195 [TARGET_SYS_SOCKETPAIR
] = 4, /* domain, type, protocol, tab */
4196 [TARGET_SYS_SEND
] = 4, /* fd, msg, len, flags */
4197 [TARGET_SYS_RECV
] = 4, /* fd, msg, len, flags */
4198 [TARGET_SYS_SENDTO
] = 6, /* fd, msg, len, flags, addr, addrlen */
4199 [TARGET_SYS_RECVFROM
] = 6, /* fd, msg, len, flags, addr, addrlen */
4200 [TARGET_SYS_SHUTDOWN
] = 2, /* fd, how */
4201 [TARGET_SYS_SETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
4202 [TARGET_SYS_GETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
4203 [TARGET_SYS_SENDMSG
] = 3, /* fd, msg, flags */
4204 [TARGET_SYS_RECVMSG
] = 3, /* fd, msg, flags */
4205 [TARGET_SYS_ACCEPT4
] = 4, /* fd, addr, addrlen, flags */
4206 [TARGET_SYS_RECVMMSG
] = 4, /* fd, msgvec, vlen, flags */
4207 [TARGET_SYS_SENDMMSG
] = 4, /* fd, msgvec, vlen, flags */
4209 abi_long a
[6]; /* max 6 args */
4212 /* check the range of the first argument num */
4213 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
4214 if (num
< 1 || num
> TARGET_SYS_SENDMMSG
) {
4215 return -TARGET_EINVAL
;
4217 /* ensure we have space for args */
4218 if (nargs
[num
] > ARRAY_SIZE(a
)) {
4219 return -TARGET_EINVAL
;
4221 /* collect the arguments in a[] according to nargs[] */
4222 for (i
= 0; i
< nargs
[num
]; ++i
) {
4223 if (get_user_ual(a
[i
], vptr
+ i
* sizeof(abi_long
)) != 0) {
4224 return -TARGET_EFAULT
;
4227 /* now when we have the args, invoke the appropriate underlying function */
4229 case TARGET_SYS_SOCKET
: /* domain, type, protocol */
4230 return do_socket(a
[0], a
[1], a
[2]);
4231 case TARGET_SYS_BIND
: /* sockfd, addr, addrlen */
4232 return do_bind(a
[0], a
[1], a
[2]);
4233 case TARGET_SYS_CONNECT
: /* sockfd, addr, addrlen */
4234 return do_connect(a
[0], a
[1], a
[2]);
4235 case TARGET_SYS_LISTEN
: /* sockfd, backlog */
4236 return get_errno(listen(a
[0], a
[1]));
4237 case TARGET_SYS_ACCEPT
: /* sockfd, addr, addrlen */
4238 return do_accept4(a
[0], a
[1], a
[2], 0);
4239 case TARGET_SYS_GETSOCKNAME
: /* sockfd, addr, addrlen */
4240 return do_getsockname(a
[0], a
[1], a
[2]);
4241 case TARGET_SYS_GETPEERNAME
: /* sockfd, addr, addrlen */
4242 return do_getpeername(a
[0], a
[1], a
[2]);
4243 case TARGET_SYS_SOCKETPAIR
: /* domain, type, protocol, tab */
4244 return do_socketpair(a
[0], a
[1], a
[2], a
[3]);
4245 case TARGET_SYS_SEND
: /* sockfd, msg, len, flags */
4246 return do_sendto(a
[0], a
[1], a
[2], a
[3], 0, 0);
4247 case TARGET_SYS_RECV
: /* sockfd, msg, len, flags */
4248 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], 0, 0);
4249 case TARGET_SYS_SENDTO
: /* sockfd, msg, len, flags, addr, addrlen */
4250 return do_sendto(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
4251 case TARGET_SYS_RECVFROM
: /* sockfd, msg, len, flags, addr, addrlen */
4252 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
4253 case TARGET_SYS_SHUTDOWN
: /* sockfd, how */
4254 return get_errno(shutdown(a
[0], a
[1]));
4255 case TARGET_SYS_SETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
4256 return do_setsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
4257 case TARGET_SYS_GETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
4258 return do_getsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
4259 case TARGET_SYS_SENDMSG
: /* sockfd, msg, flags */
4260 return do_sendrecvmsg(a
[0], a
[1], a
[2], 1);
4261 case TARGET_SYS_RECVMSG
: /* sockfd, msg, flags */
4262 return do_sendrecvmsg(a
[0], a
[1], a
[2], 0);
4263 case TARGET_SYS_ACCEPT4
: /* sockfd, addr, addrlen, flags */
4264 return do_accept4(a
[0], a
[1], a
[2], a
[3]);
4265 case TARGET_SYS_RECVMMSG
: /* sockfd, msgvec, vlen, flags */
4266 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 0);
4267 case TARGET_SYS_SENDMMSG
: /* sockfd, msgvec, vlen, flags */
4268 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 1);
4270 gemu_log("Unsupported socketcall: %d\n", num
);
4271 return -TARGET_EINVAL
;
4276 #define N_SHM_REGIONS 32
4278 static struct shm_region
{
4282 } shm_regions
[N_SHM_REGIONS
];
4284 #ifndef TARGET_SEMID64_DS
4285 /* asm-generic version of this struct */
4286 struct target_semid64_ds
4288 struct target_ipc_perm sem_perm
;
4289 abi_ulong sem_otime
;
4290 #if TARGET_ABI_BITS == 32
4291 abi_ulong __unused1
;
4293 abi_ulong sem_ctime
;
4294 #if TARGET_ABI_BITS == 32
4295 abi_ulong __unused2
;
4297 abi_ulong sem_nsems
;
4298 abi_ulong __unused3
;
4299 abi_ulong __unused4
;
4303 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
4304 abi_ulong target_addr
)
4306 struct target_ipc_perm
*target_ip
;
4307 struct target_semid64_ds
*target_sd
;
4309 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4310 return -TARGET_EFAULT
;
4311 target_ip
= &(target_sd
->sem_perm
);
4312 host_ip
->__key
= tswap32(target_ip
->__key
);
4313 host_ip
->uid
= tswap32(target_ip
->uid
);
4314 host_ip
->gid
= tswap32(target_ip
->gid
);
4315 host_ip
->cuid
= tswap32(target_ip
->cuid
);
4316 host_ip
->cgid
= tswap32(target_ip
->cgid
);
4317 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4318 host_ip
->mode
= tswap32(target_ip
->mode
);
4320 host_ip
->mode
= tswap16(target_ip
->mode
);
4322 #if defined(TARGET_PPC)
4323 host_ip
->__seq
= tswap32(target_ip
->__seq
);
4325 host_ip
->__seq
= tswap16(target_ip
->__seq
);
4327 unlock_user_struct(target_sd
, target_addr
, 0);
4331 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
4332 struct ipc_perm
*host_ip
)
4334 struct target_ipc_perm
*target_ip
;
4335 struct target_semid64_ds
*target_sd
;
4337 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4338 return -TARGET_EFAULT
;
4339 target_ip
= &(target_sd
->sem_perm
);
4340 target_ip
->__key
= tswap32(host_ip
->__key
);
4341 target_ip
->uid
= tswap32(host_ip
->uid
);
4342 target_ip
->gid
= tswap32(host_ip
->gid
);
4343 target_ip
->cuid
= tswap32(host_ip
->cuid
);
4344 target_ip
->cgid
= tswap32(host_ip
->cgid
);
4345 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4346 target_ip
->mode
= tswap32(host_ip
->mode
);
4348 target_ip
->mode
= tswap16(host_ip
->mode
);
4350 #if defined(TARGET_PPC)
4351 target_ip
->__seq
= tswap32(host_ip
->__seq
);
4353 target_ip
->__seq
= tswap16(host_ip
->__seq
);
4355 unlock_user_struct(target_sd
, target_addr
, 1);
4359 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
4360 abi_ulong target_addr
)
4362 struct target_semid64_ds
*target_sd
;
4364 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4365 return -TARGET_EFAULT
;
4366 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
4367 return -TARGET_EFAULT
;
4368 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
4369 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
4370 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
4371 unlock_user_struct(target_sd
, target_addr
, 0);
4375 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
4376 struct semid_ds
*host_sd
)
4378 struct target_semid64_ds
*target_sd
;
4380 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4381 return -TARGET_EFAULT
;
4382 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
4383 return -TARGET_EFAULT
;
4384 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
4385 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
4386 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
4387 unlock_user_struct(target_sd
, target_addr
, 1);
4391 struct target_seminfo
{
4404 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
4405 struct seminfo
*host_seminfo
)
4407 struct target_seminfo
*target_seminfo
;
4408 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
4409 return -TARGET_EFAULT
;
4410 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
4411 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
4412 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
4413 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
4414 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
4415 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
4416 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
4417 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
4418 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
4419 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
4420 unlock_user_struct(target_seminfo
, target_addr
, 1);
4426 struct semid_ds
*buf
;
4427 unsigned short *array
;
4428 struct seminfo
*__buf
;
4431 union target_semun
{
4438 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
4439 abi_ulong target_addr
)
4442 unsigned short *array
;
4444 struct semid_ds semid_ds
;
4447 semun
.buf
= &semid_ds
;
4449 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4451 return get_errno(ret
);
4453 nsems
= semid_ds
.sem_nsems
;
4455 *host_array
= g_try_new(unsigned short, nsems
);
4457 return -TARGET_ENOMEM
;
4459 array
= lock_user(VERIFY_READ
, target_addr
,
4460 nsems
*sizeof(unsigned short), 1);
4462 g_free(*host_array
);
4463 return -TARGET_EFAULT
;
4466 for(i
=0; i
<nsems
; i
++) {
4467 __get_user((*host_array
)[i
], &array
[i
]);
4469 unlock_user(array
, target_addr
, 0);
4474 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
4475 unsigned short **host_array
)
4478 unsigned short *array
;
4480 struct semid_ds semid_ds
;
4483 semun
.buf
= &semid_ds
;
4485 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4487 return get_errno(ret
);
4489 nsems
= semid_ds
.sem_nsems
;
4491 array
= lock_user(VERIFY_WRITE
, target_addr
,
4492 nsems
*sizeof(unsigned short), 0);
4494 return -TARGET_EFAULT
;
4496 for(i
=0; i
<nsems
; i
++) {
4497 __put_user((*host_array
)[i
], &array
[i
]);
4499 g_free(*host_array
);
4500 unlock_user(array
, target_addr
, 1);
4505 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
4506 abi_ulong target_arg
)
4508 union target_semun target_su
= { .buf
= target_arg
};
4510 struct semid_ds dsarg
;
4511 unsigned short *array
= NULL
;
4512 struct seminfo seminfo
;
4513 abi_long ret
= -TARGET_EINVAL
;
4520 /* In 64 bit cross-endian situations, we will erroneously pick up
4521 * the wrong half of the union for the "val" element. To rectify
4522 * this, the entire 8-byte structure is byteswapped, followed by
4523 * a swap of the 4 byte val field. In other cases, the data is
4524 * already in proper host byte order. */
4525 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
4526 target_su
.buf
= tswapal(target_su
.buf
);
4527 arg
.val
= tswap32(target_su
.val
);
4529 arg
.val
= target_su
.val
;
4531 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4535 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
4539 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4540 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
4547 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
4551 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4552 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
4558 arg
.__buf
= &seminfo
;
4559 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4560 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
4568 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
4575 struct target_sembuf
{
4576 unsigned short sem_num
;
4581 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
4582 abi_ulong target_addr
,
4585 struct target_sembuf
*target_sembuf
;
4588 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
4589 nsops
*sizeof(struct target_sembuf
), 1);
4591 return -TARGET_EFAULT
;
4593 for(i
=0; i
<nsops
; i
++) {
4594 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
4595 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
4596 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
4599 unlock_user(target_sembuf
, target_addr
, 0);
4604 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
4606 struct sembuf sops
[nsops
];
4608 if (target_to_host_sembuf(sops
, ptr
, nsops
))
4609 return -TARGET_EFAULT
;
4611 return get_errno(safe_semtimedop(semid
, sops
, nsops
, NULL
));
4614 struct target_msqid_ds
4616 struct target_ipc_perm msg_perm
;
4617 abi_ulong msg_stime
;
4618 #if TARGET_ABI_BITS == 32
4619 abi_ulong __unused1
;
4621 abi_ulong msg_rtime
;
4622 #if TARGET_ABI_BITS == 32
4623 abi_ulong __unused2
;
4625 abi_ulong msg_ctime
;
4626 #if TARGET_ABI_BITS == 32
4627 abi_ulong __unused3
;
4629 abi_ulong __msg_cbytes
;
4631 abi_ulong msg_qbytes
;
4632 abi_ulong msg_lspid
;
4633 abi_ulong msg_lrpid
;
4634 abi_ulong __unused4
;
4635 abi_ulong __unused5
;
4638 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
4639 abi_ulong target_addr
)
4641 struct target_msqid_ds
*target_md
;
4643 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
4644 return -TARGET_EFAULT
;
4645 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
4646 return -TARGET_EFAULT
;
4647 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
4648 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
4649 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
4650 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
4651 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
4652 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
4653 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
4654 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
4655 unlock_user_struct(target_md
, target_addr
, 0);
4659 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
4660 struct msqid_ds
*host_md
)
4662 struct target_msqid_ds
*target_md
;
4664 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
4665 return -TARGET_EFAULT
;
4666 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
4667 return -TARGET_EFAULT
;
4668 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
4669 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
4670 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
4671 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
4672 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
4673 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
4674 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
4675 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
4676 unlock_user_struct(target_md
, target_addr
, 1);
4680 struct target_msginfo
{
4688 unsigned short int msgseg
;
4691 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
4692 struct msginfo
*host_msginfo
)
4694 struct target_msginfo
*target_msginfo
;
4695 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
4696 return -TARGET_EFAULT
;
4697 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
4698 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
4699 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
4700 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
4701 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
4702 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
4703 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
4704 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
4705 unlock_user_struct(target_msginfo
, target_addr
, 1);
4709 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
4711 struct msqid_ds dsarg
;
4712 struct msginfo msginfo
;
4713 abi_long ret
= -TARGET_EINVAL
;
4721 if (target_to_host_msqid_ds(&dsarg
,ptr
))
4722 return -TARGET_EFAULT
;
4723 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
4724 if (host_to_target_msqid_ds(ptr
,&dsarg
))
4725 return -TARGET_EFAULT
;
4728 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
4732 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
4733 if (host_to_target_msginfo(ptr
, &msginfo
))
4734 return -TARGET_EFAULT
;
4741 struct target_msgbuf
{
4746 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
4747 ssize_t msgsz
, int msgflg
)
4749 struct target_msgbuf
*target_mb
;
4750 struct msgbuf
*host_mb
;
4754 return -TARGET_EINVAL
;
4757 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
4758 return -TARGET_EFAULT
;
4759 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4761 unlock_user_struct(target_mb
, msgp
, 0);
4762 return -TARGET_ENOMEM
;
4764 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
4765 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
4766 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
4768 unlock_user_struct(target_mb
, msgp
, 0);
4773 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
4774 ssize_t msgsz
, abi_long msgtyp
,
4777 struct target_msgbuf
*target_mb
;
4779 struct msgbuf
*host_mb
;
4783 return -TARGET_EINVAL
;
4786 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
4787 return -TARGET_EFAULT
;
4789 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4791 ret
= -TARGET_ENOMEM
;
4794 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
4797 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
4798 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
4799 if (!target_mtext
) {
4800 ret
= -TARGET_EFAULT
;
4803 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
4804 unlock_user(target_mtext
, target_mtext_addr
, ret
);
4807 target_mb
->mtype
= tswapal(host_mb
->mtype
);
4811 unlock_user_struct(target_mb
, msgp
, 1);
4816 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
4817 abi_ulong target_addr
)
4819 struct target_shmid_ds
*target_sd
;
4821 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4822 return -TARGET_EFAULT
;
4823 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
4824 return -TARGET_EFAULT
;
4825 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4826 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4827 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4828 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4829 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4830 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4831 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4832 unlock_user_struct(target_sd
, target_addr
, 0);
4836 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
4837 struct shmid_ds
*host_sd
)
4839 struct target_shmid_ds
*target_sd
;
4841 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4842 return -TARGET_EFAULT
;
4843 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
4844 return -TARGET_EFAULT
;
4845 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4846 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4847 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4848 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4849 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4850 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4851 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4852 unlock_user_struct(target_sd
, target_addr
, 1);
4856 struct target_shminfo
{
4864 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
4865 struct shminfo
*host_shminfo
)
4867 struct target_shminfo
*target_shminfo
;
4868 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
4869 return -TARGET_EFAULT
;
4870 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
4871 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
4872 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
4873 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
4874 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
4875 unlock_user_struct(target_shminfo
, target_addr
, 1);
4879 struct target_shm_info
{
4884 abi_ulong swap_attempts
;
4885 abi_ulong swap_successes
;
4888 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
4889 struct shm_info
*host_shm_info
)
4891 struct target_shm_info
*target_shm_info
;
4892 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
4893 return -TARGET_EFAULT
;
4894 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
4895 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
4896 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
4897 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
4898 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
4899 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
4900 unlock_user_struct(target_shm_info
, target_addr
, 1);
4904 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
4906 struct shmid_ds dsarg
;
4907 struct shminfo shminfo
;
4908 struct shm_info shm_info
;
4909 abi_long ret
= -TARGET_EINVAL
;
4917 if (target_to_host_shmid_ds(&dsarg
, buf
))
4918 return -TARGET_EFAULT
;
4919 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
4920 if (host_to_target_shmid_ds(buf
, &dsarg
))
4921 return -TARGET_EFAULT
;
4924 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
4925 if (host_to_target_shminfo(buf
, &shminfo
))
4926 return -TARGET_EFAULT
;
4929 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
4930 if (host_to_target_shm_info(buf
, &shm_info
))
4931 return -TARGET_EFAULT
;
4936 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
4943 #ifndef TARGET_FORCE_SHMLBA
4944 /* For most architectures, SHMLBA is the same as the page size;
4945 * some architectures have larger values, in which case they should
4946 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4947 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4948 * and defining its own value for SHMLBA.
4950 * The kernel also permits SHMLBA to be set by the architecture to a
4951 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4952 * this means that addresses are rounded to the large size if
4953 * SHM_RND is set but addresses not aligned to that size are not rejected
4954 * as long as they are at least page-aligned. Since the only architecture
4955 * which uses this is ia64 this code doesn't provide for that oddity.
4957 static inline abi_ulong
target_shmlba(CPUArchState
*cpu_env
)
4959 return TARGET_PAGE_SIZE
;
4963 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
4964 int shmid
, abi_ulong shmaddr
, int shmflg
)
4968 struct shmid_ds shm_info
;
4972 /* find out the length of the shared memory segment */
4973 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4974 if (is_error(ret
)) {
4975 /* can't get length, bail out */
4979 shmlba
= target_shmlba(cpu_env
);
4981 if (shmaddr
& (shmlba
- 1)) {
4982 if (shmflg
& SHM_RND
) {
4983 shmaddr
&= ~(shmlba
- 1);
4985 return -TARGET_EINVAL
;
4988 if (!guest_range_valid(shmaddr
, shm_info
.shm_segsz
)) {
4989 return -TARGET_EINVAL
;
4995 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
4997 abi_ulong mmap_start
;
4999 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
5001 if (mmap_start
== -1) {
5003 host_raddr
= (void *)-1;
5005 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
5008 if (host_raddr
== (void *)-1) {
5010 return get_errno((long)host_raddr
);
5012 raddr
=h2g((unsigned long)host_raddr
);
5014 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
5015 PAGE_VALID
| PAGE_READ
|
5016 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
5018 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
5019 if (!shm_regions
[i
].in_use
) {
5020 shm_regions
[i
].in_use
= true;
5021 shm_regions
[i
].start
= raddr
;
5022 shm_regions
[i
].size
= shm_info
.shm_segsz
;
/* do_shmdt(): detach the SysV shared memory segment attached at guest
 * address shmaddr.  Clears the matching bookkeeping slot in
 * shm_regions[] and drops the guest page flags for the region before
 * issuing the host shmdt().  (Declarations and the return statement
 * are elided in this extract.) */
5032 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
/* Scan the attachment table for the region that starts at shmaddr. */
5039 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
5040 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
/* Free the slot and clear guest page protections over the region. */
5041 shm_regions
[i
].in_use
= false;
5042 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
/* Detach on the host side; g2h() translates guest addr to host ptr. */
5046 rv
= get_errno(shmdt(g2h(shmaddr
)));
5053 #ifdef TARGET_NR_ipc
5054 /* ??? This only works with linear mappings. */
5055 /* do_ipc() must return target values and target errnos. */
5056 static abi_long
do_ipc(CPUArchState
*cpu_env
,
5057 unsigned int call
, abi_long first
,
5058 abi_long second
, abi_long third
,
5059 abi_long ptr
, abi_long fifth
)
5064 version
= call
>> 16;
5069 ret
= do_semop(first
, ptr
, second
);
5073 ret
= get_errno(semget(first
, second
, third
));
5076 case IPCOP_semctl
: {
5077 /* The semun argument to semctl is passed by value, so dereference the
5080 get_user_ual(atptr
, ptr
);
5081 ret
= do_semctl(first
, second
, third
, atptr
);
5086 ret
= get_errno(msgget(first
, second
));
5090 ret
= do_msgsnd(first
, ptr
, second
, third
);
5094 ret
= do_msgctl(first
, second
, ptr
);
5101 struct target_ipc_kludge
{
5106 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
5107 ret
= -TARGET_EFAULT
;
5111 ret
= do_msgrcv(first
, tswapal(tmp
->msgp
), second
, tswapal(tmp
->msgtyp
), third
);
5113 unlock_user_struct(tmp
, ptr
, 0);
5117 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
5126 raddr
= do_shmat(cpu_env
, first
, ptr
, second
);
5127 if (is_error(raddr
))
5128 return get_errno(raddr
);
5129 if (put_user_ual(raddr
, third
))
5130 return -TARGET_EFAULT
;
5134 ret
= -TARGET_EINVAL
;
5139 ret
= do_shmdt(ptr
);
5143 /* IPC_* flag values are the same on all linux platforms */
5144 ret
= get_errno(shmget(first
, second
, third
));
5147 /* IPC_* and SHM_* command values are the same on all linux platforms */
5149 ret
= do_shmctl(first
, second
, ptr
);
5152 gemu_log("Unsupported ipc call: %d (version %d)\n", call
, version
);
5153 ret
= -TARGET_ENOSYS
;
5160 /* kernel structure types definitions */
5162 #define STRUCT(name, ...) STRUCT_ ## name,
5163 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
5165 #include "syscall_types.h"
5169 #undef STRUCT_SPECIAL
5171 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
5172 #define STRUCT_SPECIAL(name)
5173 #include "syscall_types.h"
5175 #undef STRUCT_SPECIAL
5177 typedef struct IOCTLEntry IOCTLEntry
;
5179 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5180 int fd
, int cmd
, abi_long arg
);
5184 unsigned int host_cmd
;
5187 do_ioctl_fn
*do_ioctl
;
5188 const argtype arg_type
[5];
5191 #define IOC_R 0x0001
5192 #define IOC_W 0x0002
5193 #define IOC_RW (IOC_R | IOC_W)
5195 #define MAX_STRUCT_SIZE 4096
5197 #ifdef CONFIG_FIEMAP
5198 /* So fiemap access checks don't overflow on 32 bit systems.
5199 * This is very slightly smaller than the limit imposed by
5200 * the underlying kernel.
5202 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
5203 / sizeof(struct fiemap_extent))
5205 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5206 int fd
, int cmd
, abi_long arg
)
5208 /* The parameter for this ioctl is a struct fiemap followed
5209 * by an array of struct fiemap_extent whose size is set
5210 * in fiemap->fm_extent_count. The array is filled in by the
5213 int target_size_in
, target_size_out
;
5215 const argtype
*arg_type
= ie
->arg_type
;
5216 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
5219 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
5223 assert(arg_type
[0] == TYPE_PTR
);
5224 assert(ie
->access
== IOC_RW
);
5226 target_size_in
= thunk_type_size(arg_type
, 0);
5227 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
5229 return -TARGET_EFAULT
;
5231 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5232 unlock_user(argptr
, arg
, 0);
5233 fm
= (struct fiemap
*)buf_temp
;
5234 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
5235 return -TARGET_EINVAL
;
5238 outbufsz
= sizeof (*fm
) +
5239 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
5241 if (outbufsz
> MAX_STRUCT_SIZE
) {
5242 /* We can't fit all the extents into the fixed size buffer.
5243 * Allocate one that is large enough and use it instead.
5245 fm
= g_try_malloc(outbufsz
);
5247 return -TARGET_ENOMEM
;
5249 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
5252 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, fm
));
5253 if (!is_error(ret
)) {
5254 target_size_out
= target_size_in
;
5255 /* An extent_count of 0 means we were only counting the extents
5256 * so there are no structs to copy
5258 if (fm
->fm_extent_count
!= 0) {
5259 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
5261 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
5263 ret
= -TARGET_EFAULT
;
5265 /* Convert the struct fiemap */
5266 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
5267 if (fm
->fm_extent_count
!= 0) {
5268 p
= argptr
+ target_size_in
;
5269 /* ...and then all the struct fiemap_extents */
5270 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
5271 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
5276 unlock_user(argptr
, arg
, target_size_out
);
5286 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5287 int fd
, int cmd
, abi_long arg
)
5289 const argtype
*arg_type
= ie
->arg_type
;
5293 struct ifconf
*host_ifconf
;
5295 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
5296 int target_ifreq_size
;
5301 abi_long target_ifc_buf
;
5305 assert(arg_type
[0] == TYPE_PTR
);
5306 assert(ie
->access
== IOC_RW
);
5309 target_size
= thunk_type_size(arg_type
, 0);
5311 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5313 return -TARGET_EFAULT
;
5314 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5315 unlock_user(argptr
, arg
, 0);
5317 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
5318 target_ifc_len
= host_ifconf
->ifc_len
;
5319 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
5321 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
5322 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
5323 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
5325 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
5326 if (outbufsz
> MAX_STRUCT_SIZE
) {
5327 /* We can't fit all the extents into the fixed size buffer.
5328 * Allocate one that is large enough and use it instead.
5330 host_ifconf
= malloc(outbufsz
);
5332 return -TARGET_ENOMEM
;
5334 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
5337 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
5339 host_ifconf
->ifc_len
= host_ifc_len
;
5340 host_ifconf
->ifc_buf
= host_ifc_buf
;
5342 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
5343 if (!is_error(ret
)) {
5344 /* convert host ifc_len to target ifc_len */
5346 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
5347 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
5348 host_ifconf
->ifc_len
= target_ifc_len
;
5350 /* restore target ifc_buf */
5352 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
5354 /* copy struct ifconf to target user */
5356 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5358 return -TARGET_EFAULT
;
5359 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
5360 unlock_user(argptr
, arg
, target_size
);
5362 /* copy ifreq[] to target user */
5364 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
5365 for (i
= 0; i
< nb_ifreq
; i
++) {
5366 thunk_convert(argptr
+ i
* target_ifreq_size
,
5367 host_ifc_buf
+ i
* sizeof(struct ifreq
),
5368 ifreq_arg_type
, THUNK_TARGET
);
5370 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
5380 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5381 int cmd
, abi_long arg
)
5384 struct dm_ioctl
*host_dm
;
5385 abi_long guest_data
;
5386 uint32_t guest_data_size
;
5388 const argtype
*arg_type
= ie
->arg_type
;
5390 void *big_buf
= NULL
;
5394 target_size
= thunk_type_size(arg_type
, 0);
5395 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5397 ret
= -TARGET_EFAULT
;
5400 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5401 unlock_user(argptr
, arg
, 0);
5403 /* buf_temp is too small, so fetch things into a bigger buffer */
5404 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
5405 memcpy(big_buf
, buf_temp
, target_size
);
5409 guest_data
= arg
+ host_dm
->data_start
;
5410 if ((guest_data
- arg
) < 0) {
5411 ret
= -TARGET_EINVAL
;
5414 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5415 host_data
= (char*)host_dm
+ host_dm
->data_start
;
5417 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
5419 ret
= -TARGET_EFAULT
;
5423 switch (ie
->host_cmd
) {
5425 case DM_LIST_DEVICES
:
5428 case DM_DEV_SUSPEND
:
5431 case DM_TABLE_STATUS
:
5432 case DM_TABLE_CLEAR
:
5434 case DM_LIST_VERSIONS
:
5438 case DM_DEV_SET_GEOMETRY
:
5439 /* data contains only strings */
5440 memcpy(host_data
, argptr
, guest_data_size
);
5443 memcpy(host_data
, argptr
, guest_data_size
);
5444 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
5448 void *gspec
= argptr
;
5449 void *cur_data
= host_data
;
5450 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5451 int spec_size
= thunk_type_size(arg_type
, 0);
5454 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5455 struct dm_target_spec
*spec
= cur_data
;
5459 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
5460 slen
= strlen((char*)gspec
+ spec_size
) + 1;
5462 spec
->next
= sizeof(*spec
) + slen
;
5463 strcpy((char*)&spec
[1], gspec
+ spec_size
);
5465 cur_data
+= spec
->next
;
5470 ret
= -TARGET_EINVAL
;
5471 unlock_user(argptr
, guest_data
, 0);
5474 unlock_user(argptr
, guest_data
, 0);
5476 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5477 if (!is_error(ret
)) {
5478 guest_data
= arg
+ host_dm
->data_start
;
5479 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5480 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
5481 switch (ie
->host_cmd
) {
5486 case DM_DEV_SUSPEND
:
5489 case DM_TABLE_CLEAR
:
5491 case DM_DEV_SET_GEOMETRY
:
5492 /* no return data */
5494 case DM_LIST_DEVICES
:
5496 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
5497 uint32_t remaining_data
= guest_data_size
;
5498 void *cur_data
= argptr
;
5499 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
5500 int nl_size
= 12; /* can't use thunk_size due to alignment */
5503 uint32_t next
= nl
->next
;
5505 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
5507 if (remaining_data
< nl
->next
) {
5508 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5511 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
5512 strcpy(cur_data
+ nl_size
, nl
->name
);
5513 cur_data
+= nl
->next
;
5514 remaining_data
-= nl
->next
;
5518 nl
= (void*)nl
+ next
;
5523 case DM_TABLE_STATUS
:
5525 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
5526 void *cur_data
= argptr
;
5527 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5528 int spec_size
= thunk_type_size(arg_type
, 0);
5531 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5532 uint32_t next
= spec
->next
;
5533 int slen
= strlen((char*)&spec
[1]) + 1;
5534 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
5535 if (guest_data_size
< spec
->next
) {
5536 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5539 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
5540 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
5541 cur_data
= argptr
+ spec
->next
;
5542 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
5548 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
5549 int count
= *(uint32_t*)hdata
;
5550 uint64_t *hdev
= hdata
+ 8;
5551 uint64_t *gdev
= argptr
+ 8;
5554 *(uint32_t*)argptr
= tswap32(count
);
5555 for (i
= 0; i
< count
; i
++) {
5556 *gdev
= tswap64(*hdev
);
5562 case DM_LIST_VERSIONS
:
5564 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
5565 uint32_t remaining_data
= guest_data_size
;
5566 void *cur_data
= argptr
;
5567 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
5568 int vers_size
= thunk_type_size(arg_type
, 0);
5571 uint32_t next
= vers
->next
;
5573 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
5575 if (remaining_data
< vers
->next
) {
5576 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5579 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
5580 strcpy(cur_data
+ vers_size
, vers
->name
);
5581 cur_data
+= vers
->next
;
5582 remaining_data
-= vers
->next
;
5586 vers
= (void*)vers
+ next
;
5591 unlock_user(argptr
, guest_data
, 0);
5592 ret
= -TARGET_EINVAL
;
5595 unlock_user(argptr
, guest_data
, guest_data_size
);
5597 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5599 ret
= -TARGET_EFAULT
;
5602 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5603 unlock_user(argptr
, arg
, target_size
);
/* do_ioctl_blkpg(): handle BLKPG.  The ioctl argument is a struct
 * blkpg_ioctl_arg whose 'data' member points at a separate struct
 * blkpg_partition; both must be fetched from guest memory and
 * converted before the host ioctl is issued.  (Local declarations,
 * error-path labels and breaks are elided in this extract.) */
5610 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5611 int cmd
, abi_long arg
)
5615 const argtype
*arg_type
= ie
->arg_type
;
5616 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
5619 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
5620 struct blkpg_partition host_part
;
5622 /* Read and convert blkpg */
5624 target_size
= thunk_type_size(arg_type
, 0);
5625 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5627 ret
= -TARGET_EFAULT
;
5630 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5631 unlock_user(argptr
, arg
, 0);
/* Only ADD/DEL partition opcodes are understood; anything else fails
 * with EINVAL before touching blkpg->data. */
5633 switch (host_blkpg
->op
) {
5634 case BLKPG_ADD_PARTITION
:
5635 case BLKPG_DEL_PARTITION
:
5636 /* payload is struct blkpg_partition */
5639 /* Unknown opcode */
5640 ret
= -TARGET_EINVAL
;
5644 /* Read and convert blkpg->data */
5645 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
5646 target_size
= thunk_type_size(part_arg_type
, 0);
5647 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5649 ret
= -TARGET_EFAULT
;
5652 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
5653 unlock_user(argptr
, arg
, 0);
5655 /* Swizzle the data pointer to our local copy and call! */
5656 host_blkpg
->data
= &host_part
;
5657 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
5663 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5664 int fd
, int cmd
, abi_long arg
)
5666 const argtype
*arg_type
= ie
->arg_type
;
5667 const StructEntry
*se
;
5668 const argtype
*field_types
;
5669 const int *dst_offsets
, *src_offsets
;
5672 abi_ulong
*target_rt_dev_ptr
;
5673 unsigned long *host_rt_dev_ptr
;
5677 assert(ie
->access
== IOC_W
);
5678 assert(*arg_type
== TYPE_PTR
);
5680 assert(*arg_type
== TYPE_STRUCT
);
5681 target_size
= thunk_type_size(arg_type
, 0);
5682 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5684 return -TARGET_EFAULT
;
5687 assert(*arg_type
== (int)STRUCT_rtentry
);
5688 se
= struct_entries
+ *arg_type
++;
5689 assert(se
->convert
[0] == NULL
);
5690 /* convert struct here to be able to catch rt_dev string */
5691 field_types
= se
->field_types
;
5692 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
5693 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
5694 for (i
= 0; i
< se
->nb_fields
; i
++) {
5695 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
5696 assert(*field_types
== TYPE_PTRVOID
);
5697 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
5698 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
5699 if (*target_rt_dev_ptr
!= 0) {
5700 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
5701 tswapal(*target_rt_dev_ptr
));
5702 if (!*host_rt_dev_ptr
) {
5703 unlock_user(argptr
, arg
, 0);
5704 return -TARGET_EFAULT
;
5707 *host_rt_dev_ptr
= 0;
5712 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
5713 argptr
+ src_offsets
[i
],
5714 field_types
, THUNK_HOST
);
5716 unlock_user(argptr
, arg
, 0);
5718 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5719 if (*host_rt_dev_ptr
!= 0) {
5720 unlock_user((void *)*host_rt_dev_ptr
,
5721 *target_rt_dev_ptr
, 0);
5726 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5727 int fd
, int cmd
, abi_long arg
)
5729 int sig
= target_to_host_signal(arg
);
5730 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
5734 static abi_long
do_ioctl_tiocgptpeer(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5735 int fd
, int cmd
, abi_long arg
)
5737 int flags
= target_to_host_bitmask(arg
, fcntl_flags_tbl
);
5738 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, flags
));
5742 static IOCTLEntry ioctl_entries
[] = {
5743 #define IOCTL(cmd, access, ...) \
5744 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5745 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5746 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5747 #define IOCTL_IGNORE(cmd) \
5748 { TARGET_ ## cmd, 0, #cmd },
5753 /* ??? Implement proper locking for ioctls. */
5754 /* do_ioctl() Must return target values and target errnos. */
5755 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
5757 const IOCTLEntry
*ie
;
5758 const argtype
*arg_type
;
5760 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
5766 if (ie
->target_cmd
== 0) {
5767 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
5768 return -TARGET_ENOSYS
;
5770 if (ie
->target_cmd
== cmd
)
5774 arg_type
= ie
->arg_type
;
5776 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
5779 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
5780 } else if (!ie
->host_cmd
) {
5781 /* Some architectures define BSD ioctls in their headers
5782 that are not implemented in Linux. */
5783 return -TARGET_ENOSYS
;
5786 switch(arg_type
[0]) {
5789 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
5793 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
5797 target_size
= thunk_type_size(arg_type
, 0);
5798 switch(ie
->access
) {
5800 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5801 if (!is_error(ret
)) {
5802 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5804 return -TARGET_EFAULT
;
5805 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5806 unlock_user(argptr
, arg
, target_size
);
5810 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5812 return -TARGET_EFAULT
;
5813 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5814 unlock_user(argptr
, arg
, 0);
5815 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5819 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5821 return -TARGET_EFAULT
;
5822 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5823 unlock_user(argptr
, arg
, 0);
5824 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5825 if (!is_error(ret
)) {
5826 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5828 return -TARGET_EFAULT
;
5829 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5830 unlock_user(argptr
, arg
, target_size
);
5836 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5837 (long)cmd
, arg_type
[0]);
5838 ret
= -TARGET_ENOSYS
;
5844 static const bitmask_transtbl iflag_tbl
[] = {
5845 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
5846 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
5847 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
5848 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
5849 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
5850 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
5851 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
5852 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
5853 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
5854 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
5855 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
5856 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
5857 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
5858 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
5862 static const bitmask_transtbl oflag_tbl
[] = {
5863 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
5864 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
5865 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
5866 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
5867 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
5868 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
5869 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
5870 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
5871 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
5872 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
5873 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
5874 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
5875 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
5876 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
5877 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
5878 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
5879 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
5880 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
5881 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
5882 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
5883 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
5884 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
5885 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
5886 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
5890 static const bitmask_transtbl cflag_tbl
[] = {
5891 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
5892 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
5893 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
5894 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
5895 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
5896 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
5897 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
5898 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
5899 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
5900 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
5901 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
5902 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
5903 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
5904 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
5905 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
5906 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
5907 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
5908 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
5909 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
5910 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
5911 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
5912 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
5913 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
5914 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
5915 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
5916 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
5917 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
5918 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
5919 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
5920 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
5921 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
5925 static const bitmask_transtbl lflag_tbl
[] = {
5926 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
5927 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
5928 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
5929 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
5930 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
5931 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
5932 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
5933 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
5934 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
5935 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
5936 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
5937 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
5938 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
5939 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
5940 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5944 static void target_to_host_termios (void *dst
, const void *src
)
5946 struct host_termios
*host
= dst
;
5947 const struct target_termios
*target
= src
;
5950 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
5952 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
5954 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
5956 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
5957 host
->c_line
= target
->c_line
;
5959 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
5960 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
5961 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
5962 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
5963 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
5964 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
5965 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
5966 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
5967 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
5968 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
5969 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
5970 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
5971 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
5972 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
5973 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
5974 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
5975 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
5976 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
5979 static void host_to_target_termios (void *dst
, const void *src
)
5981 struct target_termios
*target
= dst
;
5982 const struct host_termios
*host
= src
;
5985 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
5987 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
5989 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
5991 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
5992 target
->c_line
= host
->c_line
;
5994 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
5995 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
5996 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
5997 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
5998 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
5999 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
6000 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
6001 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
6002 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
6003 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
6004 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
6005 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
6006 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
6007 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
6008 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
6009 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
6010 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
6011 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
6014 static const StructEntry struct_termios_def
= {
6015 .convert
= { host_to_target_termios
, target_to_host_termios
},
6016 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
6017 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
6020 static bitmask_transtbl mmap_flags_tbl
[] = {
6021 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
6022 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
6023 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
6024 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
,
6025 MAP_ANONYMOUS
, MAP_ANONYMOUS
},
6026 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
,
6027 MAP_GROWSDOWN
, MAP_GROWSDOWN
},
6028 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
,
6029 MAP_DENYWRITE
, MAP_DENYWRITE
},
6030 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
,
6031 MAP_EXECUTABLE
, MAP_EXECUTABLE
},
6032 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
6033 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
,
6034 MAP_NORESERVE
, MAP_NORESERVE
},
6035 { TARGET_MAP_HUGETLB
, TARGET_MAP_HUGETLB
, MAP_HUGETLB
, MAP_HUGETLB
},
6036 /* MAP_STACK had been ignored by the kernel for quite some time.
6037 Recognize it for the target insofar as we do not want to pass
6038 it through to the host. */
6039 { TARGET_MAP_STACK
, TARGET_MAP_STACK
, 0, 0 },
6043 #if defined(TARGET_I386)
6045 /* NOTE: there is really one LDT for all the threads */
6046 static uint8_t *ldt_table
;
/* read_ldt(): copy the emulated i386 LDT (ldt_table) out to guest
 * memory at ptr, clamped to bytecount.  (Local declarations, the
 * empty-LDT early return and the final return are elided in this
 * extract.) */
6048 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
6055 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
6056 if (size
> bytecount
)
/* Lock the guest destination buffer for writing before copying. */
6058 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
6060 return -TARGET_EFAULT
;
6061 /* ??? Should this by byteswapped? */
6062 memcpy(p
, ldt_table
, size
);
6063 unlock_user(p
, ptr
, size
);
6067 /* XXX: add locking support */
6068 static abi_long
write_ldt(CPUX86State
*env
,
6069 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
6071 struct target_modify_ldt_ldt_s ldt_info
;
6072 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6073 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
6074 int seg_not_present
, useable
, lm
;
6075 uint32_t *lp
, entry_1
, entry_2
;
6077 if (bytecount
!= sizeof(ldt_info
))
6078 return -TARGET_EINVAL
;
6079 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
6080 return -TARGET_EFAULT
;
6081 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
6082 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
6083 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
6084 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
6085 unlock_user_struct(target_ldt_info
, ptr
, 0);
6087 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
6088 return -TARGET_EINVAL
;
6089 seg_32bit
= ldt_info
.flags
& 1;
6090 contents
= (ldt_info
.flags
>> 1) & 3;
6091 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
6092 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
6093 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
6094 useable
= (ldt_info
.flags
>> 6) & 1;
6098 lm
= (ldt_info
.flags
>> 7) & 1;
6100 if (contents
== 3) {
6102 return -TARGET_EINVAL
;
6103 if (seg_not_present
== 0)
6104 return -TARGET_EINVAL
;
6106 /* allocate the LDT */
6108 env
->ldt
.base
= target_mmap(0,
6109 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
6110 PROT_READ
|PROT_WRITE
,
6111 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
6112 if (env
->ldt
.base
== -1)
6113 return -TARGET_ENOMEM
;
6114 memset(g2h(env
->ldt
.base
), 0,
6115 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
6116 env
->ldt
.limit
= 0xffff;
6117 ldt_table
= g2h(env
->ldt
.base
);
6120 /* NOTE: same code as Linux kernel */
6121 /* Allow LDTs to be cleared by the user. */
6122 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
6125 read_exec_only
== 1 &&
6127 limit_in_pages
== 0 &&
6128 seg_not_present
== 1 &&
6136 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
6137 (ldt_info
.limit
& 0x0ffff);
6138 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
6139 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
6140 (ldt_info
.limit
& 0xf0000) |
6141 ((read_exec_only
^ 1) << 9) |
6143 ((seg_not_present
^ 1) << 15) |
6145 (limit_in_pages
<< 23) |
6149 entry_2
|= (useable
<< 20);
6151 /* Install the new entry ... */
6153 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
6154 lp
[0] = tswap32(entry_1
);
6155 lp
[1] = tswap32(entry_2
);
6159 /* specific and weird i386 syscalls */
6160 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
6161 unsigned long bytecount
)
6167 ret
= read_ldt(ptr
, bytecount
);
6170 ret
= write_ldt(env
, ptr
, bytecount
, 1);
6173 ret
= write_ldt(env
, ptr
, bytecount
, 0);
6176 ret
= -TARGET_ENOSYS
;
6182 #if defined(TARGET_I386) && defined(TARGET_ABI32)
6183 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6185 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
6186 struct target_modify_ldt_ldt_s ldt_info
;
6187 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6188 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
6189 int seg_not_present
, useable
, lm
;
6190 uint32_t *lp
, entry_1
, entry_2
;
6193 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6194 if (!target_ldt_info
)
6195 return -TARGET_EFAULT
;
6196 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
6197 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
6198 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
6199 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
6200 if (ldt_info
.entry_number
== -1) {
6201 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
6202 if (gdt_table
[i
] == 0) {
6203 ldt_info
.entry_number
= i
;
6204 target_ldt_info
->entry_number
= tswap32(i
);
6209 unlock_user_struct(target_ldt_info
, ptr
, 1);
6211 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
6212 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
6213 return -TARGET_EINVAL
;
6214 seg_32bit
= ldt_info
.flags
& 1;
6215 contents
= (ldt_info
.flags
>> 1) & 3;
6216 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
6217 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
6218 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
6219 useable
= (ldt_info
.flags
>> 6) & 1;
6223 lm
= (ldt_info
.flags
>> 7) & 1;
6226 if (contents
== 3) {
6227 if (seg_not_present
== 0)
6228 return -TARGET_EINVAL
;
6231 /* NOTE: same code as Linux kernel */
6232 /* Allow LDTs to be cleared by the user. */
6233 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
6234 if ((contents
== 0 &&
6235 read_exec_only
== 1 &&
6237 limit_in_pages
== 0 &&
6238 seg_not_present
== 1 &&
6246 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
6247 (ldt_info
.limit
& 0x0ffff);
6248 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
6249 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
6250 (ldt_info
.limit
& 0xf0000) |
6251 ((read_exec_only
^ 1) << 9) |
6253 ((seg_not_present
^ 1) << 15) |
6255 (limit_in_pages
<< 23) |
6260 /* Install the new entry ... */
6262 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
6263 lp
[0] = tswap32(entry_1
);
6264 lp
[1] = tswap32(entry_2
);
6268 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6270 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6271 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
6272 uint32_t base_addr
, limit
, flags
;
6273 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
6274 int seg_not_present
, useable
, lm
;
6275 uint32_t *lp
, entry_1
, entry_2
;
6277 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6278 if (!target_ldt_info
)
6279 return -TARGET_EFAULT
;
6280 idx
= tswap32(target_ldt_info
->entry_number
);
6281 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
6282 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
6283 unlock_user_struct(target_ldt_info
, ptr
, 1);
6284 return -TARGET_EINVAL
;
6286 lp
= (uint32_t *)(gdt_table
+ idx
);
6287 entry_1
= tswap32(lp
[0]);
6288 entry_2
= tswap32(lp
[1]);
6290 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
6291 contents
= (entry_2
>> 10) & 3;
6292 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
6293 seg_32bit
= (entry_2
>> 22) & 1;
6294 limit_in_pages
= (entry_2
>> 23) & 1;
6295 useable
= (entry_2
>> 20) & 1;
6299 lm
= (entry_2
>> 21) & 1;
6301 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
6302 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
6303 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
6304 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
6305 base_addr
= (entry_1
>> 16) |
6306 (entry_2
& 0xff000000) |
6307 ((entry_2
& 0xff) << 16);
6308 target_ldt_info
->base_addr
= tswapal(base_addr
);
6309 target_ldt_info
->limit
= tswap32(limit
);
6310 target_ldt_info
->flags
= tswap32(flags
);
6311 unlock_user_struct(target_ldt_info
, ptr
, 1);
6314 #endif /* TARGET_I386 && TARGET_ABI32 */
6316 #ifndef TARGET_ABI32
6317 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
6324 case TARGET_ARCH_SET_GS
:
6325 case TARGET_ARCH_SET_FS
:
6326 if (code
== TARGET_ARCH_SET_GS
)
6330 cpu_x86_load_seg(env
, idx
, 0);
6331 env
->segs
[idx
].base
= addr
;
6333 case TARGET_ARCH_GET_GS
:
6334 case TARGET_ARCH_GET_FS
:
6335 if (code
== TARGET_ARCH_GET_GS
)
6339 val
= env
->segs
[idx
].base
;
6340 if (put_user(val
, addr
, abi_ulong
))
6341 ret
= -TARGET_EFAULT
;
6344 ret
= -TARGET_EINVAL
;
6351 #endif /* defined(TARGET_I386) */
6353 #define NEW_STACK_SIZE 0x40000
6356 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
6359 pthread_mutex_t mutex
;
6360 pthread_cond_t cond
;
6363 abi_ulong child_tidptr
;
6364 abi_ulong parent_tidptr
;
6368 static void *clone_func(void *arg
)
6370 new_thread_info
*info
= arg
;
6375 rcu_register_thread();
6376 tcg_register_thread();
6378 cpu
= ENV_GET_CPU(env
);
6380 ts
= (TaskState
*)cpu
->opaque
;
6381 info
->tid
= gettid();
6383 if (info
->child_tidptr
)
6384 put_user_u32(info
->tid
, info
->child_tidptr
);
6385 if (info
->parent_tidptr
)
6386 put_user_u32(info
->tid
, info
->parent_tidptr
);
6387 /* Enable signals. */
6388 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
6389 /* Signal to the parent that we're ready. */
6390 pthread_mutex_lock(&info
->mutex
);
6391 pthread_cond_broadcast(&info
->cond
);
6392 pthread_mutex_unlock(&info
->mutex
);
6393 /* Wait until the parent has finished initializing the tls state. */
6394 pthread_mutex_lock(&clone_lock
);
6395 pthread_mutex_unlock(&clone_lock
);
6401 /* do_fork() Must return host values and target errnos (unlike most
6402 do_*() functions). */
6403 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
6404 abi_ulong parent_tidptr
, target_ulong newtls
,
6405 abi_ulong child_tidptr
)
6407 CPUState
*cpu
= ENV_GET_CPU(env
);
6411 CPUArchState
*new_env
;
6414 flags
&= ~CLONE_IGNORED_FLAGS
;
6416 /* Emulate vfork() with fork() */
6417 if (flags
& CLONE_VFORK
)
6418 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
6420 if (flags
& CLONE_VM
) {
6421 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
6422 new_thread_info info
;
6423 pthread_attr_t attr
;
6425 if (((flags
& CLONE_THREAD_FLAGS
) != CLONE_THREAD_FLAGS
) ||
6426 (flags
& CLONE_INVALID_THREAD_FLAGS
)) {
6427 return -TARGET_EINVAL
;
6430 ts
= g_new0(TaskState
, 1);
6431 init_task_state(ts
);
6433 /* Grab a mutex so that thread setup appears atomic. */
6434 pthread_mutex_lock(&clone_lock
);
6436 /* we create a new CPU instance. */
6437 new_env
= cpu_copy(env
);
6438 /* Init regs that differ from the parent. */
6439 cpu_clone_regs(new_env
, newsp
);
6440 new_cpu
= ENV_GET_CPU(new_env
);
6441 new_cpu
->opaque
= ts
;
6442 ts
->bprm
= parent_ts
->bprm
;
6443 ts
->info
= parent_ts
->info
;
6444 ts
->signal_mask
= parent_ts
->signal_mask
;
6446 if (flags
& CLONE_CHILD_CLEARTID
) {
6447 ts
->child_tidptr
= child_tidptr
;
6450 if (flags
& CLONE_SETTLS
) {
6451 cpu_set_tls (new_env
, newtls
);
6454 memset(&info
, 0, sizeof(info
));
6455 pthread_mutex_init(&info
.mutex
, NULL
);
6456 pthread_mutex_lock(&info
.mutex
);
6457 pthread_cond_init(&info
.cond
, NULL
);
6459 if (flags
& CLONE_CHILD_SETTID
) {
6460 info
.child_tidptr
= child_tidptr
;
6462 if (flags
& CLONE_PARENT_SETTID
) {
6463 info
.parent_tidptr
= parent_tidptr
;
6466 ret
= pthread_attr_init(&attr
);
6467 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
6468 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
6469 /* It is not safe to deliver signals until the child has finished
6470 initializing, so temporarily block all signals. */
6471 sigfillset(&sigmask
);
6472 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
6474 /* If this is our first additional thread, we need to ensure we
6475 * generate code for parallel execution and flush old translations.
6477 if (!parallel_cpus
) {
6478 parallel_cpus
= true;
6482 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
6483 /* TODO: Free new CPU state if thread creation failed. */
6485 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
6486 pthread_attr_destroy(&attr
);
6488 /* Wait for the child to initialize. */
6489 pthread_cond_wait(&info
.cond
, &info
.mutex
);
6494 pthread_mutex_unlock(&info
.mutex
);
6495 pthread_cond_destroy(&info
.cond
);
6496 pthread_mutex_destroy(&info
.mutex
);
6497 pthread_mutex_unlock(&clone_lock
);
6499 /* if no CLONE_VM, we consider it is a fork */
6500 if (flags
& CLONE_INVALID_FORK_FLAGS
) {
6501 return -TARGET_EINVAL
;
6504 /* We can't support custom termination signals */
6505 if ((flags
& CSIGNAL
) != TARGET_SIGCHLD
) {
6506 return -TARGET_EINVAL
;
6509 if (block_signals()) {
6510 return -TARGET_ERESTARTSYS
;
6516 /* Child Process. */
6517 cpu_clone_regs(env
, newsp
);
6519 /* There is a race condition here. The parent process could
6520 theoretically read the TID in the child process before the child
6521 tid is set. This would require using either ptrace
6522 (not implemented) or having *_tidptr to point at a shared memory
6523 mapping. We can't repeat the spinlock hack used above because
6524 the child process gets its own copy of the lock. */
6525 if (flags
& CLONE_CHILD_SETTID
)
6526 put_user_u32(gettid(), child_tidptr
);
6527 if (flags
& CLONE_PARENT_SETTID
)
6528 put_user_u32(gettid(), parent_tidptr
);
6529 ts
= (TaskState
*)cpu
->opaque
;
6530 if (flags
& CLONE_SETTLS
)
6531 cpu_set_tls (env
, newtls
);
6532 if (flags
& CLONE_CHILD_CLEARTID
)
6533 ts
->child_tidptr
= child_tidptr
;
6541 /* warning : doesn't handle linux specific flags... */
6542 static int target_to_host_fcntl_cmd(int cmd
)
6545 case TARGET_F_DUPFD
:
6546 case TARGET_F_GETFD
:
6547 case TARGET_F_SETFD
:
6548 case TARGET_F_GETFL
:
6549 case TARGET_F_SETFL
:
6551 case TARGET_F_GETLK
:
6553 case TARGET_F_SETLK
:
6555 case TARGET_F_SETLKW
:
6557 case TARGET_F_GETOWN
:
6559 case TARGET_F_SETOWN
:
6561 case TARGET_F_GETSIG
:
6563 case TARGET_F_SETSIG
:
6565 #if TARGET_ABI_BITS == 32
6566 case TARGET_F_GETLK64
:
6568 case TARGET_F_SETLK64
:
6570 case TARGET_F_SETLKW64
:
6573 case TARGET_F_SETLEASE
:
6575 case TARGET_F_GETLEASE
:
6577 #ifdef F_DUPFD_CLOEXEC
6578 case TARGET_F_DUPFD_CLOEXEC
:
6579 return F_DUPFD_CLOEXEC
;
6581 case TARGET_F_NOTIFY
:
6584 case TARGET_F_GETOWN_EX
:
6588 case TARGET_F_SETOWN_EX
:
6592 case TARGET_F_SETPIPE_SZ
:
6593 return F_SETPIPE_SZ
;
6594 case TARGET_F_GETPIPE_SZ
:
6595 return F_GETPIPE_SZ
;
6598 return -TARGET_EINVAL
;
6600 return -TARGET_EINVAL
;
6603 #define FLOCK_TRANSTBL \
6605 TRANSTBL_CONVERT(F_RDLCK); \
6606 TRANSTBL_CONVERT(F_WRLCK); \
6607 TRANSTBL_CONVERT(F_UNLCK); \
6608 TRANSTBL_CONVERT(F_EXLCK); \
6609 TRANSTBL_CONVERT(F_SHLCK); \
6612 static int target_to_host_flock(int type
)
6614 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6616 #undef TRANSTBL_CONVERT
6617 return -TARGET_EINVAL
;
6620 static int host_to_target_flock(int type
)
6622 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6624 #undef TRANSTBL_CONVERT
6625 /* if we don't know how to convert the value coming
6626 * from the host we copy to the target field as-is
6631 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
6632 abi_ulong target_flock_addr
)
6634 struct target_flock
*target_fl
;
6637 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6638 return -TARGET_EFAULT
;
6641 __get_user(l_type
, &target_fl
->l_type
);
6642 l_type
= target_to_host_flock(l_type
);
6646 fl
->l_type
= l_type
;
6647 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6648 __get_user(fl
->l_start
, &target_fl
->l_start
);
6649 __get_user(fl
->l_len
, &target_fl
->l_len
);
6650 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6651 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6655 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
6656 const struct flock64
*fl
)
6658 struct target_flock
*target_fl
;
6661 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6662 return -TARGET_EFAULT
;
6665 l_type
= host_to_target_flock(fl
->l_type
);
6666 __put_user(l_type
, &target_fl
->l_type
);
6667 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6668 __put_user(fl
->l_start
, &target_fl
->l_start
);
6669 __put_user(fl
->l_len
, &target_fl
->l_len
);
6670 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6671 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6675 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
6676 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
6678 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6679 static inline abi_long
copy_from_user_oabi_flock64(struct flock64
*fl
,
6680 abi_ulong target_flock_addr
)
6682 struct target_oabi_flock64
*target_fl
;
6685 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6686 return -TARGET_EFAULT
;
6689 __get_user(l_type
, &target_fl
->l_type
);
6690 l_type
= target_to_host_flock(l_type
);
6694 fl
->l_type
= l_type
;
6695 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6696 __get_user(fl
->l_start
, &target_fl
->l_start
);
6697 __get_user(fl
->l_len
, &target_fl
->l_len
);
6698 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6699 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6703 static inline abi_long
copy_to_user_oabi_flock64(abi_ulong target_flock_addr
,
6704 const struct flock64
*fl
)
6706 struct target_oabi_flock64
*target_fl
;
6709 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6710 return -TARGET_EFAULT
;
6713 l_type
= host_to_target_flock(fl
->l_type
);
6714 __put_user(l_type
, &target_fl
->l_type
);
6715 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6716 __put_user(fl
->l_start
, &target_fl
->l_start
);
6717 __put_user(fl
->l_len
, &target_fl
->l_len
);
6718 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6719 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6724 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
6725 abi_ulong target_flock_addr
)
6727 struct target_flock64
*target_fl
;
6730 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6731 return -TARGET_EFAULT
;
6734 __get_user(l_type
, &target_fl
->l_type
);
6735 l_type
= target_to_host_flock(l_type
);
6739 fl
->l_type
= l_type
;
6740 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6741 __get_user(fl
->l_start
, &target_fl
->l_start
);
6742 __get_user(fl
->l_len
, &target_fl
->l_len
);
6743 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6744 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6748 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
6749 const struct flock64
*fl
)
6751 struct target_flock64
*target_fl
;
6754 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6755 return -TARGET_EFAULT
;
6758 l_type
= host_to_target_flock(fl
->l_type
);
6759 __put_user(l_type
, &target_fl
->l_type
);
6760 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6761 __put_user(fl
->l_start
, &target_fl
->l_start
);
6762 __put_user(fl
->l_len
, &target_fl
->l_len
);
6763 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6764 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6768 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
6770 struct flock64 fl64
;
6772 struct f_owner_ex fox
;
6773 struct target_f_owner_ex
*target_fox
;
6776 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
6778 if (host_cmd
== -TARGET_EINVAL
)
6782 case TARGET_F_GETLK
:
6783 ret
= copy_from_user_flock(&fl64
, arg
);
6787 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6789 ret
= copy_to_user_flock(arg
, &fl64
);
6793 case TARGET_F_SETLK
:
6794 case TARGET_F_SETLKW
:
6795 ret
= copy_from_user_flock(&fl64
, arg
);
6799 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6802 case TARGET_F_GETLK64
:
6803 ret
= copy_from_user_flock64(&fl64
, arg
);
6807 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6809 ret
= copy_to_user_flock64(arg
, &fl64
);
6812 case TARGET_F_SETLK64
:
6813 case TARGET_F_SETLKW64
:
6814 ret
= copy_from_user_flock64(&fl64
, arg
);
6818 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6821 case TARGET_F_GETFL
:
6822 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6824 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
6828 case TARGET_F_SETFL
:
6829 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
6830 target_to_host_bitmask(arg
,
6835 case TARGET_F_GETOWN_EX
:
6836 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6838 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
6839 return -TARGET_EFAULT
;
6840 target_fox
->type
= tswap32(fox
.type
);
6841 target_fox
->pid
= tswap32(fox
.pid
);
6842 unlock_user_struct(target_fox
, arg
, 1);
6848 case TARGET_F_SETOWN_EX
:
6849 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
6850 return -TARGET_EFAULT
;
6851 fox
.type
= tswap32(target_fox
->type
);
6852 fox
.pid
= tswap32(target_fox
->pid
);
6853 unlock_user_struct(target_fox
, arg
, 0);
6854 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6858 case TARGET_F_SETOWN
:
6859 case TARGET_F_GETOWN
:
6860 case TARGET_F_SETSIG
:
6861 case TARGET_F_GETSIG
:
6862 case TARGET_F_SETLEASE
:
6863 case TARGET_F_GETLEASE
:
6864 case TARGET_F_SETPIPE_SZ
:
6865 case TARGET_F_GETPIPE_SZ
:
6866 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6870 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
6878 static inline int high2lowuid(int uid
)
6886 static inline int high2lowgid(int gid
)
6894 static inline int low2highuid(int uid
)
6896 if ((int16_t)uid
== -1)
6902 static inline int low2highgid(int gid
)
6904 if ((int16_t)gid
== -1)
6909 static inline int tswapid(int id
)
6914 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6916 #else /* !USE_UID16 */
6917 static inline int high2lowuid(int uid
)
6921 static inline int high2lowgid(int gid
)
6925 static inline int low2highuid(int uid
)
6929 static inline int low2highgid(int gid
)
6933 static inline int tswapid(int id
)
6938 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6940 #endif /* USE_UID16 */
6942 /* We must do direct syscalls for setting UID/GID, because we want to
6943 * implement the Linux system call semantics of "change only for this thread",
6944 * not the libc/POSIX semantics of "change for all threads in process".
6945 * (See http://ewontfix.com/17/ for more details.)
6946 * We use the 32-bit version of the syscalls if present; if it is not
6947 * then either the host architecture supports 32-bit UIDs natively with
6948 * the standard syscall, or the 16-bit UID is the best we can do.
6950 #ifdef __NR_setuid32
6951 #define __NR_sys_setuid __NR_setuid32
6953 #define __NR_sys_setuid __NR_setuid
6955 #ifdef __NR_setgid32
6956 #define __NR_sys_setgid __NR_setgid32
6958 #define __NR_sys_setgid __NR_setgid
6960 #ifdef __NR_setresuid32
6961 #define __NR_sys_setresuid __NR_setresuid32
6963 #define __NR_sys_setresuid __NR_setresuid
6965 #ifdef __NR_setresgid32
6966 #define __NR_sys_setresgid __NR_setresgid32
6968 #define __NR_sys_setresgid __NR_setresgid
6971 _syscall1(int, sys_setuid
, uid_t
, uid
)
6972 _syscall1(int, sys_setgid
, gid_t
, gid
)
6973 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
6974 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
6976 void syscall_init(void)
6979 const argtype
*arg_type
;
6983 thunk_init(STRUCT_MAX
);
6985 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6986 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6987 #include "syscall_types.h"
6989 #undef STRUCT_SPECIAL
6991 /* Build target_to_host_errno_table[] table from
6992 * host_to_target_errno_table[]. */
6993 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
6994 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
6997 /* we patch the ioctl size if necessary. We rely on the fact that
6998 no ioctl has all the bits at '1' in the size field */
7000 while (ie
->target_cmd
!= 0) {
7001 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
7002 TARGET_IOC_SIZEMASK
) {
7003 arg_type
= ie
->arg_type
;
7004 if (arg_type
[0] != TYPE_PTR
) {
7005 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
7010 size
= thunk_type_size(arg_type
, 0);
7011 ie
->target_cmd
= (ie
->target_cmd
&
7012 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
7013 (size
<< TARGET_IOC_SIZESHIFT
);
7016 /* automatic consistency check if same arch */
7017 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7018 (defined(__x86_64__) && defined(TARGET_X86_64))
7019 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
7020 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7021 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#if TARGET_ABI_BITS == 32
/* Combine a register pair into a 64-bit offset, honouring guest
 * endianness (word0/word1 order swaps on big-endian targets). */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABI: the offset already fits in one register; word1 is unused. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
7044 #ifdef TARGET_NR_truncate64
7045 static inline abi_long
target_truncate64(void *cpu_env
, const char *arg1
,
7050 if (regpairs_aligned(cpu_env
, TARGET_NR_truncate64
)) {
7054 return get_errno(truncate64(arg1
, target_offset64(arg2
, arg3
)));
7058 #ifdef TARGET_NR_ftruncate64
7059 static inline abi_long
target_ftruncate64(void *cpu_env
, abi_long arg1
,
7064 if (regpairs_aligned(cpu_env
, TARGET_NR_ftruncate64
)) {
7068 return get_errno(ftruncate64(arg1
, target_offset64(arg2
, arg3
)));
7072 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
7073 abi_ulong target_addr
)
7075 struct target_timespec
*target_ts
;
7077 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
7078 return -TARGET_EFAULT
;
7079 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
7080 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
7081 unlock_user_struct(target_ts
, target_addr
, 0);
7085 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
7086 struct timespec
*host_ts
)
7088 struct target_timespec
*target_ts
;
7090 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
7091 return -TARGET_EFAULT
;
7092 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
7093 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
7094 unlock_user_struct(target_ts
, target_addr
, 1);
7098 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
7099 abi_ulong target_addr
)
7101 struct target_itimerspec
*target_itspec
;
7103 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
7104 return -TARGET_EFAULT
;
7107 host_itspec
->it_interval
.tv_sec
=
7108 tswapal(target_itspec
->it_interval
.tv_sec
);
7109 host_itspec
->it_interval
.tv_nsec
=
7110 tswapal(target_itspec
->it_interval
.tv_nsec
);
7111 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
7112 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
7114 unlock_user_struct(target_itspec
, target_addr
, 1);
7118 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
7119 struct itimerspec
*host_its
)
7121 struct target_itimerspec
*target_itspec
;
7123 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
7124 return -TARGET_EFAULT
;
7127 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
7128 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
7130 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
7131 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
7133 unlock_user_struct(target_itspec
, target_addr
, 0);
7137 static inline abi_long
target_to_host_timex(struct timex
*host_tx
,
7138 abi_long target_addr
)
7140 struct target_timex
*target_tx
;
7142 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
7143 return -TARGET_EFAULT
;
7146 __get_user(host_tx
->modes
, &target_tx
->modes
);
7147 __get_user(host_tx
->offset
, &target_tx
->offset
);
7148 __get_user(host_tx
->freq
, &target_tx
->freq
);
7149 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7150 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
7151 __get_user(host_tx
->status
, &target_tx
->status
);
7152 __get_user(host_tx
->constant
, &target_tx
->constant
);
7153 __get_user(host_tx
->precision
, &target_tx
->precision
);
7154 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7155 __get_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
7156 __get_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
7157 __get_user(host_tx
->tick
, &target_tx
->tick
);
7158 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7159 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
7160 __get_user(host_tx
->shift
, &target_tx
->shift
);
7161 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
7162 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7163 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7164 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7165 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7166 __get_user(host_tx
->tai
, &target_tx
->tai
);
7168 unlock_user_struct(target_tx
, target_addr
, 0);
7172 static inline abi_long
host_to_target_timex(abi_long target_addr
,
7173 struct timex
*host_tx
)
7175 struct target_timex
*target_tx
;
7177 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
7178 return -TARGET_EFAULT
;
7181 __put_user(host_tx
->modes
, &target_tx
->modes
);
7182 __put_user(host_tx
->offset
, &target_tx
->offset
);
7183 __put_user(host_tx
->freq
, &target_tx
->freq
);
7184 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7185 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
7186 __put_user(host_tx
->status
, &target_tx
->status
);
7187 __put_user(host_tx
->constant
, &target_tx
->constant
);
7188 __put_user(host_tx
->precision
, &target_tx
->precision
);
7189 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7190 __put_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
7191 __put_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
7192 __put_user(host_tx
->tick
, &target_tx
->tick
);
7193 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7194 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
7195 __put_user(host_tx
->shift
, &target_tx
->shift
);
7196 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
7197 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7198 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7199 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7200 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7201 __put_user(host_tx
->tai
, &target_tx
->tai
);
7203 unlock_user_struct(target_tx
, target_addr
, 1);
7208 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
7209 abi_ulong target_addr
)
7211 struct target_sigevent
*target_sevp
;
7213 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
7214 return -TARGET_EFAULT
;
7217 /* This union is awkward on 64 bit systems because it has a 32 bit
7218 * integer and a pointer in it; we follow the conversion approach
7219 * used for handling sigval types in signal.c so the guest should get
7220 * the correct value back even if we did a 64 bit byteswap and it's
7221 * using the 32 bit integer.
7223 host_sevp
->sigev_value
.sival_ptr
=
7224 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
7225 host_sevp
->sigev_signo
=
7226 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
7227 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
7228 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
7230 unlock_user_struct(target_sevp
, target_addr
, 1);
7234 #if defined(TARGET_NR_mlockall)
7235 static inline int target_to_host_mlockall_arg(int arg
)
7239 if (arg
& TARGET_MLOCKALL_MCL_CURRENT
) {
7240 result
|= MCL_CURRENT
;
7242 if (arg
& TARGET_MLOCKALL_MCL_FUTURE
) {
7243 result
|= MCL_FUTURE
;
7249 static inline abi_long
host_to_target_stat64(void *cpu_env
,
7250 abi_ulong target_addr
,
7251 struct stat
*host_st
)
7253 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7254 if (((CPUARMState
*)cpu_env
)->eabi
) {
7255 struct target_eabi_stat64
*target_st
;
7257 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
7258 return -TARGET_EFAULT
;
7259 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
7260 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
7261 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
7262 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7263 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
7265 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
7266 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
7267 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
7268 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
7269 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
7270 __put_user(host_st
->st_size
, &target_st
->st_size
);
7271 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
7272 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
7273 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
7274 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
7275 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
7276 unlock_user_struct(target_st
, target_addr
, 1);
7280 #if defined(TARGET_HAS_STRUCT_STAT64)
7281 struct target_stat64
*target_st
;
7283 struct target_stat
*target_st
;
7286 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
7287 return -TARGET_EFAULT
;
7288 memset(target_st
, 0, sizeof(*target_st
));
7289 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
7290 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
7291 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7292 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
7294 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
7295 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
7296 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
7297 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
7298 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
7299 /* XXX: better use of kernel struct */
7300 __put_user(host_st
->st_size
, &target_st
->st_size
);
7301 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
7302 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
7303 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
7304 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
7305 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
7306 unlock_user_struct(target_st
, target_addr
, 1);
7312 /* ??? Using host futex calls even when target atomic operations
7313 are not really atomic probably breaks things. However implementing
7314 futexes locally would make futexes shared between multiple processes
7315 tricky. However they're probably useless because guest atomic
7316 operations won't work either. */
7317 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
7318 target_ulong uaddr2
, int val3
)
7320 struct timespec ts
, *pts
;
7323 /* ??? We assume FUTEX_* constants are the same on both host
7325 #ifdef FUTEX_CMD_MASK
7326 base_op
= op
& FUTEX_CMD_MASK
;
7332 case FUTEX_WAIT_BITSET
:
7335 target_to_host_timespec(pts
, timeout
);
7339 return get_errno(safe_futex(g2h(uaddr
), op
, tswap32(val
),
7342 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
7344 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
7346 case FUTEX_CMP_REQUEUE
:
7348 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7349 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7350 But the prototype takes a `struct timespec *'; insert casts
7351 to satisfy the compiler. We do not need to tswap TIMEOUT
7352 since it's not compared to guest memory. */
7353 pts
= (struct timespec
*)(uintptr_t) timeout
;
7354 return get_errno(safe_futex(g2h(uaddr
), op
, val
, pts
,
7356 (base_op
== FUTEX_CMP_REQUEUE
7360 return -TARGET_ENOSYS
;
7363 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7364 static abi_long
do_name_to_handle_at(abi_long dirfd
, abi_long pathname
,
7365 abi_long handle
, abi_long mount_id
,
7368 struct file_handle
*target_fh
;
7369 struct file_handle
*fh
;
7373 unsigned int size
, total_size
;
7375 if (get_user_s32(size
, handle
)) {
7376 return -TARGET_EFAULT
;
7379 name
= lock_user_string(pathname
);
7381 return -TARGET_EFAULT
;
7384 total_size
= sizeof(struct file_handle
) + size
;
7385 target_fh
= lock_user(VERIFY_WRITE
, handle
, total_size
, 0);
7387 unlock_user(name
, pathname
, 0);
7388 return -TARGET_EFAULT
;
7391 fh
= g_malloc0(total_size
);
7392 fh
->handle_bytes
= size
;
7394 ret
= get_errno(name_to_handle_at(dirfd
, path(name
), fh
, &mid
, flags
));
7395 unlock_user(name
, pathname
, 0);
7397 /* man name_to_handle_at(2):
7398 * Other than the use of the handle_bytes field, the caller should treat
7399 * the file_handle structure as an opaque data type
7402 memcpy(target_fh
, fh
, total_size
);
7403 target_fh
->handle_bytes
= tswap32(fh
->handle_bytes
);
7404 target_fh
->handle_type
= tswap32(fh
->handle_type
);
7406 unlock_user(target_fh
, handle
, total_size
);
7408 if (put_user_s32(mid
, mount_id
)) {
7409 return -TARGET_EFAULT
;
7417 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7418 static abi_long
do_open_by_handle_at(abi_long mount_fd
, abi_long handle
,
7421 struct file_handle
*target_fh
;
7422 struct file_handle
*fh
;
7423 unsigned int size
, total_size
;
7426 if (get_user_s32(size
, handle
)) {
7427 return -TARGET_EFAULT
;
7430 total_size
= sizeof(struct file_handle
) + size
;
7431 target_fh
= lock_user(VERIFY_READ
, handle
, total_size
, 1);
7433 return -TARGET_EFAULT
;
7436 fh
= g_memdup(target_fh
, total_size
);
7437 fh
->handle_bytes
= size
;
7438 fh
->handle_type
= tswap32(target_fh
->handle_type
);
7440 ret
= get_errno(open_by_handle_at(mount_fd
, fh
,
7441 target_to_host_bitmask(flags
, fcntl_flags_tbl
)));
7445 unlock_user(target_fh
, handle
, total_size
);
7451 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7453 /* signalfd siginfo conversion */
7456 host_to_target_signalfd_siginfo(struct signalfd_siginfo
*tinfo
,
7457 const struct signalfd_siginfo
*info
)
7459 int sig
= host_to_target_signal(info
->ssi_signo
);
7461 /* linux/signalfd.h defines a ssi_addr_lsb
7462 * not defined in sys/signalfd.h but used by some kernels
7465 #ifdef BUS_MCEERR_AO
7466 if (tinfo
->ssi_signo
== SIGBUS
&&
7467 (tinfo
->ssi_code
== BUS_MCEERR_AR
||
7468 tinfo
->ssi_code
== BUS_MCEERR_AO
)) {
7469 uint16_t *ssi_addr_lsb
= (uint16_t *)(&info
->ssi_addr
+ 1);
7470 uint16_t *tssi_addr_lsb
= (uint16_t *)(&tinfo
->ssi_addr
+ 1);
7471 *tssi_addr_lsb
= tswap16(*ssi_addr_lsb
);
7475 tinfo
->ssi_signo
= tswap32(sig
);
7476 tinfo
->ssi_errno
= tswap32(tinfo
->ssi_errno
);
7477 tinfo
->ssi_code
= tswap32(info
->ssi_code
);
7478 tinfo
->ssi_pid
= tswap32(info
->ssi_pid
);
7479 tinfo
->ssi_uid
= tswap32(info
->ssi_uid
);
7480 tinfo
->ssi_fd
= tswap32(info
->ssi_fd
);
7481 tinfo
->ssi_tid
= tswap32(info
->ssi_tid
);
7482 tinfo
->ssi_band
= tswap32(info
->ssi_band
);
7483 tinfo
->ssi_overrun
= tswap32(info
->ssi_overrun
);
7484 tinfo
->ssi_trapno
= tswap32(info
->ssi_trapno
);
7485 tinfo
->ssi_status
= tswap32(info
->ssi_status
);
7486 tinfo
->ssi_int
= tswap32(info
->ssi_int
);
7487 tinfo
->ssi_ptr
= tswap64(info
->ssi_ptr
);
7488 tinfo
->ssi_utime
= tswap64(info
->ssi_utime
);
7489 tinfo
->ssi_stime
= tswap64(info
->ssi_stime
);
7490 tinfo
->ssi_addr
= tswap64(info
->ssi_addr
);
7493 static abi_long
host_to_target_data_signalfd(void *buf
, size_t len
)
7497 for (i
= 0; i
< len
; i
+= sizeof(struct signalfd_siginfo
)) {
7498 host_to_target_signalfd_siginfo(buf
+ i
, buf
+ i
);
7504 static TargetFdTrans target_signalfd_trans
= {
7505 .host_to_target_data
= host_to_target_data_signalfd
,
7508 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
7511 target_sigset_t
*target_mask
;
7515 if (flags
& ~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
)) {
7516 return -TARGET_EINVAL
;
7518 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
7519 return -TARGET_EFAULT
;
7522 target_to_host_sigset(&host_mask
, target_mask
);
7524 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
7526 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
7528 fd_trans_register(ret
, &target_signalfd_trans
);
7531 unlock_user_struct(target_mask
, mask
, 0);
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Low 7 bits hold the terminating signal; swap just those.  */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* The stop signal lives in bits 8-15.  */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
7551 static int open_self_cmdline(void *cpu_env
, int fd
)
7553 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7554 struct linux_binprm
*bprm
= ((TaskState
*)cpu
->opaque
)->bprm
;
7557 for (i
= 0; i
< bprm
->argc
; i
++) {
7558 size_t len
= strlen(bprm
->argv
[i
]) + 1;
7560 if (write(fd
, bprm
->argv
[i
], len
) != len
) {
7568 static int open_self_maps(void *cpu_env
, int fd
)
7570 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7571 TaskState
*ts
= cpu
->opaque
;
7577 fp
= fopen("/proc/self/maps", "r");
7582 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7583 int fields
, dev_maj
, dev_min
, inode
;
7584 uint64_t min
, max
, offset
;
7585 char flag_r
, flag_w
, flag_x
, flag_p
;
7586 char path
[512] = "";
7587 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
7588 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
7589 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
7591 if ((fields
< 10) || (fields
> 11)) {
7594 if (h2g_valid(min
)) {
7595 int flags
= page_get_flags(h2g(min
));
7596 max
= h2g_valid(max
- 1) ? max
: (uintptr_t)g2h(GUEST_ADDR_MAX
) + 1;
7597 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
7600 if (h2g(min
) == ts
->info
->stack_limit
) {
7601 pstrcpy(path
, sizeof(path
), " [stack]");
7603 dprintf(fd
, TARGET_ABI_FMT_lx
"-" TARGET_ABI_FMT_lx
7604 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
7605 h2g(min
), h2g(max
- 1) + 1, flag_r
, flag_w
,
7606 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
7607 path
[0] ? " " : "", path
);
7617 static int open_self_stat(void *cpu_env
, int fd
)
7619 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7620 TaskState
*ts
= cpu
->opaque
;
7621 abi_ulong start_stack
= ts
->info
->start_stack
;
7624 for (i
= 0; i
< 44; i
++) {
7632 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
7633 } else if (i
== 1) {
7635 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
7636 } else if (i
== 27) {
7639 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
7641 /* for the rest, there is MasterCard */
7642 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
7646 if (write(fd
, buf
, len
) != len
) {
7654 static int open_self_auxv(void *cpu_env
, int fd
)
7656 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7657 TaskState
*ts
= cpu
->opaque
;
7658 abi_ulong auxv
= ts
->info
->saved_auxv
;
7659 abi_ulong len
= ts
->info
->auxv_len
;
7663 * Auxiliary vector is stored in target process stack.
7664 * read in whole auxv vector and copy it to file
7666 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
7670 r
= write(fd
, ptr
, len
);
7677 lseek(fd
, 0, SEEK_SET
);
7678 unlock_user(ptr
, auxv
, len
);
/* Return 1 if FILENAME names ENTRY inside this process's own /proc
 * directory — either "/proc/self/ENTRY" or "/proc/<own-pid>/ENTRY" —
 * and 0 otherwise.  Used to decide when procfs accesses must be faked
 * with guest-view contents.  */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}
7708 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7709 static int is_proc(const char *filename
, const char *entry
)
7711 return strcmp(filename
, entry
) == 0;
7714 static int open_net_route(void *cpu_env
, int fd
)
7721 fp
= fopen("/proc/net/route", "r");
7728 read
= getline(&line
, &len
, fp
);
7729 dprintf(fd
, "%s", line
);
7733 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7735 uint32_t dest
, gw
, mask
;
7736 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
7737 sscanf(line
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7738 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
7739 &mask
, &mtu
, &window
, &irtt
);
7740 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7741 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
7742 metric
, tswap32(mask
), mtu
, window
, irtt
);
7752 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
7755 const char *filename
;
7756 int (*fill
)(void *cpu_env
, int fd
);
7757 int (*cmp
)(const char *s1
, const char *s2
);
7759 const struct fake_open
*fake_open
;
7760 static const struct fake_open fakes
[] = {
7761 { "maps", open_self_maps
, is_proc_myself
},
7762 { "stat", open_self_stat
, is_proc_myself
},
7763 { "auxv", open_self_auxv
, is_proc_myself
},
7764 { "cmdline", open_self_cmdline
, is_proc_myself
},
7765 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7766 { "/proc/net/route", open_net_route
, is_proc
},
7768 { NULL
, NULL
, NULL
}
7771 if (is_proc_myself(pathname
, "exe")) {
7772 int execfd
= qemu_getauxval(AT_EXECFD
);
7773 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
7776 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
7777 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
7782 if (fake_open
->filename
) {
7784 char filename
[PATH_MAX
];
7787 /* create temporary file to map stat to */
7788 tmpdir
= getenv("TMPDIR");
7791 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
7792 fd
= mkstemp(filename
);
7798 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
7804 lseek(fd
, 0, SEEK_SET
);
7809 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
7812 #define TIMER_MAGIC 0x0caf0000
7813 #define TIMER_MAGIC_MASK 0xffff0000
7815 /* Convert QEMU provided timer ID back to internal 16bit index format */
7816 static target_timer_t
get_timer_id(abi_long arg
)
7818 target_timer_t timerid
= arg
;
7820 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
7821 return -TARGET_EINVAL
;
7826 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
7827 return -TARGET_EINVAL
;
7833 static abi_long
swap_data_eventfd(void *buf
, size_t len
)
7835 uint64_t *counter
= buf
;
7838 if (len
< sizeof(uint64_t)) {
7842 for (i
= 0; i
< len
; i
+= sizeof(uint64_t)) {
7843 *counter
= tswap64(*counter
);
7850 static TargetFdTrans target_eventfd_trans
= {
7851 .host_to_target_data
= swap_data_eventfd
,
7852 .target_to_host_data
= swap_data_eventfd
,
7855 #if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
7856 (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
7857 defined(__NR_inotify_init1))
7858 static abi_long
host_to_target_data_inotify(void *buf
, size_t len
)
7860 struct inotify_event
*ev
;
7864 for (i
= 0; i
< len
; i
+= sizeof(struct inotify_event
) + name_len
) {
7865 ev
= (struct inotify_event
*)((char *)buf
+ i
);
7868 ev
->wd
= tswap32(ev
->wd
);
7869 ev
->mask
= tswap32(ev
->mask
);
7870 ev
->cookie
= tswap32(ev
->cookie
);
7871 ev
->len
= tswap32(name_len
);
7877 static TargetFdTrans target_inotify_trans
= {
7878 .host_to_target_data
= host_to_target_data_inotify
,
7882 static int target_to_host_cpu_mask(unsigned long *host_mask
,
7884 abi_ulong target_addr
,
7887 unsigned target_bits
= sizeof(abi_ulong
) * 8;
7888 unsigned host_bits
= sizeof(*host_mask
) * 8;
7889 abi_ulong
*target_mask
;
7892 assert(host_size
>= target_size
);
7894 target_mask
= lock_user(VERIFY_READ
, target_addr
, target_size
, 1);
7896 return -TARGET_EFAULT
;
7898 memset(host_mask
, 0, host_size
);
7900 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
7901 unsigned bit
= i
* target_bits
;
7904 __get_user(val
, &target_mask
[i
]);
7905 for (j
= 0; j
< target_bits
; j
++, bit
++) {
7906 if (val
& (1UL << j
)) {
7907 host_mask
[bit
/ host_bits
] |= 1UL << (bit
% host_bits
);
7912 unlock_user(target_mask
, target_addr
, 0);
7916 static int host_to_target_cpu_mask(const unsigned long *host_mask
,
7918 abi_ulong target_addr
,
7921 unsigned target_bits
= sizeof(abi_ulong
) * 8;
7922 unsigned host_bits
= sizeof(*host_mask
) * 8;
7923 abi_ulong
*target_mask
;
7926 assert(host_size
>= target_size
);
7928 target_mask
= lock_user(VERIFY_WRITE
, target_addr
, target_size
, 0);
7930 return -TARGET_EFAULT
;
7933 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
7934 unsigned bit
= i
* target_bits
;
7937 for (j
= 0; j
< target_bits
; j
++, bit
++) {
7938 if (host_mask
[bit
/ host_bits
] & (1UL << (bit
% host_bits
))) {
7942 __put_user(val
, &target_mask
[i
]);
7945 unlock_user(target_mask
, target_addr
, target_size
);
7949 /* do_syscall() should always have a single exit point at the end so
7950 that actions, such as logging of syscall results, can be performed.
7951 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7952 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
7953 abi_long arg2
, abi_long arg3
, abi_long arg4
,
7954 abi_long arg5
, abi_long arg6
, abi_long arg7
,
7957 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
7963 #if defined(DEBUG_ERESTARTSYS)
7964 /* Debug-only code for exercising the syscall-restart code paths
7965 * in the per-architecture cpu main loops: restart every syscall
7966 * the guest makes once before letting it through.
7973 return -TARGET_ERESTARTSYS
;
7979 gemu_log("syscall %d", num
);
7981 trace_guest_user_syscall(cpu
, num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
7983 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7986 case TARGET_NR_exit
:
7987 /* In old applications this may be used to implement _exit(2).
7988 However in threaded applictions it is used for thread termination,
7989 and _exit_group is used for application termination.
7990 Do thread termination if we have more then one thread. */
7992 if (block_signals()) {
7993 ret
= -TARGET_ERESTARTSYS
;
7999 if (CPU_NEXT(first_cpu
)) {
8002 /* Remove the CPU from the list. */
8003 QTAILQ_REMOVE(&cpus
, cpu
, node
);
8008 if (ts
->child_tidptr
) {
8009 put_user_u32(0, ts
->child_tidptr
);
8010 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
8014 object_unref(OBJECT(cpu
));
8016 rcu_unregister_thread();
8024 gdb_exit(cpu_env
, arg1
);
8026 ret
= 0; /* avoid warning */
8028 case TARGET_NR_read
:
8032 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
8034 ret
= get_errno(safe_read(arg1
, p
, arg3
));
8036 fd_trans_host_to_target_data(arg1
)) {
8037 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
8039 unlock_user(p
, arg2
, ret
);
8042 case TARGET_NR_write
:
8043 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
8045 if (fd_trans_target_to_host_data(arg1
)) {
8046 void *copy
= g_malloc(arg3
);
8047 memcpy(copy
, p
, arg3
);
8048 ret
= fd_trans_target_to_host_data(arg1
)(copy
, arg3
);
8050 ret
= get_errno(safe_write(arg1
, copy
, ret
));
8054 ret
= get_errno(safe_write(arg1
, p
, arg3
));
8056 unlock_user(p
, arg2
, 0);
8058 #ifdef TARGET_NR_open
8059 case TARGET_NR_open
:
8060 if (!(p
= lock_user_string(arg1
)))
8062 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
8063 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
8065 fd_trans_unregister(ret
);
8066 unlock_user(p
, arg1
, 0);
8069 case TARGET_NR_openat
:
8070 if (!(p
= lock_user_string(arg2
)))
8072 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
8073 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
8075 fd_trans_unregister(ret
);
8076 unlock_user(p
, arg2
, 0);
8078 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8079 case TARGET_NR_name_to_handle_at
:
8080 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
8083 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8084 case TARGET_NR_open_by_handle_at
:
8085 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
8086 fd_trans_unregister(ret
);
8089 case TARGET_NR_close
:
8090 fd_trans_unregister(arg1
);
8091 ret
= get_errno(close(arg1
));
8096 #ifdef TARGET_NR_fork
8097 case TARGET_NR_fork
:
8098 ret
= get_errno(do_fork(cpu_env
, TARGET_SIGCHLD
, 0, 0, 0, 0));
8101 #ifdef TARGET_NR_waitpid
8102 case TARGET_NR_waitpid
:
8105 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
8106 if (!is_error(ret
) && arg2
&& ret
8107 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
8112 #ifdef TARGET_NR_waitid
8113 case TARGET_NR_waitid
:
8117 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
8118 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
8119 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
8121 host_to_target_siginfo(p
, &info
);
8122 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
8127 #ifdef TARGET_NR_creat /* not on alpha */
8128 case TARGET_NR_creat
:
8129 if (!(p
= lock_user_string(arg1
)))
8131 ret
= get_errno(creat(p
, arg2
));
8132 fd_trans_unregister(ret
);
8133 unlock_user(p
, arg1
, 0);
8136 #ifdef TARGET_NR_link
8137 case TARGET_NR_link
:
8140 p
= lock_user_string(arg1
);
8141 p2
= lock_user_string(arg2
);
8143 ret
= -TARGET_EFAULT
;
8145 ret
= get_errno(link(p
, p2
));
8146 unlock_user(p2
, arg2
, 0);
8147 unlock_user(p
, arg1
, 0);
8151 #if defined(TARGET_NR_linkat)
8152 case TARGET_NR_linkat
:
8157 p
= lock_user_string(arg2
);
8158 p2
= lock_user_string(arg4
);
8160 ret
= -TARGET_EFAULT
;
8162 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
8163 unlock_user(p
, arg2
, 0);
8164 unlock_user(p2
, arg4
, 0);
8168 #ifdef TARGET_NR_unlink
8169 case TARGET_NR_unlink
:
8170 if (!(p
= lock_user_string(arg1
)))
8172 ret
= get_errno(unlink(p
));
8173 unlock_user(p
, arg1
, 0);
8176 #if defined(TARGET_NR_unlinkat)
8177 case TARGET_NR_unlinkat
:
8178 if (!(p
= lock_user_string(arg2
)))
8180 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
8181 unlock_user(p
, arg2
, 0);
8184 case TARGET_NR_execve
:
8186 char **argp
, **envp
;
8189 abi_ulong guest_argp
;
8190 abi_ulong guest_envp
;
8197 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
8198 if (get_user_ual(addr
, gp
))
8206 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
8207 if (get_user_ual(addr
, gp
))
8214 argp
= g_new0(char *, argc
+ 1);
8215 envp
= g_new0(char *, envc
+ 1);
8217 for (gp
= guest_argp
, q
= argp
; gp
;
8218 gp
+= sizeof(abi_ulong
), q
++) {
8219 if (get_user_ual(addr
, gp
))
8223 if (!(*q
= lock_user_string(addr
)))
8225 total_size
+= strlen(*q
) + 1;
8229 for (gp
= guest_envp
, q
= envp
; gp
;
8230 gp
+= sizeof(abi_ulong
), q
++) {
8231 if (get_user_ual(addr
, gp
))
8235 if (!(*q
= lock_user_string(addr
)))
8237 total_size
+= strlen(*q
) + 1;
8241 if (!(p
= lock_user_string(arg1
)))
8243 /* Although execve() is not an interruptible syscall it is
8244 * a special case where we must use the safe_syscall wrapper:
8245 * if we allow a signal to happen before we make the host
8246 * syscall then we will 'lose' it, because at the point of
8247 * execve the process leaves QEMU's control. So we use the
8248 * safe syscall wrapper to ensure that we either take the
8249 * signal as a guest signal, or else it does not happen
8250 * before the execve completes and makes it the other
8251 * program's problem.
8253 ret
= get_errno(safe_execve(p
, argp
, envp
));
8254 unlock_user(p
, arg1
, 0);
8259 ret
= -TARGET_EFAULT
;
8262 for (gp
= guest_argp
, q
= argp
; *q
;
8263 gp
+= sizeof(abi_ulong
), q
++) {
8264 if (get_user_ual(addr
, gp
)
8267 unlock_user(*q
, addr
, 0);
8269 for (gp
= guest_envp
, q
= envp
; *q
;
8270 gp
+= sizeof(abi_ulong
), q
++) {
8271 if (get_user_ual(addr
, gp
)
8274 unlock_user(*q
, addr
, 0);
8281 case TARGET_NR_chdir
:
8282 if (!(p
= lock_user_string(arg1
)))
8284 ret
= get_errno(chdir(p
));
8285 unlock_user(p
, arg1
, 0);
8287 #ifdef TARGET_NR_time
8288 case TARGET_NR_time
:
8291 ret
= get_errno(time(&host_time
));
8294 && put_user_sal(host_time
, arg1
))
8299 #ifdef TARGET_NR_mknod
8300 case TARGET_NR_mknod
:
8301 if (!(p
= lock_user_string(arg1
)))
8303 ret
= get_errno(mknod(p
, arg2
, arg3
));
8304 unlock_user(p
, arg1
, 0);
8307 #if defined(TARGET_NR_mknodat)
8308 case TARGET_NR_mknodat
:
8309 if (!(p
= lock_user_string(arg2
)))
8311 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
8312 unlock_user(p
, arg2
, 0);
8315 #ifdef TARGET_NR_chmod
8316 case TARGET_NR_chmod
:
8317 if (!(p
= lock_user_string(arg1
)))
8319 ret
= get_errno(chmod(p
, arg2
));
8320 unlock_user(p
, arg1
, 0);
8323 #ifdef TARGET_NR_break
8324 case TARGET_NR_break
:
8327 #ifdef TARGET_NR_oldstat
8328 case TARGET_NR_oldstat
:
8331 case TARGET_NR_lseek
:
8332 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
8334 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8335 /* Alpha specific */
8336 case TARGET_NR_getxpid
:
8337 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
8338 ret
= get_errno(getpid());
8341 #ifdef TARGET_NR_getpid
8342 case TARGET_NR_getpid
:
8343 ret
= get_errno(getpid());
8346 case TARGET_NR_mount
:
8348 /* need to look at the data field */
8352 p
= lock_user_string(arg1
);
8360 p2
= lock_user_string(arg2
);
8363 unlock_user(p
, arg1
, 0);
8369 p3
= lock_user_string(arg3
);
8372 unlock_user(p
, arg1
, 0);
8374 unlock_user(p2
, arg2
, 0);
8381 /* FIXME - arg5 should be locked, but it isn't clear how to
8382 * do that since it's not guaranteed to be a NULL-terminated
8386 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
8388 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
8390 ret
= get_errno(ret
);
8393 unlock_user(p
, arg1
, 0);
8395 unlock_user(p2
, arg2
, 0);
8397 unlock_user(p3
, arg3
, 0);
8401 #ifdef TARGET_NR_umount
8402 case TARGET_NR_umount
:
8403 if (!(p
= lock_user_string(arg1
)))
8405 ret
= get_errno(umount(p
));
8406 unlock_user(p
, arg1
, 0);
8409 #ifdef TARGET_NR_stime /* not on alpha */
8410 case TARGET_NR_stime
:
8413 if (get_user_sal(host_time
, arg1
))
8415 ret
= get_errno(stime(&host_time
));
8419 case TARGET_NR_ptrace
:
8421 #ifdef TARGET_NR_alarm /* not on alpha */
8422 case TARGET_NR_alarm
:
8426 #ifdef TARGET_NR_oldfstat
8427 case TARGET_NR_oldfstat
:
8430 #ifdef TARGET_NR_pause /* not on alpha */
8431 case TARGET_NR_pause
:
8432 if (!block_signals()) {
8433 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
8435 ret
= -TARGET_EINTR
;
8438 #ifdef TARGET_NR_utime
8439 case TARGET_NR_utime
:
8441 struct utimbuf tbuf
, *host_tbuf
;
8442 struct target_utimbuf
*target_tbuf
;
8444 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
8446 tbuf
.actime
= tswapal(target_tbuf
->actime
);
8447 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
8448 unlock_user_struct(target_tbuf
, arg2
, 0);
8453 if (!(p
= lock_user_string(arg1
)))
8455 ret
= get_errno(utime(p
, host_tbuf
));
8456 unlock_user(p
, arg1
, 0);
8460 #ifdef TARGET_NR_utimes
8461 case TARGET_NR_utimes
:
8463 struct timeval
*tvp
, tv
[2];
8465 if (copy_from_user_timeval(&tv
[0], arg2
)
8466 || copy_from_user_timeval(&tv
[1],
8467 arg2
+ sizeof(struct target_timeval
)))
8473 if (!(p
= lock_user_string(arg1
)))
8475 ret
= get_errno(utimes(p
, tvp
));
8476 unlock_user(p
, arg1
, 0);
8480 #if defined(TARGET_NR_futimesat)
8481 case TARGET_NR_futimesat
:
8483 struct timeval
*tvp
, tv
[2];
8485 if (copy_from_user_timeval(&tv
[0], arg3
)
8486 || copy_from_user_timeval(&tv
[1],
8487 arg3
+ sizeof(struct target_timeval
)))
8493 if (!(p
= lock_user_string(arg2
)))
8495 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
8496 unlock_user(p
, arg2
, 0);
8500 #ifdef TARGET_NR_stty
8501 case TARGET_NR_stty
:
8504 #ifdef TARGET_NR_gtty
8505 case TARGET_NR_gtty
:
8508 #ifdef TARGET_NR_access
8509 case TARGET_NR_access
:
8510 if (!(p
= lock_user_string(arg1
)))
8512 ret
= get_errno(access(path(p
), arg2
));
8513 unlock_user(p
, arg1
, 0);
8516 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8517 case TARGET_NR_faccessat
:
8518 if (!(p
= lock_user_string(arg2
)))
8520 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
8521 unlock_user(p
, arg2
, 0);
8524 #ifdef TARGET_NR_nice /* not on alpha */
8525 case TARGET_NR_nice
:
8526 ret
= get_errno(nice(arg1
));
8529 #ifdef TARGET_NR_ftime
8530 case TARGET_NR_ftime
:
8533 case TARGET_NR_sync
:
8537 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8538 case TARGET_NR_syncfs
:
8539 ret
= get_errno(syncfs(arg1
));
8542 case TARGET_NR_kill
:
8543 ret
= get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
8545 #ifdef TARGET_NR_rename
8546 case TARGET_NR_rename
:
8549 p
= lock_user_string(arg1
);
8550 p2
= lock_user_string(arg2
);
8552 ret
= -TARGET_EFAULT
;
8554 ret
= get_errno(rename(p
, p2
));
8555 unlock_user(p2
, arg2
, 0);
8556 unlock_user(p
, arg1
, 0);
8560 #if defined(TARGET_NR_renameat)
8561 case TARGET_NR_renameat
:
8564 p
= lock_user_string(arg2
);
8565 p2
= lock_user_string(arg4
);
8567 ret
= -TARGET_EFAULT
;
8569 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
8570 unlock_user(p2
, arg4
, 0);
8571 unlock_user(p
, arg2
, 0);
8575 #if defined(TARGET_NR_renameat2)
8576 case TARGET_NR_renameat2
:
8579 p
= lock_user_string(arg2
);
8580 p2
= lock_user_string(arg4
);
8582 ret
= -TARGET_EFAULT
;
8584 ret
= get_errno(sys_renameat2(arg1
, p
, arg3
, p2
, arg5
));
8586 unlock_user(p2
, arg4
, 0);
8587 unlock_user(p
, arg2
, 0);
8591 #ifdef TARGET_NR_mkdir
8592 case TARGET_NR_mkdir
:
8593 if (!(p
= lock_user_string(arg1
)))
8595 ret
= get_errno(mkdir(p
, arg2
));
8596 unlock_user(p
, arg1
, 0);
8599 #if defined(TARGET_NR_mkdirat)
8600 case TARGET_NR_mkdirat
:
8601 if (!(p
= lock_user_string(arg2
)))
8603 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
8604 unlock_user(p
, arg2
, 0);
8607 #ifdef TARGET_NR_rmdir
8608 case TARGET_NR_rmdir
:
8609 if (!(p
= lock_user_string(arg1
)))
8611 ret
= get_errno(rmdir(p
));
8612 unlock_user(p
, arg1
, 0);
8616 ret
= get_errno(dup(arg1
));
8618 fd_trans_dup(arg1
, ret
);
8621 #ifdef TARGET_NR_pipe
8622 case TARGET_NR_pipe
:
8623 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
8626 #ifdef TARGET_NR_pipe2
8627 case TARGET_NR_pipe2
:
8628 ret
= do_pipe(cpu_env
, arg1
,
8629 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
8632 case TARGET_NR_times
:
8634 struct target_tms
*tmsp
;
8636 ret
= get_errno(times(&tms
));
8638 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
8641 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
8642 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
8643 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
8644 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
8647 ret
= host_to_target_clock_t(ret
);
8650 #ifdef TARGET_NR_prof
8651 case TARGET_NR_prof
:
8654 #ifdef TARGET_NR_signal
8655 case TARGET_NR_signal
:
8658 case TARGET_NR_acct
:
8660 ret
= get_errno(acct(NULL
));
8662 if (!(p
= lock_user_string(arg1
)))
8664 ret
= get_errno(acct(path(p
)));
8665 unlock_user(p
, arg1
, 0);
8668 #ifdef TARGET_NR_umount2
8669 case TARGET_NR_umount2
:
8670 if (!(p
= lock_user_string(arg1
)))
8672 ret
= get_errno(umount2(p
, arg2
));
8673 unlock_user(p
, arg1
, 0);
8676 #ifdef TARGET_NR_lock
8677 case TARGET_NR_lock
:
8680 case TARGET_NR_ioctl
:
8681 ret
= do_ioctl(arg1
, arg2
, arg3
);
8683 #ifdef TARGET_NR_fcntl
8684 case TARGET_NR_fcntl
:
8685 ret
= do_fcntl(arg1
, arg2
, arg3
);
8688 #ifdef TARGET_NR_mpx
8692 case TARGET_NR_setpgid
:
8693 ret
= get_errno(setpgid(arg1
, arg2
));
8695 #ifdef TARGET_NR_ulimit
8696 case TARGET_NR_ulimit
:
8699 #ifdef TARGET_NR_oldolduname
8700 case TARGET_NR_oldolduname
:
8703 case TARGET_NR_umask
:
8704 ret
= get_errno(umask(arg1
));
8706 case TARGET_NR_chroot
:
8707 if (!(p
= lock_user_string(arg1
)))
8709 ret
= get_errno(chroot(p
));
8710 unlock_user(p
, arg1
, 0);
8712 #ifdef TARGET_NR_ustat
8713 case TARGET_NR_ustat
:
8716 #ifdef TARGET_NR_dup2
8717 case TARGET_NR_dup2
:
8718 ret
= get_errno(dup2(arg1
, arg2
));
8720 fd_trans_dup(arg1
, arg2
);
8724 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8725 case TARGET_NR_dup3
:
8729 if ((arg3
& ~TARGET_O_CLOEXEC
) != 0) {
8732 host_flags
= target_to_host_bitmask(arg3
, fcntl_flags_tbl
);
8733 ret
= get_errno(dup3(arg1
, arg2
, host_flags
));
8735 fd_trans_dup(arg1
, arg2
);
8740 #ifdef TARGET_NR_getppid /* not on alpha */
8741 case TARGET_NR_getppid
:
8742 ret
= get_errno(getppid());
8745 #ifdef TARGET_NR_getpgrp
8746 case TARGET_NR_getpgrp
:
8747 ret
= get_errno(getpgrp());
8750 case TARGET_NR_setsid
:
8751 ret
= get_errno(setsid());
8753 #ifdef TARGET_NR_sigaction
8754 case TARGET_NR_sigaction
:
8756 #if defined(TARGET_ALPHA)
8757 struct target_sigaction act
, oact
, *pact
= 0;
8758 struct target_old_sigaction
*old_act
;
8760 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8762 act
._sa_handler
= old_act
->_sa_handler
;
8763 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8764 act
.sa_flags
= old_act
->sa_flags
;
8765 act
.sa_restorer
= 0;
8766 unlock_user_struct(old_act
, arg2
, 0);
8769 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8770 if (!is_error(ret
) && arg3
) {
8771 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8773 old_act
->_sa_handler
= oact
._sa_handler
;
8774 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8775 old_act
->sa_flags
= oact
.sa_flags
;
8776 unlock_user_struct(old_act
, arg3
, 1);
8778 #elif defined(TARGET_MIPS)
8779 struct target_sigaction act
, oact
, *pact
, *old_act
;
8782 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8784 act
._sa_handler
= old_act
->_sa_handler
;
8785 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
8786 act
.sa_flags
= old_act
->sa_flags
;
8787 unlock_user_struct(old_act
, arg2
, 0);
8793 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8795 if (!is_error(ret
) && arg3
) {
8796 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8798 old_act
->_sa_handler
= oact
._sa_handler
;
8799 old_act
->sa_flags
= oact
.sa_flags
;
8800 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
8801 old_act
->sa_mask
.sig
[1] = 0;
8802 old_act
->sa_mask
.sig
[2] = 0;
8803 old_act
->sa_mask
.sig
[3] = 0;
8804 unlock_user_struct(old_act
, arg3
, 1);
8807 struct target_old_sigaction
*old_act
;
8808 struct target_sigaction act
, oact
, *pact
;
8810 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8812 act
._sa_handler
= old_act
->_sa_handler
;
8813 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8814 act
.sa_flags
= old_act
->sa_flags
;
8815 act
.sa_restorer
= old_act
->sa_restorer
;
8816 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8817 act
.ka_restorer
= 0;
8819 unlock_user_struct(old_act
, arg2
, 0);
8824 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8825 if (!is_error(ret
) && arg3
) {
8826 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8828 old_act
->_sa_handler
= oact
._sa_handler
;
8829 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8830 old_act
->sa_flags
= oact
.sa_flags
;
8831 old_act
->sa_restorer
= oact
.sa_restorer
;
8832 unlock_user_struct(old_act
, arg3
, 1);
8838 case TARGET_NR_rt_sigaction
:
8840 #if defined(TARGET_ALPHA)
8841 /* For Alpha and SPARC this is a 5 argument syscall, with
8842 * a 'restorer' parameter which must be copied into the
8843 * sa_restorer field of the sigaction struct.
8844 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8845 * and arg5 is the sigsetsize.
8846 * Alpha also has a separate rt_sigaction struct that it uses
8847 * here; SPARC uses the usual sigaction struct.
8849 struct target_rt_sigaction
*rt_act
;
8850 struct target_sigaction act
, oact
, *pact
= 0;
8852 if (arg4
!= sizeof(target_sigset_t
)) {
8853 ret
= -TARGET_EINVAL
;
8857 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
8859 act
._sa_handler
= rt_act
->_sa_handler
;
8860 act
.sa_mask
= rt_act
->sa_mask
;
8861 act
.sa_flags
= rt_act
->sa_flags
;
8862 act
.sa_restorer
= arg5
;
8863 unlock_user_struct(rt_act
, arg2
, 0);
8866 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8867 if (!is_error(ret
) && arg3
) {
8868 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
8870 rt_act
->_sa_handler
= oact
._sa_handler
;
8871 rt_act
->sa_mask
= oact
.sa_mask
;
8872 rt_act
->sa_flags
= oact
.sa_flags
;
8873 unlock_user_struct(rt_act
, arg3
, 1);
8877 target_ulong restorer
= arg4
;
8878 target_ulong sigsetsize
= arg5
;
8880 target_ulong sigsetsize
= arg4
;
8882 struct target_sigaction
*act
;
8883 struct target_sigaction
*oact
;
8885 if (sigsetsize
!= sizeof(target_sigset_t
)) {
8886 ret
= -TARGET_EINVAL
;
8890 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1)) {
8893 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8894 act
->ka_restorer
= restorer
;
8900 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
8901 ret
= -TARGET_EFAULT
;
8902 goto rt_sigaction_fail
;
8906 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
8909 unlock_user_struct(act
, arg2
, 0);
8911 unlock_user_struct(oact
, arg3
, 1);
8915 #ifdef TARGET_NR_sgetmask /* not on alpha */
8916 case TARGET_NR_sgetmask
:
8919 abi_ulong target_set
;
8920 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8922 host_to_target_old_sigset(&target_set
, &cur_set
);
8928 #ifdef TARGET_NR_ssetmask /* not on alpha */
8929 case TARGET_NR_ssetmask
:
8932 abi_ulong target_set
= arg1
;
8933 target_to_host_old_sigset(&set
, &target_set
);
8934 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
8936 host_to_target_old_sigset(&target_set
, &oset
);
8942 #ifdef TARGET_NR_sigprocmask
8943 case TARGET_NR_sigprocmask
:
8945 #if defined(TARGET_ALPHA)
8946 sigset_t set
, oldset
;
8951 case TARGET_SIG_BLOCK
:
8954 case TARGET_SIG_UNBLOCK
:
8957 case TARGET_SIG_SETMASK
:
8961 ret
= -TARGET_EINVAL
;
8965 target_to_host_old_sigset(&set
, &mask
);
8967 ret
= do_sigprocmask(how
, &set
, &oldset
);
8968 if (!is_error(ret
)) {
8969 host_to_target_old_sigset(&mask
, &oldset
);
8971 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
8974 sigset_t set
, oldset
, *set_ptr
;
8979 case TARGET_SIG_BLOCK
:
8982 case TARGET_SIG_UNBLOCK
:
8985 case TARGET_SIG_SETMASK
:
8989 ret
= -TARGET_EINVAL
;
8992 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8994 target_to_host_old_sigset(&set
, p
);
8995 unlock_user(p
, arg2
, 0);
9001 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
9002 if (!is_error(ret
) && arg3
) {
9003 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
9005 host_to_target_old_sigset(p
, &oldset
);
9006 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
9012 case TARGET_NR_rt_sigprocmask
:
9015 sigset_t set
, oldset
, *set_ptr
;
9017 if (arg4
!= sizeof(target_sigset_t
)) {
9018 ret
= -TARGET_EINVAL
;
9024 case TARGET_SIG_BLOCK
:
9027 case TARGET_SIG_UNBLOCK
:
9030 case TARGET_SIG_SETMASK
:
9034 ret
= -TARGET_EINVAL
;
9037 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
9039 target_to_host_sigset(&set
, p
);
9040 unlock_user(p
, arg2
, 0);
9046 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
9047 if (!is_error(ret
) && arg3
) {
9048 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
9050 host_to_target_sigset(p
, &oldset
);
9051 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
9055 #ifdef TARGET_NR_sigpending
9056 case TARGET_NR_sigpending
:
9059 ret
= get_errno(sigpending(&set
));
9060 if (!is_error(ret
)) {
9061 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
9063 host_to_target_old_sigset(p
, &set
);
9064 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
9069 case TARGET_NR_rt_sigpending
:
9073 /* Yes, this check is >, not != like most. We follow the kernel's
9074 * logic and it does it like this because it implements
9075 * NR_sigpending through the same code path, and in that case
9076 * the old_sigset_t is smaller in size.
9078 if (arg2
> sizeof(target_sigset_t
)) {
9079 ret
= -TARGET_EINVAL
;
9083 ret
= get_errno(sigpending(&set
));
9084 if (!is_error(ret
)) {
9085 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
9087 host_to_target_sigset(p
, &set
);
9088 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
9092 #ifdef TARGET_NR_sigsuspend
9093 case TARGET_NR_sigsuspend
:
9095 TaskState
*ts
= cpu
->opaque
;
9096 #if defined(TARGET_ALPHA)
9097 abi_ulong mask
= arg1
;
9098 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
9100 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
9102 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
9103 unlock_user(p
, arg1
, 0);
9105 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
9107 if (ret
!= -TARGET_ERESTARTSYS
) {
9108 ts
->in_sigsuspend
= 1;
9113 case TARGET_NR_rt_sigsuspend
:
9115 TaskState
*ts
= cpu
->opaque
;
9117 if (arg2
!= sizeof(target_sigset_t
)) {
9118 ret
= -TARGET_EINVAL
;
9121 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
9123 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
9124 unlock_user(p
, arg1
, 0);
9125 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
9127 if (ret
!= -TARGET_ERESTARTSYS
) {
9128 ts
->in_sigsuspend
= 1;
9132 case TARGET_NR_rt_sigtimedwait
:
9135 struct timespec uts
, *puts
;
9138 if (arg4
!= sizeof(target_sigset_t
)) {
9139 ret
= -TARGET_EINVAL
;
9143 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
9145 target_to_host_sigset(&set
, p
);
9146 unlock_user(p
, arg1
, 0);
9149 target_to_host_timespec(puts
, arg3
);
9153 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
9155 if (!is_error(ret
)) {
9157 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
9162 host_to_target_siginfo(p
, &uinfo
);
9163 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
9165 ret
= host_to_target_signal(ret
);
9169 case TARGET_NR_rt_sigqueueinfo
:
9173 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
9177 target_to_host_siginfo(&uinfo
, p
);
9178 unlock_user(p
, arg3
, 0);
9179 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
9182 case TARGET_NR_rt_tgsigqueueinfo
:
9186 p
= lock_user(VERIFY_READ
, arg4
, sizeof(target_siginfo_t
), 1);
9190 target_to_host_siginfo(&uinfo
, p
);
9191 unlock_user(p
, arg4
, 0);
9192 ret
= get_errno(sys_rt_tgsigqueueinfo(arg1
, arg2
, arg3
, &uinfo
));
9195 #ifdef TARGET_NR_sigreturn
9196 case TARGET_NR_sigreturn
:
9197 if (block_signals()) {
9198 ret
= -TARGET_ERESTARTSYS
;
9200 ret
= do_sigreturn(cpu_env
);
9204 case TARGET_NR_rt_sigreturn
:
9205 if (block_signals()) {
9206 ret
= -TARGET_ERESTARTSYS
;
9208 ret
= do_rt_sigreturn(cpu_env
);
9211 case TARGET_NR_sethostname
:
9212 if (!(p
= lock_user_string(arg1
)))
9214 ret
= get_errno(sethostname(p
, arg2
));
9215 unlock_user(p
, arg1
, 0);
9217 case TARGET_NR_setrlimit
:
9219 int resource
= target_to_host_resource(arg1
);
9220 struct target_rlimit
*target_rlim
;
9222 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
9224 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
9225 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
9226 unlock_user_struct(target_rlim
, arg2
, 0);
9227 ret
= get_errno(setrlimit(resource
, &rlim
));
9230 case TARGET_NR_getrlimit
:
9232 int resource
= target_to_host_resource(arg1
);
9233 struct target_rlimit
*target_rlim
;
9236 ret
= get_errno(getrlimit(resource
, &rlim
));
9237 if (!is_error(ret
)) {
9238 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
9240 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
9241 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
9242 unlock_user_struct(target_rlim
, arg2
, 1);
9246 case TARGET_NR_getrusage
:
9248 struct rusage rusage
;
9249 ret
= get_errno(getrusage(arg1
, &rusage
));
9250 if (!is_error(ret
)) {
9251 ret
= host_to_target_rusage(arg2
, &rusage
);
9255 case TARGET_NR_gettimeofday
:
9258 ret
= get_errno(gettimeofday(&tv
, NULL
));
9259 if (!is_error(ret
)) {
9260 if (copy_to_user_timeval(arg1
, &tv
))
9265 case TARGET_NR_settimeofday
:
9267 struct timeval tv
, *ptv
= NULL
;
9268 struct timezone tz
, *ptz
= NULL
;
9271 if (copy_from_user_timeval(&tv
, arg1
)) {
9278 if (copy_from_user_timezone(&tz
, arg2
)) {
9284 ret
= get_errno(settimeofday(ptv
, ptz
));
9287 #if defined(TARGET_NR_select)
9288 case TARGET_NR_select
:
9289 #if defined(TARGET_WANT_NI_OLD_SELECT)
9290 /* some architectures used to have old_select here
9291 * but now ENOSYS it.
9293 ret
= -TARGET_ENOSYS
;
9294 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9295 ret
= do_old_select(arg1
);
9297 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9301 #ifdef TARGET_NR_pselect6
9302 case TARGET_NR_pselect6
:
9304 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
9305 fd_set rfds
, wfds
, efds
;
9306 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
9307 struct timespec ts
, *ts_ptr
;
9310 * The 6th arg is actually two args smashed together,
9311 * so we cannot use the C library.
9319 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
9320 target_sigset_t
*target_sigset
;
9328 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
9332 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
9336 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
9342 * This takes a timespec, and not a timeval, so we cannot
9343 * use the do_select() helper ...
9346 if (target_to_host_timespec(&ts
, ts_addr
)) {
9354 /* Extract the two packed args for the sigset */
9357 sig
.size
= SIGSET_T_SIZE
;
9359 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
9363 arg_sigset
= tswapal(arg7
[0]);
9364 arg_sigsize
= tswapal(arg7
[1]);
9365 unlock_user(arg7
, arg6
, 0);
9369 if (arg_sigsize
!= sizeof(*target_sigset
)) {
9370 /* Like the kernel, we enforce correct size sigsets */
9371 ret
= -TARGET_EINVAL
;
9374 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
9375 sizeof(*target_sigset
), 1);
9376 if (!target_sigset
) {
9379 target_to_host_sigset(&set
, target_sigset
);
9380 unlock_user(target_sigset
, arg_sigset
, 0);
9388 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
9391 if (!is_error(ret
)) {
9392 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
9394 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
9396 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
9399 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
9405 #ifdef TARGET_NR_symlink
9406 case TARGET_NR_symlink
:
9409 p
= lock_user_string(arg1
);
9410 p2
= lock_user_string(arg2
);
9412 ret
= -TARGET_EFAULT
;
9414 ret
= get_errno(symlink(p
, p2
));
9415 unlock_user(p2
, arg2
, 0);
9416 unlock_user(p
, arg1
, 0);
9420 #if defined(TARGET_NR_symlinkat)
9421 case TARGET_NR_symlinkat
:
9424 p
= lock_user_string(arg1
);
9425 p2
= lock_user_string(arg3
);
9427 ret
= -TARGET_EFAULT
;
9429 ret
= get_errno(symlinkat(p
, arg2
, p2
));
9430 unlock_user(p2
, arg3
, 0);
9431 unlock_user(p
, arg1
, 0);
9435 #ifdef TARGET_NR_oldlstat
9436 case TARGET_NR_oldlstat
:
9439 #ifdef TARGET_NR_readlink
9440 case TARGET_NR_readlink
:
9443 p
= lock_user_string(arg1
);
9444 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9446 ret
= -TARGET_EFAULT
;
9448 /* Short circuit this for the magic exe check. */
9449 ret
= -TARGET_EINVAL
;
9450 } else if (is_proc_myself((const char *)p
, "exe")) {
9451 char real
[PATH_MAX
], *temp
;
9452 temp
= realpath(exec_path
, real
);
9453 /* Return value is # of bytes that we wrote to the buffer. */
9455 ret
= get_errno(-1);
9457 /* Don't worry about sign mismatch as earlier mapping
9458 * logic would have thrown a bad address error. */
9459 ret
= MIN(strlen(real
), arg3
);
9460 /* We cannot NUL terminate the string. */
9461 memcpy(p2
, real
, ret
);
9464 ret
= get_errno(readlink(path(p
), p2
, arg3
));
9466 unlock_user(p2
, arg2
, ret
);
9467 unlock_user(p
, arg1
, 0);
9471 #if defined(TARGET_NR_readlinkat)
9472 case TARGET_NR_readlinkat
:
9475 p
= lock_user_string(arg2
);
9476 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
9478 ret
= -TARGET_EFAULT
;
9479 } else if (is_proc_myself((const char *)p
, "exe")) {
9480 char real
[PATH_MAX
], *temp
;
9481 temp
= realpath(exec_path
, real
);
9482 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
9483 snprintf((char *)p2
, arg4
, "%s", real
);
9485 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
9487 unlock_user(p2
, arg3
, ret
);
9488 unlock_user(p
, arg2
, 0);
9492 #ifdef TARGET_NR_uselib
9493 case TARGET_NR_uselib
:
9496 #ifdef TARGET_NR_swapon
9497 case TARGET_NR_swapon
:
9498 if (!(p
= lock_user_string(arg1
)))
9500 ret
= get_errno(swapon(p
, arg2
));
9501 unlock_user(p
, arg1
, 0);
9504 case TARGET_NR_reboot
:
9505 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
9506 /* arg4 must be ignored in all other cases */
9507 p
= lock_user_string(arg4
);
9511 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
9512 unlock_user(p
, arg4
, 0);
9514 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
9517 #ifdef TARGET_NR_readdir
9518 case TARGET_NR_readdir
:
9521 #ifdef TARGET_NR_mmap
9522 case TARGET_NR_mmap
:
9523 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9524 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9525 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9526 || defined(TARGET_S390X)
9529 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
9530 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
9538 unlock_user(v
, arg1
, 0);
9539 ret
= get_errno(target_mmap(v1
, v2
, v3
,
9540 target_to_host_bitmask(v4
, mmap_flags_tbl
),
9544 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9545 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9551 #ifdef TARGET_NR_mmap2
9552 case TARGET_NR_mmap2
:
9554 #define MMAP_SHIFT 12
9556 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9557 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9559 arg6
<< MMAP_SHIFT
));
9562 case TARGET_NR_munmap
:
9563 ret
= get_errno(target_munmap(arg1
, arg2
));
9565 case TARGET_NR_mprotect
:
9567 TaskState
*ts
= cpu
->opaque
;
9568 /* Special hack to detect libc making the stack executable. */
9569 if ((arg3
& PROT_GROWSDOWN
)
9570 && arg1
>= ts
->info
->stack_limit
9571 && arg1
<= ts
->info
->start_stack
) {
9572 arg3
&= ~PROT_GROWSDOWN
;
9573 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
9574 arg1
= ts
->info
->stack_limit
;
9577 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
9579 #ifdef TARGET_NR_mremap
9580 case TARGET_NR_mremap
:
9581 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
9584 /* ??? msync/mlock/munlock are broken for softmmu. */
9585 #ifdef TARGET_NR_msync
9586 case TARGET_NR_msync
:
9587 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
9590 #ifdef TARGET_NR_mlock
9591 case TARGET_NR_mlock
:
9592 ret
= get_errno(mlock(g2h(arg1
), arg2
));
9595 #ifdef TARGET_NR_munlock
9596 case TARGET_NR_munlock
:
9597 ret
= get_errno(munlock(g2h(arg1
), arg2
));
9600 #ifdef TARGET_NR_mlockall
9601 case TARGET_NR_mlockall
:
9602 ret
= get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
9605 #ifdef TARGET_NR_munlockall
9606 case TARGET_NR_munlockall
:
9607 ret
= get_errno(munlockall());
9610 case TARGET_NR_truncate
:
9611 if (!(p
= lock_user_string(arg1
)))
9613 ret
= get_errno(truncate(p
, arg2
));
9614 unlock_user(p
, arg1
, 0);
9616 case TARGET_NR_ftruncate
:
9617 ret
= get_errno(ftruncate(arg1
, arg2
));
9619 case TARGET_NR_fchmod
:
9620 ret
= get_errno(fchmod(arg1
, arg2
));
9622 #if defined(TARGET_NR_fchmodat)
9623 case TARGET_NR_fchmodat
:
9624 if (!(p
= lock_user_string(arg2
)))
9626 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
9627 unlock_user(p
, arg2
, 0);
9630 case TARGET_NR_getpriority
:
9631 /* Note that negative values are valid for getpriority, so we must
9632 differentiate based on errno settings. */
9634 ret
= getpriority(arg1
, arg2
);
9635 if (ret
== -1 && errno
!= 0) {
9636 ret
= -host_to_target_errno(errno
);
9640 /* Return value is the unbiased priority. Signal no error. */
9641 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
9643 /* Return value is a biased priority to avoid negative numbers. */
9647 case TARGET_NR_setpriority
:
9648 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
9650 #ifdef TARGET_NR_profil
9651 case TARGET_NR_profil
:
9654 case TARGET_NR_statfs
:
9655 if (!(p
= lock_user_string(arg1
)))
9657 ret
= get_errno(statfs(path(p
), &stfs
));
9658 unlock_user(p
, arg1
, 0);
9660 if (!is_error(ret
)) {
9661 struct target_statfs
*target_stfs
;
9663 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
9665 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9666 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9667 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9668 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9669 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9670 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9671 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9672 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9673 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9674 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9675 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9676 #ifdef _STATFS_F_FLAGS
9677 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
9679 __put_user(0, &target_stfs
->f_flags
);
9681 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9682 unlock_user_struct(target_stfs
, arg2
, 1);
9685 case TARGET_NR_fstatfs
:
9686 ret
= get_errno(fstatfs(arg1
, &stfs
));
9687 goto convert_statfs
;
9688 #ifdef TARGET_NR_statfs64
9689 case TARGET_NR_statfs64
:
9690 if (!(p
= lock_user_string(arg1
)))
9692 ret
= get_errno(statfs(path(p
), &stfs
));
9693 unlock_user(p
, arg1
, 0);
9695 if (!is_error(ret
)) {
9696 struct target_statfs64
*target_stfs
;
9698 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
9700 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9701 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9702 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9703 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9704 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9705 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9706 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9707 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9708 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9709 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9710 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9711 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9712 unlock_user_struct(target_stfs
, arg3
, 1);
9715 case TARGET_NR_fstatfs64
:
9716 ret
= get_errno(fstatfs(arg1
, &stfs
));
9717 goto convert_statfs64
;
9719 #ifdef TARGET_NR_ioperm
9720 case TARGET_NR_ioperm
:
9723 #ifdef TARGET_NR_socketcall
9724 case TARGET_NR_socketcall
:
9725 ret
= do_socketcall(arg1
, arg2
);
9728 #ifdef TARGET_NR_accept
9729 case TARGET_NR_accept
:
9730 ret
= do_accept4(arg1
, arg2
, arg3
, 0);
9733 #ifdef TARGET_NR_accept4
9734 case TARGET_NR_accept4
:
9735 ret
= do_accept4(arg1
, arg2
, arg3
, arg4
);
9738 #ifdef TARGET_NR_bind
9739 case TARGET_NR_bind
:
9740 ret
= do_bind(arg1
, arg2
, arg3
);
9743 #ifdef TARGET_NR_connect
9744 case TARGET_NR_connect
:
9745 ret
= do_connect(arg1
, arg2
, arg3
);
9748 #ifdef TARGET_NR_getpeername
9749 case TARGET_NR_getpeername
:
9750 ret
= do_getpeername(arg1
, arg2
, arg3
);
9753 #ifdef TARGET_NR_getsockname
9754 case TARGET_NR_getsockname
:
9755 ret
= do_getsockname(arg1
, arg2
, arg3
);
9758 #ifdef TARGET_NR_getsockopt
9759 case TARGET_NR_getsockopt
:
9760 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
9763 #ifdef TARGET_NR_listen
9764 case TARGET_NR_listen
:
9765 ret
= get_errno(listen(arg1
, arg2
));
9768 #ifdef TARGET_NR_recv
9769 case TARGET_NR_recv
:
9770 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
9773 #ifdef TARGET_NR_recvfrom
9774 case TARGET_NR_recvfrom
:
9775 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9778 #ifdef TARGET_NR_recvmsg
9779 case TARGET_NR_recvmsg
:
9780 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
9783 #ifdef TARGET_NR_send
9784 case TARGET_NR_send
:
9785 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
9788 #ifdef TARGET_NR_sendmsg
9789 case TARGET_NR_sendmsg
:
9790 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
9793 #ifdef TARGET_NR_sendmmsg
9794 case TARGET_NR_sendmmsg
:
9795 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
9797 case TARGET_NR_recvmmsg
:
9798 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
9801 #ifdef TARGET_NR_sendto
9802 case TARGET_NR_sendto
:
9803 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9806 #ifdef TARGET_NR_shutdown
9807 case TARGET_NR_shutdown
:
9808 ret
= get_errno(shutdown(arg1
, arg2
));
9811 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9812 case TARGET_NR_getrandom
:
9813 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
9817 ret
= get_errno(getrandom(p
, arg2
, arg3
));
9818 unlock_user(p
, arg1
, ret
);
9821 #ifdef TARGET_NR_socket
9822 case TARGET_NR_socket
:
9823 ret
= do_socket(arg1
, arg2
, arg3
);
9826 #ifdef TARGET_NR_socketpair
9827 case TARGET_NR_socketpair
:
9828 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
9831 #ifdef TARGET_NR_setsockopt
9832 case TARGET_NR_setsockopt
:
9833 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
9836 #if defined(TARGET_NR_syslog)
9837 case TARGET_NR_syslog
:
9842 case TARGET_SYSLOG_ACTION_CLOSE
: /* Close log */
9843 case TARGET_SYSLOG_ACTION_OPEN
: /* Open log */
9844 case TARGET_SYSLOG_ACTION_CLEAR
: /* Clear ring buffer */
9845 case TARGET_SYSLOG_ACTION_CONSOLE_OFF
: /* Disable logging */
9846 case TARGET_SYSLOG_ACTION_CONSOLE_ON
: /* Enable logging */
9847 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL
: /* Set messages level */
9848 case TARGET_SYSLOG_ACTION_SIZE_UNREAD
: /* Number of chars */
9849 case TARGET_SYSLOG_ACTION_SIZE_BUFFER
: /* Size of the buffer */
9851 ret
= get_errno(sys_syslog((int)arg1
, NULL
, (int)arg3
));
9854 case TARGET_SYSLOG_ACTION_READ
: /* Read from log */
9855 case TARGET_SYSLOG_ACTION_READ_CLEAR
: /* Read/clear msgs */
9856 case TARGET_SYSLOG_ACTION_READ_ALL
: /* Read last messages */
9858 ret
= -TARGET_EINVAL
;
9866 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9868 ret
= -TARGET_EFAULT
;
9871 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
9872 unlock_user(p
, arg2
, arg3
);
9882 case TARGET_NR_setitimer
:
9884 struct itimerval value
, ovalue
, *pvalue
;
9888 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
9889 || copy_from_user_timeval(&pvalue
->it_value
,
9890 arg2
+ sizeof(struct target_timeval
)))
9895 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
9896 if (!is_error(ret
) && arg3
) {
9897 if (copy_to_user_timeval(arg3
,
9898 &ovalue
.it_interval
)
9899 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
9905 case TARGET_NR_getitimer
:
9907 struct itimerval value
;
9909 ret
= get_errno(getitimer(arg1
, &value
));
9910 if (!is_error(ret
) && arg2
) {
9911 if (copy_to_user_timeval(arg2
,
9913 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
9919 #ifdef TARGET_NR_stat
9920 case TARGET_NR_stat
:
9921 if (!(p
= lock_user_string(arg1
)))
9923 ret
= get_errno(stat(path(p
), &st
));
9924 unlock_user(p
, arg1
, 0);
9927 #ifdef TARGET_NR_lstat
9928 case TARGET_NR_lstat
:
9929 if (!(p
= lock_user_string(arg1
)))
9931 ret
= get_errno(lstat(path(p
), &st
));
9932 unlock_user(p
, arg1
, 0);
9935 case TARGET_NR_fstat
:
9937 ret
= get_errno(fstat(arg1
, &st
));
9938 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9941 if (!is_error(ret
)) {
9942 struct target_stat
*target_st
;
9944 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
9946 memset(target_st
, 0, sizeof(*target_st
));
9947 __put_user(st
.st_dev
, &target_st
->st_dev
);
9948 __put_user(st
.st_ino
, &target_st
->st_ino
);
9949 __put_user(st
.st_mode
, &target_st
->st_mode
);
9950 __put_user(st
.st_uid
, &target_st
->st_uid
);
9951 __put_user(st
.st_gid
, &target_st
->st_gid
);
9952 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
9953 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
9954 __put_user(st
.st_size
, &target_st
->st_size
);
9955 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
9956 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
9957 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
9958 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
9959 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
9960 unlock_user_struct(target_st
, arg2
, 1);
9964 #ifdef TARGET_NR_olduname
9965 case TARGET_NR_olduname
:
9968 #ifdef TARGET_NR_iopl
9969 case TARGET_NR_iopl
:
9972 case TARGET_NR_vhangup
:
9973 ret
= get_errno(vhangup());
9975 #ifdef TARGET_NR_idle
9976 case TARGET_NR_idle
:
9979 #ifdef TARGET_NR_syscall
9980 case TARGET_NR_syscall
:
9981 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
9982 arg6
, arg7
, arg8
, 0);
9985 case TARGET_NR_wait4
:
9988 abi_long status_ptr
= arg2
;
9989 struct rusage rusage
, *rusage_ptr
;
9990 abi_ulong target_rusage
= arg4
;
9991 abi_long rusage_err
;
9993 rusage_ptr
= &rusage
;
9996 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
9997 if (!is_error(ret
)) {
9998 if (status_ptr
&& ret
) {
9999 status
= host_to_target_waitstatus(status
);
10000 if (put_user_s32(status
, status_ptr
))
10003 if (target_rusage
) {
10004 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
10012 #ifdef TARGET_NR_swapoff
10013 case TARGET_NR_swapoff
:
10014 if (!(p
= lock_user_string(arg1
)))
10016 ret
= get_errno(swapoff(p
));
10017 unlock_user(p
, arg1
, 0);
10020 case TARGET_NR_sysinfo
:
10022 struct target_sysinfo
*target_value
;
10023 struct sysinfo value
;
10024 ret
= get_errno(sysinfo(&value
));
10025 if (!is_error(ret
) && arg1
)
10027 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
10029 __put_user(value
.uptime
, &target_value
->uptime
);
10030 __put_user(value
.loads
[0], &target_value
->loads
[0]);
10031 __put_user(value
.loads
[1], &target_value
->loads
[1]);
10032 __put_user(value
.loads
[2], &target_value
->loads
[2]);
10033 __put_user(value
.totalram
, &target_value
->totalram
);
10034 __put_user(value
.freeram
, &target_value
->freeram
);
10035 __put_user(value
.sharedram
, &target_value
->sharedram
);
10036 __put_user(value
.bufferram
, &target_value
->bufferram
);
10037 __put_user(value
.totalswap
, &target_value
->totalswap
);
10038 __put_user(value
.freeswap
, &target_value
->freeswap
);
10039 __put_user(value
.procs
, &target_value
->procs
);
10040 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
10041 __put_user(value
.freehigh
, &target_value
->freehigh
);
10042 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
10043 unlock_user_struct(target_value
, arg1
, 1);
10047 #ifdef TARGET_NR_ipc
10048 case TARGET_NR_ipc
:
10049 ret
= do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
10052 #ifdef TARGET_NR_semget
10053 case TARGET_NR_semget
:
10054 ret
= get_errno(semget(arg1
, arg2
, arg3
));
10057 #ifdef TARGET_NR_semop
10058 case TARGET_NR_semop
:
10059 ret
= do_semop(arg1
, arg2
, arg3
);
10062 #ifdef TARGET_NR_semctl
10063 case TARGET_NR_semctl
:
10064 ret
= do_semctl(arg1
, arg2
, arg3
, arg4
);
10067 #ifdef TARGET_NR_msgctl
10068 case TARGET_NR_msgctl
:
10069 ret
= do_msgctl(arg1
, arg2
, arg3
);
10072 #ifdef TARGET_NR_msgget
10073 case TARGET_NR_msgget
:
10074 ret
= get_errno(msgget(arg1
, arg2
));
10077 #ifdef TARGET_NR_msgrcv
10078 case TARGET_NR_msgrcv
:
10079 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
10082 #ifdef TARGET_NR_msgsnd
10083 case TARGET_NR_msgsnd
:
10084 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
10087 #ifdef TARGET_NR_shmget
10088 case TARGET_NR_shmget
:
10089 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
10092 #ifdef TARGET_NR_shmctl
10093 case TARGET_NR_shmctl
:
10094 ret
= do_shmctl(arg1
, arg2
, arg3
);
10097 #ifdef TARGET_NR_shmat
10098 case TARGET_NR_shmat
:
10099 ret
= do_shmat(cpu_env
, arg1
, arg2
, arg3
);
10102 #ifdef TARGET_NR_shmdt
10103 case TARGET_NR_shmdt
:
10104 ret
= do_shmdt(arg1
);
10107 case TARGET_NR_fsync
:
10108 ret
= get_errno(fsync(arg1
));
10110 case TARGET_NR_clone
:
10111 /* Linux manages to have three different orderings for its
10112 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10113 * match the kernel's CONFIG_CLONE_* settings.
10114 * Microblaze is further special in that it uses a sixth
10115 * implicit argument to clone for the TLS pointer.
10117 #if defined(TARGET_MICROBLAZE)
10118 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
10119 #elif defined(TARGET_CLONE_BACKWARDS)
10120 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
10121 #elif defined(TARGET_CLONE_BACKWARDS2)
10122 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
10124 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
10127 #ifdef __NR_exit_group
10128 /* new thread calls */
10129 case TARGET_NR_exit_group
:
10130 #ifdef TARGET_GPROF
10133 gdb_exit(cpu_env
, arg1
);
10134 ret
= get_errno(exit_group(arg1
));
10137 case TARGET_NR_setdomainname
:
10138 if (!(p
= lock_user_string(arg1
)))
10140 ret
= get_errno(setdomainname(p
, arg2
));
10141 unlock_user(p
, arg1
, 0);
10143 case TARGET_NR_uname
:
10144 /* no need to transcode because we use the linux syscall */
10146 struct new_utsname
* buf
;
10148 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
10150 ret
= get_errno(sys_uname(buf
));
10151 if (!is_error(ret
)) {
10152 /* Overwrite the native machine name with whatever is being
10154 g_strlcpy(buf
->machine
, cpu_to_uname_machine(cpu_env
),
10155 sizeof(buf
->machine
));
10156 /* Allow the user to override the reported release. */
10157 if (qemu_uname_release
&& *qemu_uname_release
) {
10158 g_strlcpy(buf
->release
, qemu_uname_release
,
10159 sizeof(buf
->release
));
10162 unlock_user_struct(buf
, arg1
, 1);
10166 case TARGET_NR_modify_ldt
:
10167 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
10169 #if !defined(TARGET_X86_64)
10170 case TARGET_NR_vm86old
:
10171 goto unimplemented
;
10172 case TARGET_NR_vm86
:
10173 ret
= do_vm86(cpu_env
, arg1
, arg2
);
10177 case TARGET_NR_adjtimex
:
10179 struct timex host_buf
;
10181 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
10184 ret
= get_errno(adjtimex(&host_buf
));
10185 if (!is_error(ret
)) {
10186 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
10192 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10193 case TARGET_NR_clock_adjtime
:
10195 struct timex htx
, *phtx
= &htx
;
10197 if (target_to_host_timex(phtx
, arg2
) != 0) {
10200 ret
= get_errno(clock_adjtime(arg1
, phtx
));
10201 if (!is_error(ret
) && phtx
) {
10202 if (host_to_target_timex(arg2
, phtx
) != 0) {
10209 #ifdef TARGET_NR_create_module
10210 case TARGET_NR_create_module
:
10212 case TARGET_NR_init_module
:
10213 case TARGET_NR_delete_module
:
10214 #ifdef TARGET_NR_get_kernel_syms
10215 case TARGET_NR_get_kernel_syms
:
10217 goto unimplemented
;
10218 case TARGET_NR_quotactl
:
10219 goto unimplemented
;
10220 case TARGET_NR_getpgid
:
10221 ret
= get_errno(getpgid(arg1
));
10223 case TARGET_NR_fchdir
:
10224 ret
= get_errno(fchdir(arg1
));
10226 #ifdef TARGET_NR_bdflush /* not on x86_64 */
10227 case TARGET_NR_bdflush
:
10228 goto unimplemented
;
10230 #ifdef TARGET_NR_sysfs
10231 case TARGET_NR_sysfs
:
10232 goto unimplemented
;
10234 case TARGET_NR_personality
:
10235 ret
= get_errno(personality(arg1
));
10237 #ifdef TARGET_NR_afs_syscall
10238 case TARGET_NR_afs_syscall
:
10239 goto unimplemented
;
10241 #ifdef TARGET_NR__llseek /* Not on alpha */
10242 case TARGET_NR__llseek
:
10245 #if !defined(__NR_llseek)
10246 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
10248 ret
= get_errno(res
);
10253 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
10255 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
10261 #ifdef TARGET_NR_getdents
10262 case TARGET_NR_getdents
:
10263 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10264 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10266 struct target_dirent
*target_dirp
;
10267 struct linux_dirent
*dirp
;
10268 abi_long count
= arg3
;
10270 dirp
= g_try_malloc(count
);
10272 ret
= -TARGET_ENOMEM
;
10276 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
10277 if (!is_error(ret
)) {
10278 struct linux_dirent
*de
;
10279 struct target_dirent
*tde
;
10281 int reclen
, treclen
;
10282 int count1
, tnamelen
;
10286 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10290 reclen
= de
->d_reclen
;
10291 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
10292 assert(tnamelen
>= 0);
10293 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
10294 assert(count1
+ treclen
<= count
);
10295 tde
->d_reclen
= tswap16(treclen
);
10296 tde
->d_ino
= tswapal(de
->d_ino
);
10297 tde
->d_off
= tswapal(de
->d_off
);
10298 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
10299 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
10301 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
10305 unlock_user(target_dirp
, arg2
, ret
);
10311 struct linux_dirent
*dirp
;
10312 abi_long count
= arg3
;
10314 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10316 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
10317 if (!is_error(ret
)) {
10318 struct linux_dirent
*de
;
10323 reclen
= de
->d_reclen
;
10326 de
->d_reclen
= tswap16(reclen
);
10327 tswapls(&de
->d_ino
);
10328 tswapls(&de
->d_off
);
10329 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
10333 unlock_user(dirp
, arg2
, ret
);
10337 /* Implement getdents in terms of getdents64 */
10339 struct linux_dirent64
*dirp
;
10340 abi_long count
= arg3
;
10342 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
10346 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
10347 if (!is_error(ret
)) {
10348 /* Convert the dirent64 structs to target dirent. We do this
10349 * in-place, since we can guarantee that a target_dirent is no
10350 * larger than a dirent64; however this means we have to be
10351 * careful to read everything before writing in the new format.
10353 struct linux_dirent64
*de
;
10354 struct target_dirent
*tde
;
10359 tde
= (struct target_dirent
*)dirp
;
10361 int namelen
, treclen
;
10362 int reclen
= de
->d_reclen
;
10363 uint64_t ino
= de
->d_ino
;
10364 int64_t off
= de
->d_off
;
10365 uint8_t type
= de
->d_type
;
10367 namelen
= strlen(de
->d_name
);
10368 treclen
= offsetof(struct target_dirent
, d_name
)
10370 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
10372 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
10373 tde
->d_ino
= tswapal(ino
);
10374 tde
->d_off
= tswapal(off
);
10375 tde
->d_reclen
= tswap16(treclen
);
10376 /* The target_dirent type is in what was formerly a padding
10377 * byte at the end of the structure:
10379 *(((char *)tde
) + treclen
- 1) = type
;
10381 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
10382 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
10388 unlock_user(dirp
, arg2
, ret
);
10392 #endif /* TARGET_NR_getdents */
10393 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10394 case TARGET_NR_getdents64
:
10396 struct linux_dirent64
*dirp
;
10397 abi_long count
= arg3
;
10398 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10400 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
10401 if (!is_error(ret
)) {
10402 struct linux_dirent64
*de
;
10407 reclen
= de
->d_reclen
;
10410 de
->d_reclen
= tswap16(reclen
);
10411 tswap64s((uint64_t *)&de
->d_ino
);
10412 tswap64s((uint64_t *)&de
->d_off
);
10413 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
10417 unlock_user(dirp
, arg2
, ret
);
10420 #endif /* TARGET_NR_getdents64 */
10421 #if defined(TARGET_NR__newselect)
10422 case TARGET_NR__newselect
:
10423 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
10426 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10427 # ifdef TARGET_NR_poll
10428 case TARGET_NR_poll
:
10430 # ifdef TARGET_NR_ppoll
10431 case TARGET_NR_ppoll
:
10434 struct target_pollfd
*target_pfd
;
10435 unsigned int nfds
= arg2
;
10436 struct pollfd
*pfd
;
10442 if (nfds
> (INT_MAX
/ sizeof(struct target_pollfd
))) {
10443 ret
= -TARGET_EINVAL
;
10447 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
10448 sizeof(struct target_pollfd
) * nfds
, 1);
10453 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
10454 for (i
= 0; i
< nfds
; i
++) {
10455 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
10456 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
10461 # ifdef TARGET_NR_ppoll
10462 case TARGET_NR_ppoll
:
10464 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
10465 target_sigset_t
*target_set
;
10466 sigset_t _set
, *set
= &_set
;
10469 if (target_to_host_timespec(timeout_ts
, arg3
)) {
10470 unlock_user(target_pfd
, arg1
, 0);
10478 if (arg5
!= sizeof(target_sigset_t
)) {
10479 unlock_user(target_pfd
, arg1
, 0);
10480 ret
= -TARGET_EINVAL
;
10484 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
10486 unlock_user(target_pfd
, arg1
, 0);
10489 target_to_host_sigset(set
, target_set
);
10494 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
10495 set
, SIGSET_T_SIZE
));
10497 if (!is_error(ret
) && arg3
) {
10498 host_to_target_timespec(arg3
, timeout_ts
);
10501 unlock_user(target_set
, arg4
, 0);
10506 # ifdef TARGET_NR_poll
10507 case TARGET_NR_poll
:
10509 struct timespec ts
, *pts
;
10512 /* Convert ms to secs, ns */
10513 ts
.tv_sec
= arg3
/ 1000;
10514 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
10517 /* -ve poll() timeout means "infinite" */
10520 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
10525 g_assert_not_reached();
10528 if (!is_error(ret
)) {
10529 for(i
= 0; i
< nfds
; i
++) {
10530 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
10533 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
10537 case TARGET_NR_flock
:
10538 /* NOTE: the flock constant seems to be the same for every
10540 ret
= get_errno(safe_flock(arg1
, arg2
));
10542 case TARGET_NR_readv
:
10544 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10546 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
10547 unlock_iovec(vec
, arg2
, arg3
, 1);
10549 ret
= -host_to_target_errno(errno
);
10553 case TARGET_NR_writev
:
10555 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10557 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
10558 unlock_iovec(vec
, arg2
, arg3
, 0);
10560 ret
= -host_to_target_errno(errno
);
10564 #if defined(TARGET_NR_preadv)
10565 case TARGET_NR_preadv
:
10567 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10569 unsigned long low
, high
;
10571 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10572 ret
= get_errno(safe_preadv(arg1
, vec
, arg3
, low
, high
));
10573 unlock_iovec(vec
, arg2
, arg3
, 1);
10575 ret
= -host_to_target_errno(errno
);
10580 #if defined(TARGET_NR_pwritev)
10581 case TARGET_NR_pwritev
:
10583 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10585 unsigned long low
, high
;
10587 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10588 ret
= get_errno(safe_pwritev(arg1
, vec
, arg3
, low
, high
));
10589 unlock_iovec(vec
, arg2
, arg3
, 0);
10591 ret
= -host_to_target_errno(errno
);
10596 case TARGET_NR_getsid
:
10597 ret
= get_errno(getsid(arg1
));
10599 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10600 case TARGET_NR_fdatasync
:
10601 ret
= get_errno(fdatasync(arg1
));
10604 #ifdef TARGET_NR__sysctl
10605 case TARGET_NR__sysctl
:
10606 /* We don't implement this, but ENOTDIR is always a safe
10608 ret
= -TARGET_ENOTDIR
;
10611 case TARGET_NR_sched_getaffinity
:
10613 unsigned int mask_size
;
10614 unsigned long *mask
;
10617 * sched_getaffinity needs multiples of ulong, so need to take
10618 * care of mismatches between target ulong and host ulong sizes.
10620 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10621 ret
= -TARGET_EINVAL
;
10624 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10626 mask
= alloca(mask_size
);
10627 memset(mask
, 0, mask_size
);
10628 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
10630 if (!is_error(ret
)) {
10632 /* More data returned than the caller's buffer will fit.
10633 * This only happens if sizeof(abi_long) < sizeof(long)
10634 * and the caller passed us a buffer holding an odd number
10635 * of abi_longs. If the host kernel is actually using the
10636 * extra 4 bytes then fail EINVAL; otherwise we can just
10637 * ignore them and only copy the interesting part.
10639 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
10640 if (numcpus
> arg2
* 8) {
10641 ret
= -TARGET_EINVAL
;
10647 if (host_to_target_cpu_mask(mask
, mask_size
, arg3
, ret
)) {
10653 case TARGET_NR_sched_setaffinity
:
10655 unsigned int mask_size
;
10656 unsigned long *mask
;
10659 * sched_setaffinity needs multiples of ulong, so need to take
10660 * care of mismatches between target ulong and host ulong sizes.
10662 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10663 ret
= -TARGET_EINVAL
;
10666 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10667 mask
= alloca(mask_size
);
10669 ret
= target_to_host_cpu_mask(mask
, mask_size
, arg3
, arg2
);
10674 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
10677 case TARGET_NR_getcpu
:
10679 unsigned cpu
, node
;
10680 ret
= get_errno(sys_getcpu(arg1
? &cpu
: NULL
,
10681 arg2
? &node
: NULL
,
10683 if (is_error(ret
)) {
10686 if (arg1
&& put_user_u32(cpu
, arg1
)) {
10689 if (arg2
&& put_user_u32(node
, arg2
)) {
10694 case TARGET_NR_sched_setparam
:
10696 struct sched_param
*target_schp
;
10697 struct sched_param schp
;
10700 return -TARGET_EINVAL
;
10702 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
10704 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10705 unlock_user_struct(target_schp
, arg2
, 0);
10706 ret
= get_errno(sched_setparam(arg1
, &schp
));
10709 case TARGET_NR_sched_getparam
:
10711 struct sched_param
*target_schp
;
10712 struct sched_param schp
;
10715 return -TARGET_EINVAL
;
10717 ret
= get_errno(sched_getparam(arg1
, &schp
));
10718 if (!is_error(ret
)) {
10719 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
10721 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
10722 unlock_user_struct(target_schp
, arg2
, 1);
10726 case TARGET_NR_sched_setscheduler
:
10728 struct sched_param
*target_schp
;
10729 struct sched_param schp
;
10731 return -TARGET_EINVAL
;
10733 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
10735 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10736 unlock_user_struct(target_schp
, arg3
, 0);
10737 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
10740 case TARGET_NR_sched_getscheduler
:
10741 ret
= get_errno(sched_getscheduler(arg1
));
10743 case TARGET_NR_sched_yield
:
10744 ret
= get_errno(sched_yield());
10746 case TARGET_NR_sched_get_priority_max
:
10747 ret
= get_errno(sched_get_priority_max(arg1
));
10749 case TARGET_NR_sched_get_priority_min
:
10750 ret
= get_errno(sched_get_priority_min(arg1
));
10752 case TARGET_NR_sched_rr_get_interval
:
10754 struct timespec ts
;
10755 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10756 if (!is_error(ret
)) {
10757 ret
= host_to_target_timespec(arg2
, &ts
);
10761 case TARGET_NR_nanosleep
:
10763 struct timespec req
, rem
;
10764 target_to_host_timespec(&req
, arg1
);
10765 ret
= get_errno(safe_nanosleep(&req
, &rem
));
10766 if (is_error(ret
) && arg2
) {
10767 host_to_target_timespec(arg2
, &rem
);
10771 #ifdef TARGET_NR_query_module
10772 case TARGET_NR_query_module
:
10773 goto unimplemented
;
10775 #ifdef TARGET_NR_nfsservctl
10776 case TARGET_NR_nfsservctl
:
10777 goto unimplemented
;
10779 case TARGET_NR_prctl
:
10781 case PR_GET_PDEATHSIG
:
10784 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
10785 if (!is_error(ret
) && arg2
10786 && put_user_ual(deathsig
, arg2
)) {
10794 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
10798 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10799 arg3
, arg4
, arg5
));
10800 unlock_user(name
, arg2
, 16);
10805 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
10809 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10810 arg3
, arg4
, arg5
));
10811 unlock_user(name
, arg2
, 0);
10815 #ifdef TARGET_AARCH64
10816 case TARGET_PR_SVE_SET_VL
:
10817 /* We cannot support either PR_SVE_SET_VL_ONEXEC
10818 or PR_SVE_VL_INHERIT. Therefore, anything above
10819 ARM_MAX_VQ results in EINVAL. */
10820 ret
= -TARGET_EINVAL
;
10821 if (arm_feature(cpu_env
, ARM_FEATURE_SVE
)
10822 && arg2
>= 0 && arg2
<= ARM_MAX_VQ
* 16 && !(arg2
& 15)) {
10823 CPUARMState
*env
= cpu_env
;
10824 int old_vq
= (env
->vfp
.zcr_el
[1] & 0xf) + 1;
10825 int vq
= MAX(arg2
/ 16, 1);
10828 aarch64_sve_narrow_vq(env
, vq
);
10830 env
->vfp
.zcr_el
[1] = vq
- 1;
10834 case TARGET_PR_SVE_GET_VL
:
10835 ret
= -TARGET_EINVAL
;
10836 if (arm_feature(cpu_env
, ARM_FEATURE_SVE
)) {
10837 CPUARMState
*env
= cpu_env
;
10838 ret
= ((env
->vfp
.zcr_el
[1] & 0xf) + 1) * 16;
10841 #endif /* AARCH64 */
10842 case PR_GET_SECCOMP
:
10843 case PR_SET_SECCOMP
:
10844 /* Disable seccomp to prevent the target disabling syscalls we
10846 ret
= -TARGET_EINVAL
;
10849 /* Most prctl options have no pointer arguments */
10850 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
10854 #ifdef TARGET_NR_arch_prctl
10855 case TARGET_NR_arch_prctl
:
10856 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10857 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
10860 goto unimplemented
;
10863 #ifdef TARGET_NR_pread64
10864 case TARGET_NR_pread64
:
10865 if (regpairs_aligned(cpu_env
, num
)) {
10869 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
10871 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10872 unlock_user(p
, arg2
, ret
);
10874 case TARGET_NR_pwrite64
:
10875 if (regpairs_aligned(cpu_env
, num
)) {
10879 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
10881 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10882 unlock_user(p
, arg2
, 0);
10885 case TARGET_NR_getcwd
:
10886 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
10888 ret
= get_errno(sys_getcwd1(p
, arg2
));
10889 unlock_user(p
, arg1
, ret
);
10891 case TARGET_NR_capget
:
10892 case TARGET_NR_capset
:
10894 struct target_user_cap_header
*target_header
;
10895 struct target_user_cap_data
*target_data
= NULL
;
10896 struct __user_cap_header_struct header
;
10897 struct __user_cap_data_struct data
[2];
10898 struct __user_cap_data_struct
*dataptr
= NULL
;
10899 int i
, target_datalen
;
10900 int data_items
= 1;
10902 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
10905 header
.version
= tswap32(target_header
->version
);
10906 header
.pid
= tswap32(target_header
->pid
);
10908 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
10909 /* Version 2 and up takes pointer to two user_data structs */
10913 target_datalen
= sizeof(*target_data
) * data_items
;
10916 if (num
== TARGET_NR_capget
) {
10917 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
10919 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
10921 if (!target_data
) {
10922 unlock_user_struct(target_header
, arg1
, 0);
10926 if (num
== TARGET_NR_capset
) {
10927 for (i
= 0; i
< data_items
; i
++) {
10928 data
[i
].effective
= tswap32(target_data
[i
].effective
);
10929 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
10930 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
10937 if (num
== TARGET_NR_capget
) {
10938 ret
= get_errno(capget(&header
, dataptr
));
10940 ret
= get_errno(capset(&header
, dataptr
));
10943 /* The kernel always updates version for both capget and capset */
10944 target_header
->version
= tswap32(header
.version
);
10945 unlock_user_struct(target_header
, arg1
, 1);
10948 if (num
== TARGET_NR_capget
) {
10949 for (i
= 0; i
< data_items
; i
++) {
10950 target_data
[i
].effective
= tswap32(data
[i
].effective
);
10951 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
10952 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
10954 unlock_user(target_data
, arg2
, target_datalen
);
10956 unlock_user(target_data
, arg2
, 0);
10961 case TARGET_NR_sigaltstack
:
10962 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
10965 #ifdef CONFIG_SENDFILE
10966 case TARGET_NR_sendfile
:
10968 off_t
*offp
= NULL
;
10971 ret
= get_user_sal(off
, arg3
);
10972 if (is_error(ret
)) {
10977 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10978 if (!is_error(ret
) && arg3
) {
10979 abi_long ret2
= put_user_sal(off
, arg3
);
10980 if (is_error(ret2
)) {
10986 #ifdef TARGET_NR_sendfile64
10987 case TARGET_NR_sendfile64
:
10989 off_t
*offp
= NULL
;
10992 ret
= get_user_s64(off
, arg3
);
10993 if (is_error(ret
)) {
10998 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10999 if (!is_error(ret
) && arg3
) {
11000 abi_long ret2
= put_user_s64(off
, arg3
);
11001 if (is_error(ret2
)) {
11009 case TARGET_NR_sendfile
:
11010 #ifdef TARGET_NR_sendfile64
11011 case TARGET_NR_sendfile64
:
11013 goto unimplemented
;
11016 #ifdef TARGET_NR_getpmsg
11017 case TARGET_NR_getpmsg
:
11018 goto unimplemented
;
11020 #ifdef TARGET_NR_putpmsg
11021 case TARGET_NR_putpmsg
:
11022 goto unimplemented
;
11024 #ifdef TARGET_NR_vfork
11025 case TARGET_NR_vfork
:
11026 ret
= get_errno(do_fork(cpu_env
,
11027 CLONE_VFORK
| CLONE_VM
| TARGET_SIGCHLD
,
11031 #ifdef TARGET_NR_ugetrlimit
11032 case TARGET_NR_ugetrlimit
:
11034 struct rlimit rlim
;
11035 int resource
= target_to_host_resource(arg1
);
11036 ret
= get_errno(getrlimit(resource
, &rlim
));
11037 if (!is_error(ret
)) {
11038 struct target_rlimit
*target_rlim
;
11039 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
11041 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
11042 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
11043 unlock_user_struct(target_rlim
, arg2
, 1);
11048 #ifdef TARGET_NR_truncate64
11049 case TARGET_NR_truncate64
:
11050 if (!(p
= lock_user_string(arg1
)))
11052 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
11053 unlock_user(p
, arg1
, 0);
11056 #ifdef TARGET_NR_ftruncate64
11057 case TARGET_NR_ftruncate64
:
11058 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
11061 #ifdef TARGET_NR_stat64
11062 case TARGET_NR_stat64
:
11063 if (!(p
= lock_user_string(arg1
)))
11065 ret
= get_errno(stat(path(p
), &st
));
11066 unlock_user(p
, arg1
, 0);
11067 if (!is_error(ret
))
11068 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11071 #ifdef TARGET_NR_lstat64
11072 case TARGET_NR_lstat64
:
11073 if (!(p
= lock_user_string(arg1
)))
11075 ret
= get_errno(lstat(path(p
), &st
));
11076 unlock_user(p
, arg1
, 0);
11077 if (!is_error(ret
))
11078 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11081 #ifdef TARGET_NR_fstat64
11082 case TARGET_NR_fstat64
:
11083 ret
= get_errno(fstat(arg1
, &st
));
11084 if (!is_error(ret
))
11085 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11088 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11089 #ifdef TARGET_NR_fstatat64
11090 case TARGET_NR_fstatat64
:
11092 #ifdef TARGET_NR_newfstatat
11093 case TARGET_NR_newfstatat
:
11095 if (!(p
= lock_user_string(arg2
)))
11097 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
11098 if (!is_error(ret
))
11099 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
11102 #ifdef TARGET_NR_lchown
11103 case TARGET_NR_lchown
:
11104 if (!(p
= lock_user_string(arg1
)))
11106 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
11107 unlock_user(p
, arg1
, 0);
11110 #ifdef TARGET_NR_getuid
11111 case TARGET_NR_getuid
:
11112 ret
= get_errno(high2lowuid(getuid()));
11115 #ifdef TARGET_NR_getgid
11116 case TARGET_NR_getgid
:
11117 ret
= get_errno(high2lowgid(getgid()));
11120 #ifdef TARGET_NR_geteuid
11121 case TARGET_NR_geteuid
:
11122 ret
= get_errno(high2lowuid(geteuid()));
11125 #ifdef TARGET_NR_getegid
11126 case TARGET_NR_getegid
:
11127 ret
= get_errno(high2lowgid(getegid()));
11130 case TARGET_NR_setreuid
:
11131 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
11133 case TARGET_NR_setregid
:
11134 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
11136 case TARGET_NR_getgroups
:
11138 int gidsetsize
= arg1
;
11139 target_id
*target_grouplist
;
11143 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11144 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11145 if (gidsetsize
== 0)
11147 if (!is_error(ret
)) {
11148 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
11149 if (!target_grouplist
)
11151 for(i
= 0;i
< ret
; i
++)
11152 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
11153 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
11157 case TARGET_NR_setgroups
:
11159 int gidsetsize
= arg1
;
11160 target_id
*target_grouplist
;
11161 gid_t
*grouplist
= NULL
;
11164 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11165 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
11166 if (!target_grouplist
) {
11167 ret
= -TARGET_EFAULT
;
11170 for (i
= 0; i
< gidsetsize
; i
++) {
11171 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
11173 unlock_user(target_grouplist
, arg2
, 0);
11175 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
11178 case TARGET_NR_fchown
:
11179 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
11181 #if defined(TARGET_NR_fchownat)
11182 case TARGET_NR_fchownat
:
11183 if (!(p
= lock_user_string(arg2
)))
11185 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
11186 low2highgid(arg4
), arg5
));
11187 unlock_user(p
, arg2
, 0);
11190 #ifdef TARGET_NR_setresuid
11191 case TARGET_NR_setresuid
:
11192 ret
= get_errno(sys_setresuid(low2highuid(arg1
),
11194 low2highuid(arg3
)));
11197 #ifdef TARGET_NR_getresuid
11198 case TARGET_NR_getresuid
:
11200 uid_t ruid
, euid
, suid
;
11201 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11202 if (!is_error(ret
)) {
11203 if (put_user_id(high2lowuid(ruid
), arg1
)
11204 || put_user_id(high2lowuid(euid
), arg2
)
11205 || put_user_id(high2lowuid(suid
), arg3
))
11211 #ifdef TARGET_NR_getresgid
11212 case TARGET_NR_setresgid
:
11213 ret
= get_errno(sys_setresgid(low2highgid(arg1
),
11215 low2highgid(arg3
)));
11218 #ifdef TARGET_NR_getresgid
11219 case TARGET_NR_getresgid
:
11221 gid_t rgid
, egid
, sgid
;
11222 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11223 if (!is_error(ret
)) {
11224 if (put_user_id(high2lowgid(rgid
), arg1
)
11225 || put_user_id(high2lowgid(egid
), arg2
)
11226 || put_user_id(high2lowgid(sgid
), arg3
))
11232 #ifdef TARGET_NR_chown
11233 case TARGET_NR_chown
:
11234 if (!(p
= lock_user_string(arg1
)))
11236 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
11237 unlock_user(p
, arg1
, 0);
11240 case TARGET_NR_setuid
:
11241 ret
= get_errno(sys_setuid(low2highuid(arg1
)));
11243 case TARGET_NR_setgid
:
11244 ret
= get_errno(sys_setgid(low2highgid(arg1
)));
11246 case TARGET_NR_setfsuid
:
11247 ret
= get_errno(setfsuid(arg1
));
11249 case TARGET_NR_setfsgid
:
11250 ret
= get_errno(setfsgid(arg1
));
11253 #ifdef TARGET_NR_lchown32
11254 case TARGET_NR_lchown32
:
11255 if (!(p
= lock_user_string(arg1
)))
11257 ret
= get_errno(lchown(p
, arg2
, arg3
));
11258 unlock_user(p
, arg1
, 0);
11261 #ifdef TARGET_NR_getuid32
11262 case TARGET_NR_getuid32
:
11263 ret
= get_errno(getuid());
11267 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11268 /* Alpha specific */
11269 case TARGET_NR_getxuid
:
11273 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
11275 ret
= get_errno(getuid());
11278 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11279 /* Alpha specific */
11280 case TARGET_NR_getxgid
:
11284 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
11286 ret
= get_errno(getgid());
11289 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11290 /* Alpha specific */
11291 case TARGET_NR_osf_getsysinfo
:
11292 ret
= -TARGET_EOPNOTSUPP
;
11294 case TARGET_GSI_IEEE_FP_CONTROL
:
11296 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
11298 /* Copied from linux ieee_fpcr_to_swcr. */
11299 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
11300 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
11301 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
11302 | SWCR_TRAP_ENABLE_DZE
11303 | SWCR_TRAP_ENABLE_OVF
);
11304 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
11305 | SWCR_TRAP_ENABLE_INE
);
11306 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
11307 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
11309 if (put_user_u64 (swcr
, arg2
))
11315 /* case GSI_IEEE_STATE_AT_SIGNAL:
11316 -- Not implemented in linux kernel.
11318 -- Retrieves current unaligned access state; not much used.
11319 case GSI_PROC_TYPE:
11320 -- Retrieves implver information; surely not used.
11321 case GSI_GET_HWRPB:
11322 -- Grabs a copy of the HWRPB; surely not used.
11327 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11328 /* Alpha specific */
11329 case TARGET_NR_osf_setsysinfo
:
11330 ret
= -TARGET_EOPNOTSUPP
;
11332 case TARGET_SSI_IEEE_FP_CONTROL
:
11334 uint64_t swcr
, fpcr
, orig_fpcr
;
11336 if (get_user_u64 (swcr
, arg2
)) {
11339 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11340 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
11342 /* Copied from linux ieee_swcr_to_fpcr. */
11343 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
11344 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
11345 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
11346 | SWCR_TRAP_ENABLE_DZE
11347 | SWCR_TRAP_ENABLE_OVF
)) << 48;
11348 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
11349 | SWCR_TRAP_ENABLE_INE
)) << 57;
11350 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
11351 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
11353 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11358 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
11360 uint64_t exc
, fpcr
, orig_fpcr
;
11363 if (get_user_u64(exc
, arg2
)) {
11367 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11369 /* We only add to the exception status here. */
11370 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
11372 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11375 /* Old exceptions are not signaled. */
11376 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
11378 /* If any exceptions set by this call,
11379 and are unmasked, send a signal. */
11381 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
11382 si_code
= TARGET_FPE_FLTRES
;
11384 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
11385 si_code
= TARGET_FPE_FLTUND
;
11387 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
11388 si_code
= TARGET_FPE_FLTOVF
;
11390 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
11391 si_code
= TARGET_FPE_FLTDIV
;
11393 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
11394 si_code
= TARGET_FPE_FLTINV
;
11396 if (si_code
!= 0) {
11397 target_siginfo_t info
;
11398 info
.si_signo
= SIGFPE
;
11400 info
.si_code
= si_code
;
11401 info
._sifields
._sigfault
._addr
11402 = ((CPUArchState
*)cpu_env
)->pc
;
11403 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
11404 QEMU_SI_FAULT
, &info
);
11409 /* case SSI_NVPAIRS:
11410 -- Used with SSIN_UACPROC to enable unaligned accesses.
11411 case SSI_IEEE_STATE_AT_SIGNAL:
11412 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11413 -- Not implemented in linux kernel
11418 #ifdef TARGET_NR_osf_sigprocmask
11419 /* Alpha specific. */
11420 case TARGET_NR_osf_sigprocmask
:
11424 sigset_t set
, oldset
;
11427 case TARGET_SIG_BLOCK
:
11430 case TARGET_SIG_UNBLOCK
:
11433 case TARGET_SIG_SETMASK
:
11437 ret
= -TARGET_EINVAL
;
11441 target_to_host_old_sigset(&set
, &mask
);
11442 ret
= do_sigprocmask(how
, &set
, &oldset
);
11444 host_to_target_old_sigset(&mask
, &oldset
);
11451 #ifdef TARGET_NR_getgid32
11452 case TARGET_NR_getgid32
:
11453 ret
= get_errno(getgid());
11456 #ifdef TARGET_NR_geteuid32
11457 case TARGET_NR_geteuid32
:
11458 ret
= get_errno(geteuid());
11461 #ifdef TARGET_NR_getegid32
11462 case TARGET_NR_getegid32
:
11463 ret
= get_errno(getegid());
11466 #ifdef TARGET_NR_setreuid32
11467 case TARGET_NR_setreuid32
:
11468 ret
= get_errno(setreuid(arg1
, arg2
));
11471 #ifdef TARGET_NR_setregid32
11472 case TARGET_NR_setregid32
:
11473 ret
= get_errno(setregid(arg1
, arg2
));
11476 #ifdef TARGET_NR_getgroups32
11477 case TARGET_NR_getgroups32
:
11479 int gidsetsize
= arg1
;
11480 uint32_t *target_grouplist
;
11484 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11485 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11486 if (gidsetsize
== 0)
11488 if (!is_error(ret
)) {
11489 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
11490 if (!target_grouplist
) {
11491 ret
= -TARGET_EFAULT
;
11494 for(i
= 0;i
< ret
; i
++)
11495 target_grouplist
[i
] = tswap32(grouplist
[i
]);
11496 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
11501 #ifdef TARGET_NR_setgroups32
11502 case TARGET_NR_setgroups32
:
11504 int gidsetsize
= arg1
;
11505 uint32_t *target_grouplist
;
11509 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11510 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
11511 if (!target_grouplist
) {
11512 ret
= -TARGET_EFAULT
;
11515 for(i
= 0;i
< gidsetsize
; i
++)
11516 grouplist
[i
] = tswap32(target_grouplist
[i
]);
11517 unlock_user(target_grouplist
, arg2
, 0);
11518 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
11522 #ifdef TARGET_NR_fchown32
11523 case TARGET_NR_fchown32
:
11524 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
11527 #ifdef TARGET_NR_setresuid32
11528 case TARGET_NR_setresuid32
:
11529 ret
= get_errno(sys_setresuid(arg1
, arg2
, arg3
));
11532 #ifdef TARGET_NR_getresuid32
11533 case TARGET_NR_getresuid32
:
11535 uid_t ruid
, euid
, suid
;
11536 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11537 if (!is_error(ret
)) {
11538 if (put_user_u32(ruid
, arg1
)
11539 || put_user_u32(euid
, arg2
)
11540 || put_user_u32(suid
, arg3
))
11546 #ifdef TARGET_NR_setresgid32
11547 case TARGET_NR_setresgid32
:
11548 ret
= get_errno(sys_setresgid(arg1
, arg2
, arg3
));
11551 #ifdef TARGET_NR_getresgid32
11552 case TARGET_NR_getresgid32
:
11554 gid_t rgid
, egid
, sgid
;
11555 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11556 if (!is_error(ret
)) {
11557 if (put_user_u32(rgid
, arg1
)
11558 || put_user_u32(egid
, arg2
)
11559 || put_user_u32(sgid
, arg3
))
11565 #ifdef TARGET_NR_chown32
11566 case TARGET_NR_chown32
:
11567 if (!(p
= lock_user_string(arg1
)))
11569 ret
= get_errno(chown(p
, arg2
, arg3
));
11570 unlock_user(p
, arg1
, 0);
11573 #ifdef TARGET_NR_setuid32
11574 case TARGET_NR_setuid32
:
11575 ret
= get_errno(sys_setuid(arg1
));
11578 #ifdef TARGET_NR_setgid32
11579 case TARGET_NR_setgid32
:
11580 ret
= get_errno(sys_setgid(arg1
));
11583 #ifdef TARGET_NR_setfsuid32
11584 case TARGET_NR_setfsuid32
:
11585 ret
= get_errno(setfsuid(arg1
));
11588 #ifdef TARGET_NR_setfsgid32
11589 case TARGET_NR_setfsgid32
:
11590 ret
= get_errno(setfsgid(arg1
));
11594 case TARGET_NR_pivot_root
:
11595 goto unimplemented
;
11596 #ifdef TARGET_NR_mincore
11597 case TARGET_NR_mincore
:
11600 ret
= -TARGET_ENOMEM
;
11601 a
= lock_user(VERIFY_READ
, arg1
, arg2
, 0);
11605 ret
= -TARGET_EFAULT
;
11606 p
= lock_user_string(arg3
);
11610 ret
= get_errno(mincore(a
, arg2
, p
));
11611 unlock_user(p
, arg3
, ret
);
11613 unlock_user(a
, arg1
, 0);
11617 #ifdef TARGET_NR_arm_fadvise64_64
11618 case TARGET_NR_arm_fadvise64_64
:
11619 /* arm_fadvise64_64 looks like fadvise64_64 but
11620 * with different argument order: fd, advice, offset, len
11621 * rather than the usual fd, offset, len, advice.
11622 * Note that offset and len are both 64-bit so appear as
11623 * pairs of 32-bit registers.
11625 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
11626 target_offset64(arg5
, arg6
), arg2
);
11627 ret
= -host_to_target_errno(ret
);
11631 #if TARGET_ABI_BITS == 32
11633 #ifdef TARGET_NR_fadvise64_64
11634 case TARGET_NR_fadvise64_64
:
11635 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11636 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11644 /* 6 args: fd, offset (high, low), len (high, low), advice */
11645 if (regpairs_aligned(cpu_env
, num
)) {
11646 /* offset is in (3,4), len in (5,6) and advice in 7 */
11654 ret
= -host_to_target_errno(posix_fadvise(arg1
,
11655 target_offset64(arg2
, arg3
),
11656 target_offset64(arg4
, arg5
),
11661 #ifdef TARGET_NR_fadvise64
11662 case TARGET_NR_fadvise64
:
11663 /* 5 args: fd, offset (high, low), len, advice */
11664 if (regpairs_aligned(cpu_env
, num
)) {
11665 /* offset is in (3,4), len in 5 and advice in 6 */
11671 ret
= -host_to_target_errno(posix_fadvise(arg1
,
11672 target_offset64(arg2
, arg3
),
11677 #else /* not a 32-bit ABI */
11678 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11679 #ifdef TARGET_NR_fadvise64_64
11680 case TARGET_NR_fadvise64_64
:
11682 #ifdef TARGET_NR_fadvise64
11683 case TARGET_NR_fadvise64
:
11685 #ifdef TARGET_S390X
11687 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
11688 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
11689 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
11690 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
11694 ret
= -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
11697 #endif /* end of 64-bit ABI fadvise handling */
11699 #ifdef TARGET_NR_madvise
11700 case TARGET_NR_madvise
:
11701 /* A straight passthrough may not be safe because qemu sometimes
11702 turns private file-backed mappings into anonymous mappings.
11703 This will break MADV_DONTNEED.
11704 This is a hint, so ignoring and returning success is ok. */
11705 ret
= get_errno(0);
11708 #if TARGET_ABI_BITS == 32
11709 case TARGET_NR_fcntl64
:
11713 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
11714 to_flock64_fn
*copyto
= copy_to_user_flock64
;
11717 if (!((CPUARMState
*)cpu_env
)->eabi
) {
11718 copyfrom
= copy_from_user_oabi_flock64
;
11719 copyto
= copy_to_user_oabi_flock64
;
11723 cmd
= target_to_host_fcntl_cmd(arg2
);
11724 if (cmd
== -TARGET_EINVAL
) {
11730 case TARGET_F_GETLK64
:
11731 ret
= copyfrom(&fl
, arg3
);
11735 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
11737 ret
= copyto(arg3
, &fl
);
11741 case TARGET_F_SETLK64
:
11742 case TARGET_F_SETLKW64
:
11743 ret
= copyfrom(&fl
, arg3
);
11747 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11750 ret
= do_fcntl(arg1
, arg2
, arg3
);
11756 #ifdef TARGET_NR_cacheflush
11757 case TARGET_NR_cacheflush
:
11758 /* self-modifying code is handled automatically, so nothing needed */
11762 #ifdef TARGET_NR_security
11763 case TARGET_NR_security
:
11764 goto unimplemented
;
11766 #ifdef TARGET_NR_getpagesize
11767 case TARGET_NR_getpagesize
:
11768 ret
= TARGET_PAGE_SIZE
;
11771 case TARGET_NR_gettid
:
11772 ret
= get_errno(gettid());
11774 #ifdef TARGET_NR_readahead
11775 case TARGET_NR_readahead
:
11776 #if TARGET_ABI_BITS == 32
11777 if (regpairs_aligned(cpu_env
, num
)) {
11782 ret
= get_errno(readahead(arg1
, target_offset64(arg2
, arg3
) , arg4
));
11784 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
11789 #ifdef TARGET_NR_setxattr
11790 case TARGET_NR_listxattr
:
11791 case TARGET_NR_llistxattr
:
11795 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11797 ret
= -TARGET_EFAULT
;
11801 p
= lock_user_string(arg1
);
11803 if (num
== TARGET_NR_listxattr
) {
11804 ret
= get_errno(listxattr(p
, b
, arg3
));
11806 ret
= get_errno(llistxattr(p
, b
, arg3
));
11809 ret
= -TARGET_EFAULT
;
11811 unlock_user(p
, arg1
, 0);
11812 unlock_user(b
, arg2
, arg3
);
11815 case TARGET_NR_flistxattr
:
11819 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11821 ret
= -TARGET_EFAULT
;
11825 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
11826 unlock_user(b
, arg2
, arg3
);
11829 case TARGET_NR_setxattr
:
11830 case TARGET_NR_lsetxattr
:
11832 void *p
, *n
, *v
= 0;
11834 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11836 ret
= -TARGET_EFAULT
;
11840 p
= lock_user_string(arg1
);
11841 n
= lock_user_string(arg2
);
11843 if (num
== TARGET_NR_setxattr
) {
11844 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
11846 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
11849 ret
= -TARGET_EFAULT
;
11851 unlock_user(p
, arg1
, 0);
11852 unlock_user(n
, arg2
, 0);
11853 unlock_user(v
, arg3
, 0);
11856 case TARGET_NR_fsetxattr
:
11860 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11862 ret
= -TARGET_EFAULT
;
11866 n
= lock_user_string(arg2
);
11868 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
11870 ret
= -TARGET_EFAULT
;
11872 unlock_user(n
, arg2
, 0);
11873 unlock_user(v
, arg3
, 0);
11876 case TARGET_NR_getxattr
:
11877 case TARGET_NR_lgetxattr
:
11879 void *p
, *n
, *v
= 0;
11881 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11883 ret
= -TARGET_EFAULT
;
11887 p
= lock_user_string(arg1
);
11888 n
= lock_user_string(arg2
);
11890 if (num
== TARGET_NR_getxattr
) {
11891 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
11893 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
11896 ret
= -TARGET_EFAULT
;
11898 unlock_user(p
, arg1
, 0);
11899 unlock_user(n
, arg2
, 0);
11900 unlock_user(v
, arg3
, arg4
);
11903 case TARGET_NR_fgetxattr
:
11907 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11909 ret
= -TARGET_EFAULT
;
11913 n
= lock_user_string(arg2
);
11915 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
11917 ret
= -TARGET_EFAULT
;
11919 unlock_user(n
, arg2
, 0);
11920 unlock_user(v
, arg3
, arg4
);
11923 case TARGET_NR_removexattr
:
11924 case TARGET_NR_lremovexattr
:
11927 p
= lock_user_string(arg1
);
11928 n
= lock_user_string(arg2
);
11930 if (num
== TARGET_NR_removexattr
) {
11931 ret
= get_errno(removexattr(p
, n
));
11933 ret
= get_errno(lremovexattr(p
, n
));
11936 ret
= -TARGET_EFAULT
;
11938 unlock_user(p
, arg1
, 0);
11939 unlock_user(n
, arg2
, 0);
11942 case TARGET_NR_fremovexattr
:
11945 n
= lock_user_string(arg2
);
11947 ret
= get_errno(fremovexattr(arg1
, n
));
11949 ret
= -TARGET_EFAULT
;
11951 unlock_user(n
, arg2
, 0);
11955 #endif /* CONFIG_ATTR */
11956 #ifdef TARGET_NR_set_thread_area
11957 case TARGET_NR_set_thread_area
:
11958 #if defined(TARGET_MIPS)
11959 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
11962 #elif defined(TARGET_CRIS)
11964 ret
= -TARGET_EINVAL
;
11966 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
11970 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11971 ret
= do_set_thread_area(cpu_env
, arg1
);
11973 #elif defined(TARGET_M68K)
11975 TaskState
*ts
= cpu
->opaque
;
11976 ts
->tp_value
= arg1
;
11981 goto unimplemented_nowarn
;
11984 #ifdef TARGET_NR_get_thread_area
11985 case TARGET_NR_get_thread_area
:
11986 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11987 ret
= do_get_thread_area(cpu_env
, arg1
);
11989 #elif defined(TARGET_M68K)
11991 TaskState
*ts
= cpu
->opaque
;
11992 ret
= ts
->tp_value
;
11996 goto unimplemented_nowarn
;
11999 #ifdef TARGET_NR_getdomainname
12000 case TARGET_NR_getdomainname
:
12001 goto unimplemented_nowarn
;
12004 #ifdef TARGET_NR_clock_settime
12005 case TARGET_NR_clock_settime
:
12007 struct timespec ts
;
12009 ret
= target_to_host_timespec(&ts
, arg2
);
12010 if (!is_error(ret
)) {
12011 ret
= get_errno(clock_settime(arg1
, &ts
));
12016 #ifdef TARGET_NR_clock_gettime
12017 case TARGET_NR_clock_gettime
:
12019 struct timespec ts
;
12020 ret
= get_errno(clock_gettime(arg1
, &ts
));
12021 if (!is_error(ret
)) {
12022 ret
= host_to_target_timespec(arg2
, &ts
);
12027 #ifdef TARGET_NR_clock_getres
12028 case TARGET_NR_clock_getres
:
12030 struct timespec ts
;
12031 ret
= get_errno(clock_getres(arg1
, &ts
));
12032 if (!is_error(ret
)) {
12033 host_to_target_timespec(arg2
, &ts
);
12038 #ifdef TARGET_NR_clock_nanosleep
12039 case TARGET_NR_clock_nanosleep
:
12041 struct timespec ts
;
12042 target_to_host_timespec(&ts
, arg3
);
12043 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
12044 &ts
, arg4
? &ts
: NULL
));
12046 host_to_target_timespec(arg4
, &ts
);
12048 #if defined(TARGET_PPC)
12049 /* clock_nanosleep is odd in that it returns positive errno values.
12050 * On PPC, CR0 bit 3 should be set in such a situation. */
12051 if (ret
&& ret
!= -TARGET_ERESTARTSYS
) {
12052 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
12059 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12060 case TARGET_NR_set_tid_address
:
12061 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
12065 case TARGET_NR_tkill
:
12066 ret
= get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
12069 case TARGET_NR_tgkill
:
12070 ret
= get_errno(safe_tgkill((int)arg1
, (int)arg2
,
12071 target_to_host_signal(arg3
)));
12074 #ifdef TARGET_NR_set_robust_list
12075 case TARGET_NR_set_robust_list
:
12076 case TARGET_NR_get_robust_list
:
12077 /* The ABI for supporting robust futexes has userspace pass
12078 * the kernel a pointer to a linked list which is updated by
12079 * userspace after the syscall; the list is walked by the kernel
12080 * when the thread exits. Since the linked list in QEMU guest
12081 * memory isn't a valid linked list for the host and we have
12082 * no way to reliably intercept the thread-death event, we can't
12083 * support these. Silently return ENOSYS so that guest userspace
12084 * falls back to a non-robust futex implementation (which should
12085 * be OK except in the corner case of the guest crashing while
12086 * holding a mutex that is shared with another process via
12089 goto unimplemented_nowarn
;
12092 #if defined(TARGET_NR_utimensat)
12093 case TARGET_NR_utimensat
:
12095 struct timespec
*tsp
, ts
[2];
12099 target_to_host_timespec(ts
, arg3
);
12100 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
12104 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
12106 if (!(p
= lock_user_string(arg2
))) {
12107 ret
= -TARGET_EFAULT
;
12110 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
12111 unlock_user(p
, arg2
, 0);
12116 case TARGET_NR_futex
:
12117 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12119 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12120 case TARGET_NR_inotify_init
:
12121 ret
= get_errno(sys_inotify_init());
12123 fd_trans_register(ret
, &target_inotify_trans
);
12127 #ifdef CONFIG_INOTIFY1
12128 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12129 case TARGET_NR_inotify_init1
:
12130 ret
= get_errno(sys_inotify_init1(target_to_host_bitmask(arg1
,
12131 fcntl_flags_tbl
)));
12133 fd_trans_register(ret
, &target_inotify_trans
);
12138 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12139 case TARGET_NR_inotify_add_watch
:
12140 p
= lock_user_string(arg2
);
12141 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
12142 unlock_user(p
, arg2
, 0);
12145 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12146 case TARGET_NR_inotify_rm_watch
:
12147 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
12151 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12152 case TARGET_NR_mq_open
:
12154 struct mq_attr posix_mq_attr
;
12155 struct mq_attr
*pposix_mq_attr
;
12158 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
12159 pposix_mq_attr
= NULL
;
12161 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
12164 pposix_mq_attr
= &posix_mq_attr
;
12166 p
= lock_user_string(arg1
- 1);
12170 ret
= get_errno(mq_open(p
, host_flags
, arg3
, pposix_mq_attr
));
12171 unlock_user (p
, arg1
, 0);
12175 case TARGET_NR_mq_unlink
:
12176 p
= lock_user_string(arg1
- 1);
12178 ret
= -TARGET_EFAULT
;
12181 ret
= get_errno(mq_unlink(p
));
12182 unlock_user (p
, arg1
, 0);
12185 case TARGET_NR_mq_timedsend
:
12187 struct timespec ts
;
12189 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
12191 target_to_host_timespec(&ts
, arg5
);
12192 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
12193 host_to_target_timespec(arg5
, &ts
);
12195 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
12197 unlock_user (p
, arg2
, arg3
);
12201 case TARGET_NR_mq_timedreceive
:
12203 struct timespec ts
;
12206 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
12208 target_to_host_timespec(&ts
, arg5
);
12209 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12211 host_to_target_timespec(arg5
, &ts
);
12213 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12216 unlock_user (p
, arg2
, arg3
);
12218 put_user_u32(prio
, arg4
);
12222 /* Not implemented for now... */
12223 /* case TARGET_NR_mq_notify: */
12226 case TARGET_NR_mq_getsetattr
:
12228 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
12231 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
12232 ret
= get_errno(mq_setattr(arg1
, &posix_mq_attr_in
,
12233 &posix_mq_attr_out
));
12234 } else if (arg3
!= 0) {
12235 ret
= get_errno(mq_getattr(arg1
, &posix_mq_attr_out
));
12237 if (ret
== 0 && arg3
!= 0) {
12238 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
12244 #ifdef CONFIG_SPLICE
12245 #ifdef TARGET_NR_tee
12246 case TARGET_NR_tee
:
12248 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
12252 #ifdef TARGET_NR_splice
12253 case TARGET_NR_splice
:
12255 loff_t loff_in
, loff_out
;
12256 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
12258 if (get_user_u64(loff_in
, arg2
)) {
12261 ploff_in
= &loff_in
;
12264 if (get_user_u64(loff_out
, arg4
)) {
12267 ploff_out
= &loff_out
;
12269 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
12271 if (put_user_u64(loff_in
, arg2
)) {
12276 if (put_user_u64(loff_out
, arg4
)) {
12283 #ifdef TARGET_NR_vmsplice
12284 case TARGET_NR_vmsplice
:
12286 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
12288 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
12289 unlock_iovec(vec
, arg2
, arg3
, 0);
12291 ret
= -host_to_target_errno(errno
);
12296 #endif /* CONFIG_SPLICE */
12297 #ifdef CONFIG_EVENTFD
12298 #if defined(TARGET_NR_eventfd)
12299 case TARGET_NR_eventfd
:
12300 ret
= get_errno(eventfd(arg1
, 0));
12302 fd_trans_register(ret
, &target_eventfd_trans
);
12306 #if defined(TARGET_NR_eventfd2)
12307 case TARGET_NR_eventfd2
:
12309 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
12310 if (arg2
& TARGET_O_NONBLOCK
) {
12311 host_flags
|= O_NONBLOCK
;
12313 if (arg2
& TARGET_O_CLOEXEC
) {
12314 host_flags
|= O_CLOEXEC
;
12316 ret
= get_errno(eventfd(arg1
, host_flags
));
12318 fd_trans_register(ret
, &target_eventfd_trans
);
12323 #endif /* CONFIG_EVENTFD */
12324 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12325 case TARGET_NR_fallocate
:
12326 #if TARGET_ABI_BITS == 32
12327 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
12328 target_offset64(arg5
, arg6
)));
12330 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
12334 #if defined(CONFIG_SYNC_FILE_RANGE)
12335 #if defined(TARGET_NR_sync_file_range)
12336 case TARGET_NR_sync_file_range
:
12337 #if TARGET_ABI_BITS == 32
12338 #if defined(TARGET_MIPS)
12339 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12340 target_offset64(arg5
, arg6
), arg7
));
12342 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
12343 target_offset64(arg4
, arg5
), arg6
));
12344 #endif /* !TARGET_MIPS */
12346 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
12350 #if defined(TARGET_NR_sync_file_range2)
12351 case TARGET_NR_sync_file_range2
:
12352 /* This is like sync_file_range but the arguments are reordered */
12353 #if TARGET_ABI_BITS == 32
12354 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12355 target_offset64(arg5
, arg6
), arg2
));
12357 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
12362 #if defined(TARGET_NR_signalfd4)
12363 case TARGET_NR_signalfd4
:
12364 ret
= do_signalfd4(arg1
, arg2
, arg4
);
12367 #if defined(TARGET_NR_signalfd)
12368 case TARGET_NR_signalfd
:
12369 ret
= do_signalfd4(arg1
, arg2
, 0);
12372 #if defined(CONFIG_EPOLL)
12373 #if defined(TARGET_NR_epoll_create)
12374 case TARGET_NR_epoll_create
:
12375 ret
= get_errno(epoll_create(arg1
));
12378 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12379 case TARGET_NR_epoll_create1
:
12380 ret
= get_errno(epoll_create1(arg1
));
12383 #if defined(TARGET_NR_epoll_ctl)
12384 case TARGET_NR_epoll_ctl
:
12386 struct epoll_event ep
;
12387 struct epoll_event
*epp
= 0;
12389 struct target_epoll_event
*target_ep
;
12390 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
12393 ep
.events
= tswap32(target_ep
->events
);
12394 /* The epoll_data_t union is just opaque data to the kernel,
12395 * so we transfer all 64 bits across and need not worry what
12396 * actual data type it is.
12398 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
12399 unlock_user_struct(target_ep
, arg4
, 0);
12402 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
12407 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12408 #if defined(TARGET_NR_epoll_wait)
12409 case TARGET_NR_epoll_wait
:
12411 #if defined(TARGET_NR_epoll_pwait)
12412 case TARGET_NR_epoll_pwait
:
12415 struct target_epoll_event
*target_ep
;
12416 struct epoll_event
*ep
;
12418 int maxevents
= arg3
;
12419 int timeout
= arg4
;
12421 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
12422 ret
= -TARGET_EINVAL
;
12426 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
12427 maxevents
* sizeof(struct target_epoll_event
), 1);
12432 ep
= g_try_new(struct epoll_event
, maxevents
);
12434 unlock_user(target_ep
, arg2
, 0);
12435 ret
= -TARGET_ENOMEM
;
12440 #if defined(TARGET_NR_epoll_pwait)
12441 case TARGET_NR_epoll_pwait
:
12443 target_sigset_t
*target_set
;
12444 sigset_t _set
, *set
= &_set
;
12447 if (arg6
!= sizeof(target_sigset_t
)) {
12448 ret
= -TARGET_EINVAL
;
12452 target_set
= lock_user(VERIFY_READ
, arg5
,
12453 sizeof(target_sigset_t
), 1);
12455 ret
= -TARGET_EFAULT
;
12458 target_to_host_sigset(set
, target_set
);
12459 unlock_user(target_set
, arg5
, 0);
12464 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12465 set
, SIGSET_T_SIZE
));
12469 #if defined(TARGET_NR_epoll_wait)
12470 case TARGET_NR_epoll_wait
:
12471 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12476 ret
= -TARGET_ENOSYS
;
12478 if (!is_error(ret
)) {
12480 for (i
= 0; i
< ret
; i
++) {
12481 target_ep
[i
].events
= tswap32(ep
[i
].events
);
12482 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
12484 unlock_user(target_ep
, arg2
,
12485 ret
* sizeof(struct target_epoll_event
));
12487 unlock_user(target_ep
, arg2
, 0);
12494 #ifdef TARGET_NR_prlimit64
12495 case TARGET_NR_prlimit64
:
12497 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12498 struct target_rlimit64
*target_rnew
, *target_rold
;
12499 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
12500 int resource
= target_to_host_resource(arg2
);
12502 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
12505 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
12506 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
12507 unlock_user_struct(target_rnew
, arg3
, 0);
12511 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
12512 if (!is_error(ret
) && arg4
) {
12513 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
12516 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
12517 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
12518 unlock_user_struct(target_rold
, arg4
, 1);
12523 #ifdef TARGET_NR_gethostname
12524 case TARGET_NR_gethostname
:
12526 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
12528 ret
= get_errno(gethostname(name
, arg2
));
12529 unlock_user(name
, arg1
, arg2
);
12531 ret
= -TARGET_EFAULT
;
12536 #ifdef TARGET_NR_atomic_cmpxchg_32
12537 case TARGET_NR_atomic_cmpxchg_32
:
12539 /* should use start_exclusive from main.c */
12540 abi_ulong mem_value
;
12541 if (get_user_u32(mem_value
, arg6
)) {
12542 target_siginfo_t info
;
12543 info
.si_signo
= SIGSEGV
;
12545 info
.si_code
= TARGET_SEGV_MAPERR
;
12546 info
._sifields
._sigfault
._addr
= arg6
;
12547 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
12548 QEMU_SI_FAULT
, &info
);
12552 if (mem_value
== arg2
)
12553 put_user_u32(arg1
, arg6
);
12558 #ifdef TARGET_NR_atomic_barrier
12559 case TARGET_NR_atomic_barrier
:
12561 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
12567 #ifdef TARGET_NR_timer_create
12568 case TARGET_NR_timer_create
:
12570 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12572 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
12575 int timer_index
= next_free_host_timer();
12577 if (timer_index
< 0) {
12578 ret
= -TARGET_EAGAIN
;
12580 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
12583 phost_sevp
= &host_sevp
;
12584 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
12590 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
12594 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
12603 #ifdef TARGET_NR_timer_settime
12604 case TARGET_NR_timer_settime
:
12606 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12607 * struct itimerspec * old_value */
12608 target_timer_t timerid
= get_timer_id(arg1
);
12612 } else if (arg3
== 0) {
12613 ret
= -TARGET_EINVAL
;
12615 timer_t htimer
= g_posix_timers
[timerid
];
12616 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12618 if (target_to_host_itimerspec(&hspec_new
, arg3
)) {
12622 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12623 if (arg4
&& host_to_target_itimerspec(arg4
, &hspec_old
)) {
12631 #ifdef TARGET_NR_timer_gettime
12632 case TARGET_NR_timer_gettime
:
12634 /* args: timer_t timerid, struct itimerspec *curr_value */
12635 target_timer_t timerid
= get_timer_id(arg1
);
12639 } else if (!arg2
) {
12640 ret
= -TARGET_EFAULT
;
12642 timer_t htimer
= g_posix_timers
[timerid
];
12643 struct itimerspec hspec
;
12644 ret
= get_errno(timer_gettime(htimer
, &hspec
));
12646 if (host_to_target_itimerspec(arg2
, &hspec
)) {
12647 ret
= -TARGET_EFAULT
;
12654 #ifdef TARGET_NR_timer_getoverrun
12655 case TARGET_NR_timer_getoverrun
:
12657 /* args: timer_t timerid */
12658 target_timer_t timerid
= get_timer_id(arg1
);
12663 timer_t htimer
= g_posix_timers
[timerid
];
12664 ret
= get_errno(timer_getoverrun(htimer
));
12666 fd_trans_unregister(ret
);
12671 #ifdef TARGET_NR_timer_delete
12672 case TARGET_NR_timer_delete
:
12674 /* args: timer_t timerid */
12675 target_timer_t timerid
= get_timer_id(arg1
);
12680 timer_t htimer
= g_posix_timers
[timerid
];
12681 ret
= get_errno(timer_delete(htimer
));
12682 g_posix_timers
[timerid
] = 0;
12688 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12689 case TARGET_NR_timerfd_create
:
12690 ret
= get_errno(timerfd_create(arg1
,
12691 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
12695 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12696 case TARGET_NR_timerfd_gettime
:
12698 struct itimerspec its_curr
;
12700 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
12702 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
12709 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12710 case TARGET_NR_timerfd_settime
:
12712 struct itimerspec its_new
, its_old
, *p_new
;
12715 if (target_to_host_itimerspec(&its_new
, arg3
)) {
12723 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
12725 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
12732 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12733 case TARGET_NR_ioprio_get
:
12734 ret
= get_errno(ioprio_get(arg1
, arg2
));
12738 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12739 case TARGET_NR_ioprio_set
:
12740 ret
= get_errno(ioprio_set(arg1
, arg2
, arg3
));
12744 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12745 case TARGET_NR_setns
:
12746 ret
= get_errno(setns(arg1
, arg2
));
12749 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12750 case TARGET_NR_unshare
:
12751 ret
= get_errno(unshare(arg1
));
12754 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12755 case TARGET_NR_kcmp
:
12756 ret
= get_errno(kcmp(arg1
, arg2
, arg3
, arg4
, arg5
));
12762 gemu_log("qemu: Unsupported syscall: %d\n", num
);
12763 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
12764 unimplemented_nowarn
:
12766 ret
= -TARGET_ENOSYS
;
12771 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
12774 print_syscall_ret(num
, ret
);
12775 trace_guest_user_syscall_ret(cpu
, num
, ret
);
12778 ret
= -TARGET_EFAULT
;