4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
36 #include <linux/capability.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
43 #include <sys/times.h>
46 #include <sys/statfs.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #include "qemu-common.h"
60 #include <sys/timerfd.h>
66 #include <sys/eventfd.h>
69 #include <sys/epoll.h>
72 #include "qemu/xattr.h"
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
91 #include <linux/mtio.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
107 #include <linux/if_bridge.h>
109 #include <linux/audit.h>
110 #include "linux_loop.h"
116 #define CLONE_IO 0x80000000 /* Clone io context */
119 /* We can't directly call the host clone syscall, because this will
120 * badly confuse libc (breaking mutexes, for example). So we must
121 * divide clone flags into:
122 * * flag combinations that look like pthread_create()
123 * * flag combinations that look like fork()
124 * * flags we can implement within QEMU itself
125 * * flags we can't support and will return an error for
127 /* For thread creation, all these flags must be present; for
128 * fork, none must be present.
130 #define CLONE_THREAD_FLAGS \
131 (CLONE_VM | CLONE_FS | CLONE_FILES | \
132 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
134 /* These flags are ignored:
135 * CLONE_DETACHED is now ignored by the kernel;
136 * CLONE_IO is just an optimisation hint to the I/O scheduler
138 #define CLONE_IGNORED_FLAGS \
139 (CLONE_DETACHED | CLONE_IO)
141 /* Flags for fork which we can implement within QEMU itself */
142 #define CLONE_OPTIONAL_FORK_FLAGS \
143 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
144 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
146 /* Flags for thread creation which we can implement within QEMU itself */
147 #define CLONE_OPTIONAL_THREAD_FLAGS \
148 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
149 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
151 #define CLONE_INVALID_FORK_FLAGS \
152 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
154 #define CLONE_INVALID_THREAD_FLAGS \
155 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
156 CLONE_IGNORED_FLAGS))
158 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
159 * have almost all been allocated. We cannot support any of
160 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
161 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
162 * The checks against the invalid thread masks above will catch these.
163 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
167 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
168 * once. This exercises the codepaths for restart.
170 //#define DEBUG_ERESTARTSYS
172 //#include <linux/msdos_fs.h>
173 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
174 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
184 #define _syscall0(type,name) \
185 static type name (void) \
187 return syscall(__NR_##name); \
190 #define _syscall1(type,name,type1,arg1) \
191 static type name (type1 arg1) \
193 return syscall(__NR_##name, arg1); \
196 #define _syscall2(type,name,type1,arg1,type2,arg2) \
197 static type name (type1 arg1,type2 arg2) \
199 return syscall(__NR_##name, arg1, arg2); \
202 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
203 static type name (type1 arg1,type2 arg2,type3 arg3) \
205 return syscall(__NR_##name, arg1, arg2, arg3); \
208 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
209 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
211 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
214 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
216 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
218 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
222 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
223 type5,arg5,type6,arg6) \
224 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
227 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
231 #define __NR_sys_uname __NR_uname
232 #define __NR_sys_getcwd1 __NR_getcwd
233 #define __NR_sys_getdents __NR_getdents
234 #define __NR_sys_getdents64 __NR_getdents64
235 #define __NR_sys_getpriority __NR_getpriority
236 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
237 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
238 #define __NR_sys_syslog __NR_syslog
239 #define __NR_sys_futex __NR_futex
240 #define __NR_sys_inotify_init __NR_inotify_init
241 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
242 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
244 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
245 #define __NR__llseek __NR_lseek
248 /* Newer kernel ports have llseek() instead of _llseek() */
249 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
250 #define TARGET_NR__llseek TARGET_NR_llseek
/* Generated by _syscall0: a static int gettid(void) that invokes
 * syscall(__NR_gettid) directly.  NOTE(review): presumably present because
 * the host libc exposes no gettid() wrapper -- the #ifdef guard that would
 * confirm this is not visible in this extract. */
254 _syscall0(int, gettid
)
256 /* This is a replacement for the host gettid() and must return a host
258 static int gettid(void) {
263 /* For the 64-bit guest on 32-bit host case we must emulate
264 * getdents using getdents64, because otherwise the host
265 * might hand us back more dirent records than we can fit
266 * into the guest buffer after structure format conversion.
267 * Otherwise we emulate getdents with getdents if the host has it.
269 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
270 #define EMULATE_GETDENTS_WITH_GETDENTS
273 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
/* Host getdents(2) wrapper, generated by _syscall3:
 * sys_getdents(fd, dirp, count) -> syscall(__NR_sys_getdents, ...).
 * Only built when the host getdents can safely back the guest's getdents
 * (see EMULATE_GETDENTS_WITH_GETDENTS above). */
274 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
276 #if (defined(TARGET_NR_getdents) && \
277 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
278 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
/* Host getdents64(2) wrapper, generated by _syscall3:
 * sys_getdents64(fd, dirp, count).  Used either when the guest asks for
 * getdents64, or to emulate 32-bit-host getdents for a 64-bit guest
 * (see the comment above EMULATE_GETDENTS_WITH_GETDENTS). */
279 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
281 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
/* Host _llseek(2) wrapper, generated by _syscall5: seeks fd to the 64-bit
 * offset (hi << 32 | lo) and stores the resulting position in *res.
 * On newer kernel ports __NR__llseek was aliased to llseek/lseek above. */
282 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
283 loff_t
*, res
, uint
, wh
);
/* Host rt_sigqueueinfo(2) wrapper, generated by _syscall3:
 * queue signal sig with payload *uinfo to process pid. */
285 _syscall3(int, sys_rt_sigqueueinfo
, pid_t
, pid
, int, sig
, siginfo_t
*, uinfo
)
286 _syscall4(int, sys_rt_tgsigqueueinfo
, pid_t
, pid
, pid_t
, tid
, int, sig
,
/* Host syslog(2) (kernel log) wrapper, generated by _syscall3:
 * sys_syslog(type, bufp, len). */
288 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
289 #ifdef __NR_exit_group
/* Host exit_group(2) wrapper, generated by _syscall1: terminates all
 * threads in the process with the given exit code. */
290 _syscall1(int,exit_group
,int,error_code
)
292 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
/* Host set_tid_address(2) wrapper, generated by _syscall1: registers
 * tidptr as the clear_child_tid address for the calling thread. */
293 _syscall1(int,set_tid_address
,int *,tidptr
)
295 #if defined(TARGET_NR_futex) && defined(__NR_futex)
/* Host futex(2) wrapper, generated by _syscall6:
 * sys_futex(uaddr, op, val, timeout, uaddr2, val3).
 * The timeout argument doubles as val2 for some ops per the futex ABI --
 * callers are responsible for that aliasing. */
296 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
297 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
/* Host sched_getaffinity(2) wrapper, generated by _syscall3: writes the
 * CPU affinity mask of pid (len bytes) into user_mask_ptr. */
299 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
300 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
301 unsigned long *, user_mask_ptr
);
/* Host sched_setaffinity(2) wrapper, generated by _syscall3: sets the
 * CPU affinity mask of pid from the len-byte mask at user_mask_ptr. */
302 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
303 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
304 unsigned long *, user_mask_ptr
);
/* Host getcpu(2) wrapper, generated by _syscall3: stores the current CPU
 * and NUMA node for the calling thread; tcache is the (unused by modern
 * kernels) cache argument. */
305 #define __NR_sys_getcpu __NR_getcpu
306 _syscall3(int, sys_getcpu
, unsigned *, cpu
, unsigned *, node
, void *, tcache
);
307 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
/* Host capget(2) wrapper, generated by _syscall2: reads process
 * capabilities into *data as selected by *header. */
309 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
310 struct __user_cap_data_struct
*, data
);
/* Host capset(2) wrapper, generated by _syscall2: sets process
 * capabilities from *data as selected by *header. */
311 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
312 struct __user_cap_data_struct
*, data
);
313 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
/* Host ioprio_get(2) wrapper, generated by _syscall2:
 * ioprio_get(which, who). */
314 _syscall2(int, ioprio_get
, int, which
, int, who
)
316 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
/* Host ioprio_set(2) wrapper, generated by _syscall3:
 * ioprio_set(which, who, ioprio). */
317 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
319 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
/* Host getrandom(2) wrapper, generated by _syscall3: fills buf with up
 * to buflen random bytes subject to flags. */
320 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
323 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
/* Host kcmp(2) wrapper, generated by _syscall5: compares kernel resources
 * (of kind 'type', indexed by idx1/idx2) between processes pid1 and pid2. */
324 _syscall5(int, kcmp
, pid_t
, pid1
, pid_t
, pid2
, int, type
,
325 unsigned long, idx1
, unsigned long, idx2
)
328 static bitmask_transtbl fcntl_flags_tbl
[] = {
329 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
330 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
331 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
332 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
333 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
334 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
335 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
336 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
337 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
338 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
339 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
340 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
341 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
342 #if defined(O_DIRECT)
343 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
345 #if defined(O_NOATIME)
346 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
348 #if defined(O_CLOEXEC)
349 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
352 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
354 #if defined(O_TMPFILE)
355 { TARGET_O_TMPFILE
, TARGET_O_TMPFILE
, O_TMPFILE
, O_TMPFILE
},
357 /* Don't terminate the list prematurely on 64-bit host+guest. */
358 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
359 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
366 QEMU_IFLA_BR_FORWARD_DELAY
,
367 QEMU_IFLA_BR_HELLO_TIME
,
368 QEMU_IFLA_BR_MAX_AGE
,
369 QEMU_IFLA_BR_AGEING_TIME
,
370 QEMU_IFLA_BR_STP_STATE
,
371 QEMU_IFLA_BR_PRIORITY
,
372 QEMU_IFLA_BR_VLAN_FILTERING
,
373 QEMU_IFLA_BR_VLAN_PROTOCOL
,
374 QEMU_IFLA_BR_GROUP_FWD_MASK
,
375 QEMU_IFLA_BR_ROOT_ID
,
376 QEMU_IFLA_BR_BRIDGE_ID
,
377 QEMU_IFLA_BR_ROOT_PORT
,
378 QEMU_IFLA_BR_ROOT_PATH_COST
,
379 QEMU_IFLA_BR_TOPOLOGY_CHANGE
,
380 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED
,
381 QEMU_IFLA_BR_HELLO_TIMER
,
382 QEMU_IFLA_BR_TCN_TIMER
,
383 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER
,
384 QEMU_IFLA_BR_GC_TIMER
,
385 QEMU_IFLA_BR_GROUP_ADDR
,
386 QEMU_IFLA_BR_FDB_FLUSH
,
387 QEMU_IFLA_BR_MCAST_ROUTER
,
388 QEMU_IFLA_BR_MCAST_SNOOPING
,
389 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR
,
390 QEMU_IFLA_BR_MCAST_QUERIER
,
391 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY
,
392 QEMU_IFLA_BR_MCAST_HASH_MAX
,
393 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT
,
394 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT
,
395 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL
,
396 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL
,
397 QEMU_IFLA_BR_MCAST_QUERIER_INTVL
,
398 QEMU_IFLA_BR_MCAST_QUERY_INTVL
,
399 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
,
400 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL
,
401 QEMU_IFLA_BR_NF_CALL_IPTABLES
,
402 QEMU_IFLA_BR_NF_CALL_IP6TABLES
,
403 QEMU_IFLA_BR_NF_CALL_ARPTABLES
,
404 QEMU_IFLA_BR_VLAN_DEFAULT_PVID
,
406 QEMU_IFLA_BR_VLAN_STATS_ENABLED
,
407 QEMU_IFLA_BR_MCAST_STATS_ENABLED
,
408 QEMU_IFLA_BR_MCAST_IGMP_VERSION
,
409 QEMU_IFLA_BR_MCAST_MLD_VERSION
,
433 QEMU_IFLA_NET_NS_PID
,
436 QEMU_IFLA_VFINFO_LIST
,
444 QEMU_IFLA_PROMISCUITY
,
445 QEMU_IFLA_NUM_TX_QUEUES
,
446 QEMU_IFLA_NUM_RX_QUEUES
,
448 QEMU_IFLA_PHYS_PORT_ID
,
449 QEMU_IFLA_CARRIER_CHANGES
,
450 QEMU_IFLA_PHYS_SWITCH_ID
,
451 QEMU_IFLA_LINK_NETNSID
,
452 QEMU_IFLA_PHYS_PORT_NAME
,
453 QEMU_IFLA_PROTO_DOWN
,
454 QEMU_IFLA_GSO_MAX_SEGS
,
455 QEMU_IFLA_GSO_MAX_SIZE
,
459 QEMU_IFLA_NEW_NETNSID
,
460 QEMU_IFLA_IF_NETNSID
,
461 QEMU_IFLA_CARRIER_UP_COUNT
,
462 QEMU_IFLA_CARRIER_DOWN_COUNT
,
463 QEMU_IFLA_NEW_IFINDEX
,
468 QEMU_IFLA_BRPORT_UNSPEC
,
469 QEMU_IFLA_BRPORT_STATE
,
470 QEMU_IFLA_BRPORT_PRIORITY
,
471 QEMU_IFLA_BRPORT_COST
,
472 QEMU_IFLA_BRPORT_MODE
,
473 QEMU_IFLA_BRPORT_GUARD
,
474 QEMU_IFLA_BRPORT_PROTECT
,
475 QEMU_IFLA_BRPORT_FAST_LEAVE
,
476 QEMU_IFLA_BRPORT_LEARNING
,
477 QEMU_IFLA_BRPORT_UNICAST_FLOOD
,
478 QEMU_IFLA_BRPORT_PROXYARP
,
479 QEMU_IFLA_BRPORT_LEARNING_SYNC
,
480 QEMU_IFLA_BRPORT_PROXYARP_WIFI
,
481 QEMU_IFLA_BRPORT_ROOT_ID
,
482 QEMU_IFLA_BRPORT_BRIDGE_ID
,
483 QEMU_IFLA_BRPORT_DESIGNATED_PORT
,
484 QEMU_IFLA_BRPORT_DESIGNATED_COST
,
487 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
,
488 QEMU_IFLA_BRPORT_CONFIG_PENDING
,
489 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER
,
490 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER
,
491 QEMU_IFLA_BRPORT_HOLD_TIMER
,
492 QEMU_IFLA_BRPORT_FLUSH
,
493 QEMU_IFLA_BRPORT_MULTICAST_ROUTER
,
494 QEMU_IFLA_BRPORT_PAD
,
495 QEMU_IFLA_BRPORT_MCAST_FLOOD
,
496 QEMU_IFLA_BRPORT_MCAST_TO_UCAST
,
497 QEMU_IFLA_BRPORT_VLAN_TUNNEL
,
498 QEMU_IFLA_BRPORT_BCAST_FLOOD
,
499 QEMU_IFLA_BRPORT_GROUP_FWD_MASK
,
500 QEMU_IFLA_BRPORT_NEIGH_SUPPRESS
,
501 QEMU___IFLA_BRPORT_MAX
505 QEMU_IFLA_INFO_UNSPEC
,
508 QEMU_IFLA_INFO_XSTATS
,
509 QEMU_IFLA_INFO_SLAVE_KIND
,
510 QEMU_IFLA_INFO_SLAVE_DATA
,
511 QEMU___IFLA_INFO_MAX
,
515 QEMU_IFLA_INET_UNSPEC
,
517 QEMU___IFLA_INET_MAX
,
521 QEMU_IFLA_INET6_UNSPEC
,
522 QEMU_IFLA_INET6_FLAGS
,
523 QEMU_IFLA_INET6_CONF
,
524 QEMU_IFLA_INET6_STATS
,
525 QEMU_IFLA_INET6_MCAST
,
526 QEMU_IFLA_INET6_CACHEINFO
,
527 QEMU_IFLA_INET6_ICMP6STATS
,
528 QEMU_IFLA_INET6_TOKEN
,
529 QEMU_IFLA_INET6_ADDR_GEN_MODE
,
530 QEMU___IFLA_INET6_MAX
534 QEMU_IFLA_XDP_UNSPEC
,
536 QEMU_IFLA_XDP_ATTACHED
,
538 QEMU_IFLA_XDP_PROG_ID
,
542 typedef abi_long (*TargetFdDataFunc
)(void *, size_t);
543 typedef abi_long (*TargetFdAddrFunc
)(void *, abi_ulong
, socklen_t
);
544 typedef struct TargetFdTrans
{
545 TargetFdDataFunc host_to_target_data
;
546 TargetFdDataFunc target_to_host_data
;
547 TargetFdAddrFunc target_to_host_addr
;
550 static TargetFdTrans
**target_fd_trans
;
552 static unsigned int target_fd_max
;
554 static TargetFdDataFunc
fd_trans_target_to_host_data(int fd
)
556 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
557 return target_fd_trans
[fd
]->target_to_host_data
;
562 static TargetFdDataFunc
fd_trans_host_to_target_data(int fd
)
564 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
565 return target_fd_trans
[fd
]->host_to_target_data
;
570 static TargetFdAddrFunc
fd_trans_target_to_host_addr(int fd
)
572 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
573 return target_fd_trans
[fd
]->target_to_host_addr
;
578 static void fd_trans_register(int fd
, TargetFdTrans
*trans
)
582 if (fd
>= target_fd_max
) {
583 oldmax
= target_fd_max
;
584 target_fd_max
= ((fd
>> 6) + 1) << 6; /* by slice of 64 entries */
585 target_fd_trans
= g_renew(TargetFdTrans
*,
586 target_fd_trans
, target_fd_max
);
587 memset((void *)(target_fd_trans
+ oldmax
), 0,
588 (target_fd_max
- oldmax
) * sizeof(TargetFdTrans
*));
590 target_fd_trans
[fd
] = trans
;
593 static void fd_trans_unregister(int fd
)
595 if (fd
>= 0 && fd
< target_fd_max
) {
596 target_fd_trans
[fd
] = NULL
;
600 static void fd_trans_dup(int oldfd
, int newfd
)
602 fd_trans_unregister(newfd
);
603 if (oldfd
< target_fd_max
&& target_fd_trans
[oldfd
]) {
604 fd_trans_register(newfd
, target_fd_trans
[oldfd
]);
608 static int sys_getcwd1(char *buf
, size_t size
)
610 if (getcwd(buf
, size
) == NULL
) {
611 /* getcwd() sets errno */
614 return strlen(buf
)+1;
617 #ifdef TARGET_NR_utimensat
618 #if defined(__NR_utimensat)
/* Host utimensat(2) wrapper, generated by _syscall4, used when the host
 * kernel provides __NR_utimensat (an ENOSYS fallback follows in the #else
 * branch below): sets timestamps tsp[2] on pathname relative to dirfd. */
619 #define __NR_sys_utimensat __NR_utimensat
620 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
621 const struct timespec
*,tsp
,int,flags
)
623 static int sys_utimensat(int dirfd
, const char *pathname
,
624 const struct timespec times
[2], int flags
)
630 #endif /* TARGET_NR_utimensat */
632 #ifdef TARGET_NR_renameat2
633 #if defined(__NR_renameat2)
/* Host renameat2(2) wrapper, generated by _syscall5, used when the host
 * kernel provides __NR_renameat2; otherwise the #else fallback below
 * degrades to plain renameat() (flags == 0 only). */
634 #define __NR_sys_renameat2 __NR_renameat2
635 _syscall5(int, sys_renameat2
, int, oldfd
, const char *, old
, int, newfd
,
636 const char *, new, unsigned int, flags
)
638 static int sys_renameat2(int oldfd
, const char *old
,
639 int newfd
, const char *new, int flags
)
642 return renameat(oldfd
, old
, newfd
, new);
648 #endif /* TARGET_NR_renameat2 */
650 #ifdef CONFIG_INOTIFY
651 #include <sys/inotify.h>
653 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
654 static int sys_inotify_init(void)
656 return (inotify_init());
659 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
660 static int sys_inotify_add_watch(int fd
,const char *pathname
, int32_t mask
)
662 return (inotify_add_watch(fd
, pathname
, mask
));
665 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
666 static int sys_inotify_rm_watch(int fd
, int32_t wd
)
668 return (inotify_rm_watch(fd
, wd
));
671 #ifdef CONFIG_INOTIFY1
672 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
673 static int sys_inotify_init1(int flags
)
675 return (inotify_init1(flags
));
680 /* Userspace can usually survive runtime without inotify */
681 #undef TARGET_NR_inotify_init
682 #undef TARGET_NR_inotify_init1
683 #undef TARGET_NR_inotify_add_watch
684 #undef TARGET_NR_inotify_rm_watch
685 #endif /* CONFIG_INOTIFY */
687 #if defined(TARGET_NR_prlimit64)
688 #ifndef __NR_prlimit64
689 # define __NR_prlimit64 -1
691 #define __NR_sys_prlimit64 __NR_prlimit64
692 /* The glibc rlimit structure may not be that used by the underlying syscall */
693 struct host_rlimit64
{
/* Host prlimit64(2) wrapper, generated by _syscall4.  Uses the local
 * struct host_rlimit64 rather than glibc's rlimit because, per the
 * comment above, the glibc layout may differ from the raw syscall ABI. */
697 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
698 const struct host_rlimit64
*, new_limit
,
699 struct host_rlimit64
*, old_limit
)
703 #if defined(TARGET_NR_timer_create)
704 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
705 static timer_t g_posix_timers
[32] = { 0, } ;
707 static inline int next_free_host_timer(void)
710 /* FIXME: Does finding the next free slot require a lock? */
711 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
712 if (g_posix_timers
[k
] == 0) {
713 g_posix_timers
[k
] = (timer_t
) 1;
721 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
723 static inline int regpairs_aligned(void *cpu_env
, int num
)
725 return ((((CPUARMState
*)cpu_env
)->eabi
) == 1) ;
727 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
728 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
729 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
730 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
731 * of registers which translates to the same as ARM/MIPS, because we start with
733 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
734 #elif defined(TARGET_SH4)
735 /* SH4 doesn't align register pairs, except for p{read,write}64 */
736 static inline int regpairs_aligned(void *cpu_env
, int num
)
739 case TARGET_NR_pread64
:
740 case TARGET_NR_pwrite64
:
747 #elif defined(TARGET_XTENSA)
748 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
750 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 0; }
753 #define ERRNO_TABLE_SIZE 1200
755 /* target_to_host_errno_table[] is initialized from
756 * host_to_target_errno_table[] in syscall_init(). */
757 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
761 * This list is the union of errno values overridden in asm-<arch>/errno.h
762 * minus the errnos that are not actually generic to all archs.
764 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
765 [EAGAIN
] = TARGET_EAGAIN
,
766 [EIDRM
] = TARGET_EIDRM
,
767 [ECHRNG
] = TARGET_ECHRNG
,
768 [EL2NSYNC
] = TARGET_EL2NSYNC
,
769 [EL3HLT
] = TARGET_EL3HLT
,
770 [EL3RST
] = TARGET_EL3RST
,
771 [ELNRNG
] = TARGET_ELNRNG
,
772 [EUNATCH
] = TARGET_EUNATCH
,
773 [ENOCSI
] = TARGET_ENOCSI
,
774 [EL2HLT
] = TARGET_EL2HLT
,
775 [EDEADLK
] = TARGET_EDEADLK
,
776 [ENOLCK
] = TARGET_ENOLCK
,
777 [EBADE
] = TARGET_EBADE
,
778 [EBADR
] = TARGET_EBADR
,
779 [EXFULL
] = TARGET_EXFULL
,
780 [ENOANO
] = TARGET_ENOANO
,
781 [EBADRQC
] = TARGET_EBADRQC
,
782 [EBADSLT
] = TARGET_EBADSLT
,
783 [EBFONT
] = TARGET_EBFONT
,
784 [ENOSTR
] = TARGET_ENOSTR
,
785 [ENODATA
] = TARGET_ENODATA
,
786 [ETIME
] = TARGET_ETIME
,
787 [ENOSR
] = TARGET_ENOSR
,
788 [ENONET
] = TARGET_ENONET
,
789 [ENOPKG
] = TARGET_ENOPKG
,
790 [EREMOTE
] = TARGET_EREMOTE
,
791 [ENOLINK
] = TARGET_ENOLINK
,
792 [EADV
] = TARGET_EADV
,
793 [ESRMNT
] = TARGET_ESRMNT
,
794 [ECOMM
] = TARGET_ECOMM
,
795 [EPROTO
] = TARGET_EPROTO
,
796 [EDOTDOT
] = TARGET_EDOTDOT
,
797 [EMULTIHOP
] = TARGET_EMULTIHOP
,
798 [EBADMSG
] = TARGET_EBADMSG
,
799 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
800 [EOVERFLOW
] = TARGET_EOVERFLOW
,
801 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
802 [EBADFD
] = TARGET_EBADFD
,
803 [EREMCHG
] = TARGET_EREMCHG
,
804 [ELIBACC
] = TARGET_ELIBACC
,
805 [ELIBBAD
] = TARGET_ELIBBAD
,
806 [ELIBSCN
] = TARGET_ELIBSCN
,
807 [ELIBMAX
] = TARGET_ELIBMAX
,
808 [ELIBEXEC
] = TARGET_ELIBEXEC
,
809 [EILSEQ
] = TARGET_EILSEQ
,
810 [ENOSYS
] = TARGET_ENOSYS
,
811 [ELOOP
] = TARGET_ELOOP
,
812 [ERESTART
] = TARGET_ERESTART
,
813 [ESTRPIPE
] = TARGET_ESTRPIPE
,
814 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
815 [EUSERS
] = TARGET_EUSERS
,
816 [ENOTSOCK
] = TARGET_ENOTSOCK
,
817 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
818 [EMSGSIZE
] = TARGET_EMSGSIZE
,
819 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
820 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
821 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
822 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
823 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
824 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
825 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
826 [EADDRINUSE
] = TARGET_EADDRINUSE
,
827 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
828 [ENETDOWN
] = TARGET_ENETDOWN
,
829 [ENETUNREACH
] = TARGET_ENETUNREACH
,
830 [ENETRESET
] = TARGET_ENETRESET
,
831 [ECONNABORTED
] = TARGET_ECONNABORTED
,
832 [ECONNRESET
] = TARGET_ECONNRESET
,
833 [ENOBUFS
] = TARGET_ENOBUFS
,
834 [EISCONN
] = TARGET_EISCONN
,
835 [ENOTCONN
] = TARGET_ENOTCONN
,
836 [EUCLEAN
] = TARGET_EUCLEAN
,
837 [ENOTNAM
] = TARGET_ENOTNAM
,
838 [ENAVAIL
] = TARGET_ENAVAIL
,
839 [EISNAM
] = TARGET_EISNAM
,
840 [EREMOTEIO
] = TARGET_EREMOTEIO
,
841 [EDQUOT
] = TARGET_EDQUOT
,
842 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
843 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
844 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
845 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
846 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
847 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
848 [EALREADY
] = TARGET_EALREADY
,
849 [EINPROGRESS
] = TARGET_EINPROGRESS
,
850 [ESTALE
] = TARGET_ESTALE
,
851 [ECANCELED
] = TARGET_ECANCELED
,
852 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
853 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
855 [ENOKEY
] = TARGET_ENOKEY
,
858 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
861 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
864 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
867 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
869 #ifdef ENOTRECOVERABLE
870 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
873 [ENOMSG
] = TARGET_ENOMSG
,
876 [ERFKILL
] = TARGET_ERFKILL
,
879 [EHWPOISON
] = TARGET_EHWPOISON
,
883 static inline int host_to_target_errno(int err
)
885 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
886 host_to_target_errno_table
[err
]) {
887 return host_to_target_errno_table
[err
];
892 static inline int target_to_host_errno(int err
)
894 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
895 target_to_host_errno_table
[err
]) {
896 return target_to_host_errno_table
[err
];
901 static inline abi_long
get_errno(abi_long ret
)
904 return -host_to_target_errno(errno
);
909 const char *target_strerror(int err
)
911 if (err
== TARGET_ERESTARTSYS
) {
912 return "To be restarted";
914 if (err
== TARGET_QEMU_ESIGRETURN
) {
915 return "Successful exit from sigreturn";
918 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
921 return strerror(target_to_host_errno(err
));
924 #define safe_syscall0(type, name) \
925 static type safe_##name(void) \
927 return safe_syscall(__NR_##name); \
930 #define safe_syscall1(type, name, type1, arg1) \
931 static type safe_##name(type1 arg1) \
933 return safe_syscall(__NR_##name, arg1); \
936 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
937 static type safe_##name(type1 arg1, type2 arg2) \
939 return safe_syscall(__NR_##name, arg1, arg2); \
942 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
943 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
945 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
948 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
950 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
952 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
955 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
956 type4, arg4, type5, arg5) \
957 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
960 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
963 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
964 type4, arg4, type5, arg5, type6, arg6) \
965 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
966 type5 arg5, type6 arg6) \
968 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
/* Restartable read(2)/write(2) wrappers, generated by safe_syscall3:
 * invoked via safe_syscall() so a guest signal arriving mid-call yields
 * -TARGET_ERESTARTSYS handling instead of confusing libc. */
971 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
972 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
/* Restartable openat(2) wrapper, generated by safe_syscall4:
 * safe_openat(dirfd, pathname, flags, mode). */
973 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
974 int, flags
, mode_t
, mode
)
975 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
976 struct rusage
*, rusage
)
977 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
978 int, options
, struct rusage
*, rusage
)
979 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
980 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
981 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
982 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
983 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
985 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
986 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
/* Restartable futex(2) wrapper, generated by safe_syscall6; same argument
 * layout as the sys_futex wrapper earlier in the file. */
988 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
989 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
990 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
991 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
992 safe_syscall2(int, tkill
, int, tid
, int, sig
)
993 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
994 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
995 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
996 safe_syscall5(ssize_t
, preadv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
997 unsigned long, pos_l
, unsigned long, pos_h
)
998 safe_syscall5(ssize_t
, pwritev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
999 unsigned long, pos_l
, unsigned long, pos_h
)
1000 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
1002 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
1003 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
1004 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
1005 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
1006 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
1007 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
1008 safe_syscall2(int, flock
, int, fd
, int, operation
)
1009 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
1010 const struct timespec
*, uts
, size_t, sigsetsize
)
1011 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
1013 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
1014 struct timespec
*, rem
)
1015 #ifdef TARGET_NR_clock_nanosleep
1016 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
1017 const struct timespec
*, req
, struct timespec
*, rem
)
1020 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
1022 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
1023 long, msgtype
, int, flags
)
1024 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
1025 unsigned, nsops
, const struct timespec
*, timeout
)
1027 /* This host kernel architecture uses a single ipc syscall; fake up
1028 * wrappers for the sub-operations to hide this implementation detail.
1029 * Annoyingly we can't include linux/ipc.h to get the constant definitions
1030 * for the call parameter because some structs in there conflict with the
1031 * sys/ipc.h ones. So we just define them here, and rely on them being
1032 * the same for all host architectures.
1034 #define Q_SEMTIMEDOP 4
1037 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
1039 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
1040 void *, ptr
, long, fifth
)
1041 static int safe_msgsnd(int msgid
, const void *msgp
, size_t sz
, int flags
)
1043 return safe_ipc(Q_IPCCALL(0, Q_MSGSND
), msgid
, sz
, flags
, (void *)msgp
, 0);
1045 static int safe_msgrcv(int msgid
, void *msgp
, size_t sz
, long type
, int flags
)
1047 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV
), msgid
, sz
, flags
, msgp
, type
);
1049 static int safe_semtimedop(int semid
, struct sembuf
*tsops
, unsigned nsops
,
1050 const struct timespec
*timeout
)
1052 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP
), semid
, nsops
, 0, tsops
,
1056 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1057 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
1058 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
1059 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
1060 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
1062 /* We do ioctl like this rather than via safe_syscall3 to preserve the
1063 * "third argument might be integer or pointer or not present" behaviour of
1064 * the libc function.
1066 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
1067 /* Similarly for fcntl. Note that callers must always:
1068 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
1069 * use the flock64 struct rather than unsuffixed flock
1070 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
1073 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
1075 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
1078 static inline int host_to_target_sock_type(int host_type
)
1082 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
1084 target_type
= TARGET_SOCK_DGRAM
;
1087 target_type
= TARGET_SOCK_STREAM
;
1090 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
1094 #if defined(SOCK_CLOEXEC)
1095 if (host_type
& SOCK_CLOEXEC
) {
1096 target_type
|= TARGET_SOCK_CLOEXEC
;
1100 #if defined(SOCK_NONBLOCK)
1101 if (host_type
& SOCK_NONBLOCK
) {
1102 target_type
|= TARGET_SOCK_NONBLOCK
;
1109 static abi_ulong target_brk
;
1110 static abi_ulong target_original_brk
;
1111 static abi_ulong brk_page
;
1113 void target_set_brk(abi_ulong new_brk
)
1115 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
1116 brk_page
= HOST_PAGE_ALIGN(target_brk
);
1119 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
1120 #define DEBUGF_BRK(message, args...)
1122 /* do_brk() must return target values and target errnos. */
1123 abi_long
do_brk(abi_ulong new_brk
)
1125 abi_long mapped_addr
;
1126 abi_ulong new_alloc_size
;
1128 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
1131 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
1134 if (new_brk
< target_original_brk
) {
1135 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
1140 /* If the new brk is less than the highest page reserved to the
1141 * target heap allocation, set it and we're almost done... */
1142 if (new_brk
<= brk_page
) {
1143 /* Heap contents are initialized to zero, as for anonymous
1145 if (new_brk
> target_brk
) {
1146 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
1148 target_brk
= new_brk
;
1149 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
1153 /* We need to allocate more memory after the brk... Note that
1154 * we don't use MAP_FIXED because that will map over the top of
1155 * any existing mapping (like the one with the host libc or qemu
1156 * itself); instead we treat "mapped but at wrong address" as
1157 * a failure and unmap again.
1159 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
1160 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
1161 PROT_READ
|PROT_WRITE
,
1162 MAP_ANON
|MAP_PRIVATE
, 0, 0));
1164 if (mapped_addr
== brk_page
) {
1165 /* Heap contents are initialized to zero, as for anonymous
1166 * mapped pages. Technically the new pages are already
1167 * initialized to zero since they *are* anonymous mapped
1168 * pages, however we have to take care with the contents that
1169 * come from the remaining part of the previous page: it may
1170 * contains garbage data due to a previous heap usage (grown
1171 * then shrunken). */
1172 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
1174 target_brk
= new_brk
;
1175 brk_page
= HOST_PAGE_ALIGN(target_brk
);
1176 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
1179 } else if (mapped_addr
!= -1) {
1180 /* Mapped but at wrong address, meaning there wasn't actually
1181 * enough space for this brk.
1183 target_munmap(mapped_addr
, new_alloc_size
);
1185 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
1188 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
1191 #if defined(TARGET_ALPHA)
1192 /* We (partially) emulate OSF/1 on Alpha, which requires we
1193 return a proper errno, not an unchanged brk value. */
1194 return -TARGET_ENOMEM
;
1196 /* For everything else, return the previous break. */
1200 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
1201 abi_ulong target_fds_addr
,
1205 abi_ulong b
, *target_fds
;
1207 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1208 if (!(target_fds
= lock_user(VERIFY_READ
,
1210 sizeof(abi_ulong
) * nw
,
1212 return -TARGET_EFAULT
;
1216 for (i
= 0; i
< nw
; i
++) {
1217 /* grab the abi_ulong */
1218 __get_user(b
, &target_fds
[i
]);
1219 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1220 /* check the bit inside the abi_ulong */
1227 unlock_user(target_fds
, target_fds_addr
, 0);
1232 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
1233 abi_ulong target_fds_addr
,
1236 if (target_fds_addr
) {
1237 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
1238 return -TARGET_EFAULT
;
1246 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
1252 abi_ulong
*target_fds
;
1254 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1255 if (!(target_fds
= lock_user(VERIFY_WRITE
,
1257 sizeof(abi_ulong
) * nw
,
1259 return -TARGET_EFAULT
;
1262 for (i
= 0; i
< nw
; i
++) {
1264 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1265 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
1268 __put_user(v
, &target_fds
[i
]);
1271 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
1276 #if defined(__alpha__)
1277 #define HOST_HZ 1024
1282 static inline abi_long
host_to_target_clock_t(long ticks
)
1284 #if HOST_HZ == TARGET_HZ
1287 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1291 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1292 const struct rusage
*rusage
)
1294 struct target_rusage
*target_rusage
;
1296 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1297 return -TARGET_EFAULT
;
1298 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1299 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1300 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1301 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1302 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1303 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1304 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1305 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1306 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1307 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1308 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1309 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1310 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1311 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1312 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1313 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1314 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1315 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1316 unlock_user_struct(target_rusage
, target_addr
, 1);
1321 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1323 abi_ulong target_rlim_swap
;
1326 target_rlim_swap
= tswapal(target_rlim
);
1327 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1328 return RLIM_INFINITY
;
1330 result
= target_rlim_swap
;
1331 if (target_rlim_swap
!= (rlim_t
)result
)
1332 return RLIM_INFINITY
;
1337 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1339 abi_ulong target_rlim_swap
;
1342 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1343 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1345 target_rlim_swap
= rlim
;
1346 result
= tswapal(target_rlim_swap
);
1351 static inline int target_to_host_resource(int code
)
1354 case TARGET_RLIMIT_AS
:
1356 case TARGET_RLIMIT_CORE
:
1358 case TARGET_RLIMIT_CPU
:
1360 case TARGET_RLIMIT_DATA
:
1362 case TARGET_RLIMIT_FSIZE
:
1363 return RLIMIT_FSIZE
;
1364 case TARGET_RLIMIT_LOCKS
:
1365 return RLIMIT_LOCKS
;
1366 case TARGET_RLIMIT_MEMLOCK
:
1367 return RLIMIT_MEMLOCK
;
1368 case TARGET_RLIMIT_MSGQUEUE
:
1369 return RLIMIT_MSGQUEUE
;
1370 case TARGET_RLIMIT_NICE
:
1372 case TARGET_RLIMIT_NOFILE
:
1373 return RLIMIT_NOFILE
;
1374 case TARGET_RLIMIT_NPROC
:
1375 return RLIMIT_NPROC
;
1376 case TARGET_RLIMIT_RSS
:
1378 case TARGET_RLIMIT_RTPRIO
:
1379 return RLIMIT_RTPRIO
;
1380 case TARGET_RLIMIT_SIGPENDING
:
1381 return RLIMIT_SIGPENDING
;
1382 case TARGET_RLIMIT_STACK
:
1383 return RLIMIT_STACK
;
1389 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1390 abi_ulong target_tv_addr
)
1392 struct target_timeval
*target_tv
;
1394 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
1395 return -TARGET_EFAULT
;
1397 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1398 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1400 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1405 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1406 const struct timeval
*tv
)
1408 struct target_timeval
*target_tv
;
1410 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
1411 return -TARGET_EFAULT
;
1413 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1414 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1416 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1421 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1422 abi_ulong target_tz_addr
)
1424 struct target_timezone
*target_tz
;
1426 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1427 return -TARGET_EFAULT
;
1430 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1431 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1433 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1438 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1441 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1442 abi_ulong target_mq_attr_addr
)
1444 struct target_mq_attr
*target_mq_attr
;
1446 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1447 target_mq_attr_addr
, 1))
1448 return -TARGET_EFAULT
;
1450 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1451 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1452 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1453 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1455 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1460 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1461 const struct mq_attr
*attr
)
1463 struct target_mq_attr
*target_mq_attr
;
1465 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1466 target_mq_attr_addr
, 0))
1467 return -TARGET_EFAULT
;
1469 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1470 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1471 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1472 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1474 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1480 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1481 /* do_select() must return target values and target errnos. */
1482 static abi_long
do_select(int n
,
1483 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1484 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1486 fd_set rfds
, wfds
, efds
;
1487 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1489 struct timespec ts
, *ts_ptr
;
1492 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1496 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1500 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1505 if (target_tv_addr
) {
1506 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1507 return -TARGET_EFAULT
;
1508 ts
.tv_sec
= tv
.tv_sec
;
1509 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1515 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1518 if (!is_error(ret
)) {
1519 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1520 return -TARGET_EFAULT
;
1521 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1522 return -TARGET_EFAULT
;
1523 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1524 return -TARGET_EFAULT
;
1526 if (target_tv_addr
) {
1527 tv
.tv_sec
= ts
.tv_sec
;
1528 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1529 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1530 return -TARGET_EFAULT
;
1538 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1539 static abi_long
do_old_select(abi_ulong arg1
)
1541 struct target_sel_arg_struct
*sel
;
1542 abi_ulong inp
, outp
, exp
, tvp
;
1545 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1)) {
1546 return -TARGET_EFAULT
;
1549 nsel
= tswapal(sel
->n
);
1550 inp
= tswapal(sel
->inp
);
1551 outp
= tswapal(sel
->outp
);
1552 exp
= tswapal(sel
->exp
);
1553 tvp
= tswapal(sel
->tvp
);
1555 unlock_user_struct(sel
, arg1
, 0);
1557 return do_select(nsel
, inp
, outp
, exp
, tvp
);
1562 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1565 return pipe2(host_pipe
, flags
);
1571 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1572 int flags
, int is_pipe2
)
1576 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1579 return get_errno(ret
);
1581 /* Several targets have special calling conventions for the original
1582 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1584 #if defined(TARGET_ALPHA)
1585 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1586 return host_pipe
[0];
1587 #elif defined(TARGET_MIPS)
1588 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1589 return host_pipe
[0];
1590 #elif defined(TARGET_SH4)
1591 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1592 return host_pipe
[0];
1593 #elif defined(TARGET_SPARC)
1594 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1595 return host_pipe
[0];
1599 if (put_user_s32(host_pipe
[0], pipedes
)
1600 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1601 return -TARGET_EFAULT
;
1602 return get_errno(ret
);
1605 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1606 abi_ulong target_addr
,
1609 struct target_ip_mreqn
*target_smreqn
;
1611 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1613 return -TARGET_EFAULT
;
1614 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1615 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1616 if (len
== sizeof(struct target_ip_mreqn
))
1617 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1618 unlock_user(target_smreqn
, target_addr
, 0);
1623 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1624 abi_ulong target_addr
,
1627 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1628 sa_family_t sa_family
;
1629 struct target_sockaddr
*target_saddr
;
1631 if (fd_trans_target_to_host_addr(fd
)) {
1632 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1635 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1637 return -TARGET_EFAULT
;
1639 sa_family
= tswap16(target_saddr
->sa_family
);
1641 /* Oops. The caller might send a incomplete sun_path; sun_path
1642 * must be terminated by \0 (see the manual page), but
1643 * unfortunately it is quite common to specify sockaddr_un
1644 * length as "strlen(x->sun_path)" while it should be
1645 * "strlen(...) + 1". We'll fix that here if needed.
1646 * Linux kernel has a similar feature.
1649 if (sa_family
== AF_UNIX
) {
1650 if (len
< unix_maxlen
&& len
> 0) {
1651 char *cp
= (char*)target_saddr
;
1653 if ( cp
[len
-1] && !cp
[len
] )
1656 if (len
> unix_maxlen
)
1660 memcpy(addr
, target_saddr
, len
);
1661 addr
->sa_family
= sa_family
;
1662 if (sa_family
== AF_NETLINK
) {
1663 struct sockaddr_nl
*nladdr
;
1665 nladdr
= (struct sockaddr_nl
*)addr
;
1666 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1667 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1668 } else if (sa_family
== AF_PACKET
) {
1669 struct target_sockaddr_ll
*lladdr
;
1671 lladdr
= (struct target_sockaddr_ll
*)addr
;
1672 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1673 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1675 unlock_user(target_saddr
, target_addr
, 0);
1680 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1681 struct sockaddr
*addr
,
1684 struct target_sockaddr
*target_saddr
;
1691 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1693 return -TARGET_EFAULT
;
1694 memcpy(target_saddr
, addr
, len
);
1695 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1696 sizeof(target_saddr
->sa_family
)) {
1697 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1699 if (addr
->sa_family
== AF_NETLINK
&& len
>= sizeof(struct sockaddr_nl
)) {
1700 struct sockaddr_nl
*target_nl
= (struct sockaddr_nl
*)target_saddr
;
1701 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1702 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1703 } else if (addr
->sa_family
== AF_PACKET
) {
1704 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1705 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1706 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1707 } else if (addr
->sa_family
== AF_INET6
&&
1708 len
>= sizeof(struct target_sockaddr_in6
)) {
1709 struct target_sockaddr_in6
*target_in6
=
1710 (struct target_sockaddr_in6
*)target_saddr
;
1711 target_in6
->sin6_scope_id
= tswap16(target_in6
->sin6_scope_id
);
1713 unlock_user(target_saddr
, target_addr
, len
);
1718 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1719 struct target_msghdr
*target_msgh
)
1721 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1722 abi_long msg_controllen
;
1723 abi_ulong target_cmsg_addr
;
1724 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1725 socklen_t space
= 0;
1727 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1728 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1730 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1731 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1732 target_cmsg_start
= target_cmsg
;
1734 return -TARGET_EFAULT
;
1736 while (cmsg
&& target_cmsg
) {
1737 void *data
= CMSG_DATA(cmsg
);
1738 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1740 int len
= tswapal(target_cmsg
->cmsg_len
)
1741 - sizeof(struct target_cmsghdr
);
1743 space
+= CMSG_SPACE(len
);
1744 if (space
> msgh
->msg_controllen
) {
1745 space
-= CMSG_SPACE(len
);
1746 /* This is a QEMU bug, since we allocated the payload
1747 * area ourselves (unlike overflow in host-to-target
1748 * conversion, which is just the guest giving us a buffer
1749 * that's too small). It can't happen for the payload types
1750 * we currently support; if it becomes an issue in future
1751 * we would need to improve our allocation strategy to
1752 * something more intelligent than "twice the size of the
1753 * target buffer we're reading from".
1755 gemu_log("Host cmsg overflow\n");
1759 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1760 cmsg
->cmsg_level
= SOL_SOCKET
;
1762 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1764 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1765 cmsg
->cmsg_len
= CMSG_LEN(len
);
1767 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1768 int *fd
= (int *)data
;
1769 int *target_fd
= (int *)target_data
;
1770 int i
, numfds
= len
/ sizeof(int);
1772 for (i
= 0; i
< numfds
; i
++) {
1773 __get_user(fd
[i
], target_fd
+ i
);
1775 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1776 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1777 struct ucred
*cred
= (struct ucred
*)data
;
1778 struct target_ucred
*target_cred
=
1779 (struct target_ucred
*)target_data
;
1781 __get_user(cred
->pid
, &target_cred
->pid
);
1782 __get_user(cred
->uid
, &target_cred
->uid
);
1783 __get_user(cred
->gid
, &target_cred
->gid
);
1785 gemu_log("Unsupported ancillary data: %d/%d\n",
1786 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1787 memcpy(data
, target_data
, len
);
1790 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1791 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1794 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1796 msgh
->msg_controllen
= space
;
1800 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1801 struct msghdr
*msgh
)
1803 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1804 abi_long msg_controllen
;
1805 abi_ulong target_cmsg_addr
;
1806 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1807 socklen_t space
= 0;
1809 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1810 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1812 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1813 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1814 target_cmsg_start
= target_cmsg
;
1816 return -TARGET_EFAULT
;
1818 while (cmsg
&& target_cmsg
) {
1819 void *data
= CMSG_DATA(cmsg
);
1820 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1822 int len
= cmsg
->cmsg_len
- sizeof(struct cmsghdr
);
1823 int tgt_len
, tgt_space
;
1825 /* We never copy a half-header but may copy half-data;
1826 * this is Linux's behaviour in put_cmsg(). Note that
1827 * truncation here is a guest problem (which we report
1828 * to the guest via the CTRUNC bit), unlike truncation
1829 * in target_to_host_cmsg, which is a QEMU bug.
1831 if (msg_controllen
< sizeof(struct target_cmsghdr
)) {
1832 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1836 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1837 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1839 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1841 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1843 /* Payload types which need a different size of payload on
1844 * the target must adjust tgt_len here.
1847 switch (cmsg
->cmsg_level
) {
1849 switch (cmsg
->cmsg_type
) {
1851 tgt_len
= sizeof(struct target_timeval
);
1861 if (msg_controllen
< TARGET_CMSG_LEN(tgt_len
)) {
1862 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1863 tgt_len
= msg_controllen
- sizeof(struct target_cmsghdr
);
1866 /* We must now copy-and-convert len bytes of payload
1867 * into tgt_len bytes of destination space. Bear in mind
1868 * that in both source and destination we may be dealing
1869 * with a truncated value!
1871 switch (cmsg
->cmsg_level
) {
1873 switch (cmsg
->cmsg_type
) {
1876 int *fd
= (int *)data
;
1877 int *target_fd
= (int *)target_data
;
1878 int i
, numfds
= tgt_len
/ sizeof(int);
1880 for (i
= 0; i
< numfds
; i
++) {
1881 __put_user(fd
[i
], target_fd
+ i
);
1887 struct timeval
*tv
= (struct timeval
*)data
;
1888 struct target_timeval
*target_tv
=
1889 (struct target_timeval
*)target_data
;
1891 if (len
!= sizeof(struct timeval
) ||
1892 tgt_len
!= sizeof(struct target_timeval
)) {
1896 /* copy struct timeval to target */
1897 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1898 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1901 case SCM_CREDENTIALS
:
1903 struct ucred
*cred
= (struct ucred
*)data
;
1904 struct target_ucred
*target_cred
=
1905 (struct target_ucred
*)target_data
;
1907 __put_user(cred
->pid
, &target_cred
->pid
);
1908 __put_user(cred
->uid
, &target_cred
->uid
);
1909 __put_user(cred
->gid
, &target_cred
->gid
);
1918 switch (cmsg
->cmsg_type
) {
1921 uint32_t *v
= (uint32_t *)data
;
1922 uint32_t *t_int
= (uint32_t *)target_data
;
1924 if (len
!= sizeof(uint32_t) ||
1925 tgt_len
!= sizeof(uint32_t)) {
1928 __put_user(*v
, t_int
);
1934 struct sock_extended_err ee
;
1935 struct sockaddr_in offender
;
1937 struct errhdr_t
*errh
= (struct errhdr_t
*)data
;
1938 struct errhdr_t
*target_errh
=
1939 (struct errhdr_t
*)target_data
;
1941 if (len
!= sizeof(struct errhdr_t
) ||
1942 tgt_len
!= sizeof(struct errhdr_t
)) {
1945 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1946 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1947 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1948 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1949 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1950 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1951 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1952 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1953 (void *) &errh
->offender
, sizeof(errh
->offender
));
1962 switch (cmsg
->cmsg_type
) {
1965 uint32_t *v
= (uint32_t *)data
;
1966 uint32_t *t_int
= (uint32_t *)target_data
;
1968 if (len
!= sizeof(uint32_t) ||
1969 tgt_len
!= sizeof(uint32_t)) {
1972 __put_user(*v
, t_int
);
1978 struct sock_extended_err ee
;
1979 struct sockaddr_in6 offender
;
1981 struct errhdr6_t
*errh
= (struct errhdr6_t
*)data
;
1982 struct errhdr6_t
*target_errh
=
1983 (struct errhdr6_t
*)target_data
;
1985 if (len
!= sizeof(struct errhdr6_t
) ||
1986 tgt_len
!= sizeof(struct errhdr6_t
)) {
1989 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1990 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1991 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1992 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1993 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1994 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1995 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1996 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1997 (void *) &errh
->offender
, sizeof(errh
->offender
));
2007 gemu_log("Unsupported ancillary data: %d/%d\n",
2008 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
2009 memcpy(target_data
, data
, MIN(len
, tgt_len
));
2010 if (tgt_len
> len
) {
2011 memset(target_data
+ len
, 0, tgt_len
- len
);
2015 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(tgt_len
));
2016 tgt_space
= TARGET_CMSG_SPACE(tgt_len
);
2017 if (msg_controllen
< tgt_space
) {
2018 tgt_space
= msg_controllen
;
2020 msg_controllen
-= tgt_space
;
2022 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
2023 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
2026 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
2028 target_msgh
->msg_controllen
= tswapal(space
);
2032 static void tswap_nlmsghdr(struct nlmsghdr
*nlh
)
2034 nlh
->nlmsg_len
= tswap32(nlh
->nlmsg_len
);
2035 nlh
->nlmsg_type
= tswap16(nlh
->nlmsg_type
);
2036 nlh
->nlmsg_flags
= tswap16(nlh
->nlmsg_flags
);
2037 nlh
->nlmsg_seq
= tswap32(nlh
->nlmsg_seq
);
2038 nlh
->nlmsg_pid
= tswap32(nlh
->nlmsg_pid
);
2041 static abi_long
host_to_target_for_each_nlmsg(struct nlmsghdr
*nlh
,
2043 abi_long (*host_to_target_nlmsg
)
2044 (struct nlmsghdr
*))
2049 while (len
> sizeof(struct nlmsghdr
)) {
2051 nlmsg_len
= nlh
->nlmsg_len
;
2052 if (nlmsg_len
< sizeof(struct nlmsghdr
) ||
2057 switch (nlh
->nlmsg_type
) {
2059 tswap_nlmsghdr(nlh
);
2065 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
2066 e
->error
= tswap32(e
->error
);
2067 tswap_nlmsghdr(&e
->msg
);
2068 tswap_nlmsghdr(nlh
);
2072 ret
= host_to_target_nlmsg(nlh
);
2074 tswap_nlmsghdr(nlh
);
2079 tswap_nlmsghdr(nlh
);
2080 len
-= NLMSG_ALIGN(nlmsg_len
);
2081 nlh
= (struct nlmsghdr
*)(((char*)nlh
) + NLMSG_ALIGN(nlmsg_len
));
2086 static abi_long
target_to_host_for_each_nlmsg(struct nlmsghdr
*nlh
,
2088 abi_long (*target_to_host_nlmsg
)
2089 (struct nlmsghdr
*))
2093 while (len
> sizeof(struct nlmsghdr
)) {
2094 if (tswap32(nlh
->nlmsg_len
) < sizeof(struct nlmsghdr
) ||
2095 tswap32(nlh
->nlmsg_len
) > len
) {
2098 tswap_nlmsghdr(nlh
);
2099 switch (nlh
->nlmsg_type
) {
2106 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
2107 e
->error
= tswap32(e
->error
);
2108 tswap_nlmsghdr(&e
->msg
);
2112 ret
= target_to_host_nlmsg(nlh
);
2117 len
-= NLMSG_ALIGN(nlh
->nlmsg_len
);
2118 nlh
= (struct nlmsghdr
*)(((char *)nlh
) + NLMSG_ALIGN(nlh
->nlmsg_len
));
2123 #ifdef CONFIG_RTNETLINK
2124 static abi_long
host_to_target_for_each_nlattr(struct nlattr
*nlattr
,
2125 size_t len
, void *context
,
2126 abi_long (*host_to_target_nlattr
)
2130 unsigned short nla_len
;
2133 while (len
> sizeof(struct nlattr
)) {
2134 nla_len
= nlattr
->nla_len
;
2135 if (nla_len
< sizeof(struct nlattr
) ||
2139 ret
= host_to_target_nlattr(nlattr
, context
);
2140 nlattr
->nla_len
= tswap16(nlattr
->nla_len
);
2141 nlattr
->nla_type
= tswap16(nlattr
->nla_type
);
2145 len
-= NLA_ALIGN(nla_len
);
2146 nlattr
= (struct nlattr
*)(((char *)nlattr
) + NLA_ALIGN(nla_len
));
2151 static abi_long
host_to_target_for_each_rtattr(struct rtattr
*rtattr
,
2153 abi_long (*host_to_target_rtattr
)
2156 unsigned short rta_len
;
2159 while (len
> sizeof(struct rtattr
)) {
2160 rta_len
= rtattr
->rta_len
;
2161 if (rta_len
< sizeof(struct rtattr
) ||
2165 ret
= host_to_target_rtattr(rtattr
);
2166 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
2167 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
2171 len
-= RTA_ALIGN(rta_len
);
2172 rtattr
= (struct rtattr
*)(((char *)rtattr
) + RTA_ALIGN(rta_len
));
2177 #define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
2179 static abi_long
host_to_target_data_bridge_nlattr(struct nlattr
*nlattr
,
2186 switch (nlattr
->nla_type
) {
2188 case QEMU_IFLA_BR_FDB_FLUSH
:
2191 case QEMU_IFLA_BR_GROUP_ADDR
:
2194 case QEMU_IFLA_BR_VLAN_FILTERING
:
2195 case QEMU_IFLA_BR_TOPOLOGY_CHANGE
:
2196 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED
:
2197 case QEMU_IFLA_BR_MCAST_ROUTER
:
2198 case QEMU_IFLA_BR_MCAST_SNOOPING
:
2199 case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR
:
2200 case QEMU_IFLA_BR_MCAST_QUERIER
:
2201 case QEMU_IFLA_BR_NF_CALL_IPTABLES
:
2202 case QEMU_IFLA_BR_NF_CALL_IP6TABLES
:
2203 case QEMU_IFLA_BR_NF_CALL_ARPTABLES
:
2204 case QEMU_IFLA_BR_VLAN_STATS_ENABLED
:
2205 case QEMU_IFLA_BR_MCAST_STATS_ENABLED
:
2206 case QEMU_IFLA_BR_MCAST_IGMP_VERSION
:
2207 case QEMU_IFLA_BR_MCAST_MLD_VERSION
:
2210 case QEMU_IFLA_BR_PRIORITY
:
2211 case QEMU_IFLA_BR_VLAN_PROTOCOL
:
2212 case QEMU_IFLA_BR_GROUP_FWD_MASK
:
2213 case QEMU_IFLA_BR_ROOT_PORT
:
2214 case QEMU_IFLA_BR_VLAN_DEFAULT_PVID
:
2215 u16
= NLA_DATA(nlattr
);
2216 *u16
= tswap16(*u16
);
2219 case QEMU_IFLA_BR_FORWARD_DELAY
:
2220 case QEMU_IFLA_BR_HELLO_TIME
:
2221 case QEMU_IFLA_BR_MAX_AGE
:
2222 case QEMU_IFLA_BR_AGEING_TIME
:
2223 case QEMU_IFLA_BR_STP_STATE
:
2224 case QEMU_IFLA_BR_ROOT_PATH_COST
:
2225 case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY
:
2226 case QEMU_IFLA_BR_MCAST_HASH_MAX
:
2227 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT
:
2228 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT
:
2229 u32
= NLA_DATA(nlattr
);
2230 *u32
= tswap32(*u32
);
2233 case QEMU_IFLA_BR_HELLO_TIMER
:
2234 case QEMU_IFLA_BR_TCN_TIMER
:
2235 case QEMU_IFLA_BR_GC_TIMER
:
2236 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER
:
2237 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL
:
2238 case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL
:
2239 case QEMU_IFLA_BR_MCAST_QUERIER_INTVL
:
2240 case QEMU_IFLA_BR_MCAST_QUERY_INTVL
:
2241 case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
:
2242 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL
:
2243 u64
= NLA_DATA(nlattr
);
2244 *u64
= tswap64(*u64
);
2246 /* ifla_bridge_id: uin8_t[] */
2247 case QEMU_IFLA_BR_ROOT_ID
:
2248 case QEMU_IFLA_BR_BRIDGE_ID
:
2251 gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr
->nla_type
);
2257 static abi_long
host_to_target_slave_data_bridge_nlattr(struct nlattr
*nlattr
,
2264 switch (nlattr
->nla_type
) {
2266 case QEMU_IFLA_BRPORT_STATE
:
2267 case QEMU_IFLA_BRPORT_MODE
:
2268 case QEMU_IFLA_BRPORT_GUARD
:
2269 case QEMU_IFLA_BRPORT_PROTECT
:
2270 case QEMU_IFLA_BRPORT_FAST_LEAVE
:
2271 case QEMU_IFLA_BRPORT_LEARNING
:
2272 case QEMU_IFLA_BRPORT_UNICAST_FLOOD
:
2273 case QEMU_IFLA_BRPORT_PROXYARP
:
2274 case QEMU_IFLA_BRPORT_LEARNING_SYNC
:
2275 case QEMU_IFLA_BRPORT_PROXYARP_WIFI
:
2276 case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
:
2277 case QEMU_IFLA_BRPORT_CONFIG_PENDING
:
2278 case QEMU_IFLA_BRPORT_MULTICAST_ROUTER
:
2279 case QEMU_IFLA_BRPORT_MCAST_FLOOD
:
2280 case QEMU_IFLA_BRPORT_MCAST_TO_UCAST
:
2281 case QEMU_IFLA_BRPORT_VLAN_TUNNEL
:
2282 case QEMU_IFLA_BRPORT_BCAST_FLOOD
:
2283 case QEMU_IFLA_BRPORT_NEIGH_SUPPRESS
:
2286 case QEMU_IFLA_BRPORT_PRIORITY
:
2287 case QEMU_IFLA_BRPORT_DESIGNATED_PORT
:
2288 case QEMU_IFLA_BRPORT_DESIGNATED_COST
:
2289 case QEMU_IFLA_BRPORT_ID
:
2290 case QEMU_IFLA_BRPORT_NO
:
2291 case QEMU_IFLA_BRPORT_GROUP_FWD_MASK
:
2292 u16
= NLA_DATA(nlattr
);
2293 *u16
= tswap16(*u16
);
2296 case QEMU_IFLA_BRPORT_COST
:
2297 u32
= NLA_DATA(nlattr
);
2298 *u32
= tswap32(*u32
);
2301 case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER
:
2302 case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER
:
2303 case QEMU_IFLA_BRPORT_HOLD_TIMER
:
2304 u64
= NLA_DATA(nlattr
);
2305 *u64
= tswap64(*u64
);
2307 /* ifla_bridge_id: uint8_t[] */
2308 case QEMU_IFLA_BRPORT_ROOT_ID
:
2309 case QEMU_IFLA_BRPORT_BRIDGE_ID
:
2312 gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr
->nla_type
);
2318 struct linkinfo_context
{
2325 static abi_long
host_to_target_data_linkinfo_nlattr(struct nlattr
*nlattr
,
2328 struct linkinfo_context
*li_context
= context
;
2330 switch (nlattr
->nla_type
) {
2332 case QEMU_IFLA_INFO_KIND
:
2333 li_context
->name
= NLA_DATA(nlattr
);
2334 li_context
->len
= nlattr
->nla_len
- NLA_HDRLEN
;
2336 case QEMU_IFLA_INFO_SLAVE_KIND
:
2337 li_context
->slave_name
= NLA_DATA(nlattr
);
2338 li_context
->slave_len
= nlattr
->nla_len
- NLA_HDRLEN
;
2341 case QEMU_IFLA_INFO_XSTATS
:
2342 /* FIXME: only used by CAN */
2345 case QEMU_IFLA_INFO_DATA
:
2346 if (strncmp(li_context
->name
, "bridge",
2347 li_context
->len
) == 0) {
2348 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
2351 host_to_target_data_bridge_nlattr
);
2353 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context
->name
);
2356 case QEMU_IFLA_INFO_SLAVE_DATA
:
2357 if (strncmp(li_context
->slave_name
, "bridge",
2358 li_context
->slave_len
) == 0) {
2359 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
2362 host_to_target_slave_data_bridge_nlattr
);
2364 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2365 li_context
->slave_name
);
2369 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr
->nla_type
);
2376 static abi_long
host_to_target_data_inet_nlattr(struct nlattr
*nlattr
,
2382 switch (nlattr
->nla_type
) {
2383 case QEMU_IFLA_INET_CONF
:
2384 u32
= NLA_DATA(nlattr
);
2385 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u32
);
2387 u32
[i
] = tswap32(u32
[i
]);
2391 gemu_log("Unknown host AF_INET type: %d\n", nlattr
->nla_type
);
2396 static abi_long
host_to_target_data_inet6_nlattr(struct nlattr
*nlattr
,
2401 struct ifla_cacheinfo
*ci
;
2404 switch (nlattr
->nla_type
) {
2406 case QEMU_IFLA_INET6_TOKEN
:
2409 case QEMU_IFLA_INET6_ADDR_GEN_MODE
:
2412 case QEMU_IFLA_INET6_FLAGS
:
2413 u32
= NLA_DATA(nlattr
);
2414 *u32
= tswap32(*u32
);
2417 case QEMU_IFLA_INET6_CONF
:
2418 u32
= NLA_DATA(nlattr
);
2419 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u32
);
2421 u32
[i
] = tswap32(u32
[i
]);
2424 /* ifla_cacheinfo */
2425 case QEMU_IFLA_INET6_CACHEINFO
:
2426 ci
= NLA_DATA(nlattr
);
2427 ci
->max_reasm_len
= tswap32(ci
->max_reasm_len
);
2428 ci
->tstamp
= tswap32(ci
->tstamp
);
2429 ci
->reachable_time
= tswap32(ci
->reachable_time
);
2430 ci
->retrans_time
= tswap32(ci
->retrans_time
);
2433 case QEMU_IFLA_INET6_STATS
:
2434 case QEMU_IFLA_INET6_ICMP6STATS
:
2435 u64
= NLA_DATA(nlattr
);
2436 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u64
);
2438 u64
[i
] = tswap64(u64
[i
]);
2442 gemu_log("Unknown host AF_INET6 type: %d\n", nlattr
->nla_type
);
2447 static abi_long
host_to_target_data_spec_nlattr(struct nlattr
*nlattr
,
2450 switch (nlattr
->nla_type
) {
2452 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2454 host_to_target_data_inet_nlattr
);
2456 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2458 host_to_target_data_inet6_nlattr
);
2460 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr
->nla_type
);
2466 static abi_long
host_to_target_data_xdp_nlattr(struct nlattr
*nlattr
,
2471 switch (nlattr
->nla_type
) {
2473 case QEMU_IFLA_XDP_ATTACHED
:
2476 case QEMU_IFLA_XDP_PROG_ID
:
2477 u32
= NLA_DATA(nlattr
);
2478 *u32
= tswap32(*u32
);
2481 gemu_log("Unknown host XDP type: %d\n", nlattr
->nla_type
);
2487 static abi_long
host_to_target_data_link_rtattr(struct rtattr
*rtattr
)
2490 struct rtnl_link_stats
*st
;
2491 struct rtnl_link_stats64
*st64
;
2492 struct rtnl_link_ifmap
*map
;
2493 struct linkinfo_context li_context
;
2495 switch (rtattr
->rta_type
) {
2497 case QEMU_IFLA_ADDRESS
:
2498 case QEMU_IFLA_BROADCAST
:
2500 case QEMU_IFLA_IFNAME
:
2501 case QEMU_IFLA_QDISC
:
2504 case QEMU_IFLA_OPERSTATE
:
2505 case QEMU_IFLA_LINKMODE
:
2506 case QEMU_IFLA_CARRIER
:
2507 case QEMU_IFLA_PROTO_DOWN
:
2511 case QEMU_IFLA_LINK
:
2512 case QEMU_IFLA_WEIGHT
:
2513 case QEMU_IFLA_TXQLEN
:
2514 case QEMU_IFLA_CARRIER_CHANGES
:
2515 case QEMU_IFLA_NUM_RX_QUEUES
:
2516 case QEMU_IFLA_NUM_TX_QUEUES
:
2517 case QEMU_IFLA_PROMISCUITY
:
2518 case QEMU_IFLA_EXT_MASK
:
2519 case QEMU_IFLA_LINK_NETNSID
:
2520 case QEMU_IFLA_GROUP
:
2521 case QEMU_IFLA_MASTER
:
2522 case QEMU_IFLA_NUM_VF
:
2523 case QEMU_IFLA_GSO_MAX_SEGS
:
2524 case QEMU_IFLA_GSO_MAX_SIZE
:
2525 case QEMU_IFLA_CARRIER_UP_COUNT
:
2526 case QEMU_IFLA_CARRIER_DOWN_COUNT
:
2527 u32
= RTA_DATA(rtattr
);
2528 *u32
= tswap32(*u32
);
2530 /* struct rtnl_link_stats */
2531 case QEMU_IFLA_STATS
:
2532 st
= RTA_DATA(rtattr
);
2533 st
->rx_packets
= tswap32(st
->rx_packets
);
2534 st
->tx_packets
= tswap32(st
->tx_packets
);
2535 st
->rx_bytes
= tswap32(st
->rx_bytes
);
2536 st
->tx_bytes
= tswap32(st
->tx_bytes
);
2537 st
->rx_errors
= tswap32(st
->rx_errors
);
2538 st
->tx_errors
= tswap32(st
->tx_errors
);
2539 st
->rx_dropped
= tswap32(st
->rx_dropped
);
2540 st
->tx_dropped
= tswap32(st
->tx_dropped
);
2541 st
->multicast
= tswap32(st
->multicast
);
2542 st
->collisions
= tswap32(st
->collisions
);
2544 /* detailed rx_errors: */
2545 st
->rx_length_errors
= tswap32(st
->rx_length_errors
);
2546 st
->rx_over_errors
= tswap32(st
->rx_over_errors
);
2547 st
->rx_crc_errors
= tswap32(st
->rx_crc_errors
);
2548 st
->rx_frame_errors
= tswap32(st
->rx_frame_errors
);
2549 st
->rx_fifo_errors
= tswap32(st
->rx_fifo_errors
);
2550 st
->rx_missed_errors
= tswap32(st
->rx_missed_errors
);
2552 /* detailed tx_errors */
2553 st
->tx_aborted_errors
= tswap32(st
->tx_aborted_errors
);
2554 st
->tx_carrier_errors
= tswap32(st
->tx_carrier_errors
);
2555 st
->tx_fifo_errors
= tswap32(st
->tx_fifo_errors
);
2556 st
->tx_heartbeat_errors
= tswap32(st
->tx_heartbeat_errors
);
2557 st
->tx_window_errors
= tswap32(st
->tx_window_errors
);
2560 st
->rx_compressed
= tswap32(st
->rx_compressed
);
2561 st
->tx_compressed
= tswap32(st
->tx_compressed
);
2563 /* struct rtnl_link_stats64 */
2564 case QEMU_IFLA_STATS64
:
2565 st64
= RTA_DATA(rtattr
);
2566 st64
->rx_packets
= tswap64(st64
->rx_packets
);
2567 st64
->tx_packets
= tswap64(st64
->tx_packets
);
2568 st64
->rx_bytes
= tswap64(st64
->rx_bytes
);
2569 st64
->tx_bytes
= tswap64(st64
->tx_bytes
);
2570 st64
->rx_errors
= tswap64(st64
->rx_errors
);
2571 st64
->tx_errors
= tswap64(st64
->tx_errors
);
2572 st64
->rx_dropped
= tswap64(st64
->rx_dropped
);
2573 st64
->tx_dropped
= tswap64(st64
->tx_dropped
);
2574 st64
->multicast
= tswap64(st64
->multicast
);
2575 st64
->collisions
= tswap64(st64
->collisions
);
2577 /* detailed rx_errors: */
2578 st64
->rx_length_errors
= tswap64(st64
->rx_length_errors
);
2579 st64
->rx_over_errors
= tswap64(st64
->rx_over_errors
);
2580 st64
->rx_crc_errors
= tswap64(st64
->rx_crc_errors
);
2581 st64
->rx_frame_errors
= tswap64(st64
->rx_frame_errors
);
2582 st64
->rx_fifo_errors
= tswap64(st64
->rx_fifo_errors
);
2583 st64
->rx_missed_errors
= tswap64(st64
->rx_missed_errors
);
2585 /* detailed tx_errors */
2586 st64
->tx_aborted_errors
= tswap64(st64
->tx_aborted_errors
);
2587 st64
->tx_carrier_errors
= tswap64(st64
->tx_carrier_errors
);
2588 st64
->tx_fifo_errors
= tswap64(st64
->tx_fifo_errors
);
2589 st64
->tx_heartbeat_errors
= tswap64(st64
->tx_heartbeat_errors
);
2590 st64
->tx_window_errors
= tswap64(st64
->tx_window_errors
);
2593 st64
->rx_compressed
= tswap64(st64
->rx_compressed
);
2594 st64
->tx_compressed
= tswap64(st64
->tx_compressed
);
2596 /* struct rtnl_link_ifmap */
2598 map
= RTA_DATA(rtattr
);
2599 map
->mem_start
= tswap64(map
->mem_start
);
2600 map
->mem_end
= tswap64(map
->mem_end
);
2601 map
->base_addr
= tswap64(map
->base_addr
);
2602 map
->irq
= tswap16(map
->irq
);
2605 case QEMU_IFLA_LINKINFO
:
2606 memset(&li_context
, 0, sizeof(li_context
));
2607 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2609 host_to_target_data_linkinfo_nlattr
);
2610 case QEMU_IFLA_AF_SPEC
:
2611 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2613 host_to_target_data_spec_nlattr
);
2615 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2617 host_to_target_data_xdp_nlattr
);
2619 gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr
->rta_type
);
2625 static abi_long
host_to_target_data_addr_rtattr(struct rtattr
*rtattr
)
2628 struct ifa_cacheinfo
*ci
;
2630 switch (rtattr
->rta_type
) {
2631 /* binary: depends on family type */
2641 u32
= RTA_DATA(rtattr
);
2642 *u32
= tswap32(*u32
);
2644 /* struct ifa_cacheinfo */
2646 ci
= RTA_DATA(rtattr
);
2647 ci
->ifa_prefered
= tswap32(ci
->ifa_prefered
);
2648 ci
->ifa_valid
= tswap32(ci
->ifa_valid
);
2649 ci
->cstamp
= tswap32(ci
->cstamp
);
2650 ci
->tstamp
= tswap32(ci
->tstamp
);
2653 gemu_log("Unknown host IFA type: %d\n", rtattr
->rta_type
);
2659 static abi_long
host_to_target_data_route_rtattr(struct rtattr
*rtattr
)
2662 switch (rtattr
->rta_type
) {
2663 /* binary: depends on family type */
2672 u32
= RTA_DATA(rtattr
);
2673 *u32
= tswap32(*u32
);
2676 gemu_log("Unknown host RTA type: %d\n", rtattr
->rta_type
);
2682 static abi_long
host_to_target_link_rtattr(struct rtattr
*rtattr
,
2683 uint32_t rtattr_len
)
2685 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2686 host_to_target_data_link_rtattr
);
2689 static abi_long
host_to_target_addr_rtattr(struct rtattr
*rtattr
,
2690 uint32_t rtattr_len
)
2692 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2693 host_to_target_data_addr_rtattr
);
2696 static abi_long
host_to_target_route_rtattr(struct rtattr
*rtattr
,
2697 uint32_t rtattr_len
)
2699 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2700 host_to_target_data_route_rtattr
);
2703 static abi_long
host_to_target_data_route(struct nlmsghdr
*nlh
)
2706 struct ifinfomsg
*ifi
;
2707 struct ifaddrmsg
*ifa
;
2710 nlmsg_len
= nlh
->nlmsg_len
;
2711 switch (nlh
->nlmsg_type
) {
2715 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2716 ifi
= NLMSG_DATA(nlh
);
2717 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2718 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2719 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2720 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2721 host_to_target_link_rtattr(IFLA_RTA(ifi
),
2722 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifi
)));
2728 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2729 ifa
= NLMSG_DATA(nlh
);
2730 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2731 host_to_target_addr_rtattr(IFA_RTA(ifa
),
2732 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifa
)));
2738 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2739 rtm
= NLMSG_DATA(nlh
);
2740 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2741 host_to_target_route_rtattr(RTM_RTA(rtm
),
2742 nlmsg_len
- NLMSG_LENGTH(sizeof(*rtm
)));
2746 return -TARGET_EINVAL
;
2751 static inline abi_long
host_to_target_nlmsg_route(struct nlmsghdr
*nlh
,
2754 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_route
);
2757 static abi_long
target_to_host_for_each_rtattr(struct rtattr
*rtattr
,
2759 abi_long (*target_to_host_rtattr
)
2764 while (len
>= sizeof(struct rtattr
)) {
2765 if (tswap16(rtattr
->rta_len
) < sizeof(struct rtattr
) ||
2766 tswap16(rtattr
->rta_len
) > len
) {
2769 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
2770 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
2771 ret
= target_to_host_rtattr(rtattr
);
2775 len
-= RTA_ALIGN(rtattr
->rta_len
);
2776 rtattr
= (struct rtattr
*)(((char *)rtattr
) +
2777 RTA_ALIGN(rtattr
->rta_len
));
2782 static abi_long
target_to_host_data_link_rtattr(struct rtattr
*rtattr
)
2784 switch (rtattr
->rta_type
) {
2786 gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr
->rta_type
);
2792 static abi_long
target_to_host_data_addr_rtattr(struct rtattr
*rtattr
)
2794 switch (rtattr
->rta_type
) {
2795 /* binary: depends on family type */
2800 gemu_log("Unknown target IFA type: %d\n", rtattr
->rta_type
);
2806 static abi_long
target_to_host_data_route_rtattr(struct rtattr
*rtattr
)
2809 switch (rtattr
->rta_type
) {
2810 /* binary: depends on family type */
2818 u32
= RTA_DATA(rtattr
);
2819 *u32
= tswap32(*u32
);
2822 gemu_log("Unknown target RTA type: %d\n", rtattr
->rta_type
);
2828 static void target_to_host_link_rtattr(struct rtattr
*rtattr
,
2829 uint32_t rtattr_len
)
2831 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2832 target_to_host_data_link_rtattr
);
2835 static void target_to_host_addr_rtattr(struct rtattr
*rtattr
,
2836 uint32_t rtattr_len
)
2838 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2839 target_to_host_data_addr_rtattr
);
2842 static void target_to_host_route_rtattr(struct rtattr
*rtattr
,
2843 uint32_t rtattr_len
)
2845 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2846 target_to_host_data_route_rtattr
);
2849 static abi_long
target_to_host_data_route(struct nlmsghdr
*nlh
)
2851 struct ifinfomsg
*ifi
;
2852 struct ifaddrmsg
*ifa
;
2855 switch (nlh
->nlmsg_type
) {
2860 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2861 ifi
= NLMSG_DATA(nlh
);
2862 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2863 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2864 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2865 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2866 target_to_host_link_rtattr(IFLA_RTA(ifi
), nlh
->nlmsg_len
-
2867 NLMSG_LENGTH(sizeof(*ifi
)));
2873 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2874 ifa
= NLMSG_DATA(nlh
);
2875 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2876 target_to_host_addr_rtattr(IFA_RTA(ifa
), nlh
->nlmsg_len
-
2877 NLMSG_LENGTH(sizeof(*ifa
)));
2884 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2885 rtm
= NLMSG_DATA(nlh
);
2886 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2887 target_to_host_route_rtattr(RTM_RTA(rtm
), nlh
->nlmsg_len
-
2888 NLMSG_LENGTH(sizeof(*rtm
)));
2892 return -TARGET_EOPNOTSUPP
;
2897 static abi_long
target_to_host_nlmsg_route(struct nlmsghdr
*nlh
, size_t len
)
2899 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_route
);
2901 #endif /* CONFIG_RTNETLINK */
2903 static abi_long
host_to_target_data_audit(struct nlmsghdr
*nlh
)
2905 switch (nlh
->nlmsg_type
) {
2907 gemu_log("Unknown host audit message type %d\n",
2909 return -TARGET_EINVAL
;
2914 static inline abi_long
host_to_target_nlmsg_audit(struct nlmsghdr
*nlh
,
2917 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_audit
);
2920 static abi_long
target_to_host_data_audit(struct nlmsghdr
*nlh
)
2922 switch (nlh
->nlmsg_type
) {
2924 case AUDIT_FIRST_USER_MSG
... AUDIT_LAST_USER_MSG
:
2925 case AUDIT_FIRST_USER_MSG2
... AUDIT_LAST_USER_MSG2
:
2928 gemu_log("Unknown target audit message type %d\n",
2930 return -TARGET_EINVAL
;
2936 static abi_long
target_to_host_nlmsg_audit(struct nlmsghdr
*nlh
, size_t len
)
2938 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_audit
);
2941 /* do_setsockopt() Must return target values and target errnos. */
2942 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
2943 abi_ulong optval_addr
, socklen_t optlen
)
2947 struct ip_mreqn
*ip_mreq
;
2948 struct ip_mreq_source
*ip_mreq_source
;
2952 /* TCP options all take an 'int' value. */
2953 if (optlen
< sizeof(uint32_t))
2954 return -TARGET_EINVAL
;
2956 if (get_user_u32(val
, optval_addr
))
2957 return -TARGET_EFAULT
;
2958 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2965 case IP_ROUTER_ALERT
:
2969 case IP_MTU_DISCOVER
:
2976 case IP_MULTICAST_TTL
:
2977 case IP_MULTICAST_LOOP
:
2979 if (optlen
>= sizeof(uint32_t)) {
2980 if (get_user_u32(val
, optval_addr
))
2981 return -TARGET_EFAULT
;
2982 } else if (optlen
>= 1) {
2983 if (get_user_u8(val
, optval_addr
))
2984 return -TARGET_EFAULT
;
2986 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2988 case IP_ADD_MEMBERSHIP
:
2989 case IP_DROP_MEMBERSHIP
:
2990 if (optlen
< sizeof (struct target_ip_mreq
) ||
2991 optlen
> sizeof (struct target_ip_mreqn
))
2992 return -TARGET_EINVAL
;
2994 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
2995 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
2996 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
2999 case IP_BLOCK_SOURCE
:
3000 case IP_UNBLOCK_SOURCE
:
3001 case IP_ADD_SOURCE_MEMBERSHIP
:
3002 case IP_DROP_SOURCE_MEMBERSHIP
:
3003 if (optlen
!= sizeof (struct target_ip_mreq_source
))
3004 return -TARGET_EINVAL
;
3006 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
3007 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
3008 unlock_user (ip_mreq_source
, optval_addr
, 0);
3017 case IPV6_MTU_DISCOVER
:
3020 case IPV6_RECVPKTINFO
:
3021 case IPV6_UNICAST_HOPS
:
3022 case IPV6_MULTICAST_HOPS
:
3023 case IPV6_MULTICAST_LOOP
:
3025 case IPV6_RECVHOPLIMIT
:
3026 case IPV6_2292HOPLIMIT
:
3029 if (optlen
< sizeof(uint32_t)) {
3030 return -TARGET_EINVAL
;
3032 if (get_user_u32(val
, optval_addr
)) {
3033 return -TARGET_EFAULT
;
3035 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
3036 &val
, sizeof(val
)));
3040 struct in6_pktinfo pki
;
3042 if (optlen
< sizeof(pki
)) {
3043 return -TARGET_EINVAL
;
3046 if (copy_from_user(&pki
, optval_addr
, sizeof(pki
))) {
3047 return -TARGET_EFAULT
;
3050 pki
.ipi6_ifindex
= tswap32(pki
.ipi6_ifindex
);
3052 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
3053 &pki
, sizeof(pki
)));
3064 struct icmp6_filter icmp6f
;
3066 if (optlen
> sizeof(icmp6f
)) {
3067 optlen
= sizeof(icmp6f
);
3070 if (copy_from_user(&icmp6f
, optval_addr
, optlen
)) {
3071 return -TARGET_EFAULT
;
3074 for (val
= 0; val
< 8; val
++) {
3075 icmp6f
.data
[val
] = tswap32(icmp6f
.data
[val
]);
3078 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
3090 /* those take an u32 value */
3091 if (optlen
< sizeof(uint32_t)) {
3092 return -TARGET_EINVAL
;
3095 if (get_user_u32(val
, optval_addr
)) {
3096 return -TARGET_EFAULT
;
3098 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
3099 &val
, sizeof(val
)));
3106 case TARGET_SOL_SOCKET
:
3108 case TARGET_SO_RCVTIMEO
:
3112 optname
= SO_RCVTIMEO
;
3115 if (optlen
!= sizeof(struct target_timeval
)) {
3116 return -TARGET_EINVAL
;
3119 if (copy_from_user_timeval(&tv
, optval_addr
)) {
3120 return -TARGET_EFAULT
;
3123 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
3127 case TARGET_SO_SNDTIMEO
:
3128 optname
= SO_SNDTIMEO
;
3130 case TARGET_SO_ATTACH_FILTER
:
3132 struct target_sock_fprog
*tfprog
;
3133 struct target_sock_filter
*tfilter
;
3134 struct sock_fprog fprog
;
3135 struct sock_filter
*filter
;
3138 if (optlen
!= sizeof(*tfprog
)) {
3139 return -TARGET_EINVAL
;
3141 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
3142 return -TARGET_EFAULT
;
3144 if (!lock_user_struct(VERIFY_READ
, tfilter
,
3145 tswapal(tfprog
->filter
), 0)) {
3146 unlock_user_struct(tfprog
, optval_addr
, 1);
3147 return -TARGET_EFAULT
;
3150 fprog
.len
= tswap16(tfprog
->len
);
3151 filter
= g_try_new(struct sock_filter
, fprog
.len
);
3152 if (filter
== NULL
) {
3153 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
3154 unlock_user_struct(tfprog
, optval_addr
, 1);
3155 return -TARGET_ENOMEM
;
3157 for (i
= 0; i
< fprog
.len
; i
++) {
3158 filter
[i
].code
= tswap16(tfilter
[i
].code
);
3159 filter
[i
].jt
= tfilter
[i
].jt
;
3160 filter
[i
].jf
= tfilter
[i
].jf
;
3161 filter
[i
].k
= tswap32(tfilter
[i
].k
);
3163 fprog
.filter
= filter
;
3165 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
3166 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
3169 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
3170 unlock_user_struct(tfprog
, optval_addr
, 1);
3173 case TARGET_SO_BINDTODEVICE
:
3175 char *dev_ifname
, *addr_ifname
;
3177 if (optlen
> IFNAMSIZ
- 1) {
3178 optlen
= IFNAMSIZ
- 1;
3180 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
3182 return -TARGET_EFAULT
;
3184 optname
= SO_BINDTODEVICE
;
3185 addr_ifname
= alloca(IFNAMSIZ
);
3186 memcpy(addr_ifname
, dev_ifname
, optlen
);
3187 addr_ifname
[optlen
] = 0;
3188 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
3189 addr_ifname
, optlen
));
3190 unlock_user (dev_ifname
, optval_addr
, 0);
3193 /* Options with 'int' argument. */
3194 case TARGET_SO_DEBUG
:
3197 case TARGET_SO_REUSEADDR
:
3198 optname
= SO_REUSEADDR
;
3200 case TARGET_SO_TYPE
:
3203 case TARGET_SO_ERROR
:
3206 case TARGET_SO_DONTROUTE
:
3207 optname
= SO_DONTROUTE
;
3209 case TARGET_SO_BROADCAST
:
3210 optname
= SO_BROADCAST
;
3212 case TARGET_SO_SNDBUF
:
3213 optname
= SO_SNDBUF
;
3215 case TARGET_SO_SNDBUFFORCE
:
3216 optname
= SO_SNDBUFFORCE
;
3218 case TARGET_SO_RCVBUF
:
3219 optname
= SO_RCVBUF
;
3221 case TARGET_SO_RCVBUFFORCE
:
3222 optname
= SO_RCVBUFFORCE
;
3224 case TARGET_SO_KEEPALIVE
:
3225 optname
= SO_KEEPALIVE
;
3227 case TARGET_SO_OOBINLINE
:
3228 optname
= SO_OOBINLINE
;
3230 case TARGET_SO_NO_CHECK
:
3231 optname
= SO_NO_CHECK
;
3233 case TARGET_SO_PRIORITY
:
3234 optname
= SO_PRIORITY
;
3237 case TARGET_SO_BSDCOMPAT
:
3238 optname
= SO_BSDCOMPAT
;
3241 case TARGET_SO_PASSCRED
:
3242 optname
= SO_PASSCRED
;
3244 case TARGET_SO_PASSSEC
:
3245 optname
= SO_PASSSEC
;
3247 case TARGET_SO_TIMESTAMP
:
3248 optname
= SO_TIMESTAMP
;
3250 case TARGET_SO_RCVLOWAT
:
3251 optname
= SO_RCVLOWAT
;
3256 if (optlen
< sizeof(uint32_t))
3257 return -TARGET_EINVAL
;
3259 if (get_user_u32(val
, optval_addr
))
3260 return -TARGET_EFAULT
;
3261 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
3265 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
3266 ret
= -TARGET_ENOPROTOOPT
;
3271 /* do_getsockopt() Must return target values and target errnos. */
3272 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
3273 abi_ulong optval_addr
, abi_ulong optlen
)
3280 case TARGET_SOL_SOCKET
:
3283 /* These don't just return a single integer */
3284 case TARGET_SO_LINGER
:
3285 case TARGET_SO_RCVTIMEO
:
3286 case TARGET_SO_SNDTIMEO
:
3287 case TARGET_SO_PEERNAME
:
3289 case TARGET_SO_PEERCRED
: {
3292 struct target_ucred
*tcr
;
3294 if (get_user_u32(len
, optlen
)) {
3295 return -TARGET_EFAULT
;
3298 return -TARGET_EINVAL
;
3302 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
3310 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
3311 return -TARGET_EFAULT
;
3313 __put_user(cr
.pid
, &tcr
->pid
);
3314 __put_user(cr
.uid
, &tcr
->uid
);
3315 __put_user(cr
.gid
, &tcr
->gid
);
3316 unlock_user_struct(tcr
, optval_addr
, 1);
3317 if (put_user_u32(len
, optlen
)) {
3318 return -TARGET_EFAULT
;
3322 /* Options with 'int' argument. */
3323 case TARGET_SO_DEBUG
:
3326 case TARGET_SO_REUSEADDR
:
3327 optname
= SO_REUSEADDR
;
3329 case TARGET_SO_TYPE
:
3332 case TARGET_SO_ERROR
:
3335 case TARGET_SO_DONTROUTE
:
3336 optname
= SO_DONTROUTE
;
3338 case TARGET_SO_BROADCAST
:
3339 optname
= SO_BROADCAST
;
3341 case TARGET_SO_SNDBUF
:
3342 optname
= SO_SNDBUF
;
3344 case TARGET_SO_RCVBUF
:
3345 optname
= SO_RCVBUF
;
3347 case TARGET_SO_KEEPALIVE
:
3348 optname
= SO_KEEPALIVE
;
3350 case TARGET_SO_OOBINLINE
:
3351 optname
= SO_OOBINLINE
;
3353 case TARGET_SO_NO_CHECK
:
3354 optname
= SO_NO_CHECK
;
3356 case TARGET_SO_PRIORITY
:
3357 optname
= SO_PRIORITY
;
3360 case TARGET_SO_BSDCOMPAT
:
3361 optname
= SO_BSDCOMPAT
;
3364 case TARGET_SO_PASSCRED
:
3365 optname
= SO_PASSCRED
;
3367 case TARGET_SO_TIMESTAMP
:
3368 optname
= SO_TIMESTAMP
;
3370 case TARGET_SO_RCVLOWAT
:
3371 optname
= SO_RCVLOWAT
;
3373 case TARGET_SO_ACCEPTCONN
:
3374 optname
= SO_ACCEPTCONN
;
3381 /* TCP options all take an 'int' value. */
3383 if (get_user_u32(len
, optlen
))
3384 return -TARGET_EFAULT
;
3386 return -TARGET_EINVAL
;
3388 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3391 if (optname
== SO_TYPE
) {
3392 val
= host_to_target_sock_type(val
);
3397 if (put_user_u32(val
, optval_addr
))
3398 return -TARGET_EFAULT
;
3400 if (put_user_u8(val
, optval_addr
))
3401 return -TARGET_EFAULT
;
3403 if (put_user_u32(len
, optlen
))
3404 return -TARGET_EFAULT
;
3411 case IP_ROUTER_ALERT
:
3415 case IP_MTU_DISCOVER
:
3421 case IP_MULTICAST_TTL
:
3422 case IP_MULTICAST_LOOP
:
3423 if (get_user_u32(len
, optlen
))
3424 return -TARGET_EFAULT
;
3426 return -TARGET_EINVAL
;
3428 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3431 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
3433 if (put_user_u32(len
, optlen
)
3434 || put_user_u8(val
, optval_addr
))
3435 return -TARGET_EFAULT
;
3437 if (len
> sizeof(int))
3439 if (put_user_u32(len
, optlen
)
3440 || put_user_u32(val
, optval_addr
))
3441 return -TARGET_EFAULT
;
3445 ret
= -TARGET_ENOPROTOOPT
;
3451 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
3453 ret
= -TARGET_EOPNOTSUPP
;
3459 /* Convert target low/high pair representing file offset into the host
3460 * low/high pair. This function doesn't handle offsets bigger than 64 bits
3461 * as the kernel doesn't handle them either.
3463 static void target_to_host_low_high(abi_ulong tlow
,
3465 unsigned long *hlow
,
3466 unsigned long *hhigh
)
3468 uint64_t off
= tlow
|
3469 ((unsigned long long)thigh
<< TARGET_LONG_BITS
/ 2) <<
3470 TARGET_LONG_BITS
/ 2;
3473 *hhigh
= (off
>> HOST_LONG_BITS
/ 2) >> HOST_LONG_BITS
/ 2;
3476 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
3477 abi_ulong count
, int copy
)
3479 struct target_iovec
*target_vec
;
3481 abi_ulong total_len
, max_len
;
3484 bool bad_address
= false;
3490 if (count
> IOV_MAX
) {
3495 vec
= g_try_new0(struct iovec
, count
);
3501 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3502 count
* sizeof(struct target_iovec
), 1);
3503 if (target_vec
== NULL
) {
3508 /* ??? If host page size > target page size, this will result in a
3509 value larger than what we can actually support. */
3510 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
3513 for (i
= 0; i
< count
; i
++) {
3514 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3515 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3520 } else if (len
== 0) {
3521 /* Zero length pointer is ignored. */
3522 vec
[i
].iov_base
= 0;
3524 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
3525 /* If the first buffer pointer is bad, this is a fault. But
3526 * subsequent bad buffers will result in a partial write; this
3527 * is realized by filling the vector with null pointers and
3529 if (!vec
[i
].iov_base
) {
3540 if (len
> max_len
- total_len
) {
3541 len
= max_len
- total_len
;
3544 vec
[i
].iov_len
= len
;
3548 unlock_user(target_vec
, target_addr
, 0);
3553 if (tswapal(target_vec
[i
].iov_len
) > 0) {
3554 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
3557 unlock_user(target_vec
, target_addr
, 0);
3564 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
3565 abi_ulong count
, int copy
)
3567 struct target_iovec
*target_vec
;
3570 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3571 count
* sizeof(struct target_iovec
), 1);
3573 for (i
= 0; i
< count
; i
++) {
3574 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3575 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3579 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
3581 unlock_user(target_vec
, target_addr
, 0);
3587 static inline int target_to_host_sock_type(int *type
)
3590 int target_type
= *type
;
3592 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
3593 case TARGET_SOCK_DGRAM
:
3594 host_type
= SOCK_DGRAM
;
3596 case TARGET_SOCK_STREAM
:
3597 host_type
= SOCK_STREAM
;
3600 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
3603 if (target_type
& TARGET_SOCK_CLOEXEC
) {
3604 #if defined(SOCK_CLOEXEC)
3605 host_type
|= SOCK_CLOEXEC
;
3607 return -TARGET_EINVAL
;
3610 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3611 #if defined(SOCK_NONBLOCK)
3612 host_type
|= SOCK_NONBLOCK
;
3613 #elif !defined(O_NONBLOCK)
3614 return -TARGET_EINVAL
;
3621 /* Try to emulate socket type flags after socket creation. */
3622 static int sock_flags_fixup(int fd
, int target_type
)
3624 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3625 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3626 int flags
= fcntl(fd
, F_GETFL
);
3627 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
3629 return -TARGET_EINVAL
;
3636 static abi_long
packet_target_to_host_sockaddr(void *host_addr
,
3637 abi_ulong target_addr
,
3640 struct sockaddr
*addr
= host_addr
;
3641 struct target_sockaddr
*target_saddr
;
3643 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
3644 if (!target_saddr
) {
3645 return -TARGET_EFAULT
;
3648 memcpy(addr
, target_saddr
, len
);
3649 addr
->sa_family
= tswap16(target_saddr
->sa_family
);
3650 /* spkt_protocol is big-endian */
3652 unlock_user(target_saddr
, target_addr
, 0);
3656 static TargetFdTrans target_packet_trans
= {
3657 .target_to_host_addr
= packet_target_to_host_sockaddr
,
3660 #ifdef CONFIG_RTNETLINK
3661 static abi_long
netlink_route_target_to_host(void *buf
, size_t len
)
3665 ret
= target_to_host_nlmsg_route(buf
, len
);
3673 static abi_long
netlink_route_host_to_target(void *buf
, size_t len
)
3677 ret
= host_to_target_nlmsg_route(buf
, len
);
3685 static TargetFdTrans target_netlink_route_trans
= {
3686 .target_to_host_data
= netlink_route_target_to_host
,
3687 .host_to_target_data
= netlink_route_host_to_target
,
3689 #endif /* CONFIG_RTNETLINK */
3691 static abi_long
netlink_audit_target_to_host(void *buf
, size_t len
)
3695 ret
= target_to_host_nlmsg_audit(buf
, len
);
3703 static abi_long
netlink_audit_host_to_target(void *buf
, size_t len
)
3707 ret
= host_to_target_nlmsg_audit(buf
, len
);
3715 static TargetFdTrans target_netlink_audit_trans
= {
3716 .target_to_host_data
= netlink_audit_target_to_host
,
3717 .host_to_target_data
= netlink_audit_host_to_target
,
3720 /* do_socket() Must return target values and target errnos. */
3721 static abi_long
do_socket(int domain
, int type
, int protocol
)
3723 int target_type
= type
;
3726 ret
= target_to_host_sock_type(&type
);
3731 if (domain
== PF_NETLINK
&& !(
3732 #ifdef CONFIG_RTNETLINK
3733 protocol
== NETLINK_ROUTE
||
3735 protocol
== NETLINK_KOBJECT_UEVENT
||
3736 protocol
== NETLINK_AUDIT
)) {
3737 return -EPFNOSUPPORT
;
3740 if (domain
== AF_PACKET
||
3741 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
3742 protocol
= tswap16(protocol
);
3745 ret
= get_errno(socket(domain
, type
, protocol
));
3747 ret
= sock_flags_fixup(ret
, target_type
);
3748 if (type
== SOCK_PACKET
) {
3749 /* Manage an obsolete case :
3750 * if socket type is SOCK_PACKET, bind by name
3752 fd_trans_register(ret
, &target_packet_trans
);
3753 } else if (domain
== PF_NETLINK
) {
3755 #ifdef CONFIG_RTNETLINK
3757 fd_trans_register(ret
, &target_netlink_route_trans
);
3760 case NETLINK_KOBJECT_UEVENT
:
3761 /* nothing to do: messages are strings */
3764 fd_trans_register(ret
, &target_netlink_audit_trans
);
3767 g_assert_not_reached();
3774 /* do_bind() Must return target values and target errnos. */
3775 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
3781 if ((int)addrlen
< 0) {
3782 return -TARGET_EINVAL
;
3785 addr
= alloca(addrlen
+1);
3787 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3791 return get_errno(bind(sockfd
, addr
, addrlen
));
3794 /* do_connect() Must return target values and target errnos. */
3795 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
3801 if ((int)addrlen
< 0) {
3802 return -TARGET_EINVAL
;
3805 addr
= alloca(addrlen
+1);
3807 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3811 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
3814 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3815 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
3816 int flags
, int send
)
3822 abi_ulong target_vec
;
3824 if (msgp
->msg_name
) {
3825 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
3826 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
3827 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
3828 tswapal(msgp
->msg_name
),
3830 if (ret
== -TARGET_EFAULT
) {
3831 /* For connected sockets msg_name and msg_namelen must
3832 * be ignored, so returning EFAULT immediately is wrong.
3833 * Instead, pass a bad msg_name to the host kernel, and
3834 * let it decide whether to return EFAULT or not.
3836 msg
.msg_name
= (void *)-1;
3841 msg
.msg_name
= NULL
;
3842 msg
.msg_namelen
= 0;
3844 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3845 msg
.msg_control
= alloca(msg
.msg_controllen
);
3846 memset(msg
.msg_control
, 0, msg
.msg_controllen
);
3848 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3850 count
= tswapal(msgp
->msg_iovlen
);
3851 target_vec
= tswapal(msgp
->msg_iov
);
3853 if (count
> IOV_MAX
) {
3854 /* sendrcvmsg returns a different errno for this condition than
3855 * readv/writev, so we must catch it here before lock_iovec() does.
3857 ret
= -TARGET_EMSGSIZE
;
3861 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3862 target_vec
, count
, send
);
3864 ret
= -host_to_target_errno(errno
);
3867 msg
.msg_iovlen
= count
;
3871 if (fd_trans_target_to_host_data(fd
)) {
3874 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3875 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3876 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3877 msg
.msg_iov
->iov_len
);
3879 msg
.msg_iov
->iov_base
= host_msg
;
3880 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3884 ret
= target_to_host_cmsg(&msg
, msgp
);
3886 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3890 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3891 if (!is_error(ret
)) {
3893 if (fd_trans_host_to_target_data(fd
)) {
3894 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3897 ret
= host_to_target_cmsg(msgp
, &msg
);
3899 if (!is_error(ret
)) {
3900 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3901 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
3902 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3903 msg
.msg_name
, msg
.msg_namelen
);
3915 unlock_iovec(vec
, target_vec
, count
, !send
);
3920 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3921 int flags
, int send
)
3924 struct target_msghdr
*msgp
;
3926 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3930 return -TARGET_EFAULT
;
3932 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3933 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3937 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3938 * so it might not have this *mmsg-specific flag either.
3940 #ifndef MSG_WAITFORONE
3941 #define MSG_WAITFORONE 0x10000
3944 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3945 unsigned int vlen
, unsigned int flags
,
3948 struct target_mmsghdr
*mmsgp
;
3952 if (vlen
> UIO_MAXIOV
) {
3956 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3958 return -TARGET_EFAULT
;
3961 for (i
= 0; i
< vlen
; i
++) {
3962 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3963 if (is_error(ret
)) {
3966 mmsgp
[i
].msg_len
= tswap32(ret
);
3967 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3968 if (flags
& MSG_WAITFORONE
) {
3969 flags
|= MSG_DONTWAIT
;
3973 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3975 /* Return number of datagrams sent if we sent any at all;
3976 * otherwise return the error.
3984 /* do_accept4() Must return target values and target errnos. */
3985 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3986 abi_ulong target_addrlen_addr
, int flags
)
3993 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3995 if (target_addr
== 0) {
3996 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3999 /* linux returns EINVAL if addrlen pointer is invalid */
4000 if (get_user_u32(addrlen
, target_addrlen_addr
))
4001 return -TARGET_EINVAL
;
4003 if ((int)addrlen
< 0) {
4004 return -TARGET_EINVAL
;
4007 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
4008 return -TARGET_EINVAL
;
4010 addr
= alloca(addrlen
);
4012 ret
= get_errno(safe_accept4(fd
, addr
, &addrlen
, host_flags
));
4013 if (!is_error(ret
)) {
4014 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
4015 if (put_user_u32(addrlen
, target_addrlen_addr
))
4016 ret
= -TARGET_EFAULT
;
4021 /* do_getpeername() Must return target values and target errnos. */
4022 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
4023 abi_ulong target_addrlen_addr
)
4029 if (get_user_u32(addrlen
, target_addrlen_addr
))
4030 return -TARGET_EFAULT
;
4032 if ((int)addrlen
< 0) {
4033 return -TARGET_EINVAL
;
4036 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
4037 return -TARGET_EFAULT
;
4039 addr
= alloca(addrlen
);
4041 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
4042 if (!is_error(ret
)) {
4043 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
4044 if (put_user_u32(addrlen
, target_addrlen_addr
))
4045 ret
= -TARGET_EFAULT
;
4050 /* do_getsockname() Must return target values and target errnos. */
4051 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
4052 abi_ulong target_addrlen_addr
)
4058 if (get_user_u32(addrlen
, target_addrlen_addr
))
4059 return -TARGET_EFAULT
;
4061 if ((int)addrlen
< 0) {
4062 return -TARGET_EINVAL
;
4065 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
4066 return -TARGET_EFAULT
;
4068 addr
= alloca(addrlen
);
4070 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
4071 if (!is_error(ret
)) {
4072 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
4073 if (put_user_u32(addrlen
, target_addrlen_addr
))
4074 ret
= -TARGET_EFAULT
;
4079 /* do_socketpair() Must return target values and target errnos. */
4080 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
4081 abi_ulong target_tab_addr
)
4086 target_to_host_sock_type(&type
);
4088 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
4089 if (!is_error(ret
)) {
4090 if (put_user_s32(tab
[0], target_tab_addr
)
4091 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
4092 ret
= -TARGET_EFAULT
;
4097 /* do_sendto() Must return target values and target errnos. */
4098 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
4099 abi_ulong target_addr
, socklen_t addrlen
)
4103 void *copy_msg
= NULL
;
4106 if ((int)addrlen
< 0) {
4107 return -TARGET_EINVAL
;
4110 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
4112 return -TARGET_EFAULT
;
4113 if (fd_trans_target_to_host_data(fd
)) {
4114 copy_msg
= host_msg
;
4115 host_msg
= g_malloc(len
);
4116 memcpy(host_msg
, copy_msg
, len
);
4117 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
4123 addr
= alloca(addrlen
+1);
4124 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
4128 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
4130 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
4135 host_msg
= copy_msg
;
4137 unlock_user(host_msg
, msg
, 0);
4141 /* do_recvfrom() Must return target values and target errnos. */
4142 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
4143 abi_ulong target_addr
,
4144 abi_ulong target_addrlen
)
4151 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
4153 return -TARGET_EFAULT
;
4155 if (get_user_u32(addrlen
, target_addrlen
)) {
4156 ret
= -TARGET_EFAULT
;
4159 if ((int)addrlen
< 0) {
4160 ret
= -TARGET_EINVAL
;
4163 addr
= alloca(addrlen
);
4164 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
4167 addr
= NULL
; /* To keep compiler quiet. */
4168 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
4170 if (!is_error(ret
)) {
4171 if (fd_trans_host_to_target_data(fd
)) {
4172 ret
= fd_trans_host_to_target_data(fd
)(host_msg
, ret
);
4175 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
4176 if (put_user_u32(addrlen
, target_addrlen
)) {
4177 ret
= -TARGET_EFAULT
;
4181 unlock_user(host_msg
, msg
, len
);
4184 unlock_user(host_msg
, msg
, 0);
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos.
 * Demultiplexes the old-style socketcall(2) into the individual
 * do_* socket helpers; the argument vector lives in guest memory. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
4282 #define N_SHM_REGIONS 32
4284 static struct shm_region
{
4288 } shm_regions
[N_SHM_REGIONS
];
4290 #ifndef TARGET_SEMID64_DS
4291 /* asm-generic version of this struct */
4292 struct target_semid64_ds
4294 struct target_ipc_perm sem_perm
;
4295 abi_ulong sem_otime
;
4296 #if TARGET_ABI_BITS == 32
4297 abi_ulong __unused1
;
4299 abi_ulong sem_ctime
;
4300 #if TARGET_ABI_BITS == 32
4301 abi_ulong __unused2
;
4303 abi_ulong sem_nsems
;
4304 abi_ulong __unused3
;
4305 abi_ulong __unused4
;
4309 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
4310 abi_ulong target_addr
)
4312 struct target_ipc_perm
*target_ip
;
4313 struct target_semid64_ds
*target_sd
;
4315 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4316 return -TARGET_EFAULT
;
4317 target_ip
= &(target_sd
->sem_perm
);
4318 host_ip
->__key
= tswap32(target_ip
->__key
);
4319 host_ip
->uid
= tswap32(target_ip
->uid
);
4320 host_ip
->gid
= tswap32(target_ip
->gid
);
4321 host_ip
->cuid
= tswap32(target_ip
->cuid
);
4322 host_ip
->cgid
= tswap32(target_ip
->cgid
);
4323 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4324 host_ip
->mode
= tswap32(target_ip
->mode
);
4326 host_ip
->mode
= tswap16(target_ip
->mode
);
4328 #if defined(TARGET_PPC)
4329 host_ip
->__seq
= tswap32(target_ip
->__seq
);
4331 host_ip
->__seq
= tswap16(target_ip
->__seq
);
4333 unlock_user_struct(target_sd
, target_addr
, 0);
4337 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
4338 struct ipc_perm
*host_ip
)
4340 struct target_ipc_perm
*target_ip
;
4341 struct target_semid64_ds
*target_sd
;
4343 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4344 return -TARGET_EFAULT
;
4345 target_ip
= &(target_sd
->sem_perm
);
4346 target_ip
->__key
= tswap32(host_ip
->__key
);
4347 target_ip
->uid
= tswap32(host_ip
->uid
);
4348 target_ip
->gid
= tswap32(host_ip
->gid
);
4349 target_ip
->cuid
= tswap32(host_ip
->cuid
);
4350 target_ip
->cgid
= tswap32(host_ip
->cgid
);
4351 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4352 target_ip
->mode
= tswap32(host_ip
->mode
);
4354 target_ip
->mode
= tswap16(host_ip
->mode
);
4356 #if defined(TARGET_PPC)
4357 target_ip
->__seq
= tswap32(host_ip
->__seq
);
4359 target_ip
->__seq
= tswap16(host_ip
->__seq
);
4361 unlock_user_struct(target_sd
, target_addr
, 1);
4365 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
4366 abi_ulong target_addr
)
4368 struct target_semid64_ds
*target_sd
;
4370 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4371 return -TARGET_EFAULT
;
4372 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
4373 return -TARGET_EFAULT
;
4374 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
4375 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
4376 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
4377 unlock_user_struct(target_sd
, target_addr
, 0);
4381 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
4382 struct semid_ds
*host_sd
)
4384 struct target_semid64_ds
*target_sd
;
4386 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4387 return -TARGET_EFAULT
;
4388 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
4389 return -TARGET_EFAULT
;
4390 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
4391 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
4392 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
4393 unlock_user_struct(target_sd
, target_addr
, 1);
4397 struct target_seminfo
{
4410 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
4411 struct seminfo
*host_seminfo
)
4413 struct target_seminfo
*target_seminfo
;
4414 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
4415 return -TARGET_EFAULT
;
4416 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
4417 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
4418 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
4419 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
4420 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
4421 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
4422 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
4423 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
4424 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
4425 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
4426 unlock_user_struct(target_seminfo
, target_addr
, 1);
4432 struct semid_ds
*buf
;
4433 unsigned short *array
;
4434 struct seminfo
*__buf
;
4437 union target_semun
{
4444 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
4445 abi_ulong target_addr
)
4448 unsigned short *array
;
4450 struct semid_ds semid_ds
;
4453 semun
.buf
= &semid_ds
;
4455 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4457 return get_errno(ret
);
4459 nsems
= semid_ds
.sem_nsems
;
4461 *host_array
= g_try_new(unsigned short, nsems
);
4463 return -TARGET_ENOMEM
;
4465 array
= lock_user(VERIFY_READ
, target_addr
,
4466 nsems
*sizeof(unsigned short), 1);
4468 g_free(*host_array
);
4469 return -TARGET_EFAULT
;
4472 for(i
=0; i
<nsems
; i
++) {
4473 __get_user((*host_array
)[i
], &array
[i
]);
4475 unlock_user(array
, target_addr
, 0);
4480 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
4481 unsigned short **host_array
)
4484 unsigned short *array
;
4486 struct semid_ds semid_ds
;
4489 semun
.buf
= &semid_ds
;
4491 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4493 return get_errno(ret
);
4495 nsems
= semid_ds
.sem_nsems
;
4497 array
= lock_user(VERIFY_WRITE
, target_addr
,
4498 nsems
*sizeof(unsigned short), 0);
4500 return -TARGET_EFAULT
;
4502 for(i
=0; i
<nsems
; i
++) {
4503 __put_user((*host_array
)[i
], &array
[i
]);
4505 g_free(*host_array
);
4506 unlock_user(array
, target_addr
, 1);
4511 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
4512 abi_ulong target_arg
)
4514 union target_semun target_su
= { .buf
= target_arg
};
4516 struct semid_ds dsarg
;
4517 unsigned short *array
= NULL
;
4518 struct seminfo seminfo
;
4519 abi_long ret
= -TARGET_EINVAL
;
4526 /* In 64 bit cross-endian situations, we will erroneously pick up
4527 * the wrong half of the union for the "val" element. To rectify
4528 * this, the entire 8-byte structure is byteswapped, followed by
4529 * a swap of the 4 byte val field. In other cases, the data is
4530 * already in proper host byte order. */
4531 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
4532 target_su
.buf
= tswapal(target_su
.buf
);
4533 arg
.val
= tswap32(target_su
.val
);
4535 arg
.val
= target_su
.val
;
4537 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4541 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
4545 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4546 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
4553 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
4557 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4558 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
4564 arg
.__buf
= &seminfo
;
4565 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4566 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
4574 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
/* Guest layout of struct sembuf for semop().
 * NOTE(review): sem_op/sem_flg types reconstructed from
 * target_to_host_sembuf() — verify against upstream. */
struct target_sembuf {
    unsigned short sem_num;  /* semaphore index in array */
    short sem_op;            /* semaphore operation */
    short sem_flg;           /* operation flags */
};
4587 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
4588 abi_ulong target_addr
,
4591 struct target_sembuf
*target_sembuf
;
4594 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
4595 nsops
*sizeof(struct target_sembuf
), 1);
4597 return -TARGET_EFAULT
;
4599 for(i
=0; i
<nsops
; i
++) {
4600 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
4601 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
4602 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
4605 unlock_user(target_sembuf
, target_addr
, 0);
4610 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
4612 struct sembuf sops
[nsops
];
4614 if (target_to_host_sembuf(sops
, ptr
, nsops
))
4615 return -TARGET_EFAULT
;
4617 return get_errno(safe_semtimedop(semid
, sops
, nsops
, NULL
));
4620 struct target_msqid_ds
4622 struct target_ipc_perm msg_perm
;
4623 abi_ulong msg_stime
;
4624 #if TARGET_ABI_BITS == 32
4625 abi_ulong __unused1
;
4627 abi_ulong msg_rtime
;
4628 #if TARGET_ABI_BITS == 32
4629 abi_ulong __unused2
;
4631 abi_ulong msg_ctime
;
4632 #if TARGET_ABI_BITS == 32
4633 abi_ulong __unused3
;
4635 abi_ulong __msg_cbytes
;
4637 abi_ulong msg_qbytes
;
4638 abi_ulong msg_lspid
;
4639 abi_ulong msg_lrpid
;
4640 abi_ulong __unused4
;
4641 abi_ulong __unused5
;
4644 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
4645 abi_ulong target_addr
)
4647 struct target_msqid_ds
*target_md
;
4649 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
4650 return -TARGET_EFAULT
;
4651 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
4652 return -TARGET_EFAULT
;
4653 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
4654 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
4655 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
4656 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
4657 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
4658 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
4659 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
4660 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
4661 unlock_user_struct(target_md
, target_addr
, 0);
4665 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
4666 struct msqid_ds
*host_md
)
4668 struct target_msqid_ds
*target_md
;
4670 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
4671 return -TARGET_EFAULT
;
4672 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
4673 return -TARGET_EFAULT
;
4674 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
4675 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
4676 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
4677 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
4678 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
4679 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
4680 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
4681 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
4682 unlock_user_struct(target_md
, target_addr
, 1);
/* Guest layout of struct msginfo (IPC_INFO/MSG_INFO result).
 * NOTE(review): int fields reconstructed from host_to_target_msginfo()
 * below — verify against upstream. */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4697 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
4698 struct msginfo
*host_msginfo
)
4700 struct target_msginfo
*target_msginfo
;
4701 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
4702 return -TARGET_EFAULT
;
4703 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
4704 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
4705 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
4706 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
4707 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
4708 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
4709 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
4710 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
4711 unlock_user_struct(target_msginfo
, target_addr
, 1);
4715 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
4717 struct msqid_ds dsarg
;
4718 struct msginfo msginfo
;
4719 abi_long ret
= -TARGET_EINVAL
;
4727 if (target_to_host_msqid_ds(&dsarg
,ptr
))
4728 return -TARGET_EFAULT
;
4729 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
4730 if (host_to_target_msqid_ds(ptr
,&dsarg
))
4731 return -TARGET_EFAULT
;
4734 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
4738 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
4739 if (host_to_target_msginfo(ptr
, &msginfo
))
4740 return -TARGET_EFAULT
;
4747 struct target_msgbuf
{
4752 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
4753 ssize_t msgsz
, int msgflg
)
4755 struct target_msgbuf
*target_mb
;
4756 struct msgbuf
*host_mb
;
4760 return -TARGET_EINVAL
;
4763 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
4764 return -TARGET_EFAULT
;
4765 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4767 unlock_user_struct(target_mb
, msgp
, 0);
4768 return -TARGET_ENOMEM
;
4770 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
4771 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
4772 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
4774 unlock_user_struct(target_mb
, msgp
, 0);
4779 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
4780 ssize_t msgsz
, abi_long msgtyp
,
4783 struct target_msgbuf
*target_mb
;
4785 struct msgbuf
*host_mb
;
4789 return -TARGET_EINVAL
;
4792 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
4793 return -TARGET_EFAULT
;
4795 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4797 ret
= -TARGET_ENOMEM
;
4800 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
4803 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
4804 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
4805 if (!target_mtext
) {
4806 ret
= -TARGET_EFAULT
;
4809 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
4810 unlock_user(target_mtext
, target_mtext_addr
, ret
);
4813 target_mb
->mtype
= tswapal(host_mb
->mtype
);
4817 unlock_user_struct(target_mb
, msgp
, 1);
4822 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
4823 abi_ulong target_addr
)
4825 struct target_shmid_ds
*target_sd
;
4827 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4828 return -TARGET_EFAULT
;
4829 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
4830 return -TARGET_EFAULT
;
4831 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4832 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4833 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4834 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4835 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4836 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4837 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4838 unlock_user_struct(target_sd
, target_addr
, 0);
4842 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
4843 struct shmid_ds
*host_sd
)
4845 struct target_shmid_ds
*target_sd
;
4847 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4848 return -TARGET_EFAULT
;
4849 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
4850 return -TARGET_EFAULT
;
4851 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4852 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4853 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4854 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4855 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4856 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4857 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4858 unlock_user_struct(target_sd
, target_addr
, 1);
4862 struct target_shminfo
{
4870 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
4871 struct shminfo
*host_shminfo
)
4873 struct target_shminfo
*target_shminfo
;
4874 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
4875 return -TARGET_EFAULT
;
4876 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
4877 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
4878 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
4879 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
4880 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
4881 unlock_user_struct(target_shminfo
, target_addr
, 1);
4885 struct target_shm_info
{
4890 abi_ulong swap_attempts
;
4891 abi_ulong swap_successes
;
4894 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
4895 struct shm_info
*host_shm_info
)
4897 struct target_shm_info
*target_shm_info
;
4898 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
4899 return -TARGET_EFAULT
;
4900 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
4901 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
4902 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
4903 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
4904 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
4905 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
4906 unlock_user_struct(target_shm_info
, target_addr
, 1);
4910 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
4912 struct shmid_ds dsarg
;
4913 struct shminfo shminfo
;
4914 struct shm_info shm_info
;
4915 abi_long ret
= -TARGET_EINVAL
;
4923 if (target_to_host_shmid_ds(&dsarg
, buf
))
4924 return -TARGET_EFAULT
;
4925 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
4926 if (host_to_target_shmid_ds(buf
, &dsarg
))
4927 return -TARGET_EFAULT
;
4930 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
4931 if (host_to_target_shminfo(buf
, &shminfo
))
4932 return -TARGET_EFAULT
;
4935 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
4936 if (host_to_target_shm_info(buf
, &shm_info
))
4937 return -TARGET_EFAULT
;
4942 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
4949 #ifndef TARGET_FORCE_SHMLBA
4950 /* For most architectures, SHMLBA is the same as the page size;
4951 * some architectures have larger values, in which case they should
4952 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4953 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4954 * and defining its own value for SHMLBA.
4956 * The kernel also permits SHMLBA to be set by the architecture to a
4957 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4958 * this means that addresses are rounded to the large size if
4959 * SHM_RND is set but addresses not aligned to that size are not rejected
4960 * as long as they are at least page-aligned. Since the only architecture
4961 * which uses this is ia64 this code doesn't provide for that oddity.
4963 static inline abi_ulong
target_shmlba(CPUArchState
*cpu_env
)
4965 return TARGET_PAGE_SIZE
;
4969 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
4970 int shmid
, abi_ulong shmaddr
, int shmflg
)
4974 struct shmid_ds shm_info
;
4978 /* find out the length of the shared memory segment */
4979 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4980 if (is_error(ret
)) {
4981 /* can't get length, bail out */
4985 shmlba
= target_shmlba(cpu_env
);
4987 if (shmaddr
& (shmlba
- 1)) {
4988 if (shmflg
& SHM_RND
) {
4989 shmaddr
&= ~(shmlba
- 1);
4991 return -TARGET_EINVAL
;
4994 if (!guest_range_valid(shmaddr
, shm_info
.shm_segsz
)) {
4995 return -TARGET_EINVAL
;
5001 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
5003 abi_ulong mmap_start
;
5005 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
5007 if (mmap_start
== -1) {
5009 host_raddr
= (void *)-1;
5011 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
5014 if (host_raddr
== (void *)-1) {
5016 return get_errno((long)host_raddr
);
5018 raddr
=h2g((unsigned long)host_raddr
);
5020 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
5021 PAGE_VALID
| PAGE_READ
|
5022 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
5024 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
5025 if (!shm_regions
[i
].in_use
) {
5026 shm_regions
[i
].in_use
= true;
5027 shm_regions
[i
].start
= raddr
;
5028 shm_regions
[i
].size
= shm_info
.shm_segsz
;
5038 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
5045 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
5046 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
5047 shm_regions
[i
].in_use
= false;
5048 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
5052 rv
= get_errno(shmdt(g2h(shmaddr
)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos.
 *
 * Demultiplexer for the legacy ipc(2) syscall: the low 16 bits of
 * 'call' select the operation, the high 16 bits carry a version used
 * by shmat/msgrcv to select the old calling convention.
 */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old-style msgrcv passes msgp/msgtyp via a kludge struct. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr)) {
                return get_errno(raddr);
            }
            /* Attach address is returned via *third. */
            if (put_user_ual(raddr, third)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
5166 /* kernel structure types definitions */
5168 #define STRUCT(name, ...) STRUCT_ ## name,
5169 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
5171 #include "syscall_types.h"
5175 #undef STRUCT_SPECIAL
5177 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
5178 #define STRUCT_SPECIAL(name)
5179 #include "syscall_types.h"
5181 #undef STRUCT_SPECIAL
5183 typedef struct IOCTLEntry IOCTLEntry
;
5185 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5186 int fd
, int cmd
, abi_long arg
);
5190 unsigned int host_cmd
;
5193 do_ioctl_fn
*do_ioctl
;
5194 const argtype arg_type
[5];
5197 #define IOC_R 0x0001
5198 #define IOC_W 0x0002
5199 #define IOC_RW (IOC_R | IOC_W)
5201 #define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

/* Special-case handler for FS_IOC_FIEMAP: the payload is a struct
 * fiemap followed by a variable-length array of fiemap_extents, so it
 * cannot be described by a static argtype list.
 */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof(*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
5292 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5293 int fd
, int cmd
, abi_long arg
)
5295 const argtype
*arg_type
= ie
->arg_type
;
5299 struct ifconf
*host_ifconf
;
5301 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
5302 int target_ifreq_size
;
5307 abi_long target_ifc_buf
;
5311 assert(arg_type
[0] == TYPE_PTR
);
5312 assert(ie
->access
== IOC_RW
);
5315 target_size
= thunk_type_size(arg_type
, 0);
5317 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5319 return -TARGET_EFAULT
;
5320 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5321 unlock_user(argptr
, arg
, 0);
5323 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
5324 target_ifc_len
= host_ifconf
->ifc_len
;
5325 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
5327 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
5328 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
5329 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
5331 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
5332 if (outbufsz
> MAX_STRUCT_SIZE
) {
5333 /* We can't fit all the extents into the fixed size buffer.
5334 * Allocate one that is large enough and use it instead.
5336 host_ifconf
= malloc(outbufsz
);
5338 return -TARGET_ENOMEM
;
5340 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
5343 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
5345 host_ifconf
->ifc_len
= host_ifc_len
;
5346 host_ifconf
->ifc_buf
= host_ifc_buf
;
5348 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
5349 if (!is_error(ret
)) {
5350 /* convert host ifc_len to target ifc_len */
5352 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
5353 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
5354 host_ifconf
->ifc_len
= target_ifc_len
;
5356 /* restore target ifc_buf */
5358 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
5360 /* copy struct ifconf to target user */
5362 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5364 return -TARGET_EFAULT
;
5365 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
5366 unlock_user(argptr
, arg
, target_size
);
5368 /* copy ifreq[] to target user */
5370 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
5371 for (i
= 0; i
< nb_ifreq
; i
++) {
5372 thunk_convert(argptr
+ i
* target_ifreq_size
,
5373 host_ifc_buf
+ i
* sizeof(struct ifreq
),
5374 ifreq_arg_type
, THUNK_TARGET
);
5376 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
5386 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5387 int cmd
, abi_long arg
)
5390 struct dm_ioctl
*host_dm
;
5391 abi_long guest_data
;
5392 uint32_t guest_data_size
;
5394 const argtype
*arg_type
= ie
->arg_type
;
5396 void *big_buf
= NULL
;
5400 target_size
= thunk_type_size(arg_type
, 0);
5401 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5403 ret
= -TARGET_EFAULT
;
5406 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5407 unlock_user(argptr
, arg
, 0);
5409 /* buf_temp is too small, so fetch things into a bigger buffer */
5410 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
5411 memcpy(big_buf
, buf_temp
, target_size
);
5415 guest_data
= arg
+ host_dm
->data_start
;
5416 if ((guest_data
- arg
) < 0) {
5417 ret
= -TARGET_EINVAL
;
5420 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5421 host_data
= (char*)host_dm
+ host_dm
->data_start
;
5423 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
5425 ret
= -TARGET_EFAULT
;
5429 switch (ie
->host_cmd
) {
5431 case DM_LIST_DEVICES
:
5434 case DM_DEV_SUSPEND
:
5437 case DM_TABLE_STATUS
:
5438 case DM_TABLE_CLEAR
:
5440 case DM_LIST_VERSIONS
:
5444 case DM_DEV_SET_GEOMETRY
:
5445 /* data contains only strings */
5446 memcpy(host_data
, argptr
, guest_data_size
);
5449 memcpy(host_data
, argptr
, guest_data_size
);
5450 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
5454 void *gspec
= argptr
;
5455 void *cur_data
= host_data
;
5456 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5457 int spec_size
= thunk_type_size(arg_type
, 0);
5460 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5461 struct dm_target_spec
*spec
= cur_data
;
5465 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
5466 slen
= strlen((char*)gspec
+ spec_size
) + 1;
5468 spec
->next
= sizeof(*spec
) + slen
;
5469 strcpy((char*)&spec
[1], gspec
+ spec_size
);
5471 cur_data
+= spec
->next
;
5476 ret
= -TARGET_EINVAL
;
5477 unlock_user(argptr
, guest_data
, 0);
5480 unlock_user(argptr
, guest_data
, 0);
5482 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5483 if (!is_error(ret
)) {
5484 guest_data
= arg
+ host_dm
->data_start
;
5485 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5486 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
5487 switch (ie
->host_cmd
) {
5492 case DM_DEV_SUSPEND
:
5495 case DM_TABLE_CLEAR
:
5497 case DM_DEV_SET_GEOMETRY
:
5498 /* no return data */
5500 case DM_LIST_DEVICES
:
5502 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
5503 uint32_t remaining_data
= guest_data_size
;
5504 void *cur_data
= argptr
;
5505 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
5506 int nl_size
= 12; /* can't use thunk_size due to alignment */
5509 uint32_t next
= nl
->next
;
5511 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
5513 if (remaining_data
< nl
->next
) {
5514 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5517 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
5518 strcpy(cur_data
+ nl_size
, nl
->name
);
5519 cur_data
+= nl
->next
;
5520 remaining_data
-= nl
->next
;
5524 nl
= (void*)nl
+ next
;
5529 case DM_TABLE_STATUS
:
5531 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
5532 void *cur_data
= argptr
;
5533 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5534 int spec_size
= thunk_type_size(arg_type
, 0);
5537 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5538 uint32_t next
= spec
->next
;
5539 int slen
= strlen((char*)&spec
[1]) + 1;
5540 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
5541 if (guest_data_size
< spec
->next
) {
5542 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5545 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
5546 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
5547 cur_data
= argptr
+ spec
->next
;
5548 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
5554 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
5555 int count
= *(uint32_t*)hdata
;
5556 uint64_t *hdev
= hdata
+ 8;
5557 uint64_t *gdev
= argptr
+ 8;
5560 *(uint32_t*)argptr
= tswap32(count
);
5561 for (i
= 0; i
< count
; i
++) {
5562 *gdev
= tswap64(*hdev
);
5568 case DM_LIST_VERSIONS
:
5570 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
5571 uint32_t remaining_data
= guest_data_size
;
5572 void *cur_data
= argptr
;
5573 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
5574 int vers_size
= thunk_type_size(arg_type
, 0);
5577 uint32_t next
= vers
->next
;
5579 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
5581 if (remaining_data
< vers
->next
) {
5582 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5585 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
5586 strcpy(cur_data
+ vers_size
, vers
->name
);
5587 cur_data
+= vers
->next
;
5588 remaining_data
-= vers
->next
;
5592 vers
= (void*)vers
+ next
;
5597 unlock_user(argptr
, guest_data
, 0);
5598 ret
= -TARGET_EINVAL
;
5601 unlock_user(argptr
, guest_data
, guest_data_size
);
5603 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5605 ret
= -TARGET_EFAULT
;
5608 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5609 unlock_user(argptr
, arg
, target_size
);
5616 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5617 int cmd
, abi_long arg
)
5621 const argtype
*arg_type
= ie
->arg_type
;
5622 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
5625 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
5626 struct blkpg_partition host_part
;
5628 /* Read and convert blkpg */
5630 target_size
= thunk_type_size(arg_type
, 0);
5631 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5633 ret
= -TARGET_EFAULT
;
5636 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5637 unlock_user(argptr
, arg
, 0);
5639 switch (host_blkpg
->op
) {
5640 case BLKPG_ADD_PARTITION
:
5641 case BLKPG_DEL_PARTITION
:
5642 /* payload is struct blkpg_partition */
5645 /* Unknown opcode */
5646 ret
= -TARGET_EINVAL
;
5650 /* Read and convert blkpg->data */
5651 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
5652 target_size
= thunk_type_size(part_arg_type
, 0);
5653 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5655 ret
= -TARGET_EFAULT
;
5658 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
5659 unlock_user(argptr
, arg
, 0);
5661 /* Swizzle the data pointer to our local copy and call! */
5662 host_blkpg
->data
= &host_part
;
5663 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
5669 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5670 int fd
, int cmd
, abi_long arg
)
5672 const argtype
*arg_type
= ie
->arg_type
;
5673 const StructEntry
*se
;
5674 const argtype
*field_types
;
5675 const int *dst_offsets
, *src_offsets
;
5678 abi_ulong
*target_rt_dev_ptr
;
5679 unsigned long *host_rt_dev_ptr
;
5683 assert(ie
->access
== IOC_W
);
5684 assert(*arg_type
== TYPE_PTR
);
5686 assert(*arg_type
== TYPE_STRUCT
);
5687 target_size
= thunk_type_size(arg_type
, 0);
5688 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5690 return -TARGET_EFAULT
;
5693 assert(*arg_type
== (int)STRUCT_rtentry
);
5694 se
= struct_entries
+ *arg_type
++;
5695 assert(se
->convert
[0] == NULL
);
5696 /* convert struct here to be able to catch rt_dev string */
5697 field_types
= se
->field_types
;
5698 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
5699 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
5700 for (i
= 0; i
< se
->nb_fields
; i
++) {
5701 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
5702 assert(*field_types
== TYPE_PTRVOID
);
5703 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
5704 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
5705 if (*target_rt_dev_ptr
!= 0) {
5706 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
5707 tswapal(*target_rt_dev_ptr
));
5708 if (!*host_rt_dev_ptr
) {
5709 unlock_user(argptr
, arg
, 0);
5710 return -TARGET_EFAULT
;
5713 *host_rt_dev_ptr
= 0;
5718 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
5719 argptr
+ src_offsets
[i
],
5720 field_types
, THUNK_HOST
);
5722 unlock_user(argptr
, arg
, 0);
5724 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5725 if (*host_rt_dev_ptr
!= 0) {
5726 unlock_user((void *)*host_rt_dev_ptr
,
5727 *target_rt_dev_ptr
, 0);
5732 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5733 int fd
, int cmd
, abi_long arg
)
5735 int sig
= target_to_host_signal(arg
);
5736 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
#ifdef TIOCGPTPEER
/* TIOCGPTPEER takes open(2)-style flags; translate the guest flag bits
 * to host values before issuing the ioctl.
 */
static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
    return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
}
#endif
5748 static IOCTLEntry ioctl_entries
[] = {
5749 #define IOCTL(cmd, access, ...) \
5750 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5751 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5752 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5753 #define IOCTL_IGNORE(cmd) \
5754 { TARGET_ ## cmd, 0, #cmd },
5759 /* ??? Implement proper locking for ioctls. */
5760 /* do_ioctl() Must return target values and target errnos. */
5761 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
5763 const IOCTLEntry
*ie
;
5764 const argtype
*arg_type
;
5766 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
5772 if (ie
->target_cmd
== 0) {
5773 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
5774 return -TARGET_ENOSYS
;
5776 if (ie
->target_cmd
== cmd
)
5780 arg_type
= ie
->arg_type
;
5782 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
5785 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
5786 } else if (!ie
->host_cmd
) {
5787 /* Some architectures define BSD ioctls in their headers
5788 that are not implemented in Linux. */
5789 return -TARGET_ENOSYS
;
5792 switch(arg_type
[0]) {
5795 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
5799 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
5803 target_size
= thunk_type_size(arg_type
, 0);
5804 switch(ie
->access
) {
5806 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5807 if (!is_error(ret
)) {
5808 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5810 return -TARGET_EFAULT
;
5811 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5812 unlock_user(argptr
, arg
, target_size
);
5816 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5818 return -TARGET_EFAULT
;
5819 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5820 unlock_user(argptr
, arg
, 0);
5821 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5825 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5827 return -TARGET_EFAULT
;
5828 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5829 unlock_user(argptr
, arg
, 0);
5830 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5831 if (!is_error(ret
)) {
5832 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5834 return -TARGET_EFAULT
;
5835 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5836 unlock_user(argptr
, arg
, target_size
);
5842 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5843 (long)cmd
, arg_type
[0]);
5844 ret
= -TARGET_ENOSYS
;
5850 static const bitmask_transtbl iflag_tbl
[] = {
5851 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
5852 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
5853 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
5854 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
5855 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
5856 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
5857 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
5858 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
5859 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
5860 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
5861 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
5862 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
5863 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
5864 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
5868 static const bitmask_transtbl oflag_tbl
[] = {
5869 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
5870 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
5871 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
5872 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
5873 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
5874 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
5875 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
5876 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
5877 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
5878 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
5879 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
5880 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
5881 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
5882 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
5883 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
5884 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
5885 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
5886 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
5887 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
5888 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
5889 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
5890 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
5891 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
5892 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
5896 static const bitmask_transtbl cflag_tbl
[] = {
5897 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
5898 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
5899 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
5900 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
5901 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
5902 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
5903 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
5904 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
5905 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
5906 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
5907 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
5908 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
5909 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
5910 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
5911 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
5912 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
5913 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
5914 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
5915 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
5916 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
5917 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
5918 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
5919 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
5920 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
5921 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
5922 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
5923 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
5924 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
5925 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
5926 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
5927 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
5931 static const bitmask_transtbl lflag_tbl
[] = {
5932 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
5933 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
5934 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
5935 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
5936 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
5937 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
5938 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
5939 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
5940 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
5941 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
5942 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
5943 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
5944 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
5945 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
5946 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5950 static void target_to_host_termios (void *dst
, const void *src
)
5952 struct host_termios
*host
= dst
;
5953 const struct target_termios
*target
= src
;
5956 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
5958 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
5960 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
5962 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
5963 host
->c_line
= target
->c_line
;
5965 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
5966 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
5967 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
5968 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
5969 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
5970 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
5971 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
5972 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
5973 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
5974 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
5975 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
5976 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
5977 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
5978 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
5979 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
5980 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
5981 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
5982 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
5985 static void host_to_target_termios (void *dst
, const void *src
)
5987 struct target_termios
*target
= dst
;
5988 const struct host_termios
*host
= src
;
5991 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
5993 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
5995 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
5997 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
5998 target
->c_line
= host
->c_line
;
6000 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
6001 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
6002 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
6003 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
6004 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
6005 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
6006 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
6007 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
6008 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
6009 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
6010 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
6011 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
6012 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
6013 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
6014 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
6015 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
6016 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
6017 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
6020 static const StructEntry struct_termios_def
= {
6021 .convert
= { host_to_target_termios
, target_to_host_termios
},
6022 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
6023 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
6026 static bitmask_transtbl mmap_flags_tbl
[] = {
6027 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
6028 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
6029 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
6030 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
,
6031 MAP_ANONYMOUS
, MAP_ANONYMOUS
},
6032 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
,
6033 MAP_GROWSDOWN
, MAP_GROWSDOWN
},
6034 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
,
6035 MAP_DENYWRITE
, MAP_DENYWRITE
},
6036 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
,
6037 MAP_EXECUTABLE
, MAP_EXECUTABLE
},
6038 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
6039 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
,
6040 MAP_NORESERVE
, MAP_NORESERVE
},
6041 { TARGET_MAP_HUGETLB
, TARGET_MAP_HUGETLB
, MAP_HUGETLB
, MAP_HUGETLB
},
6042 /* MAP_STACK had been ignored by the kernel for quite some time.
6043 Recognize it for the target insofar as we do not want to pass
6044 it through to the host. */
6045 { TARGET_MAP_STACK
, TARGET_MAP_STACK
, 0, 0 },
6049 #if defined(TARGET_I386)
6051 /* NOTE: there is really one LDT for all the threads */
6052 static uint8_t *ldt_table
;
6054 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
6061 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
6062 if (size
> bytecount
)
6064 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
6066 return -TARGET_EFAULT
;
6067 /* ??? Should this by byteswapped? */
6068 memcpy(p
, ldt_table
, size
);
6069 unlock_user(p
, ptr
, size
);
6073 /* XXX: add locking support */
6074 static abi_long
write_ldt(CPUX86State
*env
,
6075 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
6077 struct target_modify_ldt_ldt_s ldt_info
;
6078 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6079 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
6080 int seg_not_present
, useable
, lm
;
6081 uint32_t *lp
, entry_1
, entry_2
;
6083 if (bytecount
!= sizeof(ldt_info
))
6084 return -TARGET_EINVAL
;
6085 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
6086 return -TARGET_EFAULT
;
6087 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
6088 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
6089 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
6090 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
6091 unlock_user_struct(target_ldt_info
, ptr
, 0);
6093 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
6094 return -TARGET_EINVAL
;
6095 seg_32bit
= ldt_info
.flags
& 1;
6096 contents
= (ldt_info
.flags
>> 1) & 3;
6097 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
6098 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
6099 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
6100 useable
= (ldt_info
.flags
>> 6) & 1;
6104 lm
= (ldt_info
.flags
>> 7) & 1;
6106 if (contents
== 3) {
6108 return -TARGET_EINVAL
;
6109 if (seg_not_present
== 0)
6110 return -TARGET_EINVAL
;
6112 /* allocate the LDT */
6114 env
->ldt
.base
= target_mmap(0,
6115 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
6116 PROT_READ
|PROT_WRITE
,
6117 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
6118 if (env
->ldt
.base
== -1)
6119 return -TARGET_ENOMEM
;
6120 memset(g2h(env
->ldt
.base
), 0,
6121 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
6122 env
->ldt
.limit
= 0xffff;
6123 ldt_table
= g2h(env
->ldt
.base
);
6126 /* NOTE: same code as Linux kernel */
6127 /* Allow LDTs to be cleared by the user. */
6128 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
6131 read_exec_only
== 1 &&
6133 limit_in_pages
== 0 &&
6134 seg_not_present
== 1 &&
6142 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
6143 (ldt_info
.limit
& 0x0ffff);
6144 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
6145 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
6146 (ldt_info
.limit
& 0xf0000) |
6147 ((read_exec_only
^ 1) << 9) |
6149 ((seg_not_present
^ 1) << 15) |
6151 (limit_in_pages
<< 23) |
6155 entry_2
|= (useable
<< 20);
6157 /* Install the new entry ... */
6159 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
6160 lp
[0] = tswap32(entry_1
);
6161 lp
[1] = tswap32(entry_2
);
6165 /* specific and weird i386 syscalls */
6166 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
6167 unsigned long bytecount
)
6173 ret
= read_ldt(ptr
, bytecount
);
6176 ret
= write_ldt(env
, ptr
, bytecount
, 1);
6179 ret
= write_ldt(env
, ptr
, bytecount
, 0);
6182 ret
= -TARGET_ENOSYS
;
6188 #if defined(TARGET_I386) && defined(TARGET_ABI32)
6189 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6191 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
6192 struct target_modify_ldt_ldt_s ldt_info
;
6193 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6194 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
6195 int seg_not_present
, useable
, lm
;
6196 uint32_t *lp
, entry_1
, entry_2
;
6199 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6200 if (!target_ldt_info
)
6201 return -TARGET_EFAULT
;
6202 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
6203 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
6204 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
6205 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
6206 if (ldt_info
.entry_number
== -1) {
6207 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
6208 if (gdt_table
[i
] == 0) {
6209 ldt_info
.entry_number
= i
;
6210 target_ldt_info
->entry_number
= tswap32(i
);
6215 unlock_user_struct(target_ldt_info
, ptr
, 1);
6217 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
6218 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
6219 return -TARGET_EINVAL
;
6220 seg_32bit
= ldt_info
.flags
& 1;
6221 contents
= (ldt_info
.flags
>> 1) & 3;
6222 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
6223 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
6224 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
6225 useable
= (ldt_info
.flags
>> 6) & 1;
6229 lm
= (ldt_info
.flags
>> 7) & 1;
6232 if (contents
== 3) {
6233 if (seg_not_present
== 0)
6234 return -TARGET_EINVAL
;
6237 /* NOTE: same code as Linux kernel */
6238 /* Allow LDTs to be cleared by the user. */
6239 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
6240 if ((contents
== 0 &&
6241 read_exec_only
== 1 &&
6243 limit_in_pages
== 0 &&
6244 seg_not_present
== 1 &&
6252 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
6253 (ldt_info
.limit
& 0x0ffff);
6254 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
6255 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
6256 (ldt_info
.limit
& 0xf0000) |
6257 ((read_exec_only
^ 1) << 9) |
6259 ((seg_not_present
^ 1) << 15) |
6261 (limit_in_pages
<< 23) |
6266 /* Install the new entry ... */
6268 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
6269 lp
[0] = tswap32(entry_1
);
6270 lp
[1] = tswap32(entry_2
);
6274 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6276 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6277 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
6278 uint32_t base_addr
, limit
, flags
;
6279 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
6280 int seg_not_present
, useable
, lm
;
6281 uint32_t *lp
, entry_1
, entry_2
;
6283 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6284 if (!target_ldt_info
)
6285 return -TARGET_EFAULT
;
6286 idx
= tswap32(target_ldt_info
->entry_number
);
6287 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
6288 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
6289 unlock_user_struct(target_ldt_info
, ptr
, 1);
6290 return -TARGET_EINVAL
;
6292 lp
= (uint32_t *)(gdt_table
+ idx
);
6293 entry_1
= tswap32(lp
[0]);
6294 entry_2
= tswap32(lp
[1]);
6296 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
6297 contents
= (entry_2
>> 10) & 3;
6298 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
6299 seg_32bit
= (entry_2
>> 22) & 1;
6300 limit_in_pages
= (entry_2
>> 23) & 1;
6301 useable
= (entry_2
>> 20) & 1;
6305 lm
= (entry_2
>> 21) & 1;
6307 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
6308 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
6309 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
6310 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
6311 base_addr
= (entry_1
>> 16) |
6312 (entry_2
& 0xff000000) |
6313 ((entry_2
& 0xff) << 16);
6314 target_ldt_info
->base_addr
= tswapal(base_addr
);
6315 target_ldt_info
->limit
= tswap32(limit
);
6316 target_ldt_info
->flags
= tswap32(flags
);
6317 unlock_user_struct(target_ldt_info
, ptr
, 1);
6320 #endif /* TARGET_I386 && TARGET_ABI32 */
6322 #ifndef TARGET_ABI32
6323 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
6330 case TARGET_ARCH_SET_GS
:
6331 case TARGET_ARCH_SET_FS
:
6332 if (code
== TARGET_ARCH_SET_GS
)
6336 cpu_x86_load_seg(env
, idx
, 0);
6337 env
->segs
[idx
].base
= addr
;
6339 case TARGET_ARCH_GET_GS
:
6340 case TARGET_ARCH_GET_FS
:
6341 if (code
== TARGET_ARCH_GET_GS
)
6345 val
= env
->segs
[idx
].base
;
6346 if (put_user(val
, addr
, abi_ulong
))
6347 ret
= -TARGET_EFAULT
;
6350 ret
= -TARGET_EINVAL
;
6357 #endif /* defined(TARGET_I386) */
6359 #define NEW_STACK_SIZE 0x40000
6362 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
6365 pthread_mutex_t mutex
;
6366 pthread_cond_t cond
;
6369 abi_ulong child_tidptr
;
6370 abi_ulong parent_tidptr
;
6374 static void *clone_func(void *arg
)
6376 new_thread_info
*info
= arg
;
6381 rcu_register_thread();
6382 tcg_register_thread();
6384 cpu
= ENV_GET_CPU(env
);
6386 ts
= (TaskState
*)cpu
->opaque
;
6387 info
->tid
= gettid();
6389 if (info
->child_tidptr
)
6390 put_user_u32(info
->tid
, info
->child_tidptr
);
6391 if (info
->parent_tidptr
)
6392 put_user_u32(info
->tid
, info
->parent_tidptr
);
6393 /* Enable signals. */
6394 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
6395 /* Signal to the parent that we're ready. */
6396 pthread_mutex_lock(&info
->mutex
);
6397 pthread_cond_broadcast(&info
->cond
);
6398 pthread_mutex_unlock(&info
->mutex
);
6399 /* Wait until the parent has finished initializing the tls state. */
6400 pthread_mutex_lock(&clone_lock
);
6401 pthread_mutex_unlock(&clone_lock
);
6407 /* do_fork() Must return host values and target errnos (unlike most
6408 do_*() functions). */
6409 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
6410 abi_ulong parent_tidptr
, target_ulong newtls
,
6411 abi_ulong child_tidptr
)
6413 CPUState
*cpu
= ENV_GET_CPU(env
);
6417 CPUArchState
*new_env
;
6420 flags
&= ~CLONE_IGNORED_FLAGS
;
6422 /* Emulate vfork() with fork() */
6423 if (flags
& CLONE_VFORK
)
6424 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
6426 if (flags
& CLONE_VM
) {
6427 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
6428 new_thread_info info
;
6429 pthread_attr_t attr
;
6431 if (((flags
& CLONE_THREAD_FLAGS
) != CLONE_THREAD_FLAGS
) ||
6432 (flags
& CLONE_INVALID_THREAD_FLAGS
)) {
6433 return -TARGET_EINVAL
;
6436 ts
= g_new0(TaskState
, 1);
6437 init_task_state(ts
);
6439 /* Grab a mutex so that thread setup appears atomic. */
6440 pthread_mutex_lock(&clone_lock
);
6442 /* we create a new CPU instance. */
6443 new_env
= cpu_copy(env
);
6444 /* Init regs that differ from the parent. */
6445 cpu_clone_regs(new_env
, newsp
);
6446 new_cpu
= ENV_GET_CPU(new_env
);
6447 new_cpu
->opaque
= ts
;
6448 ts
->bprm
= parent_ts
->bprm
;
6449 ts
->info
= parent_ts
->info
;
6450 ts
->signal_mask
= parent_ts
->signal_mask
;
6452 if (flags
& CLONE_CHILD_CLEARTID
) {
6453 ts
->child_tidptr
= child_tidptr
;
6456 if (flags
& CLONE_SETTLS
) {
6457 cpu_set_tls (new_env
, newtls
);
6460 memset(&info
, 0, sizeof(info
));
6461 pthread_mutex_init(&info
.mutex
, NULL
);
6462 pthread_mutex_lock(&info
.mutex
);
6463 pthread_cond_init(&info
.cond
, NULL
);
6465 if (flags
& CLONE_CHILD_SETTID
) {
6466 info
.child_tidptr
= child_tidptr
;
6468 if (flags
& CLONE_PARENT_SETTID
) {
6469 info
.parent_tidptr
= parent_tidptr
;
6472 ret
= pthread_attr_init(&attr
);
6473 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
6474 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
6475 /* It is not safe to deliver signals until the child has finished
6476 initializing, so temporarily block all signals. */
6477 sigfillset(&sigmask
);
6478 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
6480 /* If this is our first additional thread, we need to ensure we
6481 * generate code for parallel execution and flush old translations.
6483 if (!parallel_cpus
) {
6484 parallel_cpus
= true;
6488 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
6489 /* TODO: Free new CPU state if thread creation failed. */
6491 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
6492 pthread_attr_destroy(&attr
);
6494 /* Wait for the child to initialize. */
6495 pthread_cond_wait(&info
.cond
, &info
.mutex
);
6500 pthread_mutex_unlock(&info
.mutex
);
6501 pthread_cond_destroy(&info
.cond
);
6502 pthread_mutex_destroy(&info
.mutex
);
6503 pthread_mutex_unlock(&clone_lock
);
6505 /* if no CLONE_VM, we consider it is a fork */
6506 if (flags
& CLONE_INVALID_FORK_FLAGS
) {
6507 return -TARGET_EINVAL
;
6510 /* We can't support custom termination signals */
6511 if ((flags
& CSIGNAL
) != TARGET_SIGCHLD
) {
6512 return -TARGET_EINVAL
;
6515 if (block_signals()) {
6516 return -TARGET_ERESTARTSYS
;
6522 /* Child Process. */
6523 cpu_clone_regs(env
, newsp
);
6525 /* There is a race condition here. The parent process could
6526 theoretically read the TID in the child process before the child
6527 tid is set. This would require using either ptrace
6528 (not implemented) or having *_tidptr to point at a shared memory
6529 mapping. We can't repeat the spinlock hack used above because
6530 the child process gets its own copy of the lock. */
6531 if (flags
& CLONE_CHILD_SETTID
)
6532 put_user_u32(gettid(), child_tidptr
);
6533 if (flags
& CLONE_PARENT_SETTID
)
6534 put_user_u32(gettid(), parent_tidptr
);
6535 ts
= (TaskState
*)cpu
->opaque
;
6536 if (flags
& CLONE_SETTLS
)
6537 cpu_set_tls (env
, newtls
);
6538 if (flags
& CLONE_CHILD_CLEARTID
)
6539 ts
->child_tidptr
= child_tidptr
;
6547 /* warning : doesn't handle linux specific flags... */
6548 static int target_to_host_fcntl_cmd(int cmd
)
6553 case TARGET_F_DUPFD
:
6554 case TARGET_F_GETFD
:
6555 case TARGET_F_SETFD
:
6556 case TARGET_F_GETFL
:
6557 case TARGET_F_SETFL
:
6560 case TARGET_F_GETLK
:
6563 case TARGET_F_SETLK
:
6566 case TARGET_F_SETLKW
:
6569 case TARGET_F_GETOWN
:
6572 case TARGET_F_SETOWN
:
6575 case TARGET_F_GETSIG
:
6578 case TARGET_F_SETSIG
:
6581 #if TARGET_ABI_BITS == 32
6582 case TARGET_F_GETLK64
:
6585 case TARGET_F_SETLK64
:
6588 case TARGET_F_SETLKW64
:
6592 case TARGET_F_SETLEASE
:
6595 case TARGET_F_GETLEASE
:
6598 #ifdef F_DUPFD_CLOEXEC
6599 case TARGET_F_DUPFD_CLOEXEC
:
6600 ret
= F_DUPFD_CLOEXEC
;
6603 case TARGET_F_NOTIFY
:
6607 case TARGET_F_GETOWN_EX
:
6612 case TARGET_F_SETOWN_EX
:
6617 case TARGET_F_SETPIPE_SZ
:
6620 case TARGET_F_GETPIPE_SZ
:
6625 ret
= -TARGET_EINVAL
;
6629 #if defined(__powerpc64__)
6630 /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6631 * is not supported by kernel. The glibc fcntl call actually adjusts
6632 * them to 5, 6 and 7 before making the syscall(). Since we make the
6633 * syscall directly, adjust to what is supported by the kernel.
6635 if (ret
>= F_GETLK64
&& ret
<= F_SETLKW64
) {
6636 ret
-= F_GETLK64
- 5;
/* Table of lock types shared by the two flock converters below;
 * TRANSTBL_CONVERT is defined by each user to pick the direction. */
#define FLOCK_TRANSTBL \
    switch (type) { \
    TRANSTBL_CONVERT(F_RDLCK); \
    TRANSTBL_CONVERT(F_WRLCK); \
    TRANSTBL_CONVERT(F_UNLCK); \
    TRANSTBL_CONVERT(F_EXLCK); \
    TRANSTBL_CONVERT(F_SHLCK); \
    }
6652 static int target_to_host_flock(int type
)
6654 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6656 #undef TRANSTBL_CONVERT
6657 return -TARGET_EINVAL
;
6660 static int host_to_target_flock(int type
)
6662 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6664 #undef TRANSTBL_CONVERT
6665 /* if we don't know how to convert the value coming
6666 * from the host we copy to the target field as-is
6671 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
6672 abi_ulong target_flock_addr
)
6674 struct target_flock
*target_fl
;
6677 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6678 return -TARGET_EFAULT
;
6681 __get_user(l_type
, &target_fl
->l_type
);
6682 l_type
= target_to_host_flock(l_type
);
6686 fl
->l_type
= l_type
;
6687 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6688 __get_user(fl
->l_start
, &target_fl
->l_start
);
6689 __get_user(fl
->l_len
, &target_fl
->l_len
);
6690 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6691 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6695 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
6696 const struct flock64
*fl
)
6698 struct target_flock
*target_fl
;
6701 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6702 return -TARGET_EFAULT
;
6705 l_type
= host_to_target_flock(fl
->l_type
);
6706 __put_user(l_type
, &target_fl
->l_type
);
6707 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6708 __put_user(fl
->l_start
, &target_fl
->l_start
);
6709 __put_user(fl
->l_len
, &target_fl
->l_len
);
6710 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6711 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6715 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
6716 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
6718 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6719 static inline abi_long
copy_from_user_oabi_flock64(struct flock64
*fl
,
6720 abi_ulong target_flock_addr
)
6722 struct target_oabi_flock64
*target_fl
;
6725 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6726 return -TARGET_EFAULT
;
6729 __get_user(l_type
, &target_fl
->l_type
);
6730 l_type
= target_to_host_flock(l_type
);
6734 fl
->l_type
= l_type
;
6735 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6736 __get_user(fl
->l_start
, &target_fl
->l_start
);
6737 __get_user(fl
->l_len
, &target_fl
->l_len
);
6738 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6739 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6743 static inline abi_long
copy_to_user_oabi_flock64(abi_ulong target_flock_addr
,
6744 const struct flock64
*fl
)
6746 struct target_oabi_flock64
*target_fl
;
6749 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6750 return -TARGET_EFAULT
;
6753 l_type
= host_to_target_flock(fl
->l_type
);
6754 __put_user(l_type
, &target_fl
->l_type
);
6755 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6756 __put_user(fl
->l_start
, &target_fl
->l_start
);
6757 __put_user(fl
->l_len
, &target_fl
->l_len
);
6758 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6759 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6764 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
6765 abi_ulong target_flock_addr
)
6767 struct target_flock64
*target_fl
;
6770 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6771 return -TARGET_EFAULT
;
6774 __get_user(l_type
, &target_fl
->l_type
);
6775 l_type
= target_to_host_flock(l_type
);
6779 fl
->l_type
= l_type
;
6780 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6781 __get_user(fl
->l_start
, &target_fl
->l_start
);
6782 __get_user(fl
->l_len
, &target_fl
->l_len
);
6783 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6784 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6788 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
6789 const struct flock64
*fl
)
6791 struct target_flock64
*target_fl
;
6794 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6795 return -TARGET_EFAULT
;
6798 l_type
= host_to_target_flock(fl
->l_type
);
6799 __put_user(l_type
, &target_fl
->l_type
);
6800 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6801 __put_user(fl
->l_start
, &target_fl
->l_start
);
6802 __put_user(fl
->l_len
, &target_fl
->l_len
);
6803 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6804 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6808 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
6810 struct flock64 fl64
;
6812 struct f_owner_ex fox
;
6813 struct target_f_owner_ex
*target_fox
;
6816 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
6818 if (host_cmd
== -TARGET_EINVAL
)
6822 case TARGET_F_GETLK
:
6823 ret
= copy_from_user_flock(&fl64
, arg
);
6827 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6829 ret
= copy_to_user_flock(arg
, &fl64
);
6833 case TARGET_F_SETLK
:
6834 case TARGET_F_SETLKW
:
6835 ret
= copy_from_user_flock(&fl64
, arg
);
6839 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6842 case TARGET_F_GETLK64
:
6843 ret
= copy_from_user_flock64(&fl64
, arg
);
6847 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6849 ret
= copy_to_user_flock64(arg
, &fl64
);
6852 case TARGET_F_SETLK64
:
6853 case TARGET_F_SETLKW64
:
6854 ret
= copy_from_user_flock64(&fl64
, arg
);
6858 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6861 case TARGET_F_GETFL
:
6862 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6864 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
6868 case TARGET_F_SETFL
:
6869 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
6870 target_to_host_bitmask(arg
,
6875 case TARGET_F_GETOWN_EX
:
6876 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6878 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
6879 return -TARGET_EFAULT
;
6880 target_fox
->type
= tswap32(fox
.type
);
6881 target_fox
->pid
= tswap32(fox
.pid
);
6882 unlock_user_struct(target_fox
, arg
, 1);
6888 case TARGET_F_SETOWN_EX
:
6889 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
6890 return -TARGET_EFAULT
;
6891 fox
.type
= tswap32(target_fox
->type
);
6892 fox
.pid
= tswap32(target_fox
->pid
);
6893 unlock_user_struct(target_fox
, arg
, 0);
6894 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6898 case TARGET_F_SETOWN
:
6899 case TARGET_F_GETOWN
:
6900 case TARGET_F_SETSIG
:
6901 case TARGET_F_GETSIG
:
6902 case TARGET_F_SETLEASE
:
6903 case TARGET_F_GETLEASE
:
6904 case TARGET_F_SETPIPE_SZ
:
6905 case TARGET_F_GETPIPE_SZ
:
6906 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6910 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
#ifdef USE_UID16
/* 16-bit UID/GID hosts: clamp wide ids to the legacy range and
 * preserve the -1 "unchanged" sentinel in both directions. */
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}
static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}
static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}
static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */
/* 32-bit UID/GID hosts: the conversions are identities. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
6982 /* We must do direct syscalls for setting UID/GID, because we want to
6983 * implement the Linux system call semantics of "change only for this thread",
6984 * not the libc/POSIX semantics of "change for all threads in process".
6985 * (See http://ewontfix.com/17/ for more details.)
6986 * We use the 32-bit version of the syscalls if present; if it is not
6987 * then either the host architecture supports 32-bit UIDs natively with
6988 * the standard syscall, or the 16-bit UID is the best we can do.
6990 #ifdef __NR_setuid32
6991 #define __NR_sys_setuid __NR_setuid32
6993 #define __NR_sys_setuid __NR_setuid
6995 #ifdef __NR_setgid32
6996 #define __NR_sys_setgid __NR_setgid32
6998 #define __NR_sys_setgid __NR_setgid
7000 #ifdef __NR_setresuid32
7001 #define __NR_sys_setresuid __NR_setresuid32
7003 #define __NR_sys_setresuid __NR_setresuid
7005 #ifdef __NR_setresgid32
7006 #define __NR_sys_setresgid __NR_setresgid32
7008 #define __NR_sys_setresgid __NR_setresgid
7011 _syscall1(int, sys_setuid
, uid_t
, uid
)
7012 _syscall1(int, sys_setgid
, gid_t
, gid
)
7013 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
7014 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
7016 void syscall_init(void)
7019 const argtype
*arg_type
;
7023 thunk_init(STRUCT_MAX
);
7025 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7026 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7027 #include "syscall_types.h"
7029 #undef STRUCT_SPECIAL
7031 /* Build target_to_host_errno_table[] table from
7032 * host_to_target_errno_table[]. */
7033 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
7034 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
7037 /* we patch the ioctl size if necessary. We rely on the fact that
7038 no ioctl has all the bits at '1' in the size field */
7040 while (ie
->target_cmd
!= 0) {
7041 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
7042 TARGET_IOC_SIZEMASK
) {
7043 arg_type
= ie
->arg_type
;
7044 if (arg_type
[0] != TYPE_PTR
) {
7045 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
7050 size
= thunk_type_size(arg_type
, 0);
7051 ie
->target_cmd
= (ie
->target_cmd
&
7052 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
7053 (size
<< TARGET_IOC_SIZESHIFT
);
7056 /* automatic consistency check if same arch */
7057 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7058 (defined(__x86_64__) && defined(TARGET_X86_64))
7059 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
7060 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7061 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#if TARGET_ABI_BITS == 32
/* Assemble a 64-bit file offset from the two 32-bit register halves a
 * 32-bit guest ABI passes; which half is high depends on endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABIs pass the offset whole; the second word is ignored. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* truncate64(2) wrapper: some ABIs align 64-bit register pairs, shifting
 * the argument slots by one; compensate before combining the halves. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* ftruncate64(2) wrapper: same register-pair alignment fixup as
 * target_truncate64(), applied to the fd-based variant. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
7112 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
7113 abi_ulong target_addr
)
7115 struct target_timespec
*target_ts
;
7117 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
7118 return -TARGET_EFAULT
;
7119 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
7120 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
7121 unlock_user_struct(target_ts
, target_addr
, 0);
7125 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
7126 struct timespec
*host_ts
)
7128 struct target_timespec
*target_ts
;
7130 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
7131 return -TARGET_EFAULT
;
7132 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
7133 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
7134 unlock_user_struct(target_ts
, target_addr
, 1);
7138 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
7139 abi_ulong target_addr
)
7141 struct target_itimerspec
*target_itspec
;
7143 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
7144 return -TARGET_EFAULT
;
7147 host_itspec
->it_interval
.tv_sec
=
7148 tswapal(target_itspec
->it_interval
.tv_sec
);
7149 host_itspec
->it_interval
.tv_nsec
=
7150 tswapal(target_itspec
->it_interval
.tv_nsec
);
7151 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
7152 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
7154 unlock_user_struct(target_itspec
, target_addr
, 1);
7158 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
7159 struct itimerspec
*host_its
)
7161 struct target_itimerspec
*target_itspec
;
7163 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
7164 return -TARGET_EFAULT
;
7167 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
7168 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
7170 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
7171 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
7173 unlock_user_struct(target_itspec
, target_addr
, 0);
7177 static inline abi_long
target_to_host_timex(struct timex
*host_tx
,
7178 abi_long target_addr
)
7180 struct target_timex
*target_tx
;
7182 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
7183 return -TARGET_EFAULT
;
7186 __get_user(host_tx
->modes
, &target_tx
->modes
);
7187 __get_user(host_tx
->offset
, &target_tx
->offset
);
7188 __get_user(host_tx
->freq
, &target_tx
->freq
);
7189 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7190 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
7191 __get_user(host_tx
->status
, &target_tx
->status
);
7192 __get_user(host_tx
->constant
, &target_tx
->constant
);
7193 __get_user(host_tx
->precision
, &target_tx
->precision
);
7194 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7195 __get_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
7196 __get_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
7197 __get_user(host_tx
->tick
, &target_tx
->tick
);
7198 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7199 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
7200 __get_user(host_tx
->shift
, &target_tx
->shift
);
7201 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
7202 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7203 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7204 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7205 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7206 __get_user(host_tx
->tai
, &target_tx
->tai
);
7208 unlock_user_struct(target_tx
, target_addr
, 0);
7212 static inline abi_long
host_to_target_timex(abi_long target_addr
,
7213 struct timex
*host_tx
)
7215 struct target_timex
*target_tx
;
7217 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
7218 return -TARGET_EFAULT
;
7221 __put_user(host_tx
->modes
, &target_tx
->modes
);
7222 __put_user(host_tx
->offset
, &target_tx
->offset
);
7223 __put_user(host_tx
->freq
, &target_tx
->freq
);
7224 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7225 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
7226 __put_user(host_tx
->status
, &target_tx
->status
);
7227 __put_user(host_tx
->constant
, &target_tx
->constant
);
7228 __put_user(host_tx
->precision
, &target_tx
->precision
);
7229 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7230 __put_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
7231 __put_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
7232 __put_user(host_tx
->tick
, &target_tx
->tick
);
7233 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7234 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
7235 __put_user(host_tx
->shift
, &target_tx
->shift
);
7236 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
7237 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7238 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7239 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7240 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7241 __put_user(host_tx
->tai
, &target_tx
->tai
);
7243 unlock_user_struct(target_tx
, target_addr
, 1);
7248 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
7249 abi_ulong target_addr
)
7251 struct target_sigevent
*target_sevp
;
7253 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
7254 return -TARGET_EFAULT
;
7257 /* This union is awkward on 64 bit systems because it has a 32 bit
7258 * integer and a pointer in it; we follow the conversion approach
7259 * used for handling sigval types in signal.c so the guest should get
7260 * the correct value back even if we did a 64 bit byteswap and it's
7261 * using the 32 bit integer.
7263 host_sevp
->sigev_value
.sival_ptr
=
7264 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
7265 host_sevp
->sigev_signo
=
7266 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
7267 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
7268 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
7270 unlock_user_struct(target_sevp
, target_addr
, 1);
#if defined(TARGET_NR_mlockall)
/* Translate guest mlockall(2) flag bits to the host's MCL_* bits. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }

    return result;
}
#endif
#if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
     defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
     defined(TARGET_NR_newfstatat))
/*
 * Marshal a host 'struct stat' into the guest's 64-bit stat layout.
 * On 32-bit ARM EABI guests a dedicated layout is used; otherwise we
 * fall back to target_stat64 or target_stat depending on the target.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest pointer.
 */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
7356 /* ??? Using host futex calls even when target atomic operations
7357 are not really atomic probably breaks things. However implementing
7358 futexes locally would make futexes shared between multiple processes
7359 tricky. However they're probably useless because guest atomic
7360 operations won't work either. */
7361 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
7362 target_ulong uaddr2
, int val3
)
7364 struct timespec ts
, *pts
;
7367 /* ??? We assume FUTEX_* constants are the same on both host
7369 #ifdef FUTEX_CMD_MASK
7370 base_op
= op
& FUTEX_CMD_MASK
;
7376 case FUTEX_WAIT_BITSET
:
7379 target_to_host_timespec(pts
, timeout
);
7383 return get_errno(safe_futex(g2h(uaddr
), op
, tswap32(val
),
7386 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
7388 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
7390 case FUTEX_CMP_REQUEUE
:
7392 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7393 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7394 But the prototype takes a `struct timespec *'; insert casts
7395 to satisfy the compiler. We do not need to tswap TIMEOUT
7396 since it's not compared to guest memory. */
7397 pts
= (struct timespec
*)(uintptr_t) timeout
;
7398 return get_errno(safe_futex(g2h(uaddr
), op
, val
, pts
,
7400 (base_op
== FUTEX_CMP_REQUEUE
7404 return -TARGET_ENOSYS
;
7407 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7408 static abi_long
do_name_to_handle_at(abi_long dirfd
, abi_long pathname
,
7409 abi_long handle
, abi_long mount_id
,
7412 struct file_handle
*target_fh
;
7413 struct file_handle
*fh
;
7417 unsigned int size
, total_size
;
7419 if (get_user_s32(size
, handle
)) {
7420 return -TARGET_EFAULT
;
7423 name
= lock_user_string(pathname
);
7425 return -TARGET_EFAULT
;
7428 total_size
= sizeof(struct file_handle
) + size
;
7429 target_fh
= lock_user(VERIFY_WRITE
, handle
, total_size
, 0);
7431 unlock_user(name
, pathname
, 0);
7432 return -TARGET_EFAULT
;
7435 fh
= g_malloc0(total_size
);
7436 fh
->handle_bytes
= size
;
7438 ret
= get_errno(name_to_handle_at(dirfd
, path(name
), fh
, &mid
, flags
));
7439 unlock_user(name
, pathname
, 0);
7441 /* man name_to_handle_at(2):
7442 * Other than the use of the handle_bytes field, the caller should treat
7443 * the file_handle structure as an opaque data type
7446 memcpy(target_fh
, fh
, total_size
);
7447 target_fh
->handle_bytes
= tswap32(fh
->handle_bytes
);
7448 target_fh
->handle_type
= tswap32(fh
->handle_type
);
7450 unlock_user(target_fh
, handle
, total_size
);
7452 if (put_user_s32(mid
, mount_id
)) {
7453 return -TARGET_EFAULT
;
7461 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7462 static abi_long
do_open_by_handle_at(abi_long mount_fd
, abi_long handle
,
7465 struct file_handle
*target_fh
;
7466 struct file_handle
*fh
;
7467 unsigned int size
, total_size
;
7470 if (get_user_s32(size
, handle
)) {
7471 return -TARGET_EFAULT
;
7474 total_size
= sizeof(struct file_handle
) + size
;
7475 target_fh
= lock_user(VERIFY_READ
, handle
, total_size
, 1);
7477 return -TARGET_EFAULT
;
7480 fh
= g_memdup(target_fh
, total_size
);
7481 fh
->handle_bytes
= size
;
7482 fh
->handle_type
= tswap32(target_fh
->handle_type
);
7484 ret
= get_errno(open_by_handle_at(mount_fd
, fh
,
7485 target_to_host_bitmask(flags
, fcntl_flags_tbl
)));
7489 unlock_user(target_fh
, handle
, total_size
);
7495 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7497 /* signalfd siginfo conversion */
7500 host_to_target_signalfd_siginfo(struct signalfd_siginfo
*tinfo
,
7501 const struct signalfd_siginfo
*info
)
7503 int sig
= host_to_target_signal(info
->ssi_signo
);
7505 /* linux/signalfd.h defines a ssi_addr_lsb
7506 * not defined in sys/signalfd.h but used by some kernels
7509 #ifdef BUS_MCEERR_AO
7510 if (tinfo
->ssi_signo
== SIGBUS
&&
7511 (tinfo
->ssi_code
== BUS_MCEERR_AR
||
7512 tinfo
->ssi_code
== BUS_MCEERR_AO
)) {
7513 uint16_t *ssi_addr_lsb
= (uint16_t *)(&info
->ssi_addr
+ 1);
7514 uint16_t *tssi_addr_lsb
= (uint16_t *)(&tinfo
->ssi_addr
+ 1);
7515 *tssi_addr_lsb
= tswap16(*ssi_addr_lsb
);
7519 tinfo
->ssi_signo
= tswap32(sig
);
7520 tinfo
->ssi_errno
= tswap32(tinfo
->ssi_errno
);
7521 tinfo
->ssi_code
= tswap32(info
->ssi_code
);
7522 tinfo
->ssi_pid
= tswap32(info
->ssi_pid
);
7523 tinfo
->ssi_uid
= tswap32(info
->ssi_uid
);
7524 tinfo
->ssi_fd
= tswap32(info
->ssi_fd
);
7525 tinfo
->ssi_tid
= tswap32(info
->ssi_tid
);
7526 tinfo
->ssi_band
= tswap32(info
->ssi_band
);
7527 tinfo
->ssi_overrun
= tswap32(info
->ssi_overrun
);
7528 tinfo
->ssi_trapno
= tswap32(info
->ssi_trapno
);
7529 tinfo
->ssi_status
= tswap32(info
->ssi_status
);
7530 tinfo
->ssi_int
= tswap32(info
->ssi_int
);
7531 tinfo
->ssi_ptr
= tswap64(info
->ssi_ptr
);
7532 tinfo
->ssi_utime
= tswap64(info
->ssi_utime
);
7533 tinfo
->ssi_stime
= tswap64(info
->ssi_stime
);
7534 tinfo
->ssi_addr
= tswap64(info
->ssi_addr
);
7537 static abi_long
host_to_target_data_signalfd(void *buf
, size_t len
)
7541 for (i
= 0; i
< len
; i
+= sizeof(struct signalfd_siginfo
)) {
7542 host_to_target_signalfd_siginfo(buf
+ i
, buf
+ i
);
/* fd translator for signalfd descriptors: byte-swaps each
 * struct signalfd_siginfo read from the host fd into the guest's
 * endianness/signal numbering (see host_to_target_data_signalfd). */
static TargetFdTrans target_signalfd_trans = {
    .host_to_target_data = host_to_target_data_signalfd,
};
7552 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
7555 target_sigset_t
*target_mask
;
7559 if (flags
& ~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
)) {
7560 return -TARGET_EINVAL
;
7562 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
7563 return -TARGET_EFAULT
;
7566 target_to_host_sigset(&host_mask
, target_mask
);
7568 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
7570 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
7572 fd_trans_register(ret
, &target_signalfd_trans
);
7575 unlock_user_struct(target_mask
, mask
, 0);
7581 /* Map host to target signal numbers for the wait family of syscalls.
7582 Assume all other status bits are the same. */
7583 int host_to_target_waitstatus(int status
)
7585 if (WIFSIGNALED(status
)) {
7586 return host_to_target_signal(WTERMSIG(status
)) | (status
& ~0x7f);
7588 if (WIFSTOPPED(status
)) {
7589 return (host_to_target_signal(WSTOPSIG(status
)) << 8)
7595 static int open_self_cmdline(void *cpu_env
, int fd
)
7597 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7598 struct linux_binprm
*bprm
= ((TaskState
*)cpu
->opaque
)->bprm
;
7601 for (i
= 0; i
< bprm
->argc
; i
++) {
7602 size_t len
= strlen(bprm
->argv
[i
]) + 1;
7604 if (write(fd
, bprm
->argv
[i
], len
) != len
) {
7612 static int open_self_maps(void *cpu_env
, int fd
)
7614 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7615 TaskState
*ts
= cpu
->opaque
;
7621 fp
= fopen("/proc/self/maps", "r");
7626 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7627 int fields
, dev_maj
, dev_min
, inode
;
7628 uint64_t min
, max
, offset
;
7629 char flag_r
, flag_w
, flag_x
, flag_p
;
7630 char path
[512] = "";
7631 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
7632 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
7633 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
7635 if ((fields
< 10) || (fields
> 11)) {
7638 if (h2g_valid(min
)) {
7639 int flags
= page_get_flags(h2g(min
));
7640 max
= h2g_valid(max
- 1) ? max
: (uintptr_t)g2h(GUEST_ADDR_MAX
) + 1;
7641 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
7644 if (h2g(min
) == ts
->info
->stack_limit
) {
7645 pstrcpy(path
, sizeof(path
), " [stack]");
7647 dprintf(fd
, TARGET_ABI_FMT_lx
"-" TARGET_ABI_FMT_lx
7648 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
7649 h2g(min
), h2g(max
- 1) + 1, flag_r
, flag_w
,
7650 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
7651 path
[0] ? " " : "", path
);
7661 static int open_self_stat(void *cpu_env
, int fd
)
7663 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7664 TaskState
*ts
= cpu
->opaque
;
7665 abi_ulong start_stack
= ts
->info
->start_stack
;
7668 for (i
= 0; i
< 44; i
++) {
7676 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
7677 } else if (i
== 1) {
7679 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
7680 } else if (i
== 27) {
7683 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
7685 /* for the rest, there is MasterCard */
7686 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
7690 if (write(fd
, buf
, len
) != len
) {
7698 static int open_self_auxv(void *cpu_env
, int fd
)
7700 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7701 TaskState
*ts
= cpu
->opaque
;
7702 abi_ulong auxv
= ts
->info
->saved_auxv
;
7703 abi_ulong len
= ts
->info
->auxv_len
;
7707 * Auxiliary vector is stored in target process stack.
7708 * read in whole auxv vector and copy it to file
7710 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
7714 r
= write(fd
, ptr
, len
);
7721 lseek(fd
, 0, SEEK_SET
);
7722 unlock_user(ptr
, auxv
, len
);
7728 static int is_proc_myself(const char *filename
, const char *entry
)
7730 if (!strncmp(filename
, "/proc/", strlen("/proc/"))) {
7731 filename
+= strlen("/proc/");
7732 if (!strncmp(filename
, "self/", strlen("self/"))) {
7733 filename
+= strlen("self/");
7734 } else if (*filename
>= '1' && *filename
<= '9') {
7736 snprintf(myself
, sizeof(myself
), "%d/", getpid());
7737 if (!strncmp(filename
, myself
, strlen(myself
))) {
7738 filename
+= strlen(myself
);
7745 if (!strcmp(filename
, entry
)) {
7752 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/*
 * Exact-match predicate for faked /proc entries.
 *
 * Unlike is_proc_myself(), no "/proc/<pid>/" prefix handling is done:
 * the whole pathname must equal ENTRY.  Returns nonzero on a match,
 * zero otherwise.
 */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
7758 static int open_net_route(void *cpu_env
, int fd
)
7765 fp
= fopen("/proc/net/route", "r");
7772 read
= getline(&line
, &len
, fp
);
7773 dprintf(fd
, "%s", line
);
7777 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7779 uint32_t dest
, gw
, mask
;
7780 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
7781 sscanf(line
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7782 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
7783 &mask
, &mtu
, &window
, &irtt
);
7784 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7785 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
7786 metric
, tswap32(mask
), mtu
, window
, irtt
);
7796 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
7799 const char *filename
;
7800 int (*fill
)(void *cpu_env
, int fd
);
7801 int (*cmp
)(const char *s1
, const char *s2
);
7803 const struct fake_open
*fake_open
;
7804 static const struct fake_open fakes
[] = {
7805 { "maps", open_self_maps
, is_proc_myself
},
7806 { "stat", open_self_stat
, is_proc_myself
},
7807 { "auxv", open_self_auxv
, is_proc_myself
},
7808 { "cmdline", open_self_cmdline
, is_proc_myself
},
7809 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7810 { "/proc/net/route", open_net_route
, is_proc
},
7812 { NULL
, NULL
, NULL
}
7815 if (is_proc_myself(pathname
, "exe")) {
7816 int execfd
= qemu_getauxval(AT_EXECFD
);
7817 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
7820 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
7821 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
7826 if (fake_open
->filename
) {
7828 char filename
[PATH_MAX
];
7831 /* create temporary file to map stat to */
7832 tmpdir
= getenv("TMPDIR");
7835 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
7836 fd
= mkstemp(filename
);
7842 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
7848 lseek(fd
, 0, SEEK_SET
);
7853 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
7856 #define TIMER_MAGIC 0x0caf0000
7857 #define TIMER_MAGIC_MASK 0xffff0000
7859 /* Convert QEMU provided timer ID back to internal 16bit index format */
7860 static target_timer_t
get_timer_id(abi_long arg
)
7862 target_timer_t timerid
= arg
;
7864 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
7865 return -TARGET_EINVAL
;
7870 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
7871 return -TARGET_EINVAL
;
7877 static abi_long
swap_data_eventfd(void *buf
, size_t len
)
7879 uint64_t *counter
= buf
;
7882 if (len
< sizeof(uint64_t)) {
7886 for (i
= 0; i
< len
; i
+= sizeof(uint64_t)) {
7887 *counter
= tswap64(*counter
);
/* fd translator for eventfd descriptors: the 64-bit counter is
 * byte-swapped in both directions with the same helper, since the
 * transformation (tswap64 of each uint64_t) is its own inverse. */
static TargetFdTrans target_eventfd_trans = {
    .host_to_target_data = swap_data_eventfd,
    .target_to_host_data = swap_data_eventfd,
};
7899 #if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
7900 (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
7901 defined(__NR_inotify_init1))
7902 static abi_long
host_to_target_data_inotify(void *buf
, size_t len
)
7904 struct inotify_event
*ev
;
7908 for (i
= 0; i
< len
; i
+= sizeof(struct inotify_event
) + name_len
) {
7909 ev
= (struct inotify_event
*)((char *)buf
+ i
);
7912 ev
->wd
= tswap32(ev
->wd
);
7913 ev
->mask
= tswap32(ev
->mask
);
7914 ev
->cookie
= tswap32(ev
->cookie
);
7915 ev
->len
= tswap32(name_len
);
/* fd translator for inotify descriptors: swaps the fixed-size fields
 * of each struct inotify_event read from the host fd into guest byte
 * order (see host_to_target_data_inotify). */
static TargetFdTrans target_inotify_trans = {
    .host_to_target_data = host_to_target_data_inotify,
};
7926 static int target_to_host_cpu_mask(unsigned long *host_mask
,
7928 abi_ulong target_addr
,
7931 unsigned target_bits
= sizeof(abi_ulong
) * 8;
7932 unsigned host_bits
= sizeof(*host_mask
) * 8;
7933 abi_ulong
*target_mask
;
7936 assert(host_size
>= target_size
);
7938 target_mask
= lock_user(VERIFY_READ
, target_addr
, target_size
, 1);
7940 return -TARGET_EFAULT
;
7942 memset(host_mask
, 0, host_size
);
7944 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
7945 unsigned bit
= i
* target_bits
;
7948 __get_user(val
, &target_mask
[i
]);
7949 for (j
= 0; j
< target_bits
; j
++, bit
++) {
7950 if (val
& (1UL << j
)) {
7951 host_mask
[bit
/ host_bits
] |= 1UL << (bit
% host_bits
);
7956 unlock_user(target_mask
, target_addr
, 0);
7960 static int host_to_target_cpu_mask(const unsigned long *host_mask
,
7962 abi_ulong target_addr
,
7965 unsigned target_bits
= sizeof(abi_ulong
) * 8;
7966 unsigned host_bits
= sizeof(*host_mask
) * 8;
7967 abi_ulong
*target_mask
;
7970 assert(host_size
>= target_size
);
7972 target_mask
= lock_user(VERIFY_WRITE
, target_addr
, target_size
, 0);
7974 return -TARGET_EFAULT
;
7977 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
7978 unsigned bit
= i
* target_bits
;
7981 for (j
= 0; j
< target_bits
; j
++, bit
++) {
7982 if (host_mask
[bit
/ host_bits
] & (1UL << (bit
% host_bits
))) {
7986 __put_user(val
, &target_mask
[i
]);
7989 unlock_user(target_mask
, target_addr
, target_size
);
7993 /* do_syscall() should always have a single exit point at the end so
7994 that actions, such as logging of syscall results, can be performed.
7995 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7996 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
7997 abi_long arg2
, abi_long arg3
, abi_long arg4
,
7998 abi_long arg5
, abi_long arg6
, abi_long arg7
,
8001 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
8003 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8004 || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8005 || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64)
8008 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8009 || defined(TARGET_NR_fstatfs)
8014 #if defined(DEBUG_ERESTARTSYS)
8015 /* Debug-only code for exercising the syscall-restart code paths
8016 * in the per-architecture cpu main loops: restart every syscall
8017 * the guest makes once before letting it through.
8024 return -TARGET_ERESTARTSYS
;
8030 gemu_log("syscall %d", num
);
8032 trace_guest_user_syscall(cpu
, num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
8034 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
8037 case TARGET_NR_exit
:
8038 /* In old applications this may be used to implement _exit(2).
8039 However in threaded applictions it is used for thread termination,
8040 and _exit_group is used for application termination.
8041 Do thread termination if we have more then one thread. */
8043 if (block_signals()) {
8044 ret
= -TARGET_ERESTARTSYS
;
8050 if (CPU_NEXT(first_cpu
)) {
8053 /* Remove the CPU from the list. */
8054 QTAILQ_REMOVE(&cpus
, cpu
, node
);
8059 if (ts
->child_tidptr
) {
8060 put_user_u32(0, ts
->child_tidptr
);
8061 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
8065 object_unref(OBJECT(cpu
));
8067 rcu_unregister_thread();
8072 preexit_cleanup(cpu_env
, arg1
);
8074 ret
= 0; /* avoid warning */
8076 case TARGET_NR_read
:
8080 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
8082 ret
= get_errno(safe_read(arg1
, p
, arg3
));
8084 fd_trans_host_to_target_data(arg1
)) {
8085 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
8087 unlock_user(p
, arg2
, ret
);
8090 case TARGET_NR_write
:
8091 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
8093 if (fd_trans_target_to_host_data(arg1
)) {
8094 void *copy
= g_malloc(arg3
);
8095 memcpy(copy
, p
, arg3
);
8096 ret
= fd_trans_target_to_host_data(arg1
)(copy
, arg3
);
8098 ret
= get_errno(safe_write(arg1
, copy
, ret
));
8102 ret
= get_errno(safe_write(arg1
, p
, arg3
));
8104 unlock_user(p
, arg2
, 0);
8106 #ifdef TARGET_NR_open
8107 case TARGET_NR_open
:
8108 if (!(p
= lock_user_string(arg1
)))
8110 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
8111 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
8113 fd_trans_unregister(ret
);
8114 unlock_user(p
, arg1
, 0);
8117 case TARGET_NR_openat
:
8118 if (!(p
= lock_user_string(arg2
)))
8120 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
8121 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
8123 fd_trans_unregister(ret
);
8124 unlock_user(p
, arg2
, 0);
8126 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8127 case TARGET_NR_name_to_handle_at
:
8128 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
8131 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8132 case TARGET_NR_open_by_handle_at
:
8133 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
8134 fd_trans_unregister(ret
);
8137 case TARGET_NR_close
:
8138 fd_trans_unregister(arg1
);
8139 ret
= get_errno(close(arg1
));
8144 #ifdef TARGET_NR_fork
8145 case TARGET_NR_fork
:
8146 ret
= get_errno(do_fork(cpu_env
, TARGET_SIGCHLD
, 0, 0, 0, 0));
8149 #ifdef TARGET_NR_waitpid
8150 case TARGET_NR_waitpid
:
8153 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
8154 if (!is_error(ret
) && arg2
&& ret
8155 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
8160 #ifdef TARGET_NR_waitid
8161 case TARGET_NR_waitid
:
8165 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
8166 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
8167 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
8169 host_to_target_siginfo(p
, &info
);
8170 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
8175 #ifdef TARGET_NR_creat /* not on alpha */
8176 case TARGET_NR_creat
:
8177 if (!(p
= lock_user_string(arg1
)))
8179 ret
= get_errno(creat(p
, arg2
));
8180 fd_trans_unregister(ret
);
8181 unlock_user(p
, arg1
, 0);
8184 #ifdef TARGET_NR_link
8185 case TARGET_NR_link
:
8188 p
= lock_user_string(arg1
);
8189 p2
= lock_user_string(arg2
);
8191 ret
= -TARGET_EFAULT
;
8193 ret
= get_errno(link(p
, p2
));
8194 unlock_user(p2
, arg2
, 0);
8195 unlock_user(p
, arg1
, 0);
8199 #if defined(TARGET_NR_linkat)
8200 case TARGET_NR_linkat
:
8205 p
= lock_user_string(arg2
);
8206 p2
= lock_user_string(arg4
);
8208 ret
= -TARGET_EFAULT
;
8210 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
8211 unlock_user(p
, arg2
, 0);
8212 unlock_user(p2
, arg4
, 0);
8216 #ifdef TARGET_NR_unlink
8217 case TARGET_NR_unlink
:
8218 if (!(p
= lock_user_string(arg1
)))
8220 ret
= get_errno(unlink(p
));
8221 unlock_user(p
, arg1
, 0);
8224 #if defined(TARGET_NR_unlinkat)
8225 case TARGET_NR_unlinkat
:
8226 if (!(p
= lock_user_string(arg2
)))
8228 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
8229 unlock_user(p
, arg2
, 0);
8232 case TARGET_NR_execve
:
8234 char **argp
, **envp
;
8237 abi_ulong guest_argp
;
8238 abi_ulong guest_envp
;
8245 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
8246 if (get_user_ual(addr
, gp
))
8254 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
8255 if (get_user_ual(addr
, gp
))
8262 argp
= g_new0(char *, argc
+ 1);
8263 envp
= g_new0(char *, envc
+ 1);
8265 for (gp
= guest_argp
, q
= argp
; gp
;
8266 gp
+= sizeof(abi_ulong
), q
++) {
8267 if (get_user_ual(addr
, gp
))
8271 if (!(*q
= lock_user_string(addr
)))
8273 total_size
+= strlen(*q
) + 1;
8277 for (gp
= guest_envp
, q
= envp
; gp
;
8278 gp
+= sizeof(abi_ulong
), q
++) {
8279 if (get_user_ual(addr
, gp
))
8283 if (!(*q
= lock_user_string(addr
)))
8285 total_size
+= strlen(*q
) + 1;
8289 if (!(p
= lock_user_string(arg1
)))
8291 /* Although execve() is not an interruptible syscall it is
8292 * a special case where we must use the safe_syscall wrapper:
8293 * if we allow a signal to happen before we make the host
8294 * syscall then we will 'lose' it, because at the point of
8295 * execve the process leaves QEMU's control. So we use the
8296 * safe syscall wrapper to ensure that we either take the
8297 * signal as a guest signal, or else it does not happen
8298 * before the execve completes and makes it the other
8299 * program's problem.
8301 ret
= get_errno(safe_execve(p
, argp
, envp
));
8302 unlock_user(p
, arg1
, 0);
8307 ret
= -TARGET_EFAULT
;
8310 for (gp
= guest_argp
, q
= argp
; *q
;
8311 gp
+= sizeof(abi_ulong
), q
++) {
8312 if (get_user_ual(addr
, gp
)
8315 unlock_user(*q
, addr
, 0);
8317 for (gp
= guest_envp
, q
= envp
; *q
;
8318 gp
+= sizeof(abi_ulong
), q
++) {
8319 if (get_user_ual(addr
, gp
)
8322 unlock_user(*q
, addr
, 0);
8329 case TARGET_NR_chdir
:
8330 if (!(p
= lock_user_string(arg1
)))
8332 ret
= get_errno(chdir(p
));
8333 unlock_user(p
, arg1
, 0);
8335 #ifdef TARGET_NR_time
8336 case TARGET_NR_time
:
8339 ret
= get_errno(time(&host_time
));
8342 && put_user_sal(host_time
, arg1
))
8347 #ifdef TARGET_NR_mknod
8348 case TARGET_NR_mknod
:
8349 if (!(p
= lock_user_string(arg1
)))
8351 ret
= get_errno(mknod(p
, arg2
, arg3
));
8352 unlock_user(p
, arg1
, 0);
8355 #if defined(TARGET_NR_mknodat)
8356 case TARGET_NR_mknodat
:
8357 if (!(p
= lock_user_string(arg2
)))
8359 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
8360 unlock_user(p
, arg2
, 0);
8363 #ifdef TARGET_NR_chmod
8364 case TARGET_NR_chmod
:
8365 if (!(p
= lock_user_string(arg1
)))
8367 ret
= get_errno(chmod(p
, arg2
));
8368 unlock_user(p
, arg1
, 0);
8371 #ifdef TARGET_NR_break
8372 case TARGET_NR_break
:
8375 #ifdef TARGET_NR_oldstat
8376 case TARGET_NR_oldstat
:
8379 #ifdef TARGET_NR_lseek
8380 case TARGET_NR_lseek
:
8381 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
8384 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8385 /* Alpha specific */
8386 case TARGET_NR_getxpid
:
8387 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
8388 ret
= get_errno(getpid());
8391 #ifdef TARGET_NR_getpid
8392 case TARGET_NR_getpid
:
8393 ret
= get_errno(getpid());
8396 case TARGET_NR_mount
:
8398 /* need to look at the data field */
8402 p
= lock_user_string(arg1
);
8410 p2
= lock_user_string(arg2
);
8413 unlock_user(p
, arg1
, 0);
8419 p3
= lock_user_string(arg3
);
8422 unlock_user(p
, arg1
, 0);
8424 unlock_user(p2
, arg2
, 0);
8431 /* FIXME - arg5 should be locked, but it isn't clear how to
8432 * do that since it's not guaranteed to be a NULL-terminated
8436 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
8438 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
8440 ret
= get_errno(ret
);
8443 unlock_user(p
, arg1
, 0);
8445 unlock_user(p2
, arg2
, 0);
8447 unlock_user(p3
, arg3
, 0);
8451 #ifdef TARGET_NR_umount
8452 case TARGET_NR_umount
:
8453 if (!(p
= lock_user_string(arg1
)))
8455 ret
= get_errno(umount(p
));
8456 unlock_user(p
, arg1
, 0);
8459 #ifdef TARGET_NR_stime /* not on alpha */
8460 case TARGET_NR_stime
:
8463 if (get_user_sal(host_time
, arg1
))
8465 ret
= get_errno(stime(&host_time
));
8469 case TARGET_NR_ptrace
:
8471 #ifdef TARGET_NR_alarm /* not on alpha */
8472 case TARGET_NR_alarm
:
8476 #ifdef TARGET_NR_oldfstat
8477 case TARGET_NR_oldfstat
:
8480 #ifdef TARGET_NR_pause /* not on alpha */
8481 case TARGET_NR_pause
:
8482 if (!block_signals()) {
8483 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
8485 ret
= -TARGET_EINTR
;
8488 #ifdef TARGET_NR_utime
8489 case TARGET_NR_utime
:
8491 struct utimbuf tbuf
, *host_tbuf
;
8492 struct target_utimbuf
*target_tbuf
;
8494 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
8496 tbuf
.actime
= tswapal(target_tbuf
->actime
);
8497 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
8498 unlock_user_struct(target_tbuf
, arg2
, 0);
8503 if (!(p
= lock_user_string(arg1
)))
8505 ret
= get_errno(utime(p
, host_tbuf
));
8506 unlock_user(p
, arg1
, 0);
8510 #ifdef TARGET_NR_utimes
8511 case TARGET_NR_utimes
:
8513 struct timeval
*tvp
, tv
[2];
8515 if (copy_from_user_timeval(&tv
[0], arg2
)
8516 || copy_from_user_timeval(&tv
[1],
8517 arg2
+ sizeof(struct target_timeval
)))
8523 if (!(p
= lock_user_string(arg1
)))
8525 ret
= get_errno(utimes(p
, tvp
));
8526 unlock_user(p
, arg1
, 0);
8530 #if defined(TARGET_NR_futimesat)
8531 case TARGET_NR_futimesat
:
8533 struct timeval
*tvp
, tv
[2];
8535 if (copy_from_user_timeval(&tv
[0], arg3
)
8536 || copy_from_user_timeval(&tv
[1],
8537 arg3
+ sizeof(struct target_timeval
)))
8543 if (!(p
= lock_user_string(arg2
)))
8545 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
8546 unlock_user(p
, arg2
, 0);
8550 #ifdef TARGET_NR_stty
8551 case TARGET_NR_stty
:
8554 #ifdef TARGET_NR_gtty
8555 case TARGET_NR_gtty
:
8558 #ifdef TARGET_NR_access
8559 case TARGET_NR_access
:
8560 if (!(p
= lock_user_string(arg1
)))
8562 ret
= get_errno(access(path(p
), arg2
));
8563 unlock_user(p
, arg1
, 0);
8566 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8567 case TARGET_NR_faccessat
:
8568 if (!(p
= lock_user_string(arg2
)))
8570 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
8571 unlock_user(p
, arg2
, 0);
8574 #ifdef TARGET_NR_nice /* not on alpha */
8575 case TARGET_NR_nice
:
8576 ret
= get_errno(nice(arg1
));
8579 #ifdef TARGET_NR_ftime
8580 case TARGET_NR_ftime
:
8583 case TARGET_NR_sync
:
8587 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8588 case TARGET_NR_syncfs
:
8589 ret
= get_errno(syncfs(arg1
));
8592 case TARGET_NR_kill
:
8593 ret
= get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
8595 #ifdef TARGET_NR_rename
8596 case TARGET_NR_rename
:
8599 p
= lock_user_string(arg1
);
8600 p2
= lock_user_string(arg2
);
8602 ret
= -TARGET_EFAULT
;
8604 ret
= get_errno(rename(p
, p2
));
8605 unlock_user(p2
, arg2
, 0);
8606 unlock_user(p
, arg1
, 0);
8610 #if defined(TARGET_NR_renameat)
8611 case TARGET_NR_renameat
:
8614 p
= lock_user_string(arg2
);
8615 p2
= lock_user_string(arg4
);
8617 ret
= -TARGET_EFAULT
;
8619 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
8620 unlock_user(p2
, arg4
, 0);
8621 unlock_user(p
, arg2
, 0);
8625 #if defined(TARGET_NR_renameat2)
8626 case TARGET_NR_renameat2
:
8629 p
= lock_user_string(arg2
);
8630 p2
= lock_user_string(arg4
);
8632 ret
= -TARGET_EFAULT
;
8634 ret
= get_errno(sys_renameat2(arg1
, p
, arg3
, p2
, arg5
));
8636 unlock_user(p2
, arg4
, 0);
8637 unlock_user(p
, arg2
, 0);
8641 #ifdef TARGET_NR_mkdir
8642 case TARGET_NR_mkdir
:
8643 if (!(p
= lock_user_string(arg1
)))
8645 ret
= get_errno(mkdir(p
, arg2
));
8646 unlock_user(p
, arg1
, 0);
8649 #if defined(TARGET_NR_mkdirat)
8650 case TARGET_NR_mkdirat
:
8651 if (!(p
= lock_user_string(arg2
)))
8653 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
8654 unlock_user(p
, arg2
, 0);
8657 #ifdef TARGET_NR_rmdir
8658 case TARGET_NR_rmdir
:
8659 if (!(p
= lock_user_string(arg1
)))
8661 ret
= get_errno(rmdir(p
));
8662 unlock_user(p
, arg1
, 0);
8666 ret
= get_errno(dup(arg1
));
8668 fd_trans_dup(arg1
, ret
);
8671 #ifdef TARGET_NR_pipe
8672 case TARGET_NR_pipe
:
8673 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
8676 #ifdef TARGET_NR_pipe2
8677 case TARGET_NR_pipe2
:
8678 ret
= do_pipe(cpu_env
, arg1
,
8679 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
8682 case TARGET_NR_times
:
8684 struct target_tms
*tmsp
;
8686 ret
= get_errno(times(&tms
));
8688 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
8691 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
8692 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
8693 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
8694 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
8697 ret
= host_to_target_clock_t(ret
);
8700 #ifdef TARGET_NR_prof
8701 case TARGET_NR_prof
:
8704 #ifdef TARGET_NR_signal
8705 case TARGET_NR_signal
:
8708 case TARGET_NR_acct
:
8710 ret
= get_errno(acct(NULL
));
8712 if (!(p
= lock_user_string(arg1
)))
8714 ret
= get_errno(acct(path(p
)));
8715 unlock_user(p
, arg1
, 0);
8718 #ifdef TARGET_NR_umount2
8719 case TARGET_NR_umount2
:
8720 if (!(p
= lock_user_string(arg1
)))
8722 ret
= get_errno(umount2(p
, arg2
));
8723 unlock_user(p
, arg1
, 0);
8726 #ifdef TARGET_NR_lock
8727 case TARGET_NR_lock
:
8730 case TARGET_NR_ioctl
:
8731 ret
= do_ioctl(arg1
, arg2
, arg3
);
8733 #ifdef TARGET_NR_fcntl
8734 case TARGET_NR_fcntl
:
8735 ret
= do_fcntl(arg1
, arg2
, arg3
);
8738 #ifdef TARGET_NR_mpx
8742 case TARGET_NR_setpgid
:
8743 ret
= get_errno(setpgid(arg1
, arg2
));
8745 #ifdef TARGET_NR_ulimit
8746 case TARGET_NR_ulimit
:
8749 #ifdef TARGET_NR_oldolduname
8750 case TARGET_NR_oldolduname
:
8753 case TARGET_NR_umask
:
8754 ret
= get_errno(umask(arg1
));
8756 case TARGET_NR_chroot
:
8757 if (!(p
= lock_user_string(arg1
)))
8759 ret
= get_errno(chroot(p
));
8760 unlock_user(p
, arg1
, 0);
8762 #ifdef TARGET_NR_ustat
8763 case TARGET_NR_ustat
:
8766 #ifdef TARGET_NR_dup2
8767 case TARGET_NR_dup2
:
8768 ret
= get_errno(dup2(arg1
, arg2
));
8770 fd_trans_dup(arg1
, arg2
);
8774 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8775 case TARGET_NR_dup3
:
8779 if ((arg3
& ~TARGET_O_CLOEXEC
) != 0) {
8782 host_flags
= target_to_host_bitmask(arg3
, fcntl_flags_tbl
);
8783 ret
= get_errno(dup3(arg1
, arg2
, host_flags
));
8785 fd_trans_dup(arg1
, arg2
);
8790 #ifdef TARGET_NR_getppid /* not on alpha */
8791 case TARGET_NR_getppid
:
8792 ret
= get_errno(getppid());
8795 #ifdef TARGET_NR_getpgrp
8796 case TARGET_NR_getpgrp
:
8797 ret
= get_errno(getpgrp());
8800 case TARGET_NR_setsid
:
8801 ret
= get_errno(setsid());
8803 #ifdef TARGET_NR_sigaction
8804 case TARGET_NR_sigaction
:
8806 #if defined(TARGET_ALPHA)
8807 struct target_sigaction act
, oact
, *pact
= 0;
8808 struct target_old_sigaction
*old_act
;
8810 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8812 act
._sa_handler
= old_act
->_sa_handler
;
8813 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8814 act
.sa_flags
= old_act
->sa_flags
;
8815 act
.sa_restorer
= 0;
8816 unlock_user_struct(old_act
, arg2
, 0);
8819 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8820 if (!is_error(ret
) && arg3
) {
8821 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8823 old_act
->_sa_handler
= oact
._sa_handler
;
8824 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8825 old_act
->sa_flags
= oact
.sa_flags
;
8826 unlock_user_struct(old_act
, arg3
, 1);
8828 #elif defined(TARGET_MIPS)
8829 struct target_sigaction act
, oact
, *pact
, *old_act
;
8832 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8834 act
._sa_handler
= old_act
->_sa_handler
;
8835 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
8836 act
.sa_flags
= old_act
->sa_flags
;
8837 unlock_user_struct(old_act
, arg2
, 0);
8843 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8845 if (!is_error(ret
) && arg3
) {
8846 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8848 old_act
->_sa_handler
= oact
._sa_handler
;
8849 old_act
->sa_flags
= oact
.sa_flags
;
8850 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
8851 old_act
->sa_mask
.sig
[1] = 0;
8852 old_act
->sa_mask
.sig
[2] = 0;
8853 old_act
->sa_mask
.sig
[3] = 0;
8854 unlock_user_struct(old_act
, arg3
, 1);
8857 struct target_old_sigaction
*old_act
;
8858 struct target_sigaction act
, oact
, *pact
;
8860 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8862 act
._sa_handler
= old_act
->_sa_handler
;
8863 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8864 act
.sa_flags
= old_act
->sa_flags
;
8865 act
.sa_restorer
= old_act
->sa_restorer
;
8866 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8867 act
.ka_restorer
= 0;
8869 unlock_user_struct(old_act
, arg2
, 0);
8874 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8875 if (!is_error(ret
) && arg3
) {
8876 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8878 old_act
->_sa_handler
= oact
._sa_handler
;
8879 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8880 old_act
->sa_flags
= oact
.sa_flags
;
8881 old_act
->sa_restorer
= oact
.sa_restorer
;
8882 unlock_user_struct(old_act
, arg3
, 1);
8888 case TARGET_NR_rt_sigaction
:
8890 #if defined(TARGET_ALPHA)
8891 /* For Alpha and SPARC this is a 5 argument syscall, with
8892 * a 'restorer' parameter which must be copied into the
8893 * sa_restorer field of the sigaction struct.
8894 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8895 * and arg5 is the sigsetsize.
8896 * Alpha also has a separate rt_sigaction struct that it uses
8897 * here; SPARC uses the usual sigaction struct.
8899 struct target_rt_sigaction
*rt_act
;
8900 struct target_sigaction act
, oact
, *pact
= 0;
8902 if (arg4
!= sizeof(target_sigset_t
)) {
8903 ret
= -TARGET_EINVAL
;
8907 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
8909 act
._sa_handler
= rt_act
->_sa_handler
;
8910 act
.sa_mask
= rt_act
->sa_mask
;
8911 act
.sa_flags
= rt_act
->sa_flags
;
8912 act
.sa_restorer
= arg5
;
8913 unlock_user_struct(rt_act
, arg2
, 0);
8916 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8917 if (!is_error(ret
) && arg3
) {
8918 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
8920 rt_act
->_sa_handler
= oact
._sa_handler
;
8921 rt_act
->sa_mask
= oact
.sa_mask
;
8922 rt_act
->sa_flags
= oact
.sa_flags
;
8923 unlock_user_struct(rt_act
, arg3
, 1);
8927 target_ulong restorer
= arg4
;
8928 target_ulong sigsetsize
= arg5
;
8930 target_ulong sigsetsize
= arg4
;
8932 struct target_sigaction
*act
;
8933 struct target_sigaction
*oact
;
8935 if (sigsetsize
!= sizeof(target_sigset_t
)) {
8936 ret
= -TARGET_EINVAL
;
8940 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1)) {
8943 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8944 act
->ka_restorer
= restorer
;
8950 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
8951 ret
= -TARGET_EFAULT
;
8952 goto rt_sigaction_fail
;
8956 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
8959 unlock_user_struct(act
, arg2
, 0);
8961 unlock_user_struct(oact
, arg3
, 1);
8965 #ifdef TARGET_NR_sgetmask /* not on alpha */
8966 case TARGET_NR_sgetmask
:
8969 abi_ulong target_set
;
8970 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8972 host_to_target_old_sigset(&target_set
, &cur_set
);
8978 #ifdef TARGET_NR_ssetmask /* not on alpha */
8979 case TARGET_NR_ssetmask
:
8982 abi_ulong target_set
= arg1
;
8983 target_to_host_old_sigset(&set
, &target_set
);
8984 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
8986 host_to_target_old_sigset(&target_set
, &oset
);
8992 #ifdef TARGET_NR_sigprocmask
8993 case TARGET_NR_sigprocmask
:
8995 #if defined(TARGET_ALPHA)
8996 sigset_t set
, oldset
;
9001 case TARGET_SIG_BLOCK
:
9004 case TARGET_SIG_UNBLOCK
:
9007 case TARGET_SIG_SETMASK
:
9011 ret
= -TARGET_EINVAL
;
9015 target_to_host_old_sigset(&set
, &mask
);
9017 ret
= do_sigprocmask(how
, &set
, &oldset
);
9018 if (!is_error(ret
)) {
9019 host_to_target_old_sigset(&mask
, &oldset
);
9021 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
9024 sigset_t set
, oldset
, *set_ptr
;
9029 case TARGET_SIG_BLOCK
:
9032 case TARGET_SIG_UNBLOCK
:
9035 case TARGET_SIG_SETMASK
:
9039 ret
= -TARGET_EINVAL
;
9042 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
9044 target_to_host_old_sigset(&set
, p
);
9045 unlock_user(p
, arg2
, 0);
9051 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
9052 if (!is_error(ret
) && arg3
) {
9053 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
9055 host_to_target_old_sigset(p
, &oldset
);
9056 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
9062 case TARGET_NR_rt_sigprocmask
:
9065 sigset_t set
, oldset
, *set_ptr
;
9067 if (arg4
!= sizeof(target_sigset_t
)) {
9068 ret
= -TARGET_EINVAL
;
9074 case TARGET_SIG_BLOCK
:
9077 case TARGET_SIG_UNBLOCK
:
9080 case TARGET_SIG_SETMASK
:
9084 ret
= -TARGET_EINVAL
;
9087 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
9089 target_to_host_sigset(&set
, p
);
9090 unlock_user(p
, arg2
, 0);
9096 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
9097 if (!is_error(ret
) && arg3
) {
9098 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
9100 host_to_target_sigset(p
, &oldset
);
9101 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
9105 #ifdef TARGET_NR_sigpending
9106 case TARGET_NR_sigpending
:
9109 ret
= get_errno(sigpending(&set
));
9110 if (!is_error(ret
)) {
9111 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
9113 host_to_target_old_sigset(p
, &set
);
9114 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
9119 case TARGET_NR_rt_sigpending
:
9123 /* Yes, this check is >, not != like most. We follow the kernel's
9124 * logic and it does it like this because it implements
9125 * NR_sigpending through the same code path, and in that case
9126 * the old_sigset_t is smaller in size.
9128 if (arg2
> sizeof(target_sigset_t
)) {
9129 ret
= -TARGET_EINVAL
;
9133 ret
= get_errno(sigpending(&set
));
9134 if (!is_error(ret
)) {
9135 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
9137 host_to_target_sigset(p
, &set
);
9138 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
9142 #ifdef TARGET_NR_sigsuspend
9143 case TARGET_NR_sigsuspend
:
9145 TaskState
*ts
= cpu
->opaque
;
9146 #if defined(TARGET_ALPHA)
9147 abi_ulong mask
= arg1
;
9148 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
9150 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
9152 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
9153 unlock_user(p
, arg1
, 0);
9155 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
9157 if (ret
!= -TARGET_ERESTARTSYS
) {
9158 ts
->in_sigsuspend
= 1;
9163 case TARGET_NR_rt_sigsuspend
:
9165 TaskState
*ts
= cpu
->opaque
;
9167 if (arg2
!= sizeof(target_sigset_t
)) {
9168 ret
= -TARGET_EINVAL
;
9171 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
9173 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
9174 unlock_user(p
, arg1
, 0);
9175 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
9177 if (ret
!= -TARGET_ERESTARTSYS
) {
9178 ts
->in_sigsuspend
= 1;
9182 case TARGET_NR_rt_sigtimedwait
:
9185 struct timespec uts
, *puts
;
9188 if (arg4
!= sizeof(target_sigset_t
)) {
9189 ret
= -TARGET_EINVAL
;
9193 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
9195 target_to_host_sigset(&set
, p
);
9196 unlock_user(p
, arg1
, 0);
9199 target_to_host_timespec(puts
, arg3
);
9203 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
9205 if (!is_error(ret
)) {
9207 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
9212 host_to_target_siginfo(p
, &uinfo
);
9213 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
9215 ret
= host_to_target_signal(ret
);
9219 case TARGET_NR_rt_sigqueueinfo
:
9223 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
9227 target_to_host_siginfo(&uinfo
, p
);
9228 unlock_user(p
, arg3
, 0);
9229 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
9232 case TARGET_NR_rt_tgsigqueueinfo
:
9236 p
= lock_user(VERIFY_READ
, arg4
, sizeof(target_siginfo_t
), 1);
9240 target_to_host_siginfo(&uinfo
, p
);
9241 unlock_user(p
, arg4
, 0);
9242 ret
= get_errno(sys_rt_tgsigqueueinfo(arg1
, arg2
, arg3
, &uinfo
));
9245 #ifdef TARGET_NR_sigreturn
9246 case TARGET_NR_sigreturn
:
9247 if (block_signals()) {
9248 ret
= -TARGET_ERESTARTSYS
;
9250 ret
= do_sigreturn(cpu_env
);
9254 case TARGET_NR_rt_sigreturn
:
9255 if (block_signals()) {
9256 ret
= -TARGET_ERESTARTSYS
;
9258 ret
= do_rt_sigreturn(cpu_env
);
9261 case TARGET_NR_sethostname
:
9262 if (!(p
= lock_user_string(arg1
)))
9264 ret
= get_errno(sethostname(p
, arg2
));
9265 unlock_user(p
, arg1
, 0);
9267 #ifdef TARGET_NR_setrlimit
9268 case TARGET_NR_setrlimit
:
9270 int resource
= target_to_host_resource(arg1
);
9271 struct target_rlimit
*target_rlim
;
9273 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
9275 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
9276 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
9277 unlock_user_struct(target_rlim
, arg2
, 0);
9278 ret
= get_errno(setrlimit(resource
, &rlim
));
9282 #ifdef TARGET_NR_getrlimit
9283 case TARGET_NR_getrlimit
:
9285 int resource
= target_to_host_resource(arg1
);
9286 struct target_rlimit
*target_rlim
;
9289 ret
= get_errno(getrlimit(resource
, &rlim
));
9290 if (!is_error(ret
)) {
9291 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
9293 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
9294 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
9295 unlock_user_struct(target_rlim
, arg2
, 1);
9300 case TARGET_NR_getrusage
:
9302 struct rusage rusage
;
9303 ret
= get_errno(getrusage(arg1
, &rusage
));
9304 if (!is_error(ret
)) {
9305 ret
= host_to_target_rusage(arg2
, &rusage
);
9309 case TARGET_NR_gettimeofday
:
9312 ret
= get_errno(gettimeofday(&tv
, NULL
));
9313 if (!is_error(ret
)) {
9314 if (copy_to_user_timeval(arg1
, &tv
))
9319 case TARGET_NR_settimeofday
:
9321 struct timeval tv
, *ptv
= NULL
;
9322 struct timezone tz
, *ptz
= NULL
;
9325 if (copy_from_user_timeval(&tv
, arg1
)) {
9332 if (copy_from_user_timezone(&tz
, arg2
)) {
9338 ret
= get_errno(settimeofday(ptv
, ptz
));
9341 #if defined(TARGET_NR_select)
9342 case TARGET_NR_select
:
9343 #if defined(TARGET_WANT_NI_OLD_SELECT)
9344 /* some architectures used to have old_select here
9345 * but now ENOSYS it.
9347 ret
= -TARGET_ENOSYS
;
9348 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9349 ret
= do_old_select(arg1
);
9351 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9355 #ifdef TARGET_NR_pselect6
9356 case TARGET_NR_pselect6
:
9358 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
9359 fd_set rfds
, wfds
, efds
;
9360 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
9361 struct timespec ts
, *ts_ptr
;
9364 * The 6th arg is actually two args smashed together,
9365 * so we cannot use the C library.
9373 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
9374 target_sigset_t
*target_sigset
;
9382 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
9386 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
9390 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
9396 * This takes a timespec, and not a timeval, so we cannot
9397 * use the do_select() helper ...
9400 if (target_to_host_timespec(&ts
, ts_addr
)) {
9408 /* Extract the two packed args for the sigset */
9411 sig
.size
= SIGSET_T_SIZE
;
9413 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
9417 arg_sigset
= tswapal(arg7
[0]);
9418 arg_sigsize
= tswapal(arg7
[1]);
9419 unlock_user(arg7
, arg6
, 0);
9423 if (arg_sigsize
!= sizeof(*target_sigset
)) {
9424 /* Like the kernel, we enforce correct size sigsets */
9425 ret
= -TARGET_EINVAL
;
9428 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
9429 sizeof(*target_sigset
), 1);
9430 if (!target_sigset
) {
9433 target_to_host_sigset(&set
, target_sigset
);
9434 unlock_user(target_sigset
, arg_sigset
, 0);
9442 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
9445 if (!is_error(ret
)) {
9446 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
9448 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
9450 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
9453 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
9459 #ifdef TARGET_NR_symlink
9460 case TARGET_NR_symlink
:
9463 p
= lock_user_string(arg1
);
9464 p2
= lock_user_string(arg2
);
9466 ret
= -TARGET_EFAULT
;
9468 ret
= get_errno(symlink(p
, p2
));
9469 unlock_user(p2
, arg2
, 0);
9470 unlock_user(p
, arg1
, 0);
9474 #if defined(TARGET_NR_symlinkat)
9475 case TARGET_NR_symlinkat
:
9478 p
= lock_user_string(arg1
);
9479 p2
= lock_user_string(arg3
);
9481 ret
= -TARGET_EFAULT
;
9483 ret
= get_errno(symlinkat(p
, arg2
, p2
));
9484 unlock_user(p2
, arg3
, 0);
9485 unlock_user(p
, arg1
, 0);
9489 #ifdef TARGET_NR_oldlstat
9490 case TARGET_NR_oldlstat
:
9493 #ifdef TARGET_NR_readlink
9494 case TARGET_NR_readlink
:
9497 p
= lock_user_string(arg1
);
9498 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9500 ret
= -TARGET_EFAULT
;
9502 /* Short circuit this for the magic exe check. */
9503 ret
= -TARGET_EINVAL
;
9504 } else if (is_proc_myself((const char *)p
, "exe")) {
9505 char real
[PATH_MAX
], *temp
;
9506 temp
= realpath(exec_path
, real
);
9507 /* Return value is # of bytes that we wrote to the buffer. */
9509 ret
= get_errno(-1);
9511 /* Don't worry about sign mismatch as earlier mapping
9512 * logic would have thrown a bad address error. */
9513 ret
= MIN(strlen(real
), arg3
);
9514 /* We cannot NUL terminate the string. */
9515 memcpy(p2
, real
, ret
);
9518 ret
= get_errno(readlink(path(p
), p2
, arg3
));
9520 unlock_user(p2
, arg2
, ret
);
9521 unlock_user(p
, arg1
, 0);
9525 #if defined(TARGET_NR_readlinkat)
9526 case TARGET_NR_readlinkat
:
9529 p
= lock_user_string(arg2
);
9530 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
9532 ret
= -TARGET_EFAULT
;
9533 } else if (is_proc_myself((const char *)p
, "exe")) {
9534 char real
[PATH_MAX
], *temp
;
9535 temp
= realpath(exec_path
, real
);
9536 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
9537 snprintf((char *)p2
, arg4
, "%s", real
);
9539 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
9541 unlock_user(p2
, arg3
, ret
);
9542 unlock_user(p
, arg2
, 0);
9546 #ifdef TARGET_NR_uselib
9547 case TARGET_NR_uselib
:
9550 #ifdef TARGET_NR_swapon
9551 case TARGET_NR_swapon
:
9552 if (!(p
= lock_user_string(arg1
)))
9554 ret
= get_errno(swapon(p
, arg2
));
9555 unlock_user(p
, arg1
, 0);
9558 case TARGET_NR_reboot
:
9559 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
9560 /* arg4 must be ignored in all other cases */
9561 p
= lock_user_string(arg4
);
9565 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
9566 unlock_user(p
, arg4
, 0);
9568 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
9571 #ifdef TARGET_NR_readdir
9572 case TARGET_NR_readdir
:
9575 #ifdef TARGET_NR_mmap
9576 case TARGET_NR_mmap
:
9577 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9578 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9579 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9580 || defined(TARGET_S390X)
9583 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
9584 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
9592 unlock_user(v
, arg1
, 0);
9593 ret
= get_errno(target_mmap(v1
, v2
, v3
,
9594 target_to_host_bitmask(v4
, mmap_flags_tbl
),
9598 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9599 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9605 #ifdef TARGET_NR_mmap2
9606 case TARGET_NR_mmap2
:
9608 #define MMAP_SHIFT 12
9610 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9611 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9613 arg6
<< MMAP_SHIFT
));
9616 case TARGET_NR_munmap
:
9617 ret
= get_errno(target_munmap(arg1
, arg2
));
9619 case TARGET_NR_mprotect
:
9621 TaskState
*ts
= cpu
->opaque
;
9622 /* Special hack to detect libc making the stack executable. */
9623 if ((arg3
& PROT_GROWSDOWN
)
9624 && arg1
>= ts
->info
->stack_limit
9625 && arg1
<= ts
->info
->start_stack
) {
9626 arg3
&= ~PROT_GROWSDOWN
;
9627 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
9628 arg1
= ts
->info
->stack_limit
;
9631 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
9633 #ifdef TARGET_NR_mremap
9634 case TARGET_NR_mremap
:
9635 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
9638 /* ??? msync/mlock/munlock are broken for softmmu. */
9639 #ifdef TARGET_NR_msync
9640 case TARGET_NR_msync
:
9641 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
9644 #ifdef TARGET_NR_mlock
9645 case TARGET_NR_mlock
:
9646 ret
= get_errno(mlock(g2h(arg1
), arg2
));
9649 #ifdef TARGET_NR_munlock
9650 case TARGET_NR_munlock
:
9651 ret
= get_errno(munlock(g2h(arg1
), arg2
));
9654 #ifdef TARGET_NR_mlockall
9655 case TARGET_NR_mlockall
:
9656 ret
= get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
9659 #ifdef TARGET_NR_munlockall
9660 case TARGET_NR_munlockall
:
9661 ret
= get_errno(munlockall());
9664 #ifdef TARGET_NR_truncate
9665 case TARGET_NR_truncate
:
9666 if (!(p
= lock_user_string(arg1
)))
9668 ret
= get_errno(truncate(p
, arg2
));
9669 unlock_user(p
, arg1
, 0);
9672 #ifdef TARGET_NR_ftruncate
9673 case TARGET_NR_ftruncate
:
9674 ret
= get_errno(ftruncate(arg1
, arg2
));
9677 case TARGET_NR_fchmod
:
9678 ret
= get_errno(fchmod(arg1
, arg2
));
9680 #if defined(TARGET_NR_fchmodat)
9681 case TARGET_NR_fchmodat
:
9682 if (!(p
= lock_user_string(arg2
)))
9684 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
9685 unlock_user(p
, arg2
, 0);
9688 case TARGET_NR_getpriority
:
9689 /* Note that negative values are valid for getpriority, so we must
9690 differentiate based on errno settings. */
9692 ret
= getpriority(arg1
, arg2
);
9693 if (ret
== -1 && errno
!= 0) {
9694 ret
= -host_to_target_errno(errno
);
9698 /* Return value is the unbiased priority. Signal no error. */
9699 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
9701 /* Return value is a biased priority to avoid negative numbers. */
9705 case TARGET_NR_setpriority
:
9706 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
9708 #ifdef TARGET_NR_profil
9709 case TARGET_NR_profil
:
9712 #ifdef TARGET_NR_statfs
9713 case TARGET_NR_statfs
:
9714 if (!(p
= lock_user_string(arg1
)))
9716 ret
= get_errno(statfs(path(p
), &stfs
));
9717 unlock_user(p
, arg1
, 0);
9719 if (!is_error(ret
)) {
9720 struct target_statfs
*target_stfs
;
9722 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
9724 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9725 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9726 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9727 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9728 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9729 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9730 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9731 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9732 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9733 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9734 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9735 #ifdef _STATFS_F_FLAGS
9736 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
9738 __put_user(0, &target_stfs
->f_flags
);
9740 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9741 unlock_user_struct(target_stfs
, arg2
, 1);
9745 #ifdef TARGET_NR_fstatfs
9746 case TARGET_NR_fstatfs
:
9747 ret
= get_errno(fstatfs(arg1
, &stfs
));
9748 goto convert_statfs
;
9750 #ifdef TARGET_NR_statfs64
9751 case TARGET_NR_statfs64
:
9752 if (!(p
= lock_user_string(arg1
)))
9754 ret
= get_errno(statfs(path(p
), &stfs
));
9755 unlock_user(p
, arg1
, 0);
9757 if (!is_error(ret
)) {
9758 struct target_statfs64
*target_stfs
;
9760 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
9762 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9763 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9764 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9765 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9766 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9767 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9768 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9769 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9770 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9771 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9772 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9773 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9774 unlock_user_struct(target_stfs
, arg3
, 1);
9777 case TARGET_NR_fstatfs64
:
9778 ret
= get_errno(fstatfs(arg1
, &stfs
));
9779 goto convert_statfs64
;
9781 #ifdef TARGET_NR_ioperm
9782 case TARGET_NR_ioperm
:
9785 #ifdef TARGET_NR_socketcall
9786 case TARGET_NR_socketcall
:
9787 ret
= do_socketcall(arg1
, arg2
);
9790 #ifdef TARGET_NR_accept
9791 case TARGET_NR_accept
:
9792 ret
= do_accept4(arg1
, arg2
, arg3
, 0);
9795 #ifdef TARGET_NR_accept4
9796 case TARGET_NR_accept4
:
9797 ret
= do_accept4(arg1
, arg2
, arg3
, arg4
);
9800 #ifdef TARGET_NR_bind
9801 case TARGET_NR_bind
:
9802 ret
= do_bind(arg1
, arg2
, arg3
);
9805 #ifdef TARGET_NR_connect
9806 case TARGET_NR_connect
:
9807 ret
= do_connect(arg1
, arg2
, arg3
);
9810 #ifdef TARGET_NR_getpeername
9811 case TARGET_NR_getpeername
:
9812 ret
= do_getpeername(arg1
, arg2
, arg3
);
9815 #ifdef TARGET_NR_getsockname
9816 case TARGET_NR_getsockname
:
9817 ret
= do_getsockname(arg1
, arg2
, arg3
);
9820 #ifdef TARGET_NR_getsockopt
9821 case TARGET_NR_getsockopt
:
9822 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
9825 #ifdef TARGET_NR_listen
9826 case TARGET_NR_listen
:
9827 ret
= get_errno(listen(arg1
, arg2
));
9830 #ifdef TARGET_NR_recv
9831 case TARGET_NR_recv
:
9832 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
9835 #ifdef TARGET_NR_recvfrom
9836 case TARGET_NR_recvfrom
:
9837 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9840 #ifdef TARGET_NR_recvmsg
9841 case TARGET_NR_recvmsg
:
9842 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
9845 #ifdef TARGET_NR_send
9846 case TARGET_NR_send
:
9847 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
9850 #ifdef TARGET_NR_sendmsg
9851 case TARGET_NR_sendmsg
:
9852 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
9855 #ifdef TARGET_NR_sendmmsg
9856 case TARGET_NR_sendmmsg
:
9857 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
9859 case TARGET_NR_recvmmsg
:
9860 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
9863 #ifdef TARGET_NR_sendto
9864 case TARGET_NR_sendto
:
9865 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9868 #ifdef TARGET_NR_shutdown
9869 case TARGET_NR_shutdown
:
9870 ret
= get_errno(shutdown(arg1
, arg2
));
9873 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9874 case TARGET_NR_getrandom
:
9875 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
9879 ret
= get_errno(getrandom(p
, arg2
, arg3
));
9880 unlock_user(p
, arg1
, ret
);
9883 #ifdef TARGET_NR_socket
9884 case TARGET_NR_socket
:
9885 ret
= do_socket(arg1
, arg2
, arg3
);
9888 #ifdef TARGET_NR_socketpair
9889 case TARGET_NR_socketpair
:
9890 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
9893 #ifdef TARGET_NR_setsockopt
9894 case TARGET_NR_setsockopt
:
9895 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
9898 #if defined(TARGET_NR_syslog)
9899 case TARGET_NR_syslog
:
9904 case TARGET_SYSLOG_ACTION_CLOSE
: /* Close log */
9905 case TARGET_SYSLOG_ACTION_OPEN
: /* Open log */
9906 case TARGET_SYSLOG_ACTION_CLEAR
: /* Clear ring buffer */
9907 case TARGET_SYSLOG_ACTION_CONSOLE_OFF
: /* Disable logging */
9908 case TARGET_SYSLOG_ACTION_CONSOLE_ON
: /* Enable logging */
9909 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL
: /* Set messages level */
9910 case TARGET_SYSLOG_ACTION_SIZE_UNREAD
: /* Number of chars */
9911 case TARGET_SYSLOG_ACTION_SIZE_BUFFER
: /* Size of the buffer */
9913 ret
= get_errno(sys_syslog((int)arg1
, NULL
, (int)arg3
));
9916 case TARGET_SYSLOG_ACTION_READ
: /* Read from log */
9917 case TARGET_SYSLOG_ACTION_READ_CLEAR
: /* Read/clear msgs */
9918 case TARGET_SYSLOG_ACTION_READ_ALL
: /* Read last messages */
9920 ret
= -TARGET_EINVAL
;
9928 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9930 ret
= -TARGET_EFAULT
;
9933 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
9934 unlock_user(p
, arg2
, arg3
);
9944 case TARGET_NR_setitimer
:
9946 struct itimerval value
, ovalue
, *pvalue
;
9950 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
9951 || copy_from_user_timeval(&pvalue
->it_value
,
9952 arg2
+ sizeof(struct target_timeval
)))
9957 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
9958 if (!is_error(ret
) && arg3
) {
9959 if (copy_to_user_timeval(arg3
,
9960 &ovalue
.it_interval
)
9961 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
9967 case TARGET_NR_getitimer
:
9969 struct itimerval value
;
9971 ret
= get_errno(getitimer(arg1
, &value
));
9972 if (!is_error(ret
) && arg2
) {
9973 if (copy_to_user_timeval(arg2
,
9975 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
9981 #ifdef TARGET_NR_stat
9982 case TARGET_NR_stat
:
9983 if (!(p
= lock_user_string(arg1
)))
9985 ret
= get_errno(stat(path(p
), &st
));
9986 unlock_user(p
, arg1
, 0);
9989 #ifdef TARGET_NR_lstat
9990 case TARGET_NR_lstat
:
9991 if (!(p
= lock_user_string(arg1
)))
9993 ret
= get_errno(lstat(path(p
), &st
));
9994 unlock_user(p
, arg1
, 0);
9997 #ifdef TARGET_NR_fstat
9998 case TARGET_NR_fstat
:
10000 ret
= get_errno(fstat(arg1
, &st
));
10001 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10004 if (!is_error(ret
)) {
10005 struct target_stat
*target_st
;
10007 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
10009 memset(target_st
, 0, sizeof(*target_st
));
10010 __put_user(st
.st_dev
, &target_st
->st_dev
);
10011 __put_user(st
.st_ino
, &target_st
->st_ino
);
10012 __put_user(st
.st_mode
, &target_st
->st_mode
);
10013 __put_user(st
.st_uid
, &target_st
->st_uid
);
10014 __put_user(st
.st_gid
, &target_st
->st_gid
);
10015 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
10016 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
10017 __put_user(st
.st_size
, &target_st
->st_size
);
10018 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
10019 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
10020 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
10021 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
10022 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
10023 unlock_user_struct(target_st
, arg2
, 1);
10028 #ifdef TARGET_NR_olduname
10029 case TARGET_NR_olduname
:
10030 goto unimplemented
;
10032 #ifdef TARGET_NR_iopl
10033 case TARGET_NR_iopl
:
10034 goto unimplemented
;
10036 case TARGET_NR_vhangup
:
10037 ret
= get_errno(vhangup());
10039 #ifdef TARGET_NR_idle
10040 case TARGET_NR_idle
:
10041 goto unimplemented
;
10043 #ifdef TARGET_NR_syscall
10044 case TARGET_NR_syscall
:
10045 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
10046 arg6
, arg7
, arg8
, 0);
10049 case TARGET_NR_wait4
:
10052 abi_long status_ptr
= arg2
;
10053 struct rusage rusage
, *rusage_ptr
;
10054 abi_ulong target_rusage
= arg4
;
10055 abi_long rusage_err
;
10057 rusage_ptr
= &rusage
;
10060 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
10061 if (!is_error(ret
)) {
10062 if (status_ptr
&& ret
) {
10063 status
= host_to_target_waitstatus(status
);
10064 if (put_user_s32(status
, status_ptr
))
10067 if (target_rusage
) {
10068 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
10076 #ifdef TARGET_NR_swapoff
10077 case TARGET_NR_swapoff
:
10078 if (!(p
= lock_user_string(arg1
)))
10080 ret
= get_errno(swapoff(p
));
10081 unlock_user(p
, arg1
, 0);
10084 case TARGET_NR_sysinfo
:
10086 struct target_sysinfo
*target_value
;
10087 struct sysinfo value
;
10088 ret
= get_errno(sysinfo(&value
));
10089 if (!is_error(ret
) && arg1
)
10091 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
10093 __put_user(value
.uptime
, &target_value
->uptime
);
10094 __put_user(value
.loads
[0], &target_value
->loads
[0]);
10095 __put_user(value
.loads
[1], &target_value
->loads
[1]);
10096 __put_user(value
.loads
[2], &target_value
->loads
[2]);
10097 __put_user(value
.totalram
, &target_value
->totalram
);
10098 __put_user(value
.freeram
, &target_value
->freeram
);
10099 __put_user(value
.sharedram
, &target_value
->sharedram
);
10100 __put_user(value
.bufferram
, &target_value
->bufferram
);
10101 __put_user(value
.totalswap
, &target_value
->totalswap
);
10102 __put_user(value
.freeswap
, &target_value
->freeswap
);
10103 __put_user(value
.procs
, &target_value
->procs
);
10104 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
10105 __put_user(value
.freehigh
, &target_value
->freehigh
);
10106 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
10107 unlock_user_struct(target_value
, arg1
, 1);
10111 #ifdef TARGET_NR_ipc
10112 case TARGET_NR_ipc
:
10113 ret
= do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
10116 #ifdef TARGET_NR_semget
10117 case TARGET_NR_semget
:
10118 ret
= get_errno(semget(arg1
, arg2
, arg3
));
10121 #ifdef TARGET_NR_semop
10122 case TARGET_NR_semop
:
10123 ret
= do_semop(arg1
, arg2
, arg3
);
10126 #ifdef TARGET_NR_semctl
10127 case TARGET_NR_semctl
:
10128 ret
= do_semctl(arg1
, arg2
, arg3
, arg4
);
10131 #ifdef TARGET_NR_msgctl
10132 case TARGET_NR_msgctl
:
10133 ret
= do_msgctl(arg1
, arg2
, arg3
);
10136 #ifdef TARGET_NR_msgget
10137 case TARGET_NR_msgget
:
10138 ret
= get_errno(msgget(arg1
, arg2
));
10141 #ifdef TARGET_NR_msgrcv
10142 case TARGET_NR_msgrcv
:
10143 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
10146 #ifdef TARGET_NR_msgsnd
10147 case TARGET_NR_msgsnd
:
10148 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
10151 #ifdef TARGET_NR_shmget
10152 case TARGET_NR_shmget
:
10153 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
10156 #ifdef TARGET_NR_shmctl
10157 case TARGET_NR_shmctl
:
10158 ret
= do_shmctl(arg1
, arg2
, arg3
);
10161 #ifdef TARGET_NR_shmat
10162 case TARGET_NR_shmat
:
10163 ret
= do_shmat(cpu_env
, arg1
, arg2
, arg3
);
10166 #ifdef TARGET_NR_shmdt
10167 case TARGET_NR_shmdt
:
10168 ret
= do_shmdt(arg1
);
10171 case TARGET_NR_fsync
:
10172 ret
= get_errno(fsync(arg1
));
10174 case TARGET_NR_clone
:
10175 /* Linux manages to have three different orderings for its
10176 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10177 * match the kernel's CONFIG_CLONE_* settings.
10178 * Microblaze is further special in that it uses a sixth
10179 * implicit argument to clone for the TLS pointer.
10181 #if defined(TARGET_MICROBLAZE)
10182 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
10183 #elif defined(TARGET_CLONE_BACKWARDS)
10184 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
10185 #elif defined(TARGET_CLONE_BACKWARDS2)
10186 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
10188 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
10191 #ifdef __NR_exit_group
10192 /* new thread calls */
10193 case TARGET_NR_exit_group
:
10194 preexit_cleanup(cpu_env
, arg1
);
10195 ret
= get_errno(exit_group(arg1
));
10198 case TARGET_NR_setdomainname
:
10199 if (!(p
= lock_user_string(arg1
)))
10201 ret
= get_errno(setdomainname(p
, arg2
));
10202 unlock_user(p
, arg1
, 0);
10204 case TARGET_NR_uname
:
10205 /* no need to transcode because we use the linux syscall */
10207 struct new_utsname
* buf
;
10209 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
10211 ret
= get_errno(sys_uname(buf
));
10212 if (!is_error(ret
)) {
10213 /* Overwrite the native machine name with whatever is being
10215 g_strlcpy(buf
->machine
, cpu_to_uname_machine(cpu_env
),
10216 sizeof(buf
->machine
));
10217 /* Allow the user to override the reported release. */
10218 if (qemu_uname_release
&& *qemu_uname_release
) {
10219 g_strlcpy(buf
->release
, qemu_uname_release
,
10220 sizeof(buf
->release
));
10223 unlock_user_struct(buf
, arg1
, 1);
10227 case TARGET_NR_modify_ldt
:
10228 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
10230 #if !defined(TARGET_X86_64)
10231 case TARGET_NR_vm86old
:
10232 goto unimplemented
;
10233 case TARGET_NR_vm86
:
10234 ret
= do_vm86(cpu_env
, arg1
, arg2
);
10238 case TARGET_NR_adjtimex
:
10240 struct timex host_buf
;
10242 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
10245 ret
= get_errno(adjtimex(&host_buf
));
10246 if (!is_error(ret
)) {
10247 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
10253 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10254 case TARGET_NR_clock_adjtime
:
10256 struct timex htx
, *phtx
= &htx
;
10258 if (target_to_host_timex(phtx
, arg2
) != 0) {
10261 ret
= get_errno(clock_adjtime(arg1
, phtx
));
10262 if (!is_error(ret
) && phtx
) {
10263 if (host_to_target_timex(arg2
, phtx
) != 0) {
10270 #ifdef TARGET_NR_create_module
10271 case TARGET_NR_create_module
:
10273 case TARGET_NR_init_module
:
10274 case TARGET_NR_delete_module
:
10275 #ifdef TARGET_NR_get_kernel_syms
10276 case TARGET_NR_get_kernel_syms
:
10278 goto unimplemented
;
10279 case TARGET_NR_quotactl
:
10280 goto unimplemented
;
10281 case TARGET_NR_getpgid
:
10282 ret
= get_errno(getpgid(arg1
));
10284 case TARGET_NR_fchdir
:
10285 ret
= get_errno(fchdir(arg1
));
10287 #ifdef TARGET_NR_bdflush /* not on x86_64 */
10288 case TARGET_NR_bdflush
:
10289 goto unimplemented
;
10291 #ifdef TARGET_NR_sysfs
10292 case TARGET_NR_sysfs
:
10293 goto unimplemented
;
10295 case TARGET_NR_personality
:
10296 ret
= get_errno(personality(arg1
));
10298 #ifdef TARGET_NR_afs_syscall
10299 case TARGET_NR_afs_syscall
:
10300 goto unimplemented
;
10302 #ifdef TARGET_NR__llseek /* Not on alpha */
10303 case TARGET_NR__llseek
:
10306 #if !defined(__NR_llseek)
10307 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
10309 ret
= get_errno(res
);
10314 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
10316 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
10322 #ifdef TARGET_NR_getdents
10323 case TARGET_NR_getdents
:
10324 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10325 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10327 struct target_dirent
*target_dirp
;
10328 struct linux_dirent
*dirp
;
10329 abi_long count
= arg3
;
10331 dirp
= g_try_malloc(count
);
10333 ret
= -TARGET_ENOMEM
;
10337 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
10338 if (!is_error(ret
)) {
10339 struct linux_dirent
*de
;
10340 struct target_dirent
*tde
;
10342 int reclen
, treclen
;
10343 int count1
, tnamelen
;
10347 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10351 reclen
= de
->d_reclen
;
10352 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
10353 assert(tnamelen
>= 0);
10354 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
10355 assert(count1
+ treclen
<= count
);
10356 tde
->d_reclen
= tswap16(treclen
);
10357 tde
->d_ino
= tswapal(de
->d_ino
);
10358 tde
->d_off
= tswapal(de
->d_off
);
10359 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
10360 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
10362 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
10366 unlock_user(target_dirp
, arg2
, ret
);
10372 struct linux_dirent
*dirp
;
10373 abi_long count
= arg3
;
10375 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10377 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
10378 if (!is_error(ret
)) {
10379 struct linux_dirent
*de
;
10384 reclen
= de
->d_reclen
;
10387 de
->d_reclen
= tswap16(reclen
);
10388 tswapls(&de
->d_ino
);
10389 tswapls(&de
->d_off
);
10390 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
10394 unlock_user(dirp
, arg2
, ret
);
10398 /* Implement getdents in terms of getdents64 */
10400 struct linux_dirent64
*dirp
;
10401 abi_long count
= arg3
;
10403 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
10407 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
10408 if (!is_error(ret
)) {
10409 /* Convert the dirent64 structs to target dirent. We do this
10410 * in-place, since we can guarantee that a target_dirent is no
10411 * larger than a dirent64; however this means we have to be
10412 * careful to read everything before writing in the new format.
10414 struct linux_dirent64
*de
;
10415 struct target_dirent
*tde
;
10420 tde
= (struct target_dirent
*)dirp
;
10422 int namelen
, treclen
;
10423 int reclen
= de
->d_reclen
;
10424 uint64_t ino
= de
->d_ino
;
10425 int64_t off
= de
->d_off
;
10426 uint8_t type
= de
->d_type
;
10428 namelen
= strlen(de
->d_name
);
10429 treclen
= offsetof(struct target_dirent
, d_name
)
10431 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
10433 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
10434 tde
->d_ino
= tswapal(ino
);
10435 tde
->d_off
= tswapal(off
);
10436 tde
->d_reclen
= tswap16(treclen
);
10437 /* The target_dirent type is in what was formerly a padding
10438 * byte at the end of the structure:
10440 *(((char *)tde
) + treclen
- 1) = type
;
10442 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
10443 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
10449 unlock_user(dirp
, arg2
, ret
);
10453 #endif /* TARGET_NR_getdents */
10454 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10455 case TARGET_NR_getdents64
:
10457 struct linux_dirent64
*dirp
;
10458 abi_long count
= arg3
;
10459 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10461 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
10462 if (!is_error(ret
)) {
10463 struct linux_dirent64
*de
;
10468 reclen
= de
->d_reclen
;
10471 de
->d_reclen
= tswap16(reclen
);
10472 tswap64s((uint64_t *)&de
->d_ino
);
10473 tswap64s((uint64_t *)&de
->d_off
);
10474 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
10478 unlock_user(dirp
, arg2
, ret
);
10481 #endif /* TARGET_NR_getdents64 */
10482 #if defined(TARGET_NR__newselect)
10483 case TARGET_NR__newselect
:
10484 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
10487 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10488 # ifdef TARGET_NR_poll
10489 case TARGET_NR_poll
:
10491 # ifdef TARGET_NR_ppoll
10492 case TARGET_NR_ppoll
:
10495 struct target_pollfd
*target_pfd
;
10496 unsigned int nfds
= arg2
;
10497 struct pollfd
*pfd
;
10503 if (nfds
> (INT_MAX
/ sizeof(struct target_pollfd
))) {
10504 ret
= -TARGET_EINVAL
;
10508 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
10509 sizeof(struct target_pollfd
) * nfds
, 1);
10514 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
10515 for (i
= 0; i
< nfds
; i
++) {
10516 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
10517 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
10522 # ifdef TARGET_NR_ppoll
10523 case TARGET_NR_ppoll
:
10525 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
10526 target_sigset_t
*target_set
;
10527 sigset_t _set
, *set
= &_set
;
10530 if (target_to_host_timespec(timeout_ts
, arg3
)) {
10531 unlock_user(target_pfd
, arg1
, 0);
10539 if (arg5
!= sizeof(target_sigset_t
)) {
10540 unlock_user(target_pfd
, arg1
, 0);
10541 ret
= -TARGET_EINVAL
;
10545 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
10547 unlock_user(target_pfd
, arg1
, 0);
10550 target_to_host_sigset(set
, target_set
);
10555 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
10556 set
, SIGSET_T_SIZE
));
10558 if (!is_error(ret
) && arg3
) {
10559 host_to_target_timespec(arg3
, timeout_ts
);
10562 unlock_user(target_set
, arg4
, 0);
10567 # ifdef TARGET_NR_poll
10568 case TARGET_NR_poll
:
10570 struct timespec ts
, *pts
;
10573 /* Convert ms to secs, ns */
10574 ts
.tv_sec
= arg3
/ 1000;
10575 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
10578 /* -ve poll() timeout means "infinite" */
10581 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
10586 g_assert_not_reached();
10589 if (!is_error(ret
)) {
10590 for(i
= 0; i
< nfds
; i
++) {
10591 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
10594 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
10598 case TARGET_NR_flock
:
10599 /* NOTE: the flock constant seems to be the same for every
10601 ret
= get_errno(safe_flock(arg1
, arg2
));
10603 case TARGET_NR_readv
:
10605 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10607 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
10608 unlock_iovec(vec
, arg2
, arg3
, 1);
10610 ret
= -host_to_target_errno(errno
);
10614 case TARGET_NR_writev
:
10616 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10618 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
10619 unlock_iovec(vec
, arg2
, arg3
, 0);
10621 ret
= -host_to_target_errno(errno
);
10625 #if defined(TARGET_NR_preadv)
10626 case TARGET_NR_preadv
:
10628 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10630 unsigned long low
, high
;
10632 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10633 ret
= get_errno(safe_preadv(arg1
, vec
, arg3
, low
, high
));
10634 unlock_iovec(vec
, arg2
, arg3
, 1);
10636 ret
= -host_to_target_errno(errno
);
10641 #if defined(TARGET_NR_pwritev)
10642 case TARGET_NR_pwritev
:
10644 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10646 unsigned long low
, high
;
10648 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10649 ret
= get_errno(safe_pwritev(arg1
, vec
, arg3
, low
, high
));
10650 unlock_iovec(vec
, arg2
, arg3
, 0);
10652 ret
= -host_to_target_errno(errno
);
10657 case TARGET_NR_getsid
:
10658 ret
= get_errno(getsid(arg1
));
10660 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10661 case TARGET_NR_fdatasync
:
10662 ret
= get_errno(fdatasync(arg1
));
10665 #ifdef TARGET_NR__sysctl
10666 case TARGET_NR__sysctl
:
10667 /* We don't implement this, but ENOTDIR is always a safe
10669 ret
= -TARGET_ENOTDIR
;
10672 case TARGET_NR_sched_getaffinity
:
10674 unsigned int mask_size
;
10675 unsigned long *mask
;
10678 * sched_getaffinity needs multiples of ulong, so need to take
10679 * care of mismatches between target ulong and host ulong sizes.
10681 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10682 ret
= -TARGET_EINVAL
;
10685 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10687 mask
= alloca(mask_size
);
10688 memset(mask
, 0, mask_size
);
10689 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
10691 if (!is_error(ret
)) {
10693 /* More data returned than the caller's buffer will fit.
10694 * This only happens if sizeof(abi_long) < sizeof(long)
10695 * and the caller passed us a buffer holding an odd number
10696 * of abi_longs. If the host kernel is actually using the
10697 * extra 4 bytes then fail EINVAL; otherwise we can just
10698 * ignore them and only copy the interesting part.
10700 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
10701 if (numcpus
> arg2
* 8) {
10702 ret
= -TARGET_EINVAL
;
10708 if (host_to_target_cpu_mask(mask
, mask_size
, arg3
, ret
)) {
10714 case TARGET_NR_sched_setaffinity
:
10716 unsigned int mask_size
;
10717 unsigned long *mask
;
10720 * sched_setaffinity needs multiples of ulong, so need to take
10721 * care of mismatches between target ulong and host ulong sizes.
10723 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10724 ret
= -TARGET_EINVAL
;
10727 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10728 mask
= alloca(mask_size
);
10730 ret
= target_to_host_cpu_mask(mask
, mask_size
, arg3
, arg2
);
10735 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
10738 case TARGET_NR_getcpu
:
10740 unsigned cpu
, node
;
10741 ret
= get_errno(sys_getcpu(arg1
? &cpu
: NULL
,
10742 arg2
? &node
: NULL
,
10744 if (is_error(ret
)) {
10747 if (arg1
&& put_user_u32(cpu
, arg1
)) {
10750 if (arg2
&& put_user_u32(node
, arg2
)) {
10755 case TARGET_NR_sched_setparam
:
10757 struct sched_param
*target_schp
;
10758 struct sched_param schp
;
10761 return -TARGET_EINVAL
;
10763 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
10765 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10766 unlock_user_struct(target_schp
, arg2
, 0);
10767 ret
= get_errno(sched_setparam(arg1
, &schp
));
10770 case TARGET_NR_sched_getparam
:
10772 struct sched_param
*target_schp
;
10773 struct sched_param schp
;
10776 return -TARGET_EINVAL
;
10778 ret
= get_errno(sched_getparam(arg1
, &schp
));
10779 if (!is_error(ret
)) {
10780 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
10782 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
10783 unlock_user_struct(target_schp
, arg2
, 1);
10787 case TARGET_NR_sched_setscheduler
:
10789 struct sched_param
*target_schp
;
10790 struct sched_param schp
;
10792 return -TARGET_EINVAL
;
10794 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
10796 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10797 unlock_user_struct(target_schp
, arg3
, 0);
10798 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
10801 case TARGET_NR_sched_getscheduler
:
10802 ret
= get_errno(sched_getscheduler(arg1
));
10804 case TARGET_NR_sched_yield
:
10805 ret
= get_errno(sched_yield());
10807 case TARGET_NR_sched_get_priority_max
:
10808 ret
= get_errno(sched_get_priority_max(arg1
));
10810 case TARGET_NR_sched_get_priority_min
:
10811 ret
= get_errno(sched_get_priority_min(arg1
));
10813 case TARGET_NR_sched_rr_get_interval
:
10815 struct timespec ts
;
10816 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10817 if (!is_error(ret
)) {
10818 ret
= host_to_target_timespec(arg2
, &ts
);
10822 case TARGET_NR_nanosleep
:
10824 struct timespec req
, rem
;
10825 target_to_host_timespec(&req
, arg1
);
10826 ret
= get_errno(safe_nanosleep(&req
, &rem
));
10827 if (is_error(ret
) && arg2
) {
10828 host_to_target_timespec(arg2
, &rem
);
10832 #ifdef TARGET_NR_query_module
10833 case TARGET_NR_query_module
:
10834 goto unimplemented
;
10836 #ifdef TARGET_NR_nfsservctl
10837 case TARGET_NR_nfsservctl
:
10838 goto unimplemented
;
10840 case TARGET_NR_prctl
:
10842 case PR_GET_PDEATHSIG
:
10845 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
10846 if (!is_error(ret
) && arg2
10847 && put_user_ual(deathsig
, arg2
)) {
10855 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
10859 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10860 arg3
, arg4
, arg5
));
10861 unlock_user(name
, arg2
, 16);
10866 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
10870 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10871 arg3
, arg4
, arg5
));
10872 unlock_user(name
, arg2
, 0);
10876 #ifdef TARGET_AARCH64
10877 case TARGET_PR_SVE_SET_VL
:
10879 * We cannot support either PR_SVE_SET_VL_ONEXEC or
10880 * PR_SVE_VL_INHERIT. Note the kernel definition
10881 * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10882 * even though the current architectural maximum is VQ=16.
10884 ret
= -TARGET_EINVAL
;
10885 if (arm_feature(cpu_env
, ARM_FEATURE_SVE
)
10886 && arg2
>= 0 && arg2
<= 512 * 16 && !(arg2
& 15)) {
10887 CPUARMState
*env
= cpu_env
;
10888 ARMCPU
*cpu
= arm_env_get_cpu(env
);
10889 uint32_t vq
, old_vq
;
10891 old_vq
= (env
->vfp
.zcr_el
[1] & 0xf) + 1;
10892 vq
= MAX(arg2
/ 16, 1);
10893 vq
= MIN(vq
, cpu
->sve_max_vq
);
10896 aarch64_sve_narrow_vq(env
, vq
);
10898 env
->vfp
.zcr_el
[1] = vq
- 1;
10902 case TARGET_PR_SVE_GET_VL
:
10903 ret
= -TARGET_EINVAL
;
10904 if (arm_feature(cpu_env
, ARM_FEATURE_SVE
)) {
10905 CPUARMState
*env
= cpu_env
;
10906 ret
= ((env
->vfp
.zcr_el
[1] & 0xf) + 1) * 16;
10909 #endif /* AARCH64 */
10910 case PR_GET_SECCOMP
:
10911 case PR_SET_SECCOMP
:
10912 /* Disable seccomp to prevent the target disabling syscalls we
10914 ret
= -TARGET_EINVAL
;
10917 /* Most prctl options have no pointer arguments */
10918 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
10922 #ifdef TARGET_NR_arch_prctl
10923 case TARGET_NR_arch_prctl
:
10924 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10925 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
10928 goto unimplemented
;
10931 #ifdef TARGET_NR_pread64
10932 case TARGET_NR_pread64
:
10933 if (regpairs_aligned(cpu_env
, num
)) {
10937 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
10939 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10940 unlock_user(p
, arg2
, ret
);
10942 case TARGET_NR_pwrite64
:
10943 if (regpairs_aligned(cpu_env
, num
)) {
10947 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
10949 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10950 unlock_user(p
, arg2
, 0);
10953 case TARGET_NR_getcwd
:
10954 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
10956 ret
= get_errno(sys_getcwd1(p
, arg2
));
10957 unlock_user(p
, arg1
, ret
);
10959 case TARGET_NR_capget
:
10960 case TARGET_NR_capset
:
10962 struct target_user_cap_header
*target_header
;
10963 struct target_user_cap_data
*target_data
= NULL
;
10964 struct __user_cap_header_struct header
;
10965 struct __user_cap_data_struct data
[2];
10966 struct __user_cap_data_struct
*dataptr
= NULL
;
10967 int i
, target_datalen
;
10968 int data_items
= 1;
10970 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
10973 header
.version
= tswap32(target_header
->version
);
10974 header
.pid
= tswap32(target_header
->pid
);
10976 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
10977 /* Version 2 and up takes pointer to two user_data structs */
10981 target_datalen
= sizeof(*target_data
) * data_items
;
10984 if (num
== TARGET_NR_capget
) {
10985 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
10987 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
10989 if (!target_data
) {
10990 unlock_user_struct(target_header
, arg1
, 0);
10994 if (num
== TARGET_NR_capset
) {
10995 for (i
= 0; i
< data_items
; i
++) {
10996 data
[i
].effective
= tswap32(target_data
[i
].effective
);
10997 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
10998 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
11005 if (num
== TARGET_NR_capget
) {
11006 ret
= get_errno(capget(&header
, dataptr
));
11008 ret
= get_errno(capset(&header
, dataptr
));
11011 /* The kernel always updates version for both capget and capset */
11012 target_header
->version
= tswap32(header
.version
);
11013 unlock_user_struct(target_header
, arg1
, 1);
11016 if (num
== TARGET_NR_capget
) {
11017 for (i
= 0; i
< data_items
; i
++) {
11018 target_data
[i
].effective
= tswap32(data
[i
].effective
);
11019 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
11020 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
11022 unlock_user(target_data
, arg2
, target_datalen
);
11024 unlock_user(target_data
, arg2
, 0);
11029 case TARGET_NR_sigaltstack
:
11030 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
11033 #ifdef CONFIG_SENDFILE
11034 #ifdef TARGET_NR_sendfile
11035 case TARGET_NR_sendfile
:
11037 off_t
*offp
= NULL
;
11040 ret
= get_user_sal(off
, arg3
);
11041 if (is_error(ret
)) {
11046 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
11047 if (!is_error(ret
) && arg3
) {
11048 abi_long ret2
= put_user_sal(off
, arg3
);
11049 if (is_error(ret2
)) {
11056 #ifdef TARGET_NR_sendfile64
11057 case TARGET_NR_sendfile64
:
11059 off_t
*offp
= NULL
;
11062 ret
= get_user_s64(off
, arg3
);
11063 if (is_error(ret
)) {
11068 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
11069 if (!is_error(ret
) && arg3
) {
11070 abi_long ret2
= put_user_s64(off
, arg3
);
11071 if (is_error(ret2
)) {
11079 case TARGET_NR_sendfile
:
11080 #ifdef TARGET_NR_sendfile64
11081 case TARGET_NR_sendfile64
:
11083 goto unimplemented
;
11086 #ifdef TARGET_NR_getpmsg
11087 case TARGET_NR_getpmsg
:
11088 goto unimplemented
;
11090 #ifdef TARGET_NR_putpmsg
11091 case TARGET_NR_putpmsg
:
11092 goto unimplemented
;
11094 #ifdef TARGET_NR_vfork
11095 case TARGET_NR_vfork
:
11096 ret
= get_errno(do_fork(cpu_env
,
11097 CLONE_VFORK
| CLONE_VM
| TARGET_SIGCHLD
,
11101 #ifdef TARGET_NR_ugetrlimit
11102 case TARGET_NR_ugetrlimit
:
11104 struct rlimit rlim
;
11105 int resource
= target_to_host_resource(arg1
);
11106 ret
= get_errno(getrlimit(resource
, &rlim
));
11107 if (!is_error(ret
)) {
11108 struct target_rlimit
*target_rlim
;
11109 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
11111 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
11112 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
11113 unlock_user_struct(target_rlim
, arg2
, 1);
11118 #ifdef TARGET_NR_truncate64
11119 case TARGET_NR_truncate64
:
11120 if (!(p
= lock_user_string(arg1
)))
11122 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
11123 unlock_user(p
, arg1
, 0);
11126 #ifdef TARGET_NR_ftruncate64
11127 case TARGET_NR_ftruncate64
:
11128 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
11131 #ifdef TARGET_NR_stat64
11132 case TARGET_NR_stat64
:
11133 if (!(p
= lock_user_string(arg1
)))
11135 ret
= get_errno(stat(path(p
), &st
));
11136 unlock_user(p
, arg1
, 0);
11137 if (!is_error(ret
))
11138 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11141 #ifdef TARGET_NR_lstat64
11142 case TARGET_NR_lstat64
:
11143 if (!(p
= lock_user_string(arg1
)))
11145 ret
= get_errno(lstat(path(p
), &st
));
11146 unlock_user(p
, arg1
, 0);
11147 if (!is_error(ret
))
11148 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11151 #ifdef TARGET_NR_fstat64
11152 case TARGET_NR_fstat64
:
11153 ret
= get_errno(fstat(arg1
, &st
));
11154 if (!is_error(ret
))
11155 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11158 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11159 #ifdef TARGET_NR_fstatat64
11160 case TARGET_NR_fstatat64
:
11162 #ifdef TARGET_NR_newfstatat
11163 case TARGET_NR_newfstatat
:
11165 if (!(p
= lock_user_string(arg2
)))
11167 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
11168 if (!is_error(ret
))
11169 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
11172 #ifdef TARGET_NR_lchown
11173 case TARGET_NR_lchown
:
11174 if (!(p
= lock_user_string(arg1
)))
11176 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
11177 unlock_user(p
, arg1
, 0);
11180 #ifdef TARGET_NR_getuid
11181 case TARGET_NR_getuid
:
11182 ret
= get_errno(high2lowuid(getuid()));
11185 #ifdef TARGET_NR_getgid
11186 case TARGET_NR_getgid
:
11187 ret
= get_errno(high2lowgid(getgid()));
11190 #ifdef TARGET_NR_geteuid
11191 case TARGET_NR_geteuid
:
11192 ret
= get_errno(high2lowuid(geteuid()));
11195 #ifdef TARGET_NR_getegid
11196 case TARGET_NR_getegid
:
11197 ret
= get_errno(high2lowgid(getegid()));
11200 case TARGET_NR_setreuid
:
11201 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
11203 case TARGET_NR_setregid
:
11204 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
11206 case TARGET_NR_getgroups
:
11208 int gidsetsize
= arg1
;
11209 target_id
*target_grouplist
;
11213 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11214 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11215 if (gidsetsize
== 0)
11217 if (!is_error(ret
)) {
11218 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
11219 if (!target_grouplist
)
11221 for(i
= 0;i
< ret
; i
++)
11222 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
11223 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
11227 case TARGET_NR_setgroups
:
11229 int gidsetsize
= arg1
;
11230 target_id
*target_grouplist
;
11231 gid_t
*grouplist
= NULL
;
11234 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11235 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
11236 if (!target_grouplist
) {
11237 ret
= -TARGET_EFAULT
;
11240 for (i
= 0; i
< gidsetsize
; i
++) {
11241 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
11243 unlock_user(target_grouplist
, arg2
, 0);
11245 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
11248 case TARGET_NR_fchown
:
11249 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
11251 #if defined(TARGET_NR_fchownat)
11252 case TARGET_NR_fchownat
:
11253 if (!(p
= lock_user_string(arg2
)))
11255 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
11256 low2highgid(arg4
), arg5
));
11257 unlock_user(p
, arg2
, 0);
11260 #ifdef TARGET_NR_setresuid
11261 case TARGET_NR_setresuid
:
11262 ret
= get_errno(sys_setresuid(low2highuid(arg1
),
11264 low2highuid(arg3
)));
11267 #ifdef TARGET_NR_getresuid
11268 case TARGET_NR_getresuid
:
11270 uid_t ruid
, euid
, suid
;
11271 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11272 if (!is_error(ret
)) {
11273 if (put_user_id(high2lowuid(ruid
), arg1
)
11274 || put_user_id(high2lowuid(euid
), arg2
)
11275 || put_user_id(high2lowuid(suid
), arg3
))
11281 #ifdef TARGET_NR_getresgid
11282 case TARGET_NR_setresgid
:
11283 ret
= get_errno(sys_setresgid(low2highgid(arg1
),
11285 low2highgid(arg3
)));
11288 #ifdef TARGET_NR_getresgid
11289 case TARGET_NR_getresgid
:
11291 gid_t rgid
, egid
, sgid
;
11292 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11293 if (!is_error(ret
)) {
11294 if (put_user_id(high2lowgid(rgid
), arg1
)
11295 || put_user_id(high2lowgid(egid
), arg2
)
11296 || put_user_id(high2lowgid(sgid
), arg3
))
11302 #ifdef TARGET_NR_chown
11303 case TARGET_NR_chown
:
11304 if (!(p
= lock_user_string(arg1
)))
11306 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
11307 unlock_user(p
, arg1
, 0);
11310 case TARGET_NR_setuid
:
11311 ret
= get_errno(sys_setuid(low2highuid(arg1
)));
11313 case TARGET_NR_setgid
:
11314 ret
= get_errno(sys_setgid(low2highgid(arg1
)));
11316 case TARGET_NR_setfsuid
:
11317 ret
= get_errno(setfsuid(arg1
));
11319 case TARGET_NR_setfsgid
:
11320 ret
= get_errno(setfsgid(arg1
));
11323 #ifdef TARGET_NR_lchown32
11324 case TARGET_NR_lchown32
:
11325 if (!(p
= lock_user_string(arg1
)))
11327 ret
= get_errno(lchown(p
, arg2
, arg3
));
11328 unlock_user(p
, arg1
, 0);
11331 #ifdef TARGET_NR_getuid32
11332 case TARGET_NR_getuid32
:
11333 ret
= get_errno(getuid());
11337 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11338 /* Alpha specific */
11339 case TARGET_NR_getxuid
:
11343 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
11345 ret
= get_errno(getuid());
11348 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11349 /* Alpha specific */
11350 case TARGET_NR_getxgid
:
11354 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
11356 ret
= get_errno(getgid());
11359 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11360 /* Alpha specific */
11361 case TARGET_NR_osf_getsysinfo
:
11362 ret
= -TARGET_EOPNOTSUPP
;
11364 case TARGET_GSI_IEEE_FP_CONTROL
:
11366 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
11368 /* Copied from linux ieee_fpcr_to_swcr. */
11369 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
11370 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
11371 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
11372 | SWCR_TRAP_ENABLE_DZE
11373 | SWCR_TRAP_ENABLE_OVF
);
11374 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
11375 | SWCR_TRAP_ENABLE_INE
);
11376 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
11377 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
11379 if (put_user_u64 (swcr
, arg2
))
11385 /* case GSI_IEEE_STATE_AT_SIGNAL:
11386 -- Not implemented in linux kernel.
11388 -- Retrieves current unaligned access state; not much used.
11389 case GSI_PROC_TYPE:
11390 -- Retrieves implver information; surely not used.
11391 case GSI_GET_HWRPB:
11392 -- Grabs a copy of the HWRPB; surely not used.
11397 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11398 /* Alpha specific */
11399 case TARGET_NR_osf_setsysinfo
:
11400 ret
= -TARGET_EOPNOTSUPP
;
11402 case TARGET_SSI_IEEE_FP_CONTROL
:
11404 uint64_t swcr
, fpcr
, orig_fpcr
;
11406 if (get_user_u64 (swcr
, arg2
)) {
11409 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11410 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
11412 /* Copied from linux ieee_swcr_to_fpcr. */
11413 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
11414 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
11415 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
11416 | SWCR_TRAP_ENABLE_DZE
11417 | SWCR_TRAP_ENABLE_OVF
)) << 48;
11418 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
11419 | SWCR_TRAP_ENABLE_INE
)) << 57;
11420 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
11421 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
11423 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11428 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
11430 uint64_t exc
, fpcr
, orig_fpcr
;
11433 if (get_user_u64(exc
, arg2
)) {
11437 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11439 /* We only add to the exception status here. */
11440 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
11442 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11445 /* Old exceptions are not signaled. */
11446 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
11448 /* If any exceptions set by this call,
11449 and are unmasked, send a signal. */
11451 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
11452 si_code
= TARGET_FPE_FLTRES
;
11454 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
11455 si_code
= TARGET_FPE_FLTUND
;
11457 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
11458 si_code
= TARGET_FPE_FLTOVF
;
11460 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
11461 si_code
= TARGET_FPE_FLTDIV
;
11463 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
11464 si_code
= TARGET_FPE_FLTINV
;
11466 if (si_code
!= 0) {
11467 target_siginfo_t info
;
11468 info
.si_signo
= SIGFPE
;
11470 info
.si_code
= si_code
;
11471 info
._sifields
._sigfault
._addr
11472 = ((CPUArchState
*)cpu_env
)->pc
;
11473 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
11474 QEMU_SI_FAULT
, &info
);
11479 /* case SSI_NVPAIRS:
11480 -- Used with SSIN_UACPROC to enable unaligned accesses.
11481 case SSI_IEEE_STATE_AT_SIGNAL:
11482 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11483 -- Not implemented in linux kernel
11488 #ifdef TARGET_NR_osf_sigprocmask
11489 /* Alpha specific. */
11490 case TARGET_NR_osf_sigprocmask
:
11494 sigset_t set
, oldset
;
11497 case TARGET_SIG_BLOCK
:
11500 case TARGET_SIG_UNBLOCK
:
11503 case TARGET_SIG_SETMASK
:
11507 ret
= -TARGET_EINVAL
;
11511 target_to_host_old_sigset(&set
, &mask
);
11512 ret
= do_sigprocmask(how
, &set
, &oldset
);
11514 host_to_target_old_sigset(&mask
, &oldset
);
11521 #ifdef TARGET_NR_getgid32
11522 case TARGET_NR_getgid32
:
11523 ret
= get_errno(getgid());
11526 #ifdef TARGET_NR_geteuid32
11527 case TARGET_NR_geteuid32
:
11528 ret
= get_errno(geteuid());
11531 #ifdef TARGET_NR_getegid32
11532 case TARGET_NR_getegid32
:
11533 ret
= get_errno(getegid());
11536 #ifdef TARGET_NR_setreuid32
11537 case TARGET_NR_setreuid32
:
11538 ret
= get_errno(setreuid(arg1
, arg2
));
11541 #ifdef TARGET_NR_setregid32
11542 case TARGET_NR_setregid32
:
11543 ret
= get_errno(setregid(arg1
, arg2
));
11546 #ifdef TARGET_NR_getgroups32
11547 case TARGET_NR_getgroups32
:
11549 int gidsetsize
= arg1
;
11550 uint32_t *target_grouplist
;
11554 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11555 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11556 if (gidsetsize
== 0)
11558 if (!is_error(ret
)) {
11559 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
11560 if (!target_grouplist
) {
11561 ret
= -TARGET_EFAULT
;
11564 for(i
= 0;i
< ret
; i
++)
11565 target_grouplist
[i
] = tswap32(grouplist
[i
]);
11566 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
11571 #ifdef TARGET_NR_setgroups32
11572 case TARGET_NR_setgroups32
:
11574 int gidsetsize
= arg1
;
11575 uint32_t *target_grouplist
;
11579 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11580 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
11581 if (!target_grouplist
) {
11582 ret
= -TARGET_EFAULT
;
11585 for(i
= 0;i
< gidsetsize
; i
++)
11586 grouplist
[i
] = tswap32(target_grouplist
[i
]);
11587 unlock_user(target_grouplist
, arg2
, 0);
11588 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
11592 #ifdef TARGET_NR_fchown32
11593 case TARGET_NR_fchown32
:
11594 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
11597 #ifdef TARGET_NR_setresuid32
11598 case TARGET_NR_setresuid32
:
11599 ret
= get_errno(sys_setresuid(arg1
, arg2
, arg3
));
11602 #ifdef TARGET_NR_getresuid32
11603 case TARGET_NR_getresuid32
:
11605 uid_t ruid
, euid
, suid
;
11606 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11607 if (!is_error(ret
)) {
11608 if (put_user_u32(ruid
, arg1
)
11609 || put_user_u32(euid
, arg2
)
11610 || put_user_u32(suid
, arg3
))
11616 #ifdef TARGET_NR_setresgid32
11617 case TARGET_NR_setresgid32
:
11618 ret
= get_errno(sys_setresgid(arg1
, arg2
, arg3
));
11621 #ifdef TARGET_NR_getresgid32
11622 case TARGET_NR_getresgid32
:
11624 gid_t rgid
, egid
, sgid
;
11625 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11626 if (!is_error(ret
)) {
11627 if (put_user_u32(rgid
, arg1
)
11628 || put_user_u32(egid
, arg2
)
11629 || put_user_u32(sgid
, arg3
))
11635 #ifdef TARGET_NR_chown32
11636 case TARGET_NR_chown32
:
11637 if (!(p
= lock_user_string(arg1
)))
11639 ret
= get_errno(chown(p
, arg2
, arg3
));
11640 unlock_user(p
, arg1
, 0);
11643 #ifdef TARGET_NR_setuid32
11644 case TARGET_NR_setuid32
:
11645 ret
= get_errno(sys_setuid(arg1
));
11648 #ifdef TARGET_NR_setgid32
11649 case TARGET_NR_setgid32
:
11650 ret
= get_errno(sys_setgid(arg1
));
11653 #ifdef TARGET_NR_setfsuid32
11654 case TARGET_NR_setfsuid32
:
11655 ret
= get_errno(setfsuid(arg1
));
11658 #ifdef TARGET_NR_setfsgid32
11659 case TARGET_NR_setfsgid32
:
11660 ret
= get_errno(setfsgid(arg1
));
11664 case TARGET_NR_pivot_root
:
11665 goto unimplemented
;
11666 #ifdef TARGET_NR_mincore
11667 case TARGET_NR_mincore
:
11670 ret
= -TARGET_ENOMEM
;
11671 a
= lock_user(VERIFY_READ
, arg1
, arg2
, 0);
11675 ret
= -TARGET_EFAULT
;
11676 p
= lock_user_string(arg3
);
11680 ret
= get_errno(mincore(a
, arg2
, p
));
11681 unlock_user(p
, arg3
, ret
);
11683 unlock_user(a
, arg1
, 0);
11687 #ifdef TARGET_NR_arm_fadvise64_64
11688 case TARGET_NR_arm_fadvise64_64
:
11689 /* arm_fadvise64_64 looks like fadvise64_64 but
11690 * with different argument order: fd, advice, offset, len
11691 * rather than the usual fd, offset, len, advice.
11692 * Note that offset and len are both 64-bit so appear as
11693 * pairs of 32-bit registers.
11695 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
11696 target_offset64(arg5
, arg6
), arg2
);
11697 ret
= -host_to_target_errno(ret
);
11701 #if TARGET_ABI_BITS == 32
11703 #ifdef TARGET_NR_fadvise64_64
11704 case TARGET_NR_fadvise64_64
:
11705 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11706 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11714 /* 6 args: fd, offset (high, low), len (high, low), advice */
11715 if (regpairs_aligned(cpu_env
, num
)) {
11716 /* offset is in (3,4), len in (5,6) and advice in 7 */
11724 ret
= -host_to_target_errno(posix_fadvise(arg1
,
11725 target_offset64(arg2
, arg3
),
11726 target_offset64(arg4
, arg5
),
11731 #ifdef TARGET_NR_fadvise64
11732 case TARGET_NR_fadvise64
:
11733 /* 5 args: fd, offset (high, low), len, advice */
11734 if (regpairs_aligned(cpu_env
, num
)) {
11735 /* offset is in (3,4), len in 5 and advice in 6 */
11741 ret
= -host_to_target_errno(posix_fadvise(arg1
,
11742 target_offset64(arg2
, arg3
),
11747 #else /* not a 32-bit ABI */
11748 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11749 #ifdef TARGET_NR_fadvise64_64
11750 case TARGET_NR_fadvise64_64
:
11752 #ifdef TARGET_NR_fadvise64
11753 case TARGET_NR_fadvise64
:
11755 #ifdef TARGET_S390X
11757 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
11758 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
11759 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
11760 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
11764 ret
= -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
11767 #endif /* end of 64-bit ABI fadvise handling */
11769 #ifdef TARGET_NR_madvise
11770 case TARGET_NR_madvise
:
11771 /* A straight passthrough may not be safe because qemu sometimes
11772 turns private file-backed mappings into anonymous mappings.
11773 This will break MADV_DONTNEED.
11774 This is a hint, so ignoring and returning success is ok. */
11775 ret
= get_errno(0);
11778 #if TARGET_ABI_BITS == 32
11779 case TARGET_NR_fcntl64
:
11783 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
11784 to_flock64_fn
*copyto
= copy_to_user_flock64
;
11787 if (!((CPUARMState
*)cpu_env
)->eabi
) {
11788 copyfrom
= copy_from_user_oabi_flock64
;
11789 copyto
= copy_to_user_oabi_flock64
;
11793 cmd
= target_to_host_fcntl_cmd(arg2
);
11794 if (cmd
== -TARGET_EINVAL
) {
11800 case TARGET_F_GETLK64
:
11801 ret
= copyfrom(&fl
, arg3
);
11805 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11807 ret
= copyto(arg3
, &fl
);
11811 case TARGET_F_SETLK64
:
11812 case TARGET_F_SETLKW64
:
11813 ret
= copyfrom(&fl
, arg3
);
11817 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11820 ret
= do_fcntl(arg1
, arg2
, arg3
);
11826 #ifdef TARGET_NR_cacheflush
11827 case TARGET_NR_cacheflush
:
11828 /* self-modifying code is handled automatically, so nothing needed */
11832 #ifdef TARGET_NR_security
11833 case TARGET_NR_security
:
11834 goto unimplemented
;
11836 #ifdef TARGET_NR_getpagesize
11837 case TARGET_NR_getpagesize
:
11838 ret
= TARGET_PAGE_SIZE
;
11841 case TARGET_NR_gettid
:
11842 ret
= get_errno(gettid());
11844 #ifdef TARGET_NR_readahead
11845 case TARGET_NR_readahead
:
11846 #if TARGET_ABI_BITS == 32
11847 if (regpairs_aligned(cpu_env
, num
)) {
11852 ret
= get_errno(readahead(arg1
, target_offset64(arg2
, arg3
) , arg4
));
11854 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
11859 #ifdef TARGET_NR_setxattr
11860 case TARGET_NR_listxattr
:
11861 case TARGET_NR_llistxattr
:
11865 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11867 ret
= -TARGET_EFAULT
;
11871 p
= lock_user_string(arg1
);
11873 if (num
== TARGET_NR_listxattr
) {
11874 ret
= get_errno(listxattr(p
, b
, arg3
));
11876 ret
= get_errno(llistxattr(p
, b
, arg3
));
11879 ret
= -TARGET_EFAULT
;
11881 unlock_user(p
, arg1
, 0);
11882 unlock_user(b
, arg2
, arg3
);
11885 case TARGET_NR_flistxattr
:
11889 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11891 ret
= -TARGET_EFAULT
;
11895 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
11896 unlock_user(b
, arg2
, arg3
);
11899 case TARGET_NR_setxattr
:
11900 case TARGET_NR_lsetxattr
:
11902 void *p
, *n
, *v
= 0;
11904 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11906 ret
= -TARGET_EFAULT
;
11910 p
= lock_user_string(arg1
);
11911 n
= lock_user_string(arg2
);
11913 if (num
== TARGET_NR_setxattr
) {
11914 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
11916 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
11919 ret
= -TARGET_EFAULT
;
11921 unlock_user(p
, arg1
, 0);
11922 unlock_user(n
, arg2
, 0);
11923 unlock_user(v
, arg3
, 0);
11926 case TARGET_NR_fsetxattr
:
11930 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11932 ret
= -TARGET_EFAULT
;
11936 n
= lock_user_string(arg2
);
11938 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
11940 ret
= -TARGET_EFAULT
;
11942 unlock_user(n
, arg2
, 0);
11943 unlock_user(v
, arg3
, 0);
11946 case TARGET_NR_getxattr
:
11947 case TARGET_NR_lgetxattr
:
11949 void *p
, *n
, *v
= 0;
11951 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11953 ret
= -TARGET_EFAULT
;
11957 p
= lock_user_string(arg1
);
11958 n
= lock_user_string(arg2
);
11960 if (num
== TARGET_NR_getxattr
) {
11961 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
11963 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
11966 ret
= -TARGET_EFAULT
;
11968 unlock_user(p
, arg1
, 0);
11969 unlock_user(n
, arg2
, 0);
11970 unlock_user(v
, arg3
, arg4
);
11973 case TARGET_NR_fgetxattr
:
11977 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11979 ret
= -TARGET_EFAULT
;
11983 n
= lock_user_string(arg2
);
11985 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
11987 ret
= -TARGET_EFAULT
;
11989 unlock_user(n
, arg2
, 0);
11990 unlock_user(v
, arg3
, arg4
);
11993 case TARGET_NR_removexattr
:
11994 case TARGET_NR_lremovexattr
:
11997 p
= lock_user_string(arg1
);
11998 n
= lock_user_string(arg2
);
12000 if (num
== TARGET_NR_removexattr
) {
12001 ret
= get_errno(removexattr(p
, n
));
12003 ret
= get_errno(lremovexattr(p
, n
));
12006 ret
= -TARGET_EFAULT
;
12008 unlock_user(p
, arg1
, 0);
12009 unlock_user(n
, arg2
, 0);
12012 case TARGET_NR_fremovexattr
:
12015 n
= lock_user_string(arg2
);
12017 ret
= get_errno(fremovexattr(arg1
, n
));
12019 ret
= -TARGET_EFAULT
;
12021 unlock_user(n
, arg2
, 0);
12025 #endif /* CONFIG_ATTR */
12026 #ifdef TARGET_NR_set_thread_area
12027 case TARGET_NR_set_thread_area
:
12028 #if defined(TARGET_MIPS)
12029 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
12032 #elif defined(TARGET_CRIS)
12034 ret
= -TARGET_EINVAL
;
12036 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
12040 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12041 ret
= do_set_thread_area(cpu_env
, arg1
);
12043 #elif defined(TARGET_M68K)
12045 TaskState
*ts
= cpu
->opaque
;
12046 ts
->tp_value
= arg1
;
12051 goto unimplemented_nowarn
;
12054 #ifdef TARGET_NR_get_thread_area
12055 case TARGET_NR_get_thread_area
:
12056 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12057 ret
= do_get_thread_area(cpu_env
, arg1
);
12059 #elif defined(TARGET_M68K)
12061 TaskState
*ts
= cpu
->opaque
;
12062 ret
= ts
->tp_value
;
12066 goto unimplemented_nowarn
;
12069 #ifdef TARGET_NR_getdomainname
12070 case TARGET_NR_getdomainname
:
12071 goto unimplemented_nowarn
;
12074 #ifdef TARGET_NR_clock_settime
12075 case TARGET_NR_clock_settime
:
12077 struct timespec ts
;
12079 ret
= target_to_host_timespec(&ts
, arg2
);
12080 if (!is_error(ret
)) {
12081 ret
= get_errno(clock_settime(arg1
, &ts
));
12086 #ifdef TARGET_NR_clock_gettime
12087 case TARGET_NR_clock_gettime
:
12089 struct timespec ts
;
12090 ret
= get_errno(clock_gettime(arg1
, &ts
));
12091 if (!is_error(ret
)) {
12092 ret
= host_to_target_timespec(arg2
, &ts
);
12097 #ifdef TARGET_NR_clock_getres
12098 case TARGET_NR_clock_getres
:
12100 struct timespec ts
;
12101 ret
= get_errno(clock_getres(arg1
, &ts
));
12102 if (!is_error(ret
)) {
12103 host_to_target_timespec(arg2
, &ts
);
12108 #ifdef TARGET_NR_clock_nanosleep
12109 case TARGET_NR_clock_nanosleep
:
12111 struct timespec ts
;
12112 target_to_host_timespec(&ts
, arg3
);
12113 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
12114 &ts
, arg4
? &ts
: NULL
));
12116 host_to_target_timespec(arg4
, &ts
);
12118 #if defined(TARGET_PPC)
12119 /* clock_nanosleep is odd in that it returns positive errno values.
12120 * On PPC, CR0 bit 3 should be set in such a situation. */
12121 if (ret
&& ret
!= -TARGET_ERESTARTSYS
) {
12122 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
12129 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12130 case TARGET_NR_set_tid_address
:
12131 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
12135 case TARGET_NR_tkill
:
12136 ret
= get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
12139 case TARGET_NR_tgkill
:
12140 ret
= get_errno(safe_tgkill((int)arg1
, (int)arg2
,
12141 target_to_host_signal(arg3
)));
12144 #ifdef TARGET_NR_set_robust_list
12145 case TARGET_NR_set_robust_list
:
12146 case TARGET_NR_get_robust_list
:
12147 /* The ABI for supporting robust futexes has userspace pass
12148 * the kernel a pointer to a linked list which is updated by
12149 * userspace after the syscall; the list is walked by the kernel
12150 * when the thread exits. Since the linked list in QEMU guest
12151 * memory isn't a valid linked list for the host and we have
12152 * no way to reliably intercept the thread-death event, we can't
12153 * support these. Silently return ENOSYS so that guest userspace
12154 * falls back to a non-robust futex implementation (which should
12155 * be OK except in the corner case of the guest crashing while
12156 * holding a mutex that is shared with another process via
12159 goto unimplemented_nowarn
;
12162 #if defined(TARGET_NR_utimensat)
12163 case TARGET_NR_utimensat
:
12165 struct timespec
*tsp
, ts
[2];
12169 target_to_host_timespec(ts
, arg3
);
12170 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
12174 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
12176 if (!(p
= lock_user_string(arg2
))) {
12177 ret
= -TARGET_EFAULT
;
12180 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
12181 unlock_user(p
, arg2
, 0);
12186 case TARGET_NR_futex
:
12187 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12189 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12190 case TARGET_NR_inotify_init
:
12191 ret
= get_errno(sys_inotify_init());
12193 fd_trans_register(ret
, &target_inotify_trans
);
12197 #ifdef CONFIG_INOTIFY1
12198 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12199 case TARGET_NR_inotify_init1
:
12200 ret
= get_errno(sys_inotify_init1(target_to_host_bitmask(arg1
,
12201 fcntl_flags_tbl
)));
12203 fd_trans_register(ret
, &target_inotify_trans
);
12208 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12209 case TARGET_NR_inotify_add_watch
:
12210 p
= lock_user_string(arg2
);
12211 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
12212 unlock_user(p
, arg2
, 0);
12215 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12216 case TARGET_NR_inotify_rm_watch
:
12217 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
12221 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12222 case TARGET_NR_mq_open
:
12224 struct mq_attr posix_mq_attr
;
12225 struct mq_attr
*pposix_mq_attr
;
12228 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
12229 pposix_mq_attr
= NULL
;
12231 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
12234 pposix_mq_attr
= &posix_mq_attr
;
12236 p
= lock_user_string(arg1
- 1);
12240 ret
= get_errno(mq_open(p
, host_flags
, arg3
, pposix_mq_attr
));
12241 unlock_user (p
, arg1
, 0);
12245 case TARGET_NR_mq_unlink
:
12246 p
= lock_user_string(arg1
- 1);
12248 ret
= -TARGET_EFAULT
;
12251 ret
= get_errno(mq_unlink(p
));
12252 unlock_user (p
, arg1
, 0);
12255 case TARGET_NR_mq_timedsend
:
12257 struct timespec ts
;
12259 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
12261 target_to_host_timespec(&ts
, arg5
);
12262 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
12263 host_to_target_timespec(arg5
, &ts
);
12265 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
12267 unlock_user (p
, arg2
, arg3
);
12271 case TARGET_NR_mq_timedreceive
:
12273 struct timespec ts
;
12276 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
12278 target_to_host_timespec(&ts
, arg5
);
12279 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12281 host_to_target_timespec(arg5
, &ts
);
12283 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12286 unlock_user (p
, arg2
, arg3
);
12288 put_user_u32(prio
, arg4
);
12292 /* Not implemented for now... */
12293 /* case TARGET_NR_mq_notify: */
12296 case TARGET_NR_mq_getsetattr
:
12298 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
12301 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
12302 ret
= get_errno(mq_setattr(arg1
, &posix_mq_attr_in
,
12303 &posix_mq_attr_out
));
12304 } else if (arg3
!= 0) {
12305 ret
= get_errno(mq_getattr(arg1
, &posix_mq_attr_out
));
12307 if (ret
== 0 && arg3
!= 0) {
12308 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
12314 #ifdef CONFIG_SPLICE
12315 #ifdef TARGET_NR_tee
12316 case TARGET_NR_tee
:
12318 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
12322 #ifdef TARGET_NR_splice
12323 case TARGET_NR_splice
:
12325 loff_t loff_in
, loff_out
;
12326 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
12328 if (get_user_u64(loff_in
, arg2
)) {
12331 ploff_in
= &loff_in
;
12334 if (get_user_u64(loff_out
, arg4
)) {
12337 ploff_out
= &loff_out
;
12339 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
12341 if (put_user_u64(loff_in
, arg2
)) {
12346 if (put_user_u64(loff_out
, arg4
)) {
12353 #ifdef TARGET_NR_vmsplice
12354 case TARGET_NR_vmsplice
:
12356 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
12358 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
12359 unlock_iovec(vec
, arg2
, arg3
, 0);
12361 ret
= -host_to_target_errno(errno
);
12366 #endif /* CONFIG_SPLICE */
12367 #ifdef CONFIG_EVENTFD
12368 #if defined(TARGET_NR_eventfd)
12369 case TARGET_NR_eventfd
:
12370 ret
= get_errno(eventfd(arg1
, 0));
12372 fd_trans_register(ret
, &target_eventfd_trans
);
12376 #if defined(TARGET_NR_eventfd2)
12377 case TARGET_NR_eventfd2
:
12379 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
12380 if (arg2
& TARGET_O_NONBLOCK
) {
12381 host_flags
|= O_NONBLOCK
;
12383 if (arg2
& TARGET_O_CLOEXEC
) {
12384 host_flags
|= O_CLOEXEC
;
12386 ret
= get_errno(eventfd(arg1
, host_flags
));
12388 fd_trans_register(ret
, &target_eventfd_trans
);
12393 #endif /* CONFIG_EVENTFD */
12394 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12395 case TARGET_NR_fallocate
:
12396 #if TARGET_ABI_BITS == 32
12397 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
12398 target_offset64(arg5
, arg6
)));
12400 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
12404 #if defined(CONFIG_SYNC_FILE_RANGE)
12405 #if defined(TARGET_NR_sync_file_range)
12406 case TARGET_NR_sync_file_range
:
12407 #if TARGET_ABI_BITS == 32
12408 #if defined(TARGET_MIPS)
12409 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12410 target_offset64(arg5
, arg6
), arg7
));
12412 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
12413 target_offset64(arg4
, arg5
), arg6
));
12414 #endif /* !TARGET_MIPS */
12416 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
12420 #if defined(TARGET_NR_sync_file_range2)
12421 case TARGET_NR_sync_file_range2
:
12422 /* This is like sync_file_range but the arguments are reordered */
12423 #if TARGET_ABI_BITS == 32
12424 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12425 target_offset64(arg5
, arg6
), arg2
));
12427 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
12432 #if defined(TARGET_NR_signalfd4)
12433 case TARGET_NR_signalfd4
:
12434 ret
= do_signalfd4(arg1
, arg2
, arg4
);
12437 #if defined(TARGET_NR_signalfd)
12438 case TARGET_NR_signalfd
:
12439 ret
= do_signalfd4(arg1
, arg2
, 0);
12442 #if defined(CONFIG_EPOLL)
12443 #if defined(TARGET_NR_epoll_create)
12444 case TARGET_NR_epoll_create
:
12445 ret
= get_errno(epoll_create(arg1
));
12448 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12449 case TARGET_NR_epoll_create1
:
12450 ret
= get_errno(epoll_create1(arg1
));
12453 #if defined(TARGET_NR_epoll_ctl)
12454 case TARGET_NR_epoll_ctl
:
12456 struct epoll_event ep
;
12457 struct epoll_event
*epp
= 0;
12459 struct target_epoll_event
*target_ep
;
12460 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
12463 ep
.events
= tswap32(target_ep
->events
);
12464 /* The epoll_data_t union is just opaque data to the kernel,
12465 * so we transfer all 64 bits across and need not worry what
12466 * actual data type it is.
12468 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
12469 unlock_user_struct(target_ep
, arg4
, 0);
12472 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
12477 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12478 #if defined(TARGET_NR_epoll_wait)
12479 case TARGET_NR_epoll_wait
:
12481 #if defined(TARGET_NR_epoll_pwait)
12482 case TARGET_NR_epoll_pwait
:
12485 struct target_epoll_event
*target_ep
;
12486 struct epoll_event
*ep
;
12488 int maxevents
= arg3
;
12489 int timeout
= arg4
;
12491 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
12492 ret
= -TARGET_EINVAL
;
12496 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
12497 maxevents
* sizeof(struct target_epoll_event
), 1);
12502 ep
= g_try_new(struct epoll_event
, maxevents
);
12504 unlock_user(target_ep
, arg2
, 0);
12505 ret
= -TARGET_ENOMEM
;
12510 #if defined(TARGET_NR_epoll_pwait)
12511 case TARGET_NR_epoll_pwait
:
12513 target_sigset_t
*target_set
;
12514 sigset_t _set
, *set
= &_set
;
12517 if (arg6
!= sizeof(target_sigset_t
)) {
12518 ret
= -TARGET_EINVAL
;
12522 target_set
= lock_user(VERIFY_READ
, arg5
,
12523 sizeof(target_sigset_t
), 1);
12525 ret
= -TARGET_EFAULT
;
12528 target_to_host_sigset(set
, target_set
);
12529 unlock_user(target_set
, arg5
, 0);
12534 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12535 set
, SIGSET_T_SIZE
));
12539 #if defined(TARGET_NR_epoll_wait)
12540 case TARGET_NR_epoll_wait
:
12541 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12546 ret
= -TARGET_ENOSYS
;
12548 if (!is_error(ret
)) {
12550 for (i
= 0; i
< ret
; i
++) {
12551 target_ep
[i
].events
= tswap32(ep
[i
].events
);
12552 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
12554 unlock_user(target_ep
, arg2
,
12555 ret
* sizeof(struct target_epoll_event
));
12557 unlock_user(target_ep
, arg2
, 0);
12564 #ifdef TARGET_NR_prlimit64
12565 case TARGET_NR_prlimit64
:
12567 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12568 struct target_rlimit64
*target_rnew
, *target_rold
;
12569 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
12570 int resource
= target_to_host_resource(arg2
);
12572 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
12575 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
12576 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
12577 unlock_user_struct(target_rnew
, arg3
, 0);
12581 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
12582 if (!is_error(ret
) && arg4
) {
12583 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
12586 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
12587 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
12588 unlock_user_struct(target_rold
, arg4
, 1);
12593 #ifdef TARGET_NR_gethostname
12594 case TARGET_NR_gethostname
:
12596 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
12598 ret
= get_errno(gethostname(name
, arg2
));
12599 unlock_user(name
, arg1
, arg2
);
12601 ret
= -TARGET_EFAULT
;
12606 #ifdef TARGET_NR_atomic_cmpxchg_32
12607 case TARGET_NR_atomic_cmpxchg_32
:
12609 /* should use start_exclusive from main.c */
12610 abi_ulong mem_value
;
12611 if (get_user_u32(mem_value
, arg6
)) {
12612 target_siginfo_t info
;
12613 info
.si_signo
= SIGSEGV
;
12615 info
.si_code
= TARGET_SEGV_MAPERR
;
12616 info
._sifields
._sigfault
._addr
= arg6
;
12617 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
12618 QEMU_SI_FAULT
, &info
);
12622 if (mem_value
== arg2
)
12623 put_user_u32(arg1
, arg6
);
12628 #ifdef TARGET_NR_atomic_barrier
12629 case TARGET_NR_atomic_barrier
:
12631 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
12637 #ifdef TARGET_NR_timer_create
12638 case TARGET_NR_timer_create
:
12640 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12642 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
12645 int timer_index
= next_free_host_timer();
12647 if (timer_index
< 0) {
12648 ret
= -TARGET_EAGAIN
;
12650 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
12653 phost_sevp
= &host_sevp
;
12654 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
12660 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
12664 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
12673 #ifdef TARGET_NR_timer_settime
12674 case TARGET_NR_timer_settime
:
12676 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12677 * struct itimerspec * old_value */
12678 target_timer_t timerid
= get_timer_id(arg1
);
12682 } else if (arg3
== 0) {
12683 ret
= -TARGET_EINVAL
;
12685 timer_t htimer
= g_posix_timers
[timerid
];
12686 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12688 if (target_to_host_itimerspec(&hspec_new
, arg3
)) {
12692 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12693 if (arg4
&& host_to_target_itimerspec(arg4
, &hspec_old
)) {
12701 #ifdef TARGET_NR_timer_gettime
12702 case TARGET_NR_timer_gettime
:
12704 /* args: timer_t timerid, struct itimerspec *curr_value */
12705 target_timer_t timerid
= get_timer_id(arg1
);
12709 } else if (!arg2
) {
12710 ret
= -TARGET_EFAULT
;
12712 timer_t htimer
= g_posix_timers
[timerid
];
12713 struct itimerspec hspec
;
12714 ret
= get_errno(timer_gettime(htimer
, &hspec
));
12716 if (host_to_target_itimerspec(arg2
, &hspec
)) {
12717 ret
= -TARGET_EFAULT
;
12724 #ifdef TARGET_NR_timer_getoverrun
12725 case TARGET_NR_timer_getoverrun
:
12727 /* args: timer_t timerid */
12728 target_timer_t timerid
= get_timer_id(arg1
);
12733 timer_t htimer
= g_posix_timers
[timerid
];
12734 ret
= get_errno(timer_getoverrun(htimer
));
12736 fd_trans_unregister(ret
);
12741 #ifdef TARGET_NR_timer_delete
12742 case TARGET_NR_timer_delete
:
12744 /* args: timer_t timerid */
12745 target_timer_t timerid
= get_timer_id(arg1
);
12750 timer_t htimer
= g_posix_timers
[timerid
];
12751 ret
= get_errno(timer_delete(htimer
));
12752 g_posix_timers
[timerid
] = 0;
12758 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12759 case TARGET_NR_timerfd_create
:
12760 ret
= get_errno(timerfd_create(arg1
,
12761 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
12765 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12766 case TARGET_NR_timerfd_gettime
:
12768 struct itimerspec its_curr
;
12770 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
12772 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
12779 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12780 case TARGET_NR_timerfd_settime
:
12782 struct itimerspec its_new
, its_old
, *p_new
;
12785 if (target_to_host_itimerspec(&its_new
, arg3
)) {
12793 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
12795 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
12802 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12803 case TARGET_NR_ioprio_get
:
12804 ret
= get_errno(ioprio_get(arg1
, arg2
));
12808 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12809 case TARGET_NR_ioprio_set
:
12810 ret
= get_errno(ioprio_set(arg1
, arg2
, arg3
));
12814 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12815 case TARGET_NR_setns
:
12816 ret
= get_errno(setns(arg1
, arg2
));
12819 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12820 case TARGET_NR_unshare
:
12821 ret
= get_errno(unshare(arg1
));
12824 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12825 case TARGET_NR_kcmp
:
12826 ret
= get_errno(kcmp(arg1
, arg2
, arg3
, arg4
, arg5
));
12829 #ifdef TARGET_NR_swapcontext
12830 case TARGET_NR_swapcontext
:
12831 /* PowerPC specific. */
12832 ret
= do_swapcontext(cpu_env
, arg1
, arg2
, arg3
);
12838 qemu_log_mask(LOG_UNIMP
, "Unsupported syscall: %d\n", num
);
12839 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
12840 unimplemented_nowarn
:
12842 ret
= -TARGET_ENOSYS
;
12847 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
12850 print_syscall_ret(num
, ret
);
12851 trace_guest_user_syscall_ret(cpu
, num
, ret
);
12854 ret
= -TARGET_EFAULT
;