4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
36 #include <linux/capability.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
43 #include <sys/times.h>
46 #include <sys/statfs.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #include "qemu-common.h"
60 #include <sys/timerfd.h>
66 #include <sys/eventfd.h>
69 #include <sys/epoll.h>
72 #include "qemu/xattr.h"
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
91 #include <linux/mtio.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
107 #include <linux/if_bridge.h>
109 #include <linux/audit.h>
110 #include "linux_loop.h"
116 #define CLONE_IO 0x80000000 /* Clone io context */
119 /* We can't directly call the host clone syscall, because this will
120 * badly confuse libc (breaking mutexes, for example). So we must
121 * divide clone flags into:
122 * * flag combinations that look like pthread_create()
123 * * flag combinations that look like fork()
124 * * flags we can implement within QEMU itself
125 * * flags we can't support and will return an error for
127 /* For thread creation, all these flags must be present; for
128 * fork, none must be present.
130 #define CLONE_THREAD_FLAGS \
131 (CLONE_VM | CLONE_FS | CLONE_FILES | \
132 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
134 /* These flags are ignored:
135 * CLONE_DETACHED is now ignored by the kernel;
136 * CLONE_IO is just an optimisation hint to the I/O scheduler
138 #define CLONE_IGNORED_FLAGS \
139 (CLONE_DETACHED | CLONE_IO)
141 /* Flags for fork which we can implement within QEMU itself */
142 #define CLONE_OPTIONAL_FORK_FLAGS \
143 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
144 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
146 /* Flags for thread creation which we can implement within QEMU itself */
147 #define CLONE_OPTIONAL_THREAD_FLAGS \
148 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
149 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
151 #define CLONE_INVALID_FORK_FLAGS \
152 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
154 #define CLONE_INVALID_THREAD_FLAGS \
155 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
156 CLONE_IGNORED_FLAGS))
158 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
159 * have almost all been allocated. We cannot support any of
160 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
161 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
162 * The checks against the invalid thread masks above will catch these.
163 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
167 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
168 * once. This exercises the codepaths for restart.
170 //#define DEBUG_ERESTARTSYS
172 //#include <linux/msdos_fs.h>
173 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
174 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
184 #define _syscall0(type,name) \
185 static type name (void) \
187 return syscall(__NR_##name); \
190 #define _syscall1(type,name,type1,arg1) \
191 static type name (type1 arg1) \
193 return syscall(__NR_##name, arg1); \
196 #define _syscall2(type,name,type1,arg1,type2,arg2) \
197 static type name (type1 arg1,type2 arg2) \
199 return syscall(__NR_##name, arg1, arg2); \
202 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
203 static type name (type1 arg1,type2 arg2,type3 arg3) \
205 return syscall(__NR_##name, arg1, arg2, arg3); \
208 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
209 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
211 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
214 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
216 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
218 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
222 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
223 type5,arg5,type6,arg6) \
224 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
227 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
231 #define __NR_sys_uname __NR_uname
232 #define __NR_sys_getcwd1 __NR_getcwd
233 #define __NR_sys_getdents __NR_getdents
234 #define __NR_sys_getdents64 __NR_getdents64
235 #define __NR_sys_getpriority __NR_getpriority
236 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
237 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
238 #define __NR_sys_syslog __NR_syslog
239 #define __NR_sys_futex __NR_futex
240 #define __NR_sys_inotify_init __NR_inotify_init
241 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
242 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
244 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
245 #define __NR__llseek __NR_lseek
248 /* Newer kernel ports have llseek() instead of _llseek() */
249 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
250 #define TARGET_NR__llseek TARGET_NR_llseek
254 _syscall0(int, gettid
)
256 /* This is a replacement for the host gettid() and must return a host
258 static int gettid(void) {
263 /* For the 64-bit guest on 32-bit host case we must emulate
264 * getdents using getdents64, because otherwise the host
265 * might hand us back more dirent records than we can fit
266 * into the guest buffer after structure format conversion.
267 * Otherwise we emulate getdents with getdents if the host has it.
269 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
270 #define EMULATE_GETDENTS_WITH_GETDENTS
273 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
274 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
276 #if (defined(TARGET_NR_getdents) && \
277 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
278 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
279 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
281 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
282 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
283 loff_t
*, res
, uint
, wh
);
285 _syscall3(int, sys_rt_sigqueueinfo
, pid_t
, pid
, int, sig
, siginfo_t
*, uinfo
)
286 _syscall4(int, sys_rt_tgsigqueueinfo
, pid_t
, pid
, pid_t
, tid
, int, sig
,
288 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
289 #ifdef __NR_exit_group
290 _syscall1(int,exit_group
,int,error_code
)
292 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
293 _syscall1(int,set_tid_address
,int *,tidptr
)
295 #if defined(TARGET_NR_futex) && defined(__NR_futex)
296 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
297 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
299 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
300 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
301 unsigned long *, user_mask_ptr
);
302 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
303 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
304 unsigned long *, user_mask_ptr
);
305 #define __NR_sys_getcpu __NR_getcpu
306 _syscall3(int, sys_getcpu
, unsigned *, cpu
, unsigned *, node
, void *, tcache
);
307 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
309 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
310 struct __user_cap_data_struct
*, data
);
311 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
312 struct __user_cap_data_struct
*, data
);
313 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
314 _syscall2(int, ioprio_get
, int, which
, int, who
)
316 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
317 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
319 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
320 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
323 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
324 _syscall5(int, kcmp
, pid_t
, pid1
, pid_t
, pid2
, int, type
,
325 unsigned long, idx1
, unsigned long, idx2
)
328 static bitmask_transtbl fcntl_flags_tbl
[] = {
329 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
330 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
331 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
332 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
333 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
334 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
335 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
336 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
337 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
338 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
339 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
340 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
341 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
342 #if defined(O_DIRECT)
343 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
345 #if defined(O_NOATIME)
346 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
348 #if defined(O_CLOEXEC)
349 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
352 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
354 #if defined(O_TMPFILE)
355 { TARGET_O_TMPFILE
, TARGET_O_TMPFILE
, O_TMPFILE
, O_TMPFILE
},
357 /* Don't terminate the list prematurely on 64-bit host+guest. */
358 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
359 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
366 QEMU_IFLA_BR_FORWARD_DELAY
,
367 QEMU_IFLA_BR_HELLO_TIME
,
368 QEMU_IFLA_BR_MAX_AGE
,
369 QEMU_IFLA_BR_AGEING_TIME
,
370 QEMU_IFLA_BR_STP_STATE
,
371 QEMU_IFLA_BR_PRIORITY
,
372 QEMU_IFLA_BR_VLAN_FILTERING
,
373 QEMU_IFLA_BR_VLAN_PROTOCOL
,
374 QEMU_IFLA_BR_GROUP_FWD_MASK
,
375 QEMU_IFLA_BR_ROOT_ID
,
376 QEMU_IFLA_BR_BRIDGE_ID
,
377 QEMU_IFLA_BR_ROOT_PORT
,
378 QEMU_IFLA_BR_ROOT_PATH_COST
,
379 QEMU_IFLA_BR_TOPOLOGY_CHANGE
,
380 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED
,
381 QEMU_IFLA_BR_HELLO_TIMER
,
382 QEMU_IFLA_BR_TCN_TIMER
,
383 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER
,
384 QEMU_IFLA_BR_GC_TIMER
,
385 QEMU_IFLA_BR_GROUP_ADDR
,
386 QEMU_IFLA_BR_FDB_FLUSH
,
387 QEMU_IFLA_BR_MCAST_ROUTER
,
388 QEMU_IFLA_BR_MCAST_SNOOPING
,
389 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR
,
390 QEMU_IFLA_BR_MCAST_QUERIER
,
391 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY
,
392 QEMU_IFLA_BR_MCAST_HASH_MAX
,
393 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT
,
394 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT
,
395 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL
,
396 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL
,
397 QEMU_IFLA_BR_MCAST_QUERIER_INTVL
,
398 QEMU_IFLA_BR_MCAST_QUERY_INTVL
,
399 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
,
400 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL
,
401 QEMU_IFLA_BR_NF_CALL_IPTABLES
,
402 QEMU_IFLA_BR_NF_CALL_IP6TABLES
,
403 QEMU_IFLA_BR_NF_CALL_ARPTABLES
,
404 QEMU_IFLA_BR_VLAN_DEFAULT_PVID
,
406 QEMU_IFLA_BR_VLAN_STATS_ENABLED
,
407 QEMU_IFLA_BR_MCAST_STATS_ENABLED
,
431 QEMU_IFLA_NET_NS_PID
,
434 QEMU_IFLA_VFINFO_LIST
,
442 QEMU_IFLA_PROMISCUITY
,
443 QEMU_IFLA_NUM_TX_QUEUES
,
444 QEMU_IFLA_NUM_RX_QUEUES
,
446 QEMU_IFLA_PHYS_PORT_ID
,
447 QEMU_IFLA_CARRIER_CHANGES
,
448 QEMU_IFLA_PHYS_SWITCH_ID
,
449 QEMU_IFLA_LINK_NETNSID
,
450 QEMU_IFLA_PHYS_PORT_NAME
,
451 QEMU_IFLA_PROTO_DOWN
,
452 QEMU_IFLA_GSO_MAX_SEGS
,
453 QEMU_IFLA_GSO_MAX_SIZE
,
460 QEMU_IFLA_BRPORT_UNSPEC
,
461 QEMU_IFLA_BRPORT_STATE
,
462 QEMU_IFLA_BRPORT_PRIORITY
,
463 QEMU_IFLA_BRPORT_COST
,
464 QEMU_IFLA_BRPORT_MODE
,
465 QEMU_IFLA_BRPORT_GUARD
,
466 QEMU_IFLA_BRPORT_PROTECT
,
467 QEMU_IFLA_BRPORT_FAST_LEAVE
,
468 QEMU_IFLA_BRPORT_LEARNING
,
469 QEMU_IFLA_BRPORT_UNICAST_FLOOD
,
470 QEMU_IFLA_BRPORT_PROXYARP
,
471 QEMU_IFLA_BRPORT_LEARNING_SYNC
,
472 QEMU_IFLA_BRPORT_PROXYARP_WIFI
,
473 QEMU_IFLA_BRPORT_ROOT_ID
,
474 QEMU_IFLA_BRPORT_BRIDGE_ID
,
475 QEMU_IFLA_BRPORT_DESIGNATED_PORT
,
476 QEMU_IFLA_BRPORT_DESIGNATED_COST
,
479 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
,
480 QEMU_IFLA_BRPORT_CONFIG_PENDING
,
481 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER
,
482 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER
,
483 QEMU_IFLA_BRPORT_HOLD_TIMER
,
484 QEMU_IFLA_BRPORT_FLUSH
,
485 QEMU_IFLA_BRPORT_MULTICAST_ROUTER
,
486 QEMU_IFLA_BRPORT_PAD
,
487 QEMU___IFLA_BRPORT_MAX
491 QEMU_IFLA_INFO_UNSPEC
,
494 QEMU_IFLA_INFO_XSTATS
,
495 QEMU_IFLA_INFO_SLAVE_KIND
,
496 QEMU_IFLA_INFO_SLAVE_DATA
,
497 QEMU___IFLA_INFO_MAX
,
501 QEMU_IFLA_INET_UNSPEC
,
503 QEMU___IFLA_INET_MAX
,
507 QEMU_IFLA_INET6_UNSPEC
,
508 QEMU_IFLA_INET6_FLAGS
,
509 QEMU_IFLA_INET6_CONF
,
510 QEMU_IFLA_INET6_STATS
,
511 QEMU_IFLA_INET6_MCAST
,
512 QEMU_IFLA_INET6_CACHEINFO
,
513 QEMU_IFLA_INET6_ICMP6STATS
,
514 QEMU_IFLA_INET6_TOKEN
,
515 QEMU_IFLA_INET6_ADDR_GEN_MODE
,
516 QEMU___IFLA_INET6_MAX
519 typedef abi_long (*TargetFdDataFunc
)(void *, size_t);
520 typedef abi_long (*TargetFdAddrFunc
)(void *, abi_ulong
, socklen_t
);
521 typedef struct TargetFdTrans
{
522 TargetFdDataFunc host_to_target_data
;
523 TargetFdDataFunc target_to_host_data
;
524 TargetFdAddrFunc target_to_host_addr
;
527 static TargetFdTrans
**target_fd_trans
;
529 static unsigned int target_fd_max
;
531 static TargetFdDataFunc
fd_trans_target_to_host_data(int fd
)
533 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
534 return target_fd_trans
[fd
]->target_to_host_data
;
539 static TargetFdDataFunc
fd_trans_host_to_target_data(int fd
)
541 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
542 return target_fd_trans
[fd
]->host_to_target_data
;
547 static TargetFdAddrFunc
fd_trans_target_to_host_addr(int fd
)
549 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
550 return target_fd_trans
[fd
]->target_to_host_addr
;
555 static void fd_trans_register(int fd
, TargetFdTrans
*trans
)
559 if (fd
>= target_fd_max
) {
560 oldmax
= target_fd_max
;
561 target_fd_max
= ((fd
>> 6) + 1) << 6; /* by slice of 64 entries */
562 target_fd_trans
= g_renew(TargetFdTrans
*,
563 target_fd_trans
, target_fd_max
);
564 memset((void *)(target_fd_trans
+ oldmax
), 0,
565 (target_fd_max
- oldmax
) * sizeof(TargetFdTrans
*));
567 target_fd_trans
[fd
] = trans
;
570 static void fd_trans_unregister(int fd
)
572 if (fd
>= 0 && fd
< target_fd_max
) {
573 target_fd_trans
[fd
] = NULL
;
577 static void fd_trans_dup(int oldfd
, int newfd
)
579 fd_trans_unregister(newfd
);
580 if (oldfd
< target_fd_max
&& target_fd_trans
[oldfd
]) {
581 fd_trans_register(newfd
, target_fd_trans
[oldfd
]);
/* getcwd() emulation helper: on success return the length of the path
 * INCLUDING the trailing NUL (matching the getcwd syscall's return
 * convention), on failure return -1 with errno set by getcwd().
 * (The error-path "return -1" was lost in the mangled extraction;
 * without it the function falls off the end — UB.)
 */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return -1;
    }
    return strlen(buf) + 1;
}
#ifdef TARGET_NR_utimensat
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Host kernel lacks utimensat: fail with ENOSYS so the caller can
 * report the syscall as unimplemented to the guest.
 * (The ENOSYS fallback body was lost in the mangled extraction.)
 */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
/* Host kernel lacks renameat2: fall back to plain renameat() when no
 * flags are requested, otherwise fail with ENOSYS.
 * (The flags check and ENOSYS path were lost in the mangled extraction.)
 */
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

/* Thin wrappers over the host libc inotify API; each is compiled only
 * when both the target syscall number and the host syscall exist.
 */
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
    return inotify_init();
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return inotify_add_watch(fd, pathname, mask);
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return inotify_rm_watch(fd, wd);
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
    return inotify_init1(flags);
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
664 #if defined(TARGET_NR_prlimit64)
665 #ifndef __NR_prlimit64
666 # define __NR_prlimit64 -1
668 #define __NR_sys_prlimit64 __NR_prlimit64
669 /* The glibc rlimit structure may not be that used by the underlying syscall */
670 struct host_rlimit64
{
674 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
675 const struct host_rlimit64
*, new_limit
,
676 struct host_rlimit64
*, old_limit
)
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

/* Claim the first free slot in g_posix_timers (marking it in-use with
 * the sentinel value 1) and return its index, or -1 when all 32 slots
 * are taken.  (The local declaration and both return statements were
 * lost in the mangled extraction.)
 */
static inline int next_free_host_timer(void)
{
    int k;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
#if defined(TARGET_ARM) && (TARGET_ABI_BITS == 32)
static inline int regpairs_aligned(void *cpu_env, int num)
{
    /* Alignment only applies when the guest uses the EABI calling convention */
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#elif defined(TARGET_SH4)
/* SH4 doesn't align register pairs, except for p{read,write}64 */
static inline int regpairs_aligned(void *cpu_env, int num)
{
    switch (num) {
    case TARGET_NR_pread64:
    case TARGET_NR_pwrite64:
        return 1;

    default:
        return 0;
    }
}
#elif defined(TARGET_XTENSA)
static inline int regpairs_aligned(void *cpu_env, int num) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env, int num) { return 0; }
#endif
730 #define ERRNO_TABLE_SIZE 1200
732 /* target_to_host_errno_table[] is initialized from
733 * host_to_target_errno_table[] in syscall_init(). */
734 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
738 * This list is the union of errno values overridden in asm-<arch>/errno.h
739 * minus the errnos that are not actually generic to all archs.
741 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
742 [EAGAIN
] = TARGET_EAGAIN
,
743 [EIDRM
] = TARGET_EIDRM
,
744 [ECHRNG
] = TARGET_ECHRNG
,
745 [EL2NSYNC
] = TARGET_EL2NSYNC
,
746 [EL3HLT
] = TARGET_EL3HLT
,
747 [EL3RST
] = TARGET_EL3RST
,
748 [ELNRNG
] = TARGET_ELNRNG
,
749 [EUNATCH
] = TARGET_EUNATCH
,
750 [ENOCSI
] = TARGET_ENOCSI
,
751 [EL2HLT
] = TARGET_EL2HLT
,
752 [EDEADLK
] = TARGET_EDEADLK
,
753 [ENOLCK
] = TARGET_ENOLCK
,
754 [EBADE
] = TARGET_EBADE
,
755 [EBADR
] = TARGET_EBADR
,
756 [EXFULL
] = TARGET_EXFULL
,
757 [ENOANO
] = TARGET_ENOANO
,
758 [EBADRQC
] = TARGET_EBADRQC
,
759 [EBADSLT
] = TARGET_EBADSLT
,
760 [EBFONT
] = TARGET_EBFONT
,
761 [ENOSTR
] = TARGET_ENOSTR
,
762 [ENODATA
] = TARGET_ENODATA
,
763 [ETIME
] = TARGET_ETIME
,
764 [ENOSR
] = TARGET_ENOSR
,
765 [ENONET
] = TARGET_ENONET
,
766 [ENOPKG
] = TARGET_ENOPKG
,
767 [EREMOTE
] = TARGET_EREMOTE
,
768 [ENOLINK
] = TARGET_ENOLINK
,
769 [EADV
] = TARGET_EADV
,
770 [ESRMNT
] = TARGET_ESRMNT
,
771 [ECOMM
] = TARGET_ECOMM
,
772 [EPROTO
] = TARGET_EPROTO
,
773 [EDOTDOT
] = TARGET_EDOTDOT
,
774 [EMULTIHOP
] = TARGET_EMULTIHOP
,
775 [EBADMSG
] = TARGET_EBADMSG
,
776 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
777 [EOVERFLOW
] = TARGET_EOVERFLOW
,
778 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
779 [EBADFD
] = TARGET_EBADFD
,
780 [EREMCHG
] = TARGET_EREMCHG
,
781 [ELIBACC
] = TARGET_ELIBACC
,
782 [ELIBBAD
] = TARGET_ELIBBAD
,
783 [ELIBSCN
] = TARGET_ELIBSCN
,
784 [ELIBMAX
] = TARGET_ELIBMAX
,
785 [ELIBEXEC
] = TARGET_ELIBEXEC
,
786 [EILSEQ
] = TARGET_EILSEQ
,
787 [ENOSYS
] = TARGET_ENOSYS
,
788 [ELOOP
] = TARGET_ELOOP
,
789 [ERESTART
] = TARGET_ERESTART
,
790 [ESTRPIPE
] = TARGET_ESTRPIPE
,
791 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
792 [EUSERS
] = TARGET_EUSERS
,
793 [ENOTSOCK
] = TARGET_ENOTSOCK
,
794 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
795 [EMSGSIZE
] = TARGET_EMSGSIZE
,
796 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
797 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
798 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
799 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
800 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
801 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
802 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
803 [EADDRINUSE
] = TARGET_EADDRINUSE
,
804 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
805 [ENETDOWN
] = TARGET_ENETDOWN
,
806 [ENETUNREACH
] = TARGET_ENETUNREACH
,
807 [ENETRESET
] = TARGET_ENETRESET
,
808 [ECONNABORTED
] = TARGET_ECONNABORTED
,
809 [ECONNRESET
] = TARGET_ECONNRESET
,
810 [ENOBUFS
] = TARGET_ENOBUFS
,
811 [EISCONN
] = TARGET_EISCONN
,
812 [ENOTCONN
] = TARGET_ENOTCONN
,
813 [EUCLEAN
] = TARGET_EUCLEAN
,
814 [ENOTNAM
] = TARGET_ENOTNAM
,
815 [ENAVAIL
] = TARGET_ENAVAIL
,
816 [EISNAM
] = TARGET_EISNAM
,
817 [EREMOTEIO
] = TARGET_EREMOTEIO
,
818 [EDQUOT
] = TARGET_EDQUOT
,
819 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
820 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
821 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
822 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
823 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
824 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
825 [EALREADY
] = TARGET_EALREADY
,
826 [EINPROGRESS
] = TARGET_EINPROGRESS
,
827 [ESTALE
] = TARGET_ESTALE
,
828 [ECANCELED
] = TARGET_ECANCELED
,
829 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
830 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
832 [ENOKEY
] = TARGET_ENOKEY
,
835 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
838 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
841 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
844 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
846 #ifdef ENOTRECOVERABLE
847 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
850 [ENOMSG
] = TARGET_ENOMSG
,
853 [ERFKILL
] = TARGET_ERFKILL
,
856 [EHWPOISON
] = TARGET_EHWPOISON
,
860 static inline int host_to_target_errno(int err
)
862 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
863 host_to_target_errno_table
[err
]) {
864 return host_to_target_errno_table
[err
];
869 static inline int target_to_host_errno(int err
)
871 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
872 target_to_host_errno_table
[err
]) {
873 return target_to_host_errno_table
[err
];
878 static inline abi_long
get_errno(abi_long ret
)
881 return -host_to_target_errno(errno
);
886 static inline int is_error(abi_long ret
)
888 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
891 const char *target_strerror(int err
)
893 if (err
== TARGET_ERESTARTSYS
) {
894 return "To be restarted";
896 if (err
== TARGET_QEMU_ESIGRETURN
) {
897 return "Successful exit from sigreturn";
900 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
903 return strerror(target_to_host_errno(err
));
906 #define safe_syscall0(type, name) \
907 static type safe_##name(void) \
909 return safe_syscall(__NR_##name); \
912 #define safe_syscall1(type, name, type1, arg1) \
913 static type safe_##name(type1 arg1) \
915 return safe_syscall(__NR_##name, arg1); \
918 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
919 static type safe_##name(type1 arg1, type2 arg2) \
921 return safe_syscall(__NR_##name, arg1, arg2); \
924 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
925 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
927 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
930 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
932 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
934 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
937 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
938 type4, arg4, type5, arg5) \
939 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
942 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
945 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
946 type4, arg4, type5, arg5, type6, arg6) \
947 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
948 type5 arg5, type6 arg6) \
950 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
953 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
954 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
955 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
956 int, flags
, mode_t
, mode
)
957 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
958 struct rusage
*, rusage
)
959 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
960 int, options
, struct rusage
*, rusage
)
961 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
962 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
963 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
964 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
965 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
967 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
968 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
970 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
971 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
972 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
973 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
974 safe_syscall2(int, tkill
, int, tid
, int, sig
)
975 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
976 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
977 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
978 safe_syscall5(ssize_t
, preadv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
979 unsigned long, pos_l
, unsigned long, pos_h
)
980 safe_syscall5(ssize_t
, pwritev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
981 unsigned long, pos_l
, unsigned long, pos_h
)
982 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
984 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
985 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
986 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
987 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
988 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
989 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
990 safe_syscall2(int, flock
, int, fd
, int, operation
)
991 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
992 const struct timespec
*, uts
, size_t, sigsetsize
)
993 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
995 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
996 struct timespec
*, rem
)
997 #ifdef TARGET_NR_clock_nanosleep
998 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
999 const struct timespec
*, req
, struct timespec
*, rem
)
1002 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
1004 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
1005 long, msgtype
, int, flags
)
1006 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
1007 unsigned, nsops
, const struct timespec
*, timeout
)
1009 /* This host kernel architecture uses a single ipc syscall; fake up
1010 * wrappers for the sub-operations to hide this implementation detail.
1011 * Annoyingly we can't include linux/ipc.h to get the constant definitions
1012 * for the call parameter because some structs in there conflict with the
1013 * sys/ipc.h ones. So we just define them here, and rely on them being
1014 * the same for all host architectures.
1016 #define Q_SEMTIMEDOP 4
1019 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
1021 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
1022 void *, ptr
, long, fifth
)
1023 static int safe_msgsnd(int msgid
, const void *msgp
, size_t sz
, int flags
)
1025 return safe_ipc(Q_IPCCALL(0, Q_MSGSND
), msgid
, sz
, flags
, (void *)msgp
, 0);
1027 static int safe_msgrcv(int msgid
, void *msgp
, size_t sz
, long type
, int flags
)
1029 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV
), msgid
, sz
, flags
, msgp
, type
);
1031 static int safe_semtimedop(int semid
, struct sembuf
*tsops
, unsigned nsops
,
1032 const struct timespec
*timeout
)
1034 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP
), semid
, nsops
, 0, tsops
,
1038 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1039 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
1040 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
1041 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
1042 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
1044 /* We do ioctl like this rather than via safe_syscall3 to preserve the
1045 * "third argument might be integer or pointer or not present" behaviour of
1046 * the libc function.
1048 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
1049 /* Similarly for fcntl. Note that callers must always:
1050 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
1051 * use the flock64 struct rather than unsuffixed flock
1052 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
1055 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
1057 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
1060 static inline int host_to_target_sock_type(int host_type
)
1064 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
1066 target_type
= TARGET_SOCK_DGRAM
;
1069 target_type
= TARGET_SOCK_STREAM
;
1072 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
1076 #if defined(SOCK_CLOEXEC)
1077 if (host_type
& SOCK_CLOEXEC
) {
1078 target_type
|= TARGET_SOCK_CLOEXEC
;
1082 #if defined(SOCK_NONBLOCK)
1083 if (host_type
& SOCK_NONBLOCK
) {
1084 target_type
|= TARGET_SOCK_NONBLOCK
;
1091 static abi_ulong target_brk
;
1092 static abi_ulong target_original_brk
;
1093 static abi_ulong brk_page
;
1095 void target_set_brk(abi_ulong new_brk
)
1097 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
1098 brk_page
= HOST_PAGE_ALIGN(target_brk
);
1101 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
1102 #define DEBUGF_BRK(message, args...)
1104 /* do_brk() must return target values and target errnos. */
1105 abi_long
do_brk(abi_ulong new_brk
)
1107 abi_long mapped_addr
;
1108 abi_ulong new_alloc_size
;
1110 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
1113 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
1116 if (new_brk
< target_original_brk
) {
1117 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
1122 /* If the new brk is less than the highest page reserved to the
1123 * target heap allocation, set it and we're almost done... */
1124 if (new_brk
<= brk_page
) {
1125 /* Heap contents are initialized to zero, as for anonymous
1127 if (new_brk
> target_brk
) {
1128 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
1130 target_brk
= new_brk
;
1131 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
1135 /* We need to allocate more memory after the brk... Note that
1136 * we don't use MAP_FIXED because that will map over the top of
1137 * any existing mapping (like the one with the host libc or qemu
1138 * itself); instead we treat "mapped but at wrong address" as
1139 * a failure and unmap again.
1141 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
1142 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
1143 PROT_READ
|PROT_WRITE
,
1144 MAP_ANON
|MAP_PRIVATE
, 0, 0));
1146 if (mapped_addr
== brk_page
) {
1147 /* Heap contents are initialized to zero, as for anonymous
1148 * mapped pages. Technically the new pages are already
1149 * initialized to zero since they *are* anonymous mapped
1150 * pages, however we have to take care with the contents that
1151 * come from the remaining part of the previous page: it may
1152 * contains garbage data due to a previous heap usage (grown
1153 * then shrunken). */
1154 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
1156 target_brk
= new_brk
;
1157 brk_page
= HOST_PAGE_ALIGN(target_brk
);
1158 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
1161 } else if (mapped_addr
!= -1) {
1162 /* Mapped but at wrong address, meaning there wasn't actually
1163 * enough space for this brk.
1165 target_munmap(mapped_addr
, new_alloc_size
);
1167 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
1170 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
1173 #if defined(TARGET_ALPHA)
1174 /* We (partially) emulate OSF/1 on Alpha, which requires we
1175 return a proper errno, not an unchanged brk value. */
1176 return -TARGET_ENOMEM
;
1178 /* For everything else, return the previous break. */
1182 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
1183 abi_ulong target_fds_addr
,
1187 abi_ulong b
, *target_fds
;
1189 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1190 if (!(target_fds
= lock_user(VERIFY_READ
,
1192 sizeof(abi_ulong
) * nw
,
1194 return -TARGET_EFAULT
;
1198 for (i
= 0; i
< nw
; i
++) {
1199 /* grab the abi_ulong */
1200 __get_user(b
, &target_fds
[i
]);
1201 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1202 /* check the bit inside the abi_ulong */
1209 unlock_user(target_fds
, target_fds_addr
, 0);
1214 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
1215 abi_ulong target_fds_addr
,
1218 if (target_fds_addr
) {
1219 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
1220 return -TARGET_EFAULT
;
1228 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
1234 abi_ulong
*target_fds
;
1236 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1237 if (!(target_fds
= lock_user(VERIFY_WRITE
,
1239 sizeof(abi_ulong
) * nw
,
1241 return -TARGET_EFAULT
;
1244 for (i
= 0; i
< nw
; i
++) {
1246 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1247 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
1250 __put_user(v
, &target_fds
[i
]);
1253 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
1258 #if defined(__alpha__)
1259 #define HOST_HZ 1024
1264 static inline abi_long
host_to_target_clock_t(long ticks
)
1266 #if HOST_HZ == TARGET_HZ
1269 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1273 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1274 const struct rusage
*rusage
)
1276 struct target_rusage
*target_rusage
;
1278 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1279 return -TARGET_EFAULT
;
1280 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1281 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1282 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1283 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1284 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1285 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1286 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1287 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1288 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1289 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1290 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1291 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1292 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1293 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1294 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1295 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1296 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1297 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1298 unlock_user_struct(target_rusage
, target_addr
, 1);
1303 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1305 abi_ulong target_rlim_swap
;
1308 target_rlim_swap
= tswapal(target_rlim
);
1309 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1310 return RLIM_INFINITY
;
1312 result
= target_rlim_swap
;
1313 if (target_rlim_swap
!= (rlim_t
)result
)
1314 return RLIM_INFINITY
;
1319 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1321 abi_ulong target_rlim_swap
;
1324 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1325 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1327 target_rlim_swap
= rlim
;
1328 result
= tswapal(target_rlim_swap
);
1333 static inline int target_to_host_resource(int code
)
1336 case TARGET_RLIMIT_AS
:
1338 case TARGET_RLIMIT_CORE
:
1340 case TARGET_RLIMIT_CPU
:
1342 case TARGET_RLIMIT_DATA
:
1344 case TARGET_RLIMIT_FSIZE
:
1345 return RLIMIT_FSIZE
;
1346 case TARGET_RLIMIT_LOCKS
:
1347 return RLIMIT_LOCKS
;
1348 case TARGET_RLIMIT_MEMLOCK
:
1349 return RLIMIT_MEMLOCK
;
1350 case TARGET_RLIMIT_MSGQUEUE
:
1351 return RLIMIT_MSGQUEUE
;
1352 case TARGET_RLIMIT_NICE
:
1354 case TARGET_RLIMIT_NOFILE
:
1355 return RLIMIT_NOFILE
;
1356 case TARGET_RLIMIT_NPROC
:
1357 return RLIMIT_NPROC
;
1358 case TARGET_RLIMIT_RSS
:
1360 case TARGET_RLIMIT_RTPRIO
:
1361 return RLIMIT_RTPRIO
;
1362 case TARGET_RLIMIT_SIGPENDING
:
1363 return RLIMIT_SIGPENDING
;
1364 case TARGET_RLIMIT_STACK
:
1365 return RLIMIT_STACK
;
1371 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1372 abi_ulong target_tv_addr
)
1374 struct target_timeval
*target_tv
;
1376 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
1377 return -TARGET_EFAULT
;
1379 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1380 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1382 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1387 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1388 const struct timeval
*tv
)
1390 struct target_timeval
*target_tv
;
1392 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
1393 return -TARGET_EFAULT
;
1395 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1396 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1398 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1403 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1404 abi_ulong target_tz_addr
)
1406 struct target_timezone
*target_tz
;
1408 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1409 return -TARGET_EFAULT
;
1412 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1413 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1415 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1420 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1423 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1424 abi_ulong target_mq_attr_addr
)
1426 struct target_mq_attr
*target_mq_attr
;
1428 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1429 target_mq_attr_addr
, 1))
1430 return -TARGET_EFAULT
;
1432 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1433 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1434 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1435 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1437 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1442 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1443 const struct mq_attr
*attr
)
1445 struct target_mq_attr
*target_mq_attr
;
1447 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1448 target_mq_attr_addr
, 0))
1449 return -TARGET_EFAULT
;
1451 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1452 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1453 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1454 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1456 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1462 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1463 /* do_select() must return target values and target errnos. */
1464 static abi_long
do_select(int n
,
1465 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1466 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1468 fd_set rfds
, wfds
, efds
;
1469 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1471 struct timespec ts
, *ts_ptr
;
1474 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1478 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1482 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1487 if (target_tv_addr
) {
1488 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1489 return -TARGET_EFAULT
;
1490 ts
.tv_sec
= tv
.tv_sec
;
1491 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1497 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1500 if (!is_error(ret
)) {
1501 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1502 return -TARGET_EFAULT
;
1503 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1504 return -TARGET_EFAULT
;
1505 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1506 return -TARGET_EFAULT
;
1508 if (target_tv_addr
) {
1509 tv
.tv_sec
= ts
.tv_sec
;
1510 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1511 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1512 return -TARGET_EFAULT
;
1520 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1521 static abi_long
do_old_select(abi_ulong arg1
)
1523 struct target_sel_arg_struct
*sel
;
1524 abi_ulong inp
, outp
, exp
, tvp
;
1527 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1)) {
1528 return -TARGET_EFAULT
;
1531 nsel
= tswapal(sel
->n
);
1532 inp
= tswapal(sel
->inp
);
1533 outp
= tswapal(sel
->outp
);
1534 exp
= tswapal(sel
->exp
);
1535 tvp
= tswapal(sel
->tvp
);
1537 unlock_user_struct(sel
, arg1
, 0);
1539 return do_select(nsel
, inp
, outp
, exp
, tvp
);
1544 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1547 return pipe2(host_pipe
, flags
);
1553 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1554 int flags
, int is_pipe2
)
1558 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1561 return get_errno(ret
);
1563 /* Several targets have special calling conventions for the original
1564 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1566 #if defined(TARGET_ALPHA)
1567 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1568 return host_pipe
[0];
1569 #elif defined(TARGET_MIPS)
1570 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1571 return host_pipe
[0];
1572 #elif defined(TARGET_SH4)
1573 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1574 return host_pipe
[0];
1575 #elif defined(TARGET_SPARC)
1576 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1577 return host_pipe
[0];
1581 if (put_user_s32(host_pipe
[0], pipedes
)
1582 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1583 return -TARGET_EFAULT
;
1584 return get_errno(ret
);
1587 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1588 abi_ulong target_addr
,
1591 struct target_ip_mreqn
*target_smreqn
;
1593 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1595 return -TARGET_EFAULT
;
1596 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1597 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1598 if (len
== sizeof(struct target_ip_mreqn
))
1599 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1600 unlock_user(target_smreqn
, target_addr
, 0);
1605 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1606 abi_ulong target_addr
,
1609 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1610 sa_family_t sa_family
;
1611 struct target_sockaddr
*target_saddr
;
1613 if (fd_trans_target_to_host_addr(fd
)) {
1614 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1617 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1619 return -TARGET_EFAULT
;
1621 sa_family
= tswap16(target_saddr
->sa_family
);
1623 /* Oops. The caller might send a incomplete sun_path; sun_path
1624 * must be terminated by \0 (see the manual page), but
1625 * unfortunately it is quite common to specify sockaddr_un
1626 * length as "strlen(x->sun_path)" while it should be
1627 * "strlen(...) + 1". We'll fix that here if needed.
1628 * Linux kernel has a similar feature.
1631 if (sa_family
== AF_UNIX
) {
1632 if (len
< unix_maxlen
&& len
> 0) {
1633 char *cp
= (char*)target_saddr
;
1635 if ( cp
[len
-1] && !cp
[len
] )
1638 if (len
> unix_maxlen
)
1642 memcpy(addr
, target_saddr
, len
);
1643 addr
->sa_family
= sa_family
;
1644 if (sa_family
== AF_NETLINK
) {
1645 struct sockaddr_nl
*nladdr
;
1647 nladdr
= (struct sockaddr_nl
*)addr
;
1648 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1649 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1650 } else if (sa_family
== AF_PACKET
) {
1651 struct target_sockaddr_ll
*lladdr
;
1653 lladdr
= (struct target_sockaddr_ll
*)addr
;
1654 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1655 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1657 unlock_user(target_saddr
, target_addr
, 0);
1662 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1663 struct sockaddr
*addr
,
1666 struct target_sockaddr
*target_saddr
;
1673 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1675 return -TARGET_EFAULT
;
1676 memcpy(target_saddr
, addr
, len
);
1677 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1678 sizeof(target_saddr
->sa_family
)) {
1679 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1681 if (addr
->sa_family
== AF_NETLINK
&& len
>= sizeof(struct sockaddr_nl
)) {
1682 struct sockaddr_nl
*target_nl
= (struct sockaddr_nl
*)target_saddr
;
1683 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1684 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1685 } else if (addr
->sa_family
== AF_PACKET
) {
1686 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1687 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1688 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1689 } else if (addr
->sa_family
== AF_INET6
&&
1690 len
>= sizeof(struct target_sockaddr_in6
)) {
1691 struct target_sockaddr_in6
*target_in6
=
1692 (struct target_sockaddr_in6
*)target_saddr
;
1693 target_in6
->sin6_scope_id
= tswap16(target_in6
->sin6_scope_id
);
1695 unlock_user(target_saddr
, target_addr
, len
);
1700 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1701 struct target_msghdr
*target_msgh
)
1703 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1704 abi_long msg_controllen
;
1705 abi_ulong target_cmsg_addr
;
1706 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1707 socklen_t space
= 0;
1709 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1710 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1712 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1713 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1714 target_cmsg_start
= target_cmsg
;
1716 return -TARGET_EFAULT
;
1718 while (cmsg
&& target_cmsg
) {
1719 void *data
= CMSG_DATA(cmsg
);
1720 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1722 int len
= tswapal(target_cmsg
->cmsg_len
)
1723 - sizeof(struct target_cmsghdr
);
1725 space
+= CMSG_SPACE(len
);
1726 if (space
> msgh
->msg_controllen
) {
1727 space
-= CMSG_SPACE(len
);
1728 /* This is a QEMU bug, since we allocated the payload
1729 * area ourselves (unlike overflow in host-to-target
1730 * conversion, which is just the guest giving us a buffer
1731 * that's too small). It can't happen for the payload types
1732 * we currently support; if it becomes an issue in future
1733 * we would need to improve our allocation strategy to
1734 * something more intelligent than "twice the size of the
1735 * target buffer we're reading from".
1737 gemu_log("Host cmsg overflow\n");
1741 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1742 cmsg
->cmsg_level
= SOL_SOCKET
;
1744 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1746 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1747 cmsg
->cmsg_len
= CMSG_LEN(len
);
1749 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1750 int *fd
= (int *)data
;
1751 int *target_fd
= (int *)target_data
;
1752 int i
, numfds
= len
/ sizeof(int);
1754 for (i
= 0; i
< numfds
; i
++) {
1755 __get_user(fd
[i
], target_fd
+ i
);
1757 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1758 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1759 struct ucred
*cred
= (struct ucred
*)data
;
1760 struct target_ucred
*target_cred
=
1761 (struct target_ucred
*)target_data
;
1763 __get_user(cred
->pid
, &target_cred
->pid
);
1764 __get_user(cred
->uid
, &target_cred
->uid
);
1765 __get_user(cred
->gid
, &target_cred
->gid
);
1767 gemu_log("Unsupported ancillary data: %d/%d\n",
1768 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1769 memcpy(data
, target_data
, len
);
1772 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1773 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1776 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1778 msgh
->msg_controllen
= space
;
1782 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1783 struct msghdr
*msgh
)
1785 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1786 abi_long msg_controllen
;
1787 abi_ulong target_cmsg_addr
;
1788 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1789 socklen_t space
= 0;
1791 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1792 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1794 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1795 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1796 target_cmsg_start
= target_cmsg
;
1798 return -TARGET_EFAULT
;
1800 while (cmsg
&& target_cmsg
) {
1801 void *data
= CMSG_DATA(cmsg
);
1802 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1804 int len
= cmsg
->cmsg_len
- sizeof(struct cmsghdr
);
1805 int tgt_len
, tgt_space
;
1807 /* We never copy a half-header but may copy half-data;
1808 * this is Linux's behaviour in put_cmsg(). Note that
1809 * truncation here is a guest problem (which we report
1810 * to the guest via the CTRUNC bit), unlike truncation
1811 * in target_to_host_cmsg, which is a QEMU bug.
1813 if (msg_controllen
< sizeof(struct target_cmsghdr
)) {
1814 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1818 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1819 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1821 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1823 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1825 /* Payload types which need a different size of payload on
1826 * the target must adjust tgt_len here.
1828 switch (cmsg
->cmsg_level
) {
1830 switch (cmsg
->cmsg_type
) {
1832 tgt_len
= sizeof(struct target_timeval
);
1842 if (msg_controllen
< TARGET_CMSG_LEN(tgt_len
)) {
1843 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1844 tgt_len
= msg_controllen
- sizeof(struct target_cmsghdr
);
1847 /* We must now copy-and-convert len bytes of payload
1848 * into tgt_len bytes of destination space. Bear in mind
1849 * that in both source and destination we may be dealing
1850 * with a truncated value!
1852 switch (cmsg
->cmsg_level
) {
1854 switch (cmsg
->cmsg_type
) {
1857 int *fd
= (int *)data
;
1858 int *target_fd
= (int *)target_data
;
1859 int i
, numfds
= tgt_len
/ sizeof(int);
1861 for (i
= 0; i
< numfds
; i
++) {
1862 __put_user(fd
[i
], target_fd
+ i
);
1868 struct timeval
*tv
= (struct timeval
*)data
;
1869 struct target_timeval
*target_tv
=
1870 (struct target_timeval
*)target_data
;
1872 if (len
!= sizeof(struct timeval
) ||
1873 tgt_len
!= sizeof(struct target_timeval
)) {
1877 /* copy struct timeval to target */
1878 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1879 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1882 case SCM_CREDENTIALS
:
1884 struct ucred
*cred
= (struct ucred
*)data
;
1885 struct target_ucred
*target_cred
=
1886 (struct target_ucred
*)target_data
;
1888 __put_user(cred
->pid
, &target_cred
->pid
);
1889 __put_user(cred
->uid
, &target_cred
->uid
);
1890 __put_user(cred
->gid
, &target_cred
->gid
);
1899 switch (cmsg
->cmsg_type
) {
1902 uint32_t *v
= (uint32_t *)data
;
1903 uint32_t *t_int
= (uint32_t *)target_data
;
1905 if (len
!= sizeof(uint32_t) ||
1906 tgt_len
!= sizeof(uint32_t)) {
1909 __put_user(*v
, t_int
);
1915 struct sock_extended_err ee
;
1916 struct sockaddr_in offender
;
1918 struct errhdr_t
*errh
= (struct errhdr_t
*)data
;
1919 struct errhdr_t
*target_errh
=
1920 (struct errhdr_t
*)target_data
;
1922 if (len
!= sizeof(struct errhdr_t
) ||
1923 tgt_len
!= sizeof(struct errhdr_t
)) {
1926 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1927 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1928 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1929 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1930 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1931 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1932 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1933 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1934 (void *) &errh
->offender
, sizeof(errh
->offender
));
1943 switch (cmsg
->cmsg_type
) {
1946 uint32_t *v
= (uint32_t *)data
;
1947 uint32_t *t_int
= (uint32_t *)target_data
;
1949 if (len
!= sizeof(uint32_t) ||
1950 tgt_len
!= sizeof(uint32_t)) {
1953 __put_user(*v
, t_int
);
1959 struct sock_extended_err ee
;
1960 struct sockaddr_in6 offender
;
1962 struct errhdr6_t
*errh
= (struct errhdr6_t
*)data
;
1963 struct errhdr6_t
*target_errh
=
1964 (struct errhdr6_t
*)target_data
;
1966 if (len
!= sizeof(struct errhdr6_t
) ||
1967 tgt_len
!= sizeof(struct errhdr6_t
)) {
1970 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1971 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1972 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1973 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1974 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1975 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1976 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1977 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1978 (void *) &errh
->offender
, sizeof(errh
->offender
));
1988 gemu_log("Unsupported ancillary data: %d/%d\n",
1989 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1990 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1991 if (tgt_len
> len
) {
1992 memset(target_data
+ len
, 0, tgt_len
- len
);
1996 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(tgt_len
));
1997 tgt_space
= TARGET_CMSG_SPACE(tgt_len
);
1998 if (msg_controllen
< tgt_space
) {
1999 tgt_space
= msg_controllen
;
2001 msg_controllen
-= tgt_space
;
2003 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
2004 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
2007 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
2009 target_msgh
->msg_controllen
= tswapal(space
);
2013 static void tswap_nlmsghdr(struct nlmsghdr
*nlh
)
2015 nlh
->nlmsg_len
= tswap32(nlh
->nlmsg_len
);
2016 nlh
->nlmsg_type
= tswap16(nlh
->nlmsg_type
);
2017 nlh
->nlmsg_flags
= tswap16(nlh
->nlmsg_flags
);
2018 nlh
->nlmsg_seq
= tswap32(nlh
->nlmsg_seq
);
2019 nlh
->nlmsg_pid
= tswap32(nlh
->nlmsg_pid
);
2022 static abi_long
host_to_target_for_each_nlmsg(struct nlmsghdr
*nlh
,
2024 abi_long (*host_to_target_nlmsg
)
2025 (struct nlmsghdr
*))
2030 while (len
> sizeof(struct nlmsghdr
)) {
2032 nlmsg_len
= nlh
->nlmsg_len
;
2033 if (nlmsg_len
< sizeof(struct nlmsghdr
) ||
2038 switch (nlh
->nlmsg_type
) {
2040 tswap_nlmsghdr(nlh
);
2046 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
2047 e
->error
= tswap32(e
->error
);
2048 tswap_nlmsghdr(&e
->msg
);
2049 tswap_nlmsghdr(nlh
);
2053 ret
= host_to_target_nlmsg(nlh
);
2055 tswap_nlmsghdr(nlh
);
2060 tswap_nlmsghdr(nlh
);
2061 len
-= NLMSG_ALIGN(nlmsg_len
);
2062 nlh
= (struct nlmsghdr
*)(((char*)nlh
) + NLMSG_ALIGN(nlmsg_len
));
2067 static abi_long
target_to_host_for_each_nlmsg(struct nlmsghdr
*nlh
,
2069 abi_long (*target_to_host_nlmsg
)
2070 (struct nlmsghdr
*))
2074 while (len
> sizeof(struct nlmsghdr
)) {
2075 if (tswap32(nlh
->nlmsg_len
) < sizeof(struct nlmsghdr
) ||
2076 tswap32(nlh
->nlmsg_len
) > len
) {
2079 tswap_nlmsghdr(nlh
);
2080 switch (nlh
->nlmsg_type
) {
2087 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
2088 e
->error
= tswap32(e
->error
);
2089 tswap_nlmsghdr(&e
->msg
);
2093 ret
= target_to_host_nlmsg(nlh
);
2098 len
-= NLMSG_ALIGN(nlh
->nlmsg_len
);
2099 nlh
= (struct nlmsghdr
*)(((char *)nlh
) + NLMSG_ALIGN(nlh
->nlmsg_len
));
2104 #ifdef CONFIG_RTNETLINK
2105 static abi_long
host_to_target_for_each_nlattr(struct nlattr
*nlattr
,
2106 size_t len
, void *context
,
2107 abi_long (*host_to_target_nlattr
)
2111 unsigned short nla_len
;
2114 while (len
> sizeof(struct nlattr
)) {
2115 nla_len
= nlattr
->nla_len
;
2116 if (nla_len
< sizeof(struct nlattr
) ||
2120 ret
= host_to_target_nlattr(nlattr
, context
);
2121 nlattr
->nla_len
= tswap16(nlattr
->nla_len
);
2122 nlattr
->nla_type
= tswap16(nlattr
->nla_type
);
2126 len
-= NLA_ALIGN(nla_len
);
2127 nlattr
= (struct nlattr
*)(((char *)nlattr
) + NLA_ALIGN(nla_len
));
2132 static abi_long
host_to_target_for_each_rtattr(struct rtattr
*rtattr
,
2134 abi_long (*host_to_target_rtattr
)
2137 unsigned short rta_len
;
2140 while (len
> sizeof(struct rtattr
)) {
2141 rta_len
= rtattr
->rta_len
;
2142 if (rta_len
< sizeof(struct rtattr
) ||
2146 ret
= host_to_target_rtattr(rtattr
);
2147 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
2148 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
2152 len
-= RTA_ALIGN(rta_len
);
2153 rtattr
= (struct rtattr
*)(((char *)rtattr
) + RTA_ALIGN(rta_len
));
2158 #define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
2160 static abi_long
host_to_target_data_bridge_nlattr(struct nlattr
*nlattr
,
2167 switch (nlattr
->nla_type
) {
2169 case QEMU_IFLA_BR_FDB_FLUSH
:
2172 case QEMU_IFLA_BR_GROUP_ADDR
:
2175 case QEMU_IFLA_BR_VLAN_FILTERING
:
2176 case QEMU_IFLA_BR_TOPOLOGY_CHANGE
:
2177 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED
:
2178 case QEMU_IFLA_BR_MCAST_ROUTER
:
2179 case QEMU_IFLA_BR_MCAST_SNOOPING
:
2180 case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR
:
2181 case QEMU_IFLA_BR_MCAST_QUERIER
:
2182 case QEMU_IFLA_BR_NF_CALL_IPTABLES
:
2183 case QEMU_IFLA_BR_NF_CALL_IP6TABLES
:
2184 case QEMU_IFLA_BR_NF_CALL_ARPTABLES
:
2187 case QEMU_IFLA_BR_PRIORITY
:
2188 case QEMU_IFLA_BR_VLAN_PROTOCOL
:
2189 case QEMU_IFLA_BR_GROUP_FWD_MASK
:
2190 case QEMU_IFLA_BR_ROOT_PORT
:
2191 case QEMU_IFLA_BR_VLAN_DEFAULT_PVID
:
2192 u16
= NLA_DATA(nlattr
);
2193 *u16
= tswap16(*u16
);
2196 case QEMU_IFLA_BR_FORWARD_DELAY
:
2197 case QEMU_IFLA_BR_HELLO_TIME
:
2198 case QEMU_IFLA_BR_MAX_AGE
:
2199 case QEMU_IFLA_BR_AGEING_TIME
:
2200 case QEMU_IFLA_BR_STP_STATE
:
2201 case QEMU_IFLA_BR_ROOT_PATH_COST
:
2202 case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY
:
2203 case QEMU_IFLA_BR_MCAST_HASH_MAX
:
2204 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT
:
2205 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT
:
2206 u32
= NLA_DATA(nlattr
);
2207 *u32
= tswap32(*u32
);
2210 case QEMU_IFLA_BR_HELLO_TIMER
:
2211 case QEMU_IFLA_BR_TCN_TIMER
:
2212 case QEMU_IFLA_BR_GC_TIMER
:
2213 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER
:
2214 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL
:
2215 case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL
:
2216 case QEMU_IFLA_BR_MCAST_QUERIER_INTVL
:
2217 case QEMU_IFLA_BR_MCAST_QUERY_INTVL
:
2218 case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
:
2219 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL
:
2220 u64
= NLA_DATA(nlattr
);
2221 *u64
= tswap64(*u64
);
2223 /* ifla_bridge_id: uin8_t[] */
2224 case QEMU_IFLA_BR_ROOT_ID
:
2225 case QEMU_IFLA_BR_BRIDGE_ID
:
2228 gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr
->nla_type
);
2234 static abi_long
host_to_target_slave_data_bridge_nlattr(struct nlattr
*nlattr
,
2241 switch (nlattr
->nla_type
) {
2243 case QEMU_IFLA_BRPORT_STATE
:
2244 case QEMU_IFLA_BRPORT_MODE
:
2245 case QEMU_IFLA_BRPORT_GUARD
:
2246 case QEMU_IFLA_BRPORT_PROTECT
:
2247 case QEMU_IFLA_BRPORT_FAST_LEAVE
:
2248 case QEMU_IFLA_BRPORT_LEARNING
:
2249 case QEMU_IFLA_BRPORT_UNICAST_FLOOD
:
2250 case QEMU_IFLA_BRPORT_PROXYARP
:
2251 case QEMU_IFLA_BRPORT_LEARNING_SYNC
:
2252 case QEMU_IFLA_BRPORT_PROXYARP_WIFI
:
2253 case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
:
2254 case QEMU_IFLA_BRPORT_CONFIG_PENDING
:
2255 case QEMU_IFLA_BRPORT_MULTICAST_ROUTER
:
2258 case QEMU_IFLA_BRPORT_PRIORITY
:
2259 case QEMU_IFLA_BRPORT_DESIGNATED_PORT
:
2260 case QEMU_IFLA_BRPORT_DESIGNATED_COST
:
2261 case QEMU_IFLA_BRPORT_ID
:
2262 case QEMU_IFLA_BRPORT_NO
:
2263 u16
= NLA_DATA(nlattr
);
2264 *u16
= tswap16(*u16
);
2267 case QEMU_IFLA_BRPORT_COST
:
2268 u32
= NLA_DATA(nlattr
);
2269 *u32
= tswap32(*u32
);
2272 case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER
:
2273 case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER
:
2274 case QEMU_IFLA_BRPORT_HOLD_TIMER
:
2275 u64
= NLA_DATA(nlattr
);
2276 *u64
= tswap64(*u64
);
2278 /* ifla_bridge_id: uint8_t[] */
2279 case QEMU_IFLA_BRPORT_ROOT_ID
:
2280 case QEMU_IFLA_BRPORT_BRIDGE_ID
:
2283 gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr
->nla_type
);
2289 struct linkinfo_context
{
2296 static abi_long
host_to_target_data_linkinfo_nlattr(struct nlattr
*nlattr
,
2299 struct linkinfo_context
*li_context
= context
;
2301 switch (nlattr
->nla_type
) {
2303 case QEMU_IFLA_INFO_KIND
:
2304 li_context
->name
= NLA_DATA(nlattr
);
2305 li_context
->len
= nlattr
->nla_len
- NLA_HDRLEN
;
2307 case QEMU_IFLA_INFO_SLAVE_KIND
:
2308 li_context
->slave_name
= NLA_DATA(nlattr
);
2309 li_context
->slave_len
= nlattr
->nla_len
- NLA_HDRLEN
;
2312 case QEMU_IFLA_INFO_XSTATS
:
2313 /* FIXME: only used by CAN */
2316 case QEMU_IFLA_INFO_DATA
:
2317 if (strncmp(li_context
->name
, "bridge",
2318 li_context
->len
) == 0) {
2319 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
2322 host_to_target_data_bridge_nlattr
);
2324 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context
->name
);
2327 case QEMU_IFLA_INFO_SLAVE_DATA
:
2328 if (strncmp(li_context
->slave_name
, "bridge",
2329 li_context
->slave_len
) == 0) {
2330 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
2333 host_to_target_slave_data_bridge_nlattr
);
2335 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2336 li_context
->slave_name
);
2340 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr
->nla_type
);
2347 static abi_long
host_to_target_data_inet_nlattr(struct nlattr
*nlattr
,
2353 switch (nlattr
->nla_type
) {
2354 case QEMU_IFLA_INET_CONF
:
2355 u32
= NLA_DATA(nlattr
);
2356 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u32
);
2358 u32
[i
] = tswap32(u32
[i
]);
2362 gemu_log("Unknown host AF_INET type: %d\n", nlattr
->nla_type
);
2367 static abi_long
host_to_target_data_inet6_nlattr(struct nlattr
*nlattr
,
2372 struct ifla_cacheinfo
*ci
;
2375 switch (nlattr
->nla_type
) {
2377 case QEMU_IFLA_INET6_TOKEN
:
2380 case QEMU_IFLA_INET6_ADDR_GEN_MODE
:
2383 case QEMU_IFLA_INET6_FLAGS
:
2384 u32
= NLA_DATA(nlattr
);
2385 *u32
= tswap32(*u32
);
2388 case QEMU_IFLA_INET6_CONF
:
2389 u32
= NLA_DATA(nlattr
);
2390 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u32
);
2392 u32
[i
] = tswap32(u32
[i
]);
2395 /* ifla_cacheinfo */
2396 case QEMU_IFLA_INET6_CACHEINFO
:
2397 ci
= NLA_DATA(nlattr
);
2398 ci
->max_reasm_len
= tswap32(ci
->max_reasm_len
);
2399 ci
->tstamp
= tswap32(ci
->tstamp
);
2400 ci
->reachable_time
= tswap32(ci
->reachable_time
);
2401 ci
->retrans_time
= tswap32(ci
->retrans_time
);
2404 case QEMU_IFLA_INET6_STATS
:
2405 case QEMU_IFLA_INET6_ICMP6STATS
:
2406 u64
= NLA_DATA(nlattr
);
2407 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u64
);
2409 u64
[i
] = tswap64(u64
[i
]);
2413 gemu_log("Unknown host AF_INET6 type: %d\n", nlattr
->nla_type
);
2418 static abi_long
host_to_target_data_spec_nlattr(struct nlattr
*nlattr
,
2421 switch (nlattr
->nla_type
) {
2423 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2425 host_to_target_data_inet_nlattr
);
2427 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2429 host_to_target_data_inet6_nlattr
);
2431 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr
->nla_type
);
2437 static abi_long
host_to_target_data_link_rtattr(struct rtattr
*rtattr
)
2440 struct rtnl_link_stats
*st
;
2441 struct rtnl_link_stats64
*st64
;
2442 struct rtnl_link_ifmap
*map
;
2443 struct linkinfo_context li_context
;
2445 switch (rtattr
->rta_type
) {
2447 case QEMU_IFLA_ADDRESS
:
2448 case QEMU_IFLA_BROADCAST
:
2450 case QEMU_IFLA_IFNAME
:
2451 case QEMU_IFLA_QDISC
:
2454 case QEMU_IFLA_OPERSTATE
:
2455 case QEMU_IFLA_LINKMODE
:
2456 case QEMU_IFLA_CARRIER
:
2457 case QEMU_IFLA_PROTO_DOWN
:
2461 case QEMU_IFLA_LINK
:
2462 case QEMU_IFLA_WEIGHT
:
2463 case QEMU_IFLA_TXQLEN
:
2464 case QEMU_IFLA_CARRIER_CHANGES
:
2465 case QEMU_IFLA_NUM_RX_QUEUES
:
2466 case QEMU_IFLA_NUM_TX_QUEUES
:
2467 case QEMU_IFLA_PROMISCUITY
:
2468 case QEMU_IFLA_EXT_MASK
:
2469 case QEMU_IFLA_LINK_NETNSID
:
2470 case QEMU_IFLA_GROUP
:
2471 case QEMU_IFLA_MASTER
:
2472 case QEMU_IFLA_NUM_VF
:
2473 case QEMU_IFLA_GSO_MAX_SEGS
:
2474 case QEMU_IFLA_GSO_MAX_SIZE
:
2475 u32
= RTA_DATA(rtattr
);
2476 *u32
= tswap32(*u32
);
2478 /* struct rtnl_link_stats */
2479 case QEMU_IFLA_STATS
:
2480 st
= RTA_DATA(rtattr
);
2481 st
->rx_packets
= tswap32(st
->rx_packets
);
2482 st
->tx_packets
= tswap32(st
->tx_packets
);
2483 st
->rx_bytes
= tswap32(st
->rx_bytes
);
2484 st
->tx_bytes
= tswap32(st
->tx_bytes
);
2485 st
->rx_errors
= tswap32(st
->rx_errors
);
2486 st
->tx_errors
= tswap32(st
->tx_errors
);
2487 st
->rx_dropped
= tswap32(st
->rx_dropped
);
2488 st
->tx_dropped
= tswap32(st
->tx_dropped
);
2489 st
->multicast
= tswap32(st
->multicast
);
2490 st
->collisions
= tswap32(st
->collisions
);
2492 /* detailed rx_errors: */
2493 st
->rx_length_errors
= tswap32(st
->rx_length_errors
);
2494 st
->rx_over_errors
= tswap32(st
->rx_over_errors
);
2495 st
->rx_crc_errors
= tswap32(st
->rx_crc_errors
);
2496 st
->rx_frame_errors
= tswap32(st
->rx_frame_errors
);
2497 st
->rx_fifo_errors
= tswap32(st
->rx_fifo_errors
);
2498 st
->rx_missed_errors
= tswap32(st
->rx_missed_errors
);
2500 /* detailed tx_errors */
2501 st
->tx_aborted_errors
= tswap32(st
->tx_aborted_errors
);
2502 st
->tx_carrier_errors
= tswap32(st
->tx_carrier_errors
);
2503 st
->tx_fifo_errors
= tswap32(st
->tx_fifo_errors
);
2504 st
->tx_heartbeat_errors
= tswap32(st
->tx_heartbeat_errors
);
2505 st
->tx_window_errors
= tswap32(st
->tx_window_errors
);
2508 st
->rx_compressed
= tswap32(st
->rx_compressed
);
2509 st
->tx_compressed
= tswap32(st
->tx_compressed
);
2511 /* struct rtnl_link_stats64 */
2512 case QEMU_IFLA_STATS64
:
2513 st64
= RTA_DATA(rtattr
);
2514 st64
->rx_packets
= tswap64(st64
->rx_packets
);
2515 st64
->tx_packets
= tswap64(st64
->tx_packets
);
2516 st64
->rx_bytes
= tswap64(st64
->rx_bytes
);
2517 st64
->tx_bytes
= tswap64(st64
->tx_bytes
);
2518 st64
->rx_errors
= tswap64(st64
->rx_errors
);
2519 st64
->tx_errors
= tswap64(st64
->tx_errors
);
2520 st64
->rx_dropped
= tswap64(st64
->rx_dropped
);
2521 st64
->tx_dropped
= tswap64(st64
->tx_dropped
);
2522 st64
->multicast
= tswap64(st64
->multicast
);
2523 st64
->collisions
= tswap64(st64
->collisions
);
2525 /* detailed rx_errors: */
2526 st64
->rx_length_errors
= tswap64(st64
->rx_length_errors
);
2527 st64
->rx_over_errors
= tswap64(st64
->rx_over_errors
);
2528 st64
->rx_crc_errors
= tswap64(st64
->rx_crc_errors
);
2529 st64
->rx_frame_errors
= tswap64(st64
->rx_frame_errors
);
2530 st64
->rx_fifo_errors
= tswap64(st64
->rx_fifo_errors
);
2531 st64
->rx_missed_errors
= tswap64(st64
->rx_missed_errors
);
2533 /* detailed tx_errors */
2534 st64
->tx_aborted_errors
= tswap64(st64
->tx_aborted_errors
);
2535 st64
->tx_carrier_errors
= tswap64(st64
->tx_carrier_errors
);
2536 st64
->tx_fifo_errors
= tswap64(st64
->tx_fifo_errors
);
2537 st64
->tx_heartbeat_errors
= tswap64(st64
->tx_heartbeat_errors
);
2538 st64
->tx_window_errors
= tswap64(st64
->tx_window_errors
);
2541 st64
->rx_compressed
= tswap64(st64
->rx_compressed
);
2542 st64
->tx_compressed
= tswap64(st64
->tx_compressed
);
2544 /* struct rtnl_link_ifmap */
2546 map
= RTA_DATA(rtattr
);
2547 map
->mem_start
= tswap64(map
->mem_start
);
2548 map
->mem_end
= tswap64(map
->mem_end
);
2549 map
->base_addr
= tswap64(map
->base_addr
);
2550 map
->irq
= tswap16(map
->irq
);
2553 case QEMU_IFLA_LINKINFO
:
2554 memset(&li_context
, 0, sizeof(li_context
));
2555 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2557 host_to_target_data_linkinfo_nlattr
);
2558 case QEMU_IFLA_AF_SPEC
:
2559 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2561 host_to_target_data_spec_nlattr
);
2563 gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr
->rta_type
);
2569 static abi_long
host_to_target_data_addr_rtattr(struct rtattr
*rtattr
)
2572 struct ifa_cacheinfo
*ci
;
2574 switch (rtattr
->rta_type
) {
2575 /* binary: depends on family type */
2585 u32
= RTA_DATA(rtattr
);
2586 *u32
= tswap32(*u32
);
2588 /* struct ifa_cacheinfo */
2590 ci
= RTA_DATA(rtattr
);
2591 ci
->ifa_prefered
= tswap32(ci
->ifa_prefered
);
2592 ci
->ifa_valid
= tswap32(ci
->ifa_valid
);
2593 ci
->cstamp
= tswap32(ci
->cstamp
);
2594 ci
->tstamp
= tswap32(ci
->tstamp
);
2597 gemu_log("Unknown host IFA type: %d\n", rtattr
->rta_type
);
2603 static abi_long
host_to_target_data_route_rtattr(struct rtattr
*rtattr
)
2606 switch (rtattr
->rta_type
) {
2607 /* binary: depends on family type */
2616 u32
= RTA_DATA(rtattr
);
2617 *u32
= tswap32(*u32
);
2620 gemu_log("Unknown host RTA type: %d\n", rtattr
->rta_type
);
2626 static abi_long
host_to_target_link_rtattr(struct rtattr
*rtattr
,
2627 uint32_t rtattr_len
)
2629 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2630 host_to_target_data_link_rtattr
);
2633 static abi_long
host_to_target_addr_rtattr(struct rtattr
*rtattr
,
2634 uint32_t rtattr_len
)
2636 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2637 host_to_target_data_addr_rtattr
);
2640 static abi_long
host_to_target_route_rtattr(struct rtattr
*rtattr
,
2641 uint32_t rtattr_len
)
2643 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2644 host_to_target_data_route_rtattr
);
2647 static abi_long
host_to_target_data_route(struct nlmsghdr
*nlh
)
2650 struct ifinfomsg
*ifi
;
2651 struct ifaddrmsg
*ifa
;
2654 nlmsg_len
= nlh
->nlmsg_len
;
2655 switch (nlh
->nlmsg_type
) {
2659 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2660 ifi
= NLMSG_DATA(nlh
);
2661 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2662 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2663 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2664 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2665 host_to_target_link_rtattr(IFLA_RTA(ifi
),
2666 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifi
)));
2672 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2673 ifa
= NLMSG_DATA(nlh
);
2674 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2675 host_to_target_addr_rtattr(IFA_RTA(ifa
),
2676 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifa
)));
2682 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2683 rtm
= NLMSG_DATA(nlh
);
2684 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2685 host_to_target_route_rtattr(RTM_RTA(rtm
),
2686 nlmsg_len
- NLMSG_LENGTH(sizeof(*rtm
)));
2690 return -TARGET_EINVAL
;
2695 static inline abi_long
host_to_target_nlmsg_route(struct nlmsghdr
*nlh
,
2698 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_route
);
2701 static abi_long
target_to_host_for_each_rtattr(struct rtattr
*rtattr
,
2703 abi_long (*target_to_host_rtattr
)
2708 while (len
>= sizeof(struct rtattr
)) {
2709 if (tswap16(rtattr
->rta_len
) < sizeof(struct rtattr
) ||
2710 tswap16(rtattr
->rta_len
) > len
) {
2713 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
2714 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
2715 ret
= target_to_host_rtattr(rtattr
);
2719 len
-= RTA_ALIGN(rtattr
->rta_len
);
2720 rtattr
= (struct rtattr
*)(((char *)rtattr
) +
2721 RTA_ALIGN(rtattr
->rta_len
));
2726 static abi_long
target_to_host_data_link_rtattr(struct rtattr
*rtattr
)
2728 switch (rtattr
->rta_type
) {
2730 gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr
->rta_type
);
2736 static abi_long
target_to_host_data_addr_rtattr(struct rtattr
*rtattr
)
2738 switch (rtattr
->rta_type
) {
2739 /* binary: depends on family type */
2744 gemu_log("Unknown target IFA type: %d\n", rtattr
->rta_type
);
2750 static abi_long
target_to_host_data_route_rtattr(struct rtattr
*rtattr
)
2753 switch (rtattr
->rta_type
) {
2754 /* binary: depends on family type */
2762 u32
= RTA_DATA(rtattr
);
2763 *u32
= tswap32(*u32
);
2766 gemu_log("Unknown target RTA type: %d\n", rtattr
->rta_type
);
2772 static void target_to_host_link_rtattr(struct rtattr
*rtattr
,
2773 uint32_t rtattr_len
)
2775 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2776 target_to_host_data_link_rtattr
);
2779 static void target_to_host_addr_rtattr(struct rtattr
*rtattr
,
2780 uint32_t rtattr_len
)
2782 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2783 target_to_host_data_addr_rtattr
);
2786 static void target_to_host_route_rtattr(struct rtattr
*rtattr
,
2787 uint32_t rtattr_len
)
2789 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2790 target_to_host_data_route_rtattr
);
2793 static abi_long
target_to_host_data_route(struct nlmsghdr
*nlh
)
2795 struct ifinfomsg
*ifi
;
2796 struct ifaddrmsg
*ifa
;
2799 switch (nlh
->nlmsg_type
) {
2804 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2805 ifi
= NLMSG_DATA(nlh
);
2806 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2807 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2808 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2809 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2810 target_to_host_link_rtattr(IFLA_RTA(ifi
), nlh
->nlmsg_len
-
2811 NLMSG_LENGTH(sizeof(*ifi
)));
2817 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2818 ifa
= NLMSG_DATA(nlh
);
2819 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2820 target_to_host_addr_rtattr(IFA_RTA(ifa
), nlh
->nlmsg_len
-
2821 NLMSG_LENGTH(sizeof(*ifa
)));
2828 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2829 rtm
= NLMSG_DATA(nlh
);
2830 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2831 target_to_host_route_rtattr(RTM_RTA(rtm
), nlh
->nlmsg_len
-
2832 NLMSG_LENGTH(sizeof(*rtm
)));
2836 return -TARGET_EOPNOTSUPP
;
2841 static abi_long
target_to_host_nlmsg_route(struct nlmsghdr
*nlh
, size_t len
)
2843 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_route
);
2845 #endif /* CONFIG_RTNETLINK */
2847 static abi_long
host_to_target_data_audit(struct nlmsghdr
*nlh
)
2849 switch (nlh
->nlmsg_type
) {
2851 gemu_log("Unknown host audit message type %d\n",
2853 return -TARGET_EINVAL
;
2858 static inline abi_long
host_to_target_nlmsg_audit(struct nlmsghdr
*nlh
,
2861 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_audit
);
2864 static abi_long
target_to_host_data_audit(struct nlmsghdr
*nlh
)
2866 switch (nlh
->nlmsg_type
) {
2868 case AUDIT_FIRST_USER_MSG
... AUDIT_LAST_USER_MSG
:
2869 case AUDIT_FIRST_USER_MSG2
... AUDIT_LAST_USER_MSG2
:
2872 gemu_log("Unknown target audit message type %d\n",
2874 return -TARGET_EINVAL
;
2880 static abi_long
target_to_host_nlmsg_audit(struct nlmsghdr
*nlh
, size_t len
)
2882 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_audit
);
2885 /* do_setsockopt() Must return target values and target errnos. */
2886 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
2887 abi_ulong optval_addr
, socklen_t optlen
)
2891 struct ip_mreqn
*ip_mreq
;
2892 struct ip_mreq_source
*ip_mreq_source
;
2896 /* TCP options all take an 'int' value. */
2897 if (optlen
< sizeof(uint32_t))
2898 return -TARGET_EINVAL
;
2900 if (get_user_u32(val
, optval_addr
))
2901 return -TARGET_EFAULT
;
2902 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2909 case IP_ROUTER_ALERT
:
2913 case IP_MTU_DISCOVER
:
2920 case IP_MULTICAST_TTL
:
2921 case IP_MULTICAST_LOOP
:
2923 if (optlen
>= sizeof(uint32_t)) {
2924 if (get_user_u32(val
, optval_addr
))
2925 return -TARGET_EFAULT
;
2926 } else if (optlen
>= 1) {
2927 if (get_user_u8(val
, optval_addr
))
2928 return -TARGET_EFAULT
;
2930 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2932 case IP_ADD_MEMBERSHIP
:
2933 case IP_DROP_MEMBERSHIP
:
2934 if (optlen
< sizeof (struct target_ip_mreq
) ||
2935 optlen
> sizeof (struct target_ip_mreqn
))
2936 return -TARGET_EINVAL
;
2938 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
2939 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
2940 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
2943 case IP_BLOCK_SOURCE
:
2944 case IP_UNBLOCK_SOURCE
:
2945 case IP_ADD_SOURCE_MEMBERSHIP
:
2946 case IP_DROP_SOURCE_MEMBERSHIP
:
2947 if (optlen
!= sizeof (struct target_ip_mreq_source
))
2948 return -TARGET_EINVAL
;
2950 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2951 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2952 unlock_user (ip_mreq_source
, optval_addr
, 0);
2961 case IPV6_MTU_DISCOVER
:
2964 case IPV6_RECVPKTINFO
:
2965 case IPV6_UNICAST_HOPS
:
2967 case IPV6_RECVHOPLIMIT
:
2968 case IPV6_2292HOPLIMIT
:
2971 if (optlen
< sizeof(uint32_t)) {
2972 return -TARGET_EINVAL
;
2974 if (get_user_u32(val
, optval_addr
)) {
2975 return -TARGET_EFAULT
;
2977 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2978 &val
, sizeof(val
)));
2982 struct in6_pktinfo pki
;
2984 if (optlen
< sizeof(pki
)) {
2985 return -TARGET_EINVAL
;
2988 if (copy_from_user(&pki
, optval_addr
, sizeof(pki
))) {
2989 return -TARGET_EFAULT
;
2992 pki
.ipi6_ifindex
= tswap32(pki
.ipi6_ifindex
);
2994 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2995 &pki
, sizeof(pki
)));
3006 struct icmp6_filter icmp6f
;
3008 if (optlen
> sizeof(icmp6f
)) {
3009 optlen
= sizeof(icmp6f
);
3012 if (copy_from_user(&icmp6f
, optval_addr
, optlen
)) {
3013 return -TARGET_EFAULT
;
3016 for (val
= 0; val
< 8; val
++) {
3017 icmp6f
.data
[val
] = tswap32(icmp6f
.data
[val
]);
3020 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
3032 /* those take an u32 value */
3033 if (optlen
< sizeof(uint32_t)) {
3034 return -TARGET_EINVAL
;
3037 if (get_user_u32(val
, optval_addr
)) {
3038 return -TARGET_EFAULT
;
3040 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
3041 &val
, sizeof(val
)));
3048 case TARGET_SOL_SOCKET
:
3050 case TARGET_SO_RCVTIMEO
:
3054 optname
= SO_RCVTIMEO
;
3057 if (optlen
!= sizeof(struct target_timeval
)) {
3058 return -TARGET_EINVAL
;
3061 if (copy_from_user_timeval(&tv
, optval_addr
)) {
3062 return -TARGET_EFAULT
;
3065 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
3069 case TARGET_SO_SNDTIMEO
:
3070 optname
= SO_SNDTIMEO
;
3072 case TARGET_SO_ATTACH_FILTER
:
3074 struct target_sock_fprog
*tfprog
;
3075 struct target_sock_filter
*tfilter
;
3076 struct sock_fprog fprog
;
3077 struct sock_filter
*filter
;
3080 if (optlen
!= sizeof(*tfprog
)) {
3081 return -TARGET_EINVAL
;
3083 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
3084 return -TARGET_EFAULT
;
3086 if (!lock_user_struct(VERIFY_READ
, tfilter
,
3087 tswapal(tfprog
->filter
), 0)) {
3088 unlock_user_struct(tfprog
, optval_addr
, 1);
3089 return -TARGET_EFAULT
;
3092 fprog
.len
= tswap16(tfprog
->len
);
3093 filter
= g_try_new(struct sock_filter
, fprog
.len
);
3094 if (filter
== NULL
) {
3095 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
3096 unlock_user_struct(tfprog
, optval_addr
, 1);
3097 return -TARGET_ENOMEM
;
3099 for (i
= 0; i
< fprog
.len
; i
++) {
3100 filter
[i
].code
= tswap16(tfilter
[i
].code
);
3101 filter
[i
].jt
= tfilter
[i
].jt
;
3102 filter
[i
].jf
= tfilter
[i
].jf
;
3103 filter
[i
].k
= tswap32(tfilter
[i
].k
);
3105 fprog
.filter
= filter
;
3107 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
3108 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
3111 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
3112 unlock_user_struct(tfprog
, optval_addr
, 1);
3115 case TARGET_SO_BINDTODEVICE
:
3117 char *dev_ifname
, *addr_ifname
;
3119 if (optlen
> IFNAMSIZ
- 1) {
3120 optlen
= IFNAMSIZ
- 1;
3122 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
3124 return -TARGET_EFAULT
;
3126 optname
= SO_BINDTODEVICE
;
3127 addr_ifname
= alloca(IFNAMSIZ
);
3128 memcpy(addr_ifname
, dev_ifname
, optlen
);
3129 addr_ifname
[optlen
] = 0;
3130 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
3131 addr_ifname
, optlen
));
3132 unlock_user (dev_ifname
, optval_addr
, 0);
3135 /* Options with 'int' argument. */
3136 case TARGET_SO_DEBUG
:
3139 case TARGET_SO_REUSEADDR
:
3140 optname
= SO_REUSEADDR
;
3142 case TARGET_SO_TYPE
:
3145 case TARGET_SO_ERROR
:
3148 case TARGET_SO_DONTROUTE
:
3149 optname
= SO_DONTROUTE
;
3151 case TARGET_SO_BROADCAST
:
3152 optname
= SO_BROADCAST
;
3154 case TARGET_SO_SNDBUF
:
3155 optname
= SO_SNDBUF
;
3157 case TARGET_SO_SNDBUFFORCE
:
3158 optname
= SO_SNDBUFFORCE
;
3160 case TARGET_SO_RCVBUF
:
3161 optname
= SO_RCVBUF
;
3163 case TARGET_SO_RCVBUFFORCE
:
3164 optname
= SO_RCVBUFFORCE
;
3166 case TARGET_SO_KEEPALIVE
:
3167 optname
= SO_KEEPALIVE
;
3169 case TARGET_SO_OOBINLINE
:
3170 optname
= SO_OOBINLINE
;
3172 case TARGET_SO_NO_CHECK
:
3173 optname
= SO_NO_CHECK
;
3175 case TARGET_SO_PRIORITY
:
3176 optname
= SO_PRIORITY
;
3179 case TARGET_SO_BSDCOMPAT
:
3180 optname
= SO_BSDCOMPAT
;
3183 case TARGET_SO_PASSCRED
:
3184 optname
= SO_PASSCRED
;
3186 case TARGET_SO_PASSSEC
:
3187 optname
= SO_PASSSEC
;
3189 case TARGET_SO_TIMESTAMP
:
3190 optname
= SO_TIMESTAMP
;
3192 case TARGET_SO_RCVLOWAT
:
3193 optname
= SO_RCVLOWAT
;
3198 if (optlen
< sizeof(uint32_t))
3199 return -TARGET_EINVAL
;
3201 if (get_user_u32(val
, optval_addr
))
3202 return -TARGET_EFAULT
;
3203 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
3207 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
3208 ret
= -TARGET_ENOPROTOOPT
;
3213 /* do_getsockopt() Must return target values and target errnos. */
3214 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
3215 abi_ulong optval_addr
, abi_ulong optlen
)
3222 case TARGET_SOL_SOCKET
:
3225 /* These don't just return a single integer */
3226 case TARGET_SO_LINGER
:
3227 case TARGET_SO_RCVTIMEO
:
3228 case TARGET_SO_SNDTIMEO
:
3229 case TARGET_SO_PEERNAME
:
3231 case TARGET_SO_PEERCRED
: {
3234 struct target_ucred
*tcr
;
3236 if (get_user_u32(len
, optlen
)) {
3237 return -TARGET_EFAULT
;
3240 return -TARGET_EINVAL
;
3244 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
3252 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
3253 return -TARGET_EFAULT
;
3255 __put_user(cr
.pid
, &tcr
->pid
);
3256 __put_user(cr
.uid
, &tcr
->uid
);
3257 __put_user(cr
.gid
, &tcr
->gid
);
3258 unlock_user_struct(tcr
, optval_addr
, 1);
3259 if (put_user_u32(len
, optlen
)) {
3260 return -TARGET_EFAULT
;
3264 /* Options with 'int' argument. */
3265 case TARGET_SO_DEBUG
:
3268 case TARGET_SO_REUSEADDR
:
3269 optname
= SO_REUSEADDR
;
3271 case TARGET_SO_TYPE
:
3274 case TARGET_SO_ERROR
:
3277 case TARGET_SO_DONTROUTE
:
3278 optname
= SO_DONTROUTE
;
3280 case TARGET_SO_BROADCAST
:
3281 optname
= SO_BROADCAST
;
3283 case TARGET_SO_SNDBUF
:
3284 optname
= SO_SNDBUF
;
3286 case TARGET_SO_RCVBUF
:
3287 optname
= SO_RCVBUF
;
3289 case TARGET_SO_KEEPALIVE
:
3290 optname
= SO_KEEPALIVE
;
3292 case TARGET_SO_OOBINLINE
:
3293 optname
= SO_OOBINLINE
;
3295 case TARGET_SO_NO_CHECK
:
3296 optname
= SO_NO_CHECK
;
3298 case TARGET_SO_PRIORITY
:
3299 optname
= SO_PRIORITY
;
3302 case TARGET_SO_BSDCOMPAT
:
3303 optname
= SO_BSDCOMPAT
;
3306 case TARGET_SO_PASSCRED
:
3307 optname
= SO_PASSCRED
;
3309 case TARGET_SO_TIMESTAMP
:
3310 optname
= SO_TIMESTAMP
;
3312 case TARGET_SO_RCVLOWAT
:
3313 optname
= SO_RCVLOWAT
;
3315 case TARGET_SO_ACCEPTCONN
:
3316 optname
= SO_ACCEPTCONN
;
3323 /* TCP options all take an 'int' value. */
3325 if (get_user_u32(len
, optlen
))
3326 return -TARGET_EFAULT
;
3328 return -TARGET_EINVAL
;
3330 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3333 if (optname
== SO_TYPE
) {
3334 val
= host_to_target_sock_type(val
);
3339 if (put_user_u32(val
, optval_addr
))
3340 return -TARGET_EFAULT
;
3342 if (put_user_u8(val
, optval_addr
))
3343 return -TARGET_EFAULT
;
3345 if (put_user_u32(len
, optlen
))
3346 return -TARGET_EFAULT
;
3353 case IP_ROUTER_ALERT
:
3357 case IP_MTU_DISCOVER
:
3363 case IP_MULTICAST_TTL
:
3364 case IP_MULTICAST_LOOP
:
3365 if (get_user_u32(len
, optlen
))
3366 return -TARGET_EFAULT
;
3368 return -TARGET_EINVAL
;
3370 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3373 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
3375 if (put_user_u32(len
, optlen
)
3376 || put_user_u8(val
, optval_addr
))
3377 return -TARGET_EFAULT
;
3379 if (len
> sizeof(int))
3381 if (put_user_u32(len
, optlen
)
3382 || put_user_u32(val
, optval_addr
))
3383 return -TARGET_EFAULT
;
3387 ret
= -TARGET_ENOPROTOOPT
;
3393 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
3395 ret
= -TARGET_EOPNOTSUPP
;
3401 /* Convert target low/high pair representing file offset into the host
3402 * low/high pair. This function doesn't handle offsets bigger than 64 bits
3403 * as the kernel doesn't handle them either.
3405 static void target_to_host_low_high(abi_ulong tlow
,
3407 unsigned long *hlow
,
3408 unsigned long *hhigh
)
3410 uint64_t off
= tlow
|
3411 ((unsigned long long)thigh
<< TARGET_LONG_BITS
/ 2) <<
3412 TARGET_LONG_BITS
/ 2;
3415 *hhigh
= (off
>> HOST_LONG_BITS
/ 2) >> HOST_LONG_BITS
/ 2;
3418 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
3419 abi_ulong count
, int copy
)
3421 struct target_iovec
*target_vec
;
3423 abi_ulong total_len
, max_len
;
3426 bool bad_address
= false;
3432 if (count
> IOV_MAX
) {
3437 vec
= g_try_new0(struct iovec
, count
);
3443 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3444 count
* sizeof(struct target_iovec
), 1);
3445 if (target_vec
== NULL
) {
3450 /* ??? If host page size > target page size, this will result in a
3451 value larger than what we can actually support. */
3452 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
3455 for (i
= 0; i
< count
; i
++) {
3456 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3457 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3462 } else if (len
== 0) {
3463 /* Zero length pointer is ignored. */
3464 vec
[i
].iov_base
= 0;
3466 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
3467 /* If the first buffer pointer is bad, this is a fault. But
3468 * subsequent bad buffers will result in a partial write; this
3469 * is realized by filling the vector with null pointers and
3471 if (!vec
[i
].iov_base
) {
3482 if (len
> max_len
- total_len
) {
3483 len
= max_len
- total_len
;
3486 vec
[i
].iov_len
= len
;
3490 unlock_user(target_vec
, target_addr
, 0);
3495 if (tswapal(target_vec
[i
].iov_len
) > 0) {
3496 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
3499 unlock_user(target_vec
, target_addr
, 0);
3506 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
3507 abi_ulong count
, int copy
)
3509 struct target_iovec
*target_vec
;
3512 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3513 count
* sizeof(struct target_iovec
), 1);
3515 for (i
= 0; i
< count
; i
++) {
3516 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3517 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3521 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
3523 unlock_user(target_vec
, target_addr
, 0);
3529 static inline int target_to_host_sock_type(int *type
)
3532 int target_type
= *type
;
3534 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
3535 case TARGET_SOCK_DGRAM
:
3536 host_type
= SOCK_DGRAM
;
3538 case TARGET_SOCK_STREAM
:
3539 host_type
= SOCK_STREAM
;
3542 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
3545 if (target_type
& TARGET_SOCK_CLOEXEC
) {
3546 #if defined(SOCK_CLOEXEC)
3547 host_type
|= SOCK_CLOEXEC
;
3549 return -TARGET_EINVAL
;
3552 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3553 #if defined(SOCK_NONBLOCK)
3554 host_type
|= SOCK_NONBLOCK
;
3555 #elif !defined(O_NONBLOCK)
3556 return -TARGET_EINVAL
;
3563 /* Try to emulate socket type flags after socket creation. */
3564 static int sock_flags_fixup(int fd
, int target_type
)
3566 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3567 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3568 int flags
= fcntl(fd
, F_GETFL
);
3569 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
3571 return -TARGET_EINVAL
;
3578 static abi_long
packet_target_to_host_sockaddr(void *host_addr
,
3579 abi_ulong target_addr
,
3582 struct sockaddr
*addr
= host_addr
;
3583 struct target_sockaddr
*target_saddr
;
3585 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
3586 if (!target_saddr
) {
3587 return -TARGET_EFAULT
;
3590 memcpy(addr
, target_saddr
, len
);
3591 addr
->sa_family
= tswap16(target_saddr
->sa_family
);
3592 /* spkt_protocol is big-endian */
3594 unlock_user(target_saddr
, target_addr
, 0);
3598 static TargetFdTrans target_packet_trans
= {
3599 .target_to_host_addr
= packet_target_to_host_sockaddr
,
3602 #ifdef CONFIG_RTNETLINK
3603 static abi_long
netlink_route_target_to_host(void *buf
, size_t len
)
3607 ret
= target_to_host_nlmsg_route(buf
, len
);
3615 static abi_long
netlink_route_host_to_target(void *buf
, size_t len
)
3619 ret
= host_to_target_nlmsg_route(buf
, len
);
3627 static TargetFdTrans target_netlink_route_trans
= {
3628 .target_to_host_data
= netlink_route_target_to_host
,
3629 .host_to_target_data
= netlink_route_host_to_target
,
3631 #endif /* CONFIG_RTNETLINK */
3633 static abi_long
netlink_audit_target_to_host(void *buf
, size_t len
)
3637 ret
= target_to_host_nlmsg_audit(buf
, len
);
3645 static abi_long
netlink_audit_host_to_target(void *buf
, size_t len
)
3649 ret
= host_to_target_nlmsg_audit(buf
, len
);
3657 static TargetFdTrans target_netlink_audit_trans
= {
3658 .target_to_host_data
= netlink_audit_target_to_host
,
3659 .host_to_target_data
= netlink_audit_host_to_target
,
3662 /* do_socket() Must return target values and target errnos. */
3663 static abi_long
do_socket(int domain
, int type
, int protocol
)
3665 int target_type
= type
;
3668 ret
= target_to_host_sock_type(&type
);
3673 if (domain
== PF_NETLINK
&& !(
3674 #ifdef CONFIG_RTNETLINK
3675 protocol
== NETLINK_ROUTE
||
3677 protocol
== NETLINK_KOBJECT_UEVENT
||
3678 protocol
== NETLINK_AUDIT
)) {
3679 return -EPFNOSUPPORT
;
3682 if (domain
== AF_PACKET
||
3683 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
3684 protocol
= tswap16(protocol
);
3687 ret
= get_errno(socket(domain
, type
, protocol
));
3689 ret
= sock_flags_fixup(ret
, target_type
);
3690 if (type
== SOCK_PACKET
) {
3691 /* Manage an obsolete case :
3692 * if socket type is SOCK_PACKET, bind by name
3694 fd_trans_register(ret
, &target_packet_trans
);
3695 } else if (domain
== PF_NETLINK
) {
3697 #ifdef CONFIG_RTNETLINK
3699 fd_trans_register(ret
, &target_netlink_route_trans
);
3702 case NETLINK_KOBJECT_UEVENT
:
3703 /* nothing to do: messages are strings */
3706 fd_trans_register(ret
, &target_netlink_audit_trans
);
3709 g_assert_not_reached();
3716 /* do_bind() Must return target values and target errnos. */
3717 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
3723 if ((int)addrlen
< 0) {
3724 return -TARGET_EINVAL
;
3727 addr
= alloca(addrlen
+1);
3729 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3733 return get_errno(bind(sockfd
, addr
, addrlen
));
3736 /* do_connect() Must return target values and target errnos. */
3737 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
3743 if ((int)addrlen
< 0) {
3744 return -TARGET_EINVAL
;
3747 addr
= alloca(addrlen
+1);
3749 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3753 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
3756 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3757 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
3758 int flags
, int send
)
3764 abi_ulong target_vec
;
3766 if (msgp
->msg_name
) {
3767 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
3768 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
3769 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
3770 tswapal(msgp
->msg_name
),
3772 if (ret
== -TARGET_EFAULT
) {
3773 /* For connected sockets msg_name and msg_namelen must
3774 * be ignored, so returning EFAULT immediately is wrong.
3775 * Instead, pass a bad msg_name to the host kernel, and
3776 * let it decide whether to return EFAULT or not.
3778 msg
.msg_name
= (void *)-1;
3783 msg
.msg_name
= NULL
;
3784 msg
.msg_namelen
= 0;
3786 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3787 msg
.msg_control
= alloca(msg
.msg_controllen
);
3788 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3790 count
= tswapal(msgp
->msg_iovlen
);
3791 target_vec
= tswapal(msgp
->msg_iov
);
3793 if (count
> IOV_MAX
) {
3794 /* sendrcvmsg returns a different errno for this condition than
3795 * readv/writev, so we must catch it here before lock_iovec() does.
3797 ret
= -TARGET_EMSGSIZE
;
3801 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3802 target_vec
, count
, send
);
3804 ret
= -host_to_target_errno(errno
);
3807 msg
.msg_iovlen
= count
;
3811 if (fd_trans_target_to_host_data(fd
)) {
3814 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3815 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3816 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3817 msg
.msg_iov
->iov_len
);
3819 msg
.msg_iov
->iov_base
= host_msg
;
3820 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3824 ret
= target_to_host_cmsg(&msg
, msgp
);
3826 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3830 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3831 if (!is_error(ret
)) {
3833 if (fd_trans_host_to_target_data(fd
)) {
3834 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3837 ret
= host_to_target_cmsg(msgp
, &msg
);
3839 if (!is_error(ret
)) {
3840 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3841 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
3842 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3843 msg
.msg_name
, msg
.msg_namelen
);
3855 unlock_iovec(vec
, target_vec
, count
, !send
);
3860 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3861 int flags
, int send
)
3864 struct target_msghdr
*msgp
;
3866 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3870 return -TARGET_EFAULT
;
3872 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3873 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3877 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3878 * so it might not have this *mmsg-specific flag either.
3880 #ifndef MSG_WAITFORONE
3881 #define MSG_WAITFORONE 0x10000
3884 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3885 unsigned int vlen
, unsigned int flags
,
3888 struct target_mmsghdr
*mmsgp
;
3892 if (vlen
> UIO_MAXIOV
) {
3896 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3898 return -TARGET_EFAULT
;
3901 for (i
= 0; i
< vlen
; i
++) {
3902 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3903 if (is_error(ret
)) {
3906 mmsgp
[i
].msg_len
= tswap32(ret
);
3907 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3908 if (flags
& MSG_WAITFORONE
) {
3909 flags
|= MSG_DONTWAIT
;
3913 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3915 /* Return number of datagrams sent if we sent any at all;
3916 * otherwise return the error.
3924 /* do_accept4() Must return target values and target errnos. */
3925 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3926 abi_ulong target_addrlen_addr
, int flags
)
3933 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3935 if (target_addr
== 0) {
3936 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3939 /* linux returns EINVAL if addrlen pointer is invalid */
3940 if (get_user_u32(addrlen
, target_addrlen_addr
))
3941 return -TARGET_EINVAL
;
3943 if ((int)addrlen
< 0) {
3944 return -TARGET_EINVAL
;
3947 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3948 return -TARGET_EINVAL
;
3950 addr
= alloca(addrlen
);
3952 ret
= get_errno(safe_accept4(fd
, addr
, &addrlen
, host_flags
));
3953 if (!is_error(ret
)) {
3954 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3955 if (put_user_u32(addrlen
, target_addrlen_addr
))
3956 ret
= -TARGET_EFAULT
;
3961 /* do_getpeername() Must return target values and target errnos. */
3962 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3963 abi_ulong target_addrlen_addr
)
3969 if (get_user_u32(addrlen
, target_addrlen_addr
))
3970 return -TARGET_EFAULT
;
3972 if ((int)addrlen
< 0) {
3973 return -TARGET_EINVAL
;
3976 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3977 return -TARGET_EFAULT
;
3979 addr
= alloca(addrlen
);
3981 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
3982 if (!is_error(ret
)) {
3983 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3984 if (put_user_u32(addrlen
, target_addrlen_addr
))
3985 ret
= -TARGET_EFAULT
;
3990 /* do_getsockname() Must return target values and target errnos. */
3991 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3992 abi_ulong target_addrlen_addr
)
3998 if (get_user_u32(addrlen
, target_addrlen_addr
))
3999 return -TARGET_EFAULT
;
4001 if ((int)addrlen
< 0) {
4002 return -TARGET_EINVAL
;
4005 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
4006 return -TARGET_EFAULT
;
4008 addr
= alloca(addrlen
);
4010 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
4011 if (!is_error(ret
)) {
4012 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
4013 if (put_user_u32(addrlen
, target_addrlen_addr
))
4014 ret
= -TARGET_EFAULT
;
4019 /* do_socketpair() Must return target values and target errnos. */
4020 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
4021 abi_ulong target_tab_addr
)
4026 target_to_host_sock_type(&type
);
4028 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
4029 if (!is_error(ret
)) {
4030 if (put_user_s32(tab
[0], target_tab_addr
)
4031 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
4032 ret
= -TARGET_EFAULT
;
4037 /* do_sendto() Must return target values and target errnos. */
4038 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
4039 abi_ulong target_addr
, socklen_t addrlen
)
4043 void *copy_msg
= NULL
;
4046 if ((int)addrlen
< 0) {
4047 return -TARGET_EINVAL
;
4050 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
4052 return -TARGET_EFAULT
;
4053 if (fd_trans_target_to_host_data(fd
)) {
4054 copy_msg
= host_msg
;
4055 host_msg
= g_malloc(len
);
4056 memcpy(host_msg
, copy_msg
, len
);
4057 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
4063 addr
= alloca(addrlen
+1);
4064 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
4068 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
4070 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
4075 host_msg
= copy_msg
;
4077 unlock_user(host_msg
, msg
, 0);
4081 /* do_recvfrom() Must return target values and target errnos. */
4082 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
4083 abi_ulong target_addr
,
4084 abi_ulong target_addrlen
)
4091 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
4093 return -TARGET_EFAULT
;
4095 if (get_user_u32(addrlen
, target_addrlen
)) {
4096 ret
= -TARGET_EFAULT
;
4099 if ((int)addrlen
< 0) {
4100 ret
= -TARGET_EINVAL
;
4103 addr
= alloca(addrlen
);
4104 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
4107 addr
= NULL
; /* To keep compiler quiet. */
4108 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
4110 if (!is_error(ret
)) {
4111 if (fd_trans_host_to_target_data(fd
)) {
4112 ret
= fd_trans_host_to_target_data(fd
)(host_msg
, ret
);
4115 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
4116 if (put_user_u32(addrlen
, target_addrlen
)) {
4117 ret
= -TARGET_EFAULT
;
4121 unlock_user(host_msg
, msg
, len
);
4124 unlock_user(host_msg
, msg
, 0);
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }

    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }

    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }

    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
4222 #define N_SHM_REGIONS 32
4224 static struct shm_region
{
4228 } shm_regions
[N_SHM_REGIONS
];
4230 #ifndef TARGET_SEMID64_DS
4231 /* asm-generic version of this struct */
4232 struct target_semid64_ds
4234 struct target_ipc_perm sem_perm
;
4235 abi_ulong sem_otime
;
4236 #if TARGET_ABI_BITS == 32
4237 abi_ulong __unused1
;
4239 abi_ulong sem_ctime
;
4240 #if TARGET_ABI_BITS == 32
4241 abi_ulong __unused2
;
4243 abi_ulong sem_nsems
;
4244 abi_ulong __unused3
;
4245 abi_ulong __unused4
;
4249 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
4250 abi_ulong target_addr
)
4252 struct target_ipc_perm
*target_ip
;
4253 struct target_semid64_ds
*target_sd
;
4255 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4256 return -TARGET_EFAULT
;
4257 target_ip
= &(target_sd
->sem_perm
);
4258 host_ip
->__key
= tswap32(target_ip
->__key
);
4259 host_ip
->uid
= tswap32(target_ip
->uid
);
4260 host_ip
->gid
= tswap32(target_ip
->gid
);
4261 host_ip
->cuid
= tswap32(target_ip
->cuid
);
4262 host_ip
->cgid
= tswap32(target_ip
->cgid
);
4263 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4264 host_ip
->mode
= tswap32(target_ip
->mode
);
4266 host_ip
->mode
= tswap16(target_ip
->mode
);
4268 #if defined(TARGET_PPC)
4269 host_ip
->__seq
= tswap32(target_ip
->__seq
);
4271 host_ip
->__seq
= tswap16(target_ip
->__seq
);
4273 unlock_user_struct(target_sd
, target_addr
, 0);
4277 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
4278 struct ipc_perm
*host_ip
)
4280 struct target_ipc_perm
*target_ip
;
4281 struct target_semid64_ds
*target_sd
;
4283 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4284 return -TARGET_EFAULT
;
4285 target_ip
= &(target_sd
->sem_perm
);
4286 target_ip
->__key
= tswap32(host_ip
->__key
);
4287 target_ip
->uid
= tswap32(host_ip
->uid
);
4288 target_ip
->gid
= tswap32(host_ip
->gid
);
4289 target_ip
->cuid
= tswap32(host_ip
->cuid
);
4290 target_ip
->cgid
= tswap32(host_ip
->cgid
);
4291 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4292 target_ip
->mode
= tswap32(host_ip
->mode
);
4294 target_ip
->mode
= tswap16(host_ip
->mode
);
4296 #if defined(TARGET_PPC)
4297 target_ip
->__seq
= tswap32(host_ip
->__seq
);
4299 target_ip
->__seq
= tswap16(host_ip
->__seq
);
4301 unlock_user_struct(target_sd
, target_addr
, 1);
4305 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
4306 abi_ulong target_addr
)
4308 struct target_semid64_ds
*target_sd
;
4310 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4311 return -TARGET_EFAULT
;
4312 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
4313 return -TARGET_EFAULT
;
4314 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
4315 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
4316 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
4317 unlock_user_struct(target_sd
, target_addr
, 0);
4321 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
4322 struct semid_ds
*host_sd
)
4324 struct target_semid64_ds
*target_sd
;
4326 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4327 return -TARGET_EFAULT
;
4328 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
4329 return -TARGET_EFAULT
;
4330 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
4331 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
4332 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
4333 unlock_user_struct(target_sd
, target_addr
, 1);
/* Guest layout of struct seminfo (all plain ints, as on the host). */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
4350 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
4351 struct seminfo
*host_seminfo
)
4353 struct target_seminfo
*target_seminfo
;
4354 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
4355 return -TARGET_EFAULT
;
4356 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
4357 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
4358 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
4359 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
4360 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
4361 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
4362 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
4363 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
4364 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
4365 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
4366 unlock_user_struct(target_seminfo
, target_addr
, 1);
4372 struct semid_ds
*buf
;
4373 unsigned short *array
;
4374 struct seminfo
*__buf
;
4377 union target_semun
{
4384 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
4385 abi_ulong target_addr
)
4388 unsigned short *array
;
4390 struct semid_ds semid_ds
;
4393 semun
.buf
= &semid_ds
;
4395 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4397 return get_errno(ret
);
4399 nsems
= semid_ds
.sem_nsems
;
4401 *host_array
= g_try_new(unsigned short, nsems
);
4403 return -TARGET_ENOMEM
;
4405 array
= lock_user(VERIFY_READ
, target_addr
,
4406 nsems
*sizeof(unsigned short), 1);
4408 g_free(*host_array
);
4409 return -TARGET_EFAULT
;
4412 for(i
=0; i
<nsems
; i
++) {
4413 __get_user((*host_array
)[i
], &array
[i
]);
4415 unlock_user(array
, target_addr
, 0);
4420 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
4421 unsigned short **host_array
)
4424 unsigned short *array
;
4426 struct semid_ds semid_ds
;
4429 semun
.buf
= &semid_ds
;
4431 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4433 return get_errno(ret
);
4435 nsems
= semid_ds
.sem_nsems
;
4437 array
= lock_user(VERIFY_WRITE
, target_addr
,
4438 nsems
*sizeof(unsigned short), 0);
4440 return -TARGET_EFAULT
;
4442 for(i
=0; i
<nsems
; i
++) {
4443 __put_user((*host_array
)[i
], &array
[i
]);
4445 g_free(*host_array
);
4446 unlock_user(array
, target_addr
, 1);
4451 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
4452 abi_ulong target_arg
)
4454 union target_semun target_su
= { .buf
= target_arg
};
4456 struct semid_ds dsarg
;
4457 unsigned short *array
= NULL
;
4458 struct seminfo seminfo
;
4459 abi_long ret
= -TARGET_EINVAL
;
4466 /* In 64 bit cross-endian situations, we will erroneously pick up
4467 * the wrong half of the union for the "val" element. To rectify
4468 * this, the entire 8-byte structure is byteswapped, followed by
4469 * a swap of the 4 byte val field. In other cases, the data is
4470 * already in proper host byte order. */
4471 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
4472 target_su
.buf
= tswapal(target_su
.buf
);
4473 arg
.val
= tswap32(target_su
.val
);
4475 arg
.val
= target_su
.val
;
4477 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4481 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
4485 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4486 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
4493 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
4497 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4498 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
4504 arg
.__buf
= &seminfo
;
4505 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4506 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
4514 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
/* Guest layout of struct sembuf. */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};
4527 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
4528 abi_ulong target_addr
,
4531 struct target_sembuf
*target_sembuf
;
4534 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
4535 nsops
*sizeof(struct target_sembuf
), 1);
4537 return -TARGET_EFAULT
;
4539 for(i
=0; i
<nsops
; i
++) {
4540 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
4541 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
4542 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
4545 unlock_user(target_sembuf
, target_addr
, 0);
4550 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
4552 struct sembuf sops
[nsops
];
4554 if (target_to_host_sembuf(sops
, ptr
, nsops
))
4555 return -TARGET_EFAULT
;
4557 return get_errno(safe_semtimedop(semid
, sops
, nsops
, NULL
));
4560 struct target_msqid_ds
4562 struct target_ipc_perm msg_perm
;
4563 abi_ulong msg_stime
;
4564 #if TARGET_ABI_BITS == 32
4565 abi_ulong __unused1
;
4567 abi_ulong msg_rtime
;
4568 #if TARGET_ABI_BITS == 32
4569 abi_ulong __unused2
;
4571 abi_ulong msg_ctime
;
4572 #if TARGET_ABI_BITS == 32
4573 abi_ulong __unused3
;
4575 abi_ulong __msg_cbytes
;
4577 abi_ulong msg_qbytes
;
4578 abi_ulong msg_lspid
;
4579 abi_ulong msg_lrpid
;
4580 abi_ulong __unused4
;
4581 abi_ulong __unused5
;
4584 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
4585 abi_ulong target_addr
)
4587 struct target_msqid_ds
*target_md
;
4589 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
4590 return -TARGET_EFAULT
;
4591 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
4592 return -TARGET_EFAULT
;
4593 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
4594 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
4595 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
4596 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
4597 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
4598 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
4599 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
4600 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
4601 unlock_user_struct(target_md
, target_addr
, 0);
4605 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
4606 struct msqid_ds
*host_md
)
4608 struct target_msqid_ds
*target_md
;
4610 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
4611 return -TARGET_EFAULT
;
4612 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
4613 return -TARGET_EFAULT
;
4614 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
4615 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
4616 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
4617 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
4618 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
4619 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
4620 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
4621 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
4622 unlock_user_struct(target_md
, target_addr
, 1);
/* Guest layout of struct msginfo. */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4637 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
4638 struct msginfo
*host_msginfo
)
4640 struct target_msginfo
*target_msginfo
;
4641 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
4642 return -TARGET_EFAULT
;
4643 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
4644 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
4645 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
4646 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
4647 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
4648 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
4649 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
4650 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
4651 unlock_user_struct(target_msginfo
, target_addr
, 1);
4655 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
4657 struct msqid_ds dsarg
;
4658 struct msginfo msginfo
;
4659 abi_long ret
= -TARGET_EINVAL
;
4667 if (target_to_host_msqid_ds(&dsarg
,ptr
))
4668 return -TARGET_EFAULT
;
4669 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
4670 if (host_to_target_msqid_ds(ptr
,&dsarg
))
4671 return -TARGET_EFAULT
;
4674 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
4678 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
4679 if (host_to_target_msginfo(ptr
, &msginfo
))
4680 return -TARGET_EFAULT
;
4687 struct target_msgbuf
{
4692 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
4693 ssize_t msgsz
, int msgflg
)
4695 struct target_msgbuf
*target_mb
;
4696 struct msgbuf
*host_mb
;
4700 return -TARGET_EINVAL
;
4703 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
4704 return -TARGET_EFAULT
;
4705 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4707 unlock_user_struct(target_mb
, msgp
, 0);
4708 return -TARGET_ENOMEM
;
4710 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
4711 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
4712 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
4714 unlock_user_struct(target_mb
, msgp
, 0);
4719 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
4720 ssize_t msgsz
, abi_long msgtyp
,
4723 struct target_msgbuf
*target_mb
;
4725 struct msgbuf
*host_mb
;
4729 return -TARGET_EINVAL
;
4732 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
4733 return -TARGET_EFAULT
;
4735 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4737 ret
= -TARGET_ENOMEM
;
4740 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
4743 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
4744 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
4745 if (!target_mtext
) {
4746 ret
= -TARGET_EFAULT
;
4749 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
4750 unlock_user(target_mtext
, target_mtext_addr
, ret
);
4753 target_mb
->mtype
= tswapal(host_mb
->mtype
);
4757 unlock_user_struct(target_mb
, msgp
, 1);
4762 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
4763 abi_ulong target_addr
)
4765 struct target_shmid_ds
*target_sd
;
4767 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4768 return -TARGET_EFAULT
;
4769 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
4770 return -TARGET_EFAULT
;
4771 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4772 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4773 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4774 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4775 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4776 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4777 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4778 unlock_user_struct(target_sd
, target_addr
, 0);
4782 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
4783 struct shmid_ds
*host_sd
)
4785 struct target_shmid_ds
*target_sd
;
4787 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4788 return -TARGET_EFAULT
;
4789 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
4790 return -TARGET_EFAULT
;
4791 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4792 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4793 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4794 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4795 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4796 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4797 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4798 unlock_user_struct(target_sd
, target_addr
, 1);
4802 struct target_shminfo
{
4810 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
4811 struct shminfo
*host_shminfo
)
4813 struct target_shminfo
*target_shminfo
;
4814 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
4815 return -TARGET_EFAULT
;
4816 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
4817 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
4818 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
4819 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
4820 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
4821 unlock_user_struct(target_shminfo
, target_addr
, 1);
4825 struct target_shm_info
{
4830 abi_ulong swap_attempts
;
4831 abi_ulong swap_successes
;
4834 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
4835 struct shm_info
*host_shm_info
)
4837 struct target_shm_info
*target_shm_info
;
4838 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
4839 return -TARGET_EFAULT
;
4840 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
4841 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
4842 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
4843 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
4844 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
4845 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
4846 unlock_user_struct(target_shm_info
, target_addr
, 1);
4850 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
4852 struct shmid_ds dsarg
;
4853 struct shminfo shminfo
;
4854 struct shm_info shm_info
;
4855 abi_long ret
= -TARGET_EINVAL
;
4863 if (target_to_host_shmid_ds(&dsarg
, buf
))
4864 return -TARGET_EFAULT
;
4865 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
4866 if (host_to_target_shmid_ds(buf
, &dsarg
))
4867 return -TARGET_EFAULT
;
4870 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
4871 if (host_to_target_shminfo(buf
, &shminfo
))
4872 return -TARGET_EFAULT
;
4875 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
4876 if (host_to_target_shm_info(buf
, &shm_info
))
4877 return -TARGET_EFAULT
;
4882 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
4889 #ifndef TARGET_FORCE_SHMLBA
4890 /* For most architectures, SHMLBA is the same as the page size;
4891 * some architectures have larger values, in which case they should
4892 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4893 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4894 * and defining its own value for SHMLBA.
4896 * The kernel also permits SHMLBA to be set by the architecture to a
4897 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4898 * this means that addresses are rounded to the large size if
4899 * SHM_RND is set but addresses not aligned to that size are not rejected
4900 * as long as they are at least page-aligned. Since the only architecture
4901 * which uses this is ia64 this code doesn't provide for that oddity.
4903 static inline abi_ulong
target_shmlba(CPUArchState
*cpu_env
)
4905 return TARGET_PAGE_SIZE
;
4909 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
4910 int shmid
, abi_ulong shmaddr
, int shmflg
)
4914 struct shmid_ds shm_info
;
4918 /* find out the length of the shared memory segment */
4919 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4920 if (is_error(ret
)) {
4921 /* can't get length, bail out */
4925 shmlba
= target_shmlba(cpu_env
);
4927 if (shmaddr
& (shmlba
- 1)) {
4928 if (shmflg
& SHM_RND
) {
4929 shmaddr
&= ~(shmlba
- 1);
4931 return -TARGET_EINVAL
;
4934 if (!guest_range_valid(shmaddr
, shm_info
.shm_segsz
)) {
4935 return -TARGET_EINVAL
;
4941 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
4943 abi_ulong mmap_start
;
4945 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
4947 if (mmap_start
== -1) {
4949 host_raddr
= (void *)-1;
4951 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
4954 if (host_raddr
== (void *)-1) {
4956 return get_errno((long)host_raddr
);
4958 raddr
=h2g((unsigned long)host_raddr
);
4960 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
4961 PAGE_VALID
| PAGE_READ
|
4962 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
4964 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4965 if (!shm_regions
[i
].in_use
) {
4966 shm_regions
[i
].in_use
= true;
4967 shm_regions
[i
].start
= raddr
;
4968 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4978 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4985 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4986 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4987 shm_regions
[i
].in_use
= false;
4988 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
4992 rv
= get_errno(shmdt(g2h(shmaddr
)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos.  */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* old-style call packs msgp/msgtyp behind a pointer */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }

        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr)) {
                return get_errno(raddr);
            }
            if (put_user_ual(raddr, third)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
5106 /* kernel structure types definitions */
5108 #define STRUCT(name, ...) STRUCT_ ## name,
5109 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
5111 #include "syscall_types.h"
5115 #undef STRUCT_SPECIAL
5117 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
5118 #define STRUCT_SPECIAL(name)
5119 #include "syscall_types.h"
5121 #undef STRUCT_SPECIAL
5123 typedef struct IOCTLEntry IOCTLEntry
;
5125 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5126 int fd
, int cmd
, abi_long arg
);
5130 unsigned int host_cmd
;
5133 do_ioctl_fn
*do_ioctl
;
5134 const argtype arg_type
[5];
5137 #define IOC_R 0x0001
5138 #define IOC_W 0x0002
5139 #define IOC_RW (IOC_R | IOC_W)
5141 #define MAX_STRUCT_SIZE 4096
5143 #ifdef CONFIG_FIEMAP
5144 /* So fiemap access checks don't overflow on 32 bit systems.
5145 * This is very slightly smaller than the limit imposed by
5146 * the underlying kernel.
5148 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
5149 / sizeof(struct fiemap_extent))
5151 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5152 int fd
, int cmd
, abi_long arg
)
5154 /* The parameter for this ioctl is a struct fiemap followed
5155 * by an array of struct fiemap_extent whose size is set
5156 * in fiemap->fm_extent_count. The array is filled in by the
5159 int target_size_in
, target_size_out
;
5161 const argtype
*arg_type
= ie
->arg_type
;
5162 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
5165 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
5169 assert(arg_type
[0] == TYPE_PTR
);
5170 assert(ie
->access
== IOC_RW
);
5172 target_size_in
= thunk_type_size(arg_type
, 0);
5173 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
5175 return -TARGET_EFAULT
;
5177 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5178 unlock_user(argptr
, arg
, 0);
5179 fm
= (struct fiemap
*)buf_temp
;
5180 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
5181 return -TARGET_EINVAL
;
5184 outbufsz
= sizeof (*fm
) +
5185 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
5187 if (outbufsz
> MAX_STRUCT_SIZE
) {
5188 /* We can't fit all the extents into the fixed size buffer.
5189 * Allocate one that is large enough and use it instead.
5191 fm
= g_try_malloc(outbufsz
);
5193 return -TARGET_ENOMEM
;
5195 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
5198 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, fm
));
5199 if (!is_error(ret
)) {
5200 target_size_out
= target_size_in
;
5201 /* An extent_count of 0 means we were only counting the extents
5202 * so there are no structs to copy
5204 if (fm
->fm_extent_count
!= 0) {
5205 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
5207 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
5209 ret
= -TARGET_EFAULT
;
5211 /* Convert the struct fiemap */
5212 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
5213 if (fm
->fm_extent_count
!= 0) {
5214 p
= argptr
+ target_size_in
;
5215 /* ...and then all the struct fiemap_extents */
5216 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
5217 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
5222 unlock_user(argptr
, arg
, target_size_out
);
5232 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5233 int fd
, int cmd
, abi_long arg
)
5235 const argtype
*arg_type
= ie
->arg_type
;
5239 struct ifconf
*host_ifconf
;
5241 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
5242 int target_ifreq_size
;
5247 abi_long target_ifc_buf
;
5251 assert(arg_type
[0] == TYPE_PTR
);
5252 assert(ie
->access
== IOC_RW
);
5255 target_size
= thunk_type_size(arg_type
, 0);
5257 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5259 return -TARGET_EFAULT
;
5260 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5261 unlock_user(argptr
, arg
, 0);
5263 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
5264 target_ifc_len
= host_ifconf
->ifc_len
;
5265 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
5267 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
5268 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
5269 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
5271 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
5272 if (outbufsz
> MAX_STRUCT_SIZE
) {
5273 /* We can't fit all the extents into the fixed size buffer.
5274 * Allocate one that is large enough and use it instead.
5276 host_ifconf
= malloc(outbufsz
);
5278 return -TARGET_ENOMEM
;
5280 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
5283 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
5285 host_ifconf
->ifc_len
= host_ifc_len
;
5286 host_ifconf
->ifc_buf
= host_ifc_buf
;
5288 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
5289 if (!is_error(ret
)) {
5290 /* convert host ifc_len to target ifc_len */
5292 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
5293 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
5294 host_ifconf
->ifc_len
= target_ifc_len
;
5296 /* restore target ifc_buf */
5298 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
5300 /* copy struct ifconf to target user */
5302 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5304 return -TARGET_EFAULT
;
5305 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
5306 unlock_user(argptr
, arg
, target_size
);
5308 /* copy ifreq[] to target user */
5310 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
5311 for (i
= 0; i
< nb_ifreq
; i
++) {
5312 thunk_convert(argptr
+ i
* target_ifreq_size
,
5313 host_ifc_buf
+ i
* sizeof(struct ifreq
),
5314 ifreq_arg_type
, THUNK_TARGET
);
5316 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
5326 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5327 int cmd
, abi_long arg
)
5330 struct dm_ioctl
*host_dm
;
5331 abi_long guest_data
;
5332 uint32_t guest_data_size
;
5334 const argtype
*arg_type
= ie
->arg_type
;
5336 void *big_buf
= NULL
;
5340 target_size
= thunk_type_size(arg_type
, 0);
5341 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5343 ret
= -TARGET_EFAULT
;
5346 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5347 unlock_user(argptr
, arg
, 0);
5349 /* buf_temp is too small, so fetch things into a bigger buffer */
5350 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
5351 memcpy(big_buf
, buf_temp
, target_size
);
5355 guest_data
= arg
+ host_dm
->data_start
;
5356 if ((guest_data
- arg
) < 0) {
5357 ret
= -TARGET_EINVAL
;
5360 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5361 host_data
= (char*)host_dm
+ host_dm
->data_start
;
5363 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
5365 ret
= -TARGET_EFAULT
;
5369 switch (ie
->host_cmd
) {
5371 case DM_LIST_DEVICES
:
5374 case DM_DEV_SUSPEND
:
5377 case DM_TABLE_STATUS
:
5378 case DM_TABLE_CLEAR
:
5380 case DM_LIST_VERSIONS
:
5384 case DM_DEV_SET_GEOMETRY
:
5385 /* data contains only strings */
5386 memcpy(host_data
, argptr
, guest_data_size
);
5389 memcpy(host_data
, argptr
, guest_data_size
);
5390 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
5394 void *gspec
= argptr
;
5395 void *cur_data
= host_data
;
5396 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5397 int spec_size
= thunk_type_size(arg_type
, 0);
5400 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5401 struct dm_target_spec
*spec
= cur_data
;
5405 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
5406 slen
= strlen((char*)gspec
+ spec_size
) + 1;
5408 spec
->next
= sizeof(*spec
) + slen
;
5409 strcpy((char*)&spec
[1], gspec
+ spec_size
);
5411 cur_data
+= spec
->next
;
5416 ret
= -TARGET_EINVAL
;
5417 unlock_user(argptr
, guest_data
, 0);
5420 unlock_user(argptr
, guest_data
, 0);
5422 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5423 if (!is_error(ret
)) {
5424 guest_data
= arg
+ host_dm
->data_start
;
5425 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5426 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
5427 switch (ie
->host_cmd
) {
5432 case DM_DEV_SUSPEND
:
5435 case DM_TABLE_CLEAR
:
5437 case DM_DEV_SET_GEOMETRY
:
5438 /* no return data */
5440 case DM_LIST_DEVICES
:
5442 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
5443 uint32_t remaining_data
= guest_data_size
;
5444 void *cur_data
= argptr
;
5445 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
5446 int nl_size
= 12; /* can't use thunk_size due to alignment */
5449 uint32_t next
= nl
->next
;
5451 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
5453 if (remaining_data
< nl
->next
) {
5454 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5457 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
5458 strcpy(cur_data
+ nl_size
, nl
->name
);
5459 cur_data
+= nl
->next
;
5460 remaining_data
-= nl
->next
;
5464 nl
= (void*)nl
+ next
;
5469 case DM_TABLE_STATUS
:
5471 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
5472 void *cur_data
= argptr
;
5473 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5474 int spec_size
= thunk_type_size(arg_type
, 0);
5477 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5478 uint32_t next
= spec
->next
;
5479 int slen
= strlen((char*)&spec
[1]) + 1;
5480 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
5481 if (guest_data_size
< spec
->next
) {
5482 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5485 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
5486 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
5487 cur_data
= argptr
+ spec
->next
;
5488 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
5494 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
5495 int count
= *(uint32_t*)hdata
;
5496 uint64_t *hdev
= hdata
+ 8;
5497 uint64_t *gdev
= argptr
+ 8;
5500 *(uint32_t*)argptr
= tswap32(count
);
5501 for (i
= 0; i
< count
; i
++) {
5502 *gdev
= tswap64(*hdev
);
5508 case DM_LIST_VERSIONS
:
5510 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
5511 uint32_t remaining_data
= guest_data_size
;
5512 void *cur_data
= argptr
;
5513 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
5514 int vers_size
= thunk_type_size(arg_type
, 0);
5517 uint32_t next
= vers
->next
;
5519 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
5521 if (remaining_data
< vers
->next
) {
5522 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5525 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
5526 strcpy(cur_data
+ vers_size
, vers
->name
);
5527 cur_data
+= vers
->next
;
5528 remaining_data
-= vers
->next
;
5532 vers
= (void*)vers
+ next
;
5537 unlock_user(argptr
, guest_data
, 0);
5538 ret
= -TARGET_EINVAL
;
5541 unlock_user(argptr
, guest_data
, guest_data_size
);
5543 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5545 ret
= -TARGET_EFAULT
;
5548 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5549 unlock_user(argptr
, arg
, target_size
);
5556 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5557 int cmd
, abi_long arg
)
5561 const argtype
*arg_type
= ie
->arg_type
;
5562 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
5565 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
5566 struct blkpg_partition host_part
;
5568 /* Read and convert blkpg */
5570 target_size
= thunk_type_size(arg_type
, 0);
5571 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5573 ret
= -TARGET_EFAULT
;
5576 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5577 unlock_user(argptr
, arg
, 0);
5579 switch (host_blkpg
->op
) {
5580 case BLKPG_ADD_PARTITION
:
5581 case BLKPG_DEL_PARTITION
:
5582 /* payload is struct blkpg_partition */
5585 /* Unknown opcode */
5586 ret
= -TARGET_EINVAL
;
5590 /* Read and convert blkpg->data */
5591 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
5592 target_size
= thunk_type_size(part_arg_type
, 0);
5593 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5595 ret
= -TARGET_EFAULT
;
5598 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
5599 unlock_user(argptr
, arg
, 0);
5601 /* Swizzle the data pointer to our local copy and call! */
5602 host_blkpg
->data
= &host_part
;
5603 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
5609 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5610 int fd
, int cmd
, abi_long arg
)
5612 const argtype
*arg_type
= ie
->arg_type
;
5613 const StructEntry
*se
;
5614 const argtype
*field_types
;
5615 const int *dst_offsets
, *src_offsets
;
5618 abi_ulong
*target_rt_dev_ptr
;
5619 unsigned long *host_rt_dev_ptr
;
5623 assert(ie
->access
== IOC_W
);
5624 assert(*arg_type
== TYPE_PTR
);
5626 assert(*arg_type
== TYPE_STRUCT
);
5627 target_size
= thunk_type_size(arg_type
, 0);
5628 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5630 return -TARGET_EFAULT
;
5633 assert(*arg_type
== (int)STRUCT_rtentry
);
5634 se
= struct_entries
+ *arg_type
++;
5635 assert(se
->convert
[0] == NULL
);
5636 /* convert struct here to be able to catch rt_dev string */
5637 field_types
= se
->field_types
;
5638 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
5639 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
5640 for (i
= 0; i
< se
->nb_fields
; i
++) {
5641 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
5642 assert(*field_types
== TYPE_PTRVOID
);
5643 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
5644 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
5645 if (*target_rt_dev_ptr
!= 0) {
5646 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
5647 tswapal(*target_rt_dev_ptr
));
5648 if (!*host_rt_dev_ptr
) {
5649 unlock_user(argptr
, arg
, 0);
5650 return -TARGET_EFAULT
;
5653 *host_rt_dev_ptr
= 0;
5658 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
5659 argptr
+ src_offsets
[i
],
5660 field_types
, THUNK_HOST
);
5662 unlock_user(argptr
, arg
, 0);
5664 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5665 if (*host_rt_dev_ptr
!= 0) {
5666 unlock_user((void *)*host_rt_dev_ptr
,
5667 *target_rt_dev_ptr
, 0);
5672 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5673 int fd
, int cmd
, abi_long arg
)
5675 int sig
= target_to_host_signal(arg
);
5676 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
5680 static abi_long
do_ioctl_tiocgptpeer(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5681 int fd
, int cmd
, abi_long arg
)
5683 int flags
= target_to_host_bitmask(arg
, fcntl_flags_tbl
);
5684 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, flags
));
5688 static IOCTLEntry ioctl_entries
[] = {
5689 #define IOCTL(cmd, access, ...) \
5690 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5691 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5692 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5693 #define IOCTL_IGNORE(cmd) \
5694 { TARGET_ ## cmd, 0, #cmd },
5699 /* ??? Implement proper locking for ioctls. */
5700 /* do_ioctl() Must return target values and target errnos. */
5701 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
5703 const IOCTLEntry
*ie
;
5704 const argtype
*arg_type
;
5706 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
5712 if (ie
->target_cmd
== 0) {
5713 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
5714 return -TARGET_ENOSYS
;
5716 if (ie
->target_cmd
== cmd
)
5720 arg_type
= ie
->arg_type
;
5722 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
5725 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
5726 } else if (!ie
->host_cmd
) {
5727 /* Some architectures define BSD ioctls in their headers
5728 that are not implemented in Linux. */
5729 return -TARGET_ENOSYS
;
5732 switch(arg_type
[0]) {
5735 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
5739 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
5743 target_size
= thunk_type_size(arg_type
, 0);
5744 switch(ie
->access
) {
5746 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5747 if (!is_error(ret
)) {
5748 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5750 return -TARGET_EFAULT
;
5751 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5752 unlock_user(argptr
, arg
, target_size
);
5756 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5758 return -TARGET_EFAULT
;
5759 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5760 unlock_user(argptr
, arg
, 0);
5761 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5765 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5767 return -TARGET_EFAULT
;
5768 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5769 unlock_user(argptr
, arg
, 0);
5770 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5771 if (!is_error(ret
)) {
5772 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5774 return -TARGET_EFAULT
;
5775 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5776 unlock_user(argptr
, arg
, target_size
);
5782 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5783 (long)cmd
, arg_type
[0]);
5784 ret
= -TARGET_ENOSYS
;
5790 static const bitmask_transtbl iflag_tbl
[] = {
5791 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
5792 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
5793 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
5794 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
5795 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
5796 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
5797 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
5798 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
5799 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
5800 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
5801 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
5802 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
5803 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
5804 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
5808 static const bitmask_transtbl oflag_tbl
[] = {
5809 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
5810 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
5811 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
5812 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
5813 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
5814 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
5815 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
5816 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
5817 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
5818 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
5819 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
5820 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
5821 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
5822 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
5823 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
5824 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
5825 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
5826 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
5827 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
5828 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
5829 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
5830 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
5831 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
5832 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
5836 static const bitmask_transtbl cflag_tbl
[] = {
5837 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
5838 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
5839 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
5840 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
5841 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
5842 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
5843 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
5844 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
5845 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
5846 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
5847 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
5848 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
5849 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
5850 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
5851 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
5852 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
5853 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
5854 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
5855 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
5856 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
5857 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
5858 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
5859 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
5860 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
5861 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
5862 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
5863 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
5864 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
5865 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
5866 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
5867 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
5871 static const bitmask_transtbl lflag_tbl
[] = {
5872 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
5873 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
5874 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
5875 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
5876 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
5877 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
5878 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
5879 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
5880 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
5881 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
5882 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
5883 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
5884 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
5885 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
5886 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5890 static void target_to_host_termios (void *dst
, const void *src
)
5892 struct host_termios
*host
= dst
;
5893 const struct target_termios
*target
= src
;
5896 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
5898 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
5900 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
5902 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
5903 host
->c_line
= target
->c_line
;
5905 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
5906 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
5907 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
5908 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
5909 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
5910 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
5911 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
5912 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
5913 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
5914 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
5915 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
5916 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
5917 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
5918 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
5919 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
5920 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
5921 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
5922 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
5925 static void host_to_target_termios (void *dst
, const void *src
)
5927 struct target_termios
*target
= dst
;
5928 const struct host_termios
*host
= src
;
5931 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
5933 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
5935 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
5937 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
5938 target
->c_line
= host
->c_line
;
5940 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
5941 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
5942 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
5943 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
5944 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
5945 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
5946 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
5947 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
5948 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
5949 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
5950 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
5951 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
5952 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
5953 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
5954 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
5955 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
5956 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
5957 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
5960 static const StructEntry struct_termios_def
= {
5961 .convert
= { host_to_target_termios
, target_to_host_termios
},
5962 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
5963 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
5966 static bitmask_transtbl mmap_flags_tbl
[] = {
5967 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
5968 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
5969 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
5970 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
,
5971 MAP_ANONYMOUS
, MAP_ANONYMOUS
},
5972 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
,
5973 MAP_GROWSDOWN
, MAP_GROWSDOWN
},
5974 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
,
5975 MAP_DENYWRITE
, MAP_DENYWRITE
},
5976 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
,
5977 MAP_EXECUTABLE
, MAP_EXECUTABLE
},
5978 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
5979 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
,
5980 MAP_NORESERVE
, MAP_NORESERVE
},
5981 { TARGET_MAP_HUGETLB
, TARGET_MAP_HUGETLB
, MAP_HUGETLB
, MAP_HUGETLB
},
5982 /* MAP_STACK had been ignored by the kernel for quite some time.
5983 Recognize it for the target insofar as we do not want to pass
5984 it through to the host. */
5985 { TARGET_MAP_STACK
, TARGET_MAP_STACK
, 0, 0 },
5989 #if defined(TARGET_I386)
5991 /* NOTE: there is really one LDT for all the threads */
5992 static uint8_t *ldt_table
;
5994 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
6001 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
6002 if (size
> bytecount
)
6004 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
6006 return -TARGET_EFAULT
;
6007 /* ??? Should this by byteswapped? */
6008 memcpy(p
, ldt_table
, size
);
6009 unlock_user(p
, ptr
, size
);
6013 /* XXX: add locking support */
6014 static abi_long
write_ldt(CPUX86State
*env
,
6015 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
6017 struct target_modify_ldt_ldt_s ldt_info
;
6018 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6019 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
6020 int seg_not_present
, useable
, lm
;
6021 uint32_t *lp
, entry_1
, entry_2
;
6023 if (bytecount
!= sizeof(ldt_info
))
6024 return -TARGET_EINVAL
;
6025 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
6026 return -TARGET_EFAULT
;
6027 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
6028 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
6029 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
6030 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
6031 unlock_user_struct(target_ldt_info
, ptr
, 0);
6033 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
6034 return -TARGET_EINVAL
;
6035 seg_32bit
= ldt_info
.flags
& 1;
6036 contents
= (ldt_info
.flags
>> 1) & 3;
6037 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
6038 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
6039 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
6040 useable
= (ldt_info
.flags
>> 6) & 1;
6044 lm
= (ldt_info
.flags
>> 7) & 1;
6046 if (contents
== 3) {
6048 return -TARGET_EINVAL
;
6049 if (seg_not_present
== 0)
6050 return -TARGET_EINVAL
;
6052 /* allocate the LDT */
6054 env
->ldt
.base
= target_mmap(0,
6055 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
6056 PROT_READ
|PROT_WRITE
,
6057 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
6058 if (env
->ldt
.base
== -1)
6059 return -TARGET_ENOMEM
;
6060 memset(g2h(env
->ldt
.base
), 0,
6061 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
6062 env
->ldt
.limit
= 0xffff;
6063 ldt_table
= g2h(env
->ldt
.base
);
6066 /* NOTE: same code as Linux kernel */
6067 /* Allow LDTs to be cleared by the user. */
6068 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
6071 read_exec_only
== 1 &&
6073 limit_in_pages
== 0 &&
6074 seg_not_present
== 1 &&
6082 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
6083 (ldt_info
.limit
& 0x0ffff);
6084 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
6085 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
6086 (ldt_info
.limit
& 0xf0000) |
6087 ((read_exec_only
^ 1) << 9) |
6089 ((seg_not_present
^ 1) << 15) |
6091 (limit_in_pages
<< 23) |
6095 entry_2
|= (useable
<< 20);
6097 /* Install the new entry ... */
6099 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
6100 lp
[0] = tswap32(entry_1
);
6101 lp
[1] = tswap32(entry_2
);
6105 /* specific and weird i386 syscalls */
6106 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
6107 unsigned long bytecount
)
6113 ret
= read_ldt(ptr
, bytecount
);
6116 ret
= write_ldt(env
, ptr
, bytecount
, 1);
6119 ret
= write_ldt(env
, ptr
, bytecount
, 0);
6122 ret
= -TARGET_ENOSYS
;
6128 #if defined(TARGET_I386) && defined(TARGET_ABI32)
6129 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6131 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
6132 struct target_modify_ldt_ldt_s ldt_info
;
6133 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6134 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
6135 int seg_not_present
, useable
, lm
;
6136 uint32_t *lp
, entry_1
, entry_2
;
6139 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6140 if (!target_ldt_info
)
6141 return -TARGET_EFAULT
;
6142 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
6143 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
6144 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
6145 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
6146 if (ldt_info
.entry_number
== -1) {
6147 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
6148 if (gdt_table
[i
] == 0) {
6149 ldt_info
.entry_number
= i
;
6150 target_ldt_info
->entry_number
= tswap32(i
);
6155 unlock_user_struct(target_ldt_info
, ptr
, 1);
6157 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
6158 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
6159 return -TARGET_EINVAL
;
6160 seg_32bit
= ldt_info
.flags
& 1;
6161 contents
= (ldt_info
.flags
>> 1) & 3;
6162 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
6163 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
6164 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
6165 useable
= (ldt_info
.flags
>> 6) & 1;
6169 lm
= (ldt_info
.flags
>> 7) & 1;
6172 if (contents
== 3) {
6173 if (seg_not_present
== 0)
6174 return -TARGET_EINVAL
;
6177 /* NOTE: same code as Linux kernel */
6178 /* Allow LDTs to be cleared by the user. */
6179 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
6180 if ((contents
== 0 &&
6181 read_exec_only
== 1 &&
6183 limit_in_pages
== 0 &&
6184 seg_not_present
== 1 &&
6192 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
6193 (ldt_info
.limit
& 0x0ffff);
6194 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
6195 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
6196 (ldt_info
.limit
& 0xf0000) |
6197 ((read_exec_only
^ 1) << 9) |
6199 ((seg_not_present
^ 1) << 15) |
6201 (limit_in_pages
<< 23) |
6206 /* Install the new entry ... */
6208 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
6209 lp
[0] = tswap32(entry_1
);
6210 lp
[1] = tswap32(entry_2
);
6214 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6216 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6217 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
6218 uint32_t base_addr
, limit
, flags
;
6219 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
6220 int seg_not_present
, useable
, lm
;
6221 uint32_t *lp
, entry_1
, entry_2
;
6223 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6224 if (!target_ldt_info
)
6225 return -TARGET_EFAULT
;
6226 idx
= tswap32(target_ldt_info
->entry_number
);
6227 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
6228 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
6229 unlock_user_struct(target_ldt_info
, ptr
, 1);
6230 return -TARGET_EINVAL
;
6232 lp
= (uint32_t *)(gdt_table
+ idx
);
6233 entry_1
= tswap32(lp
[0]);
6234 entry_2
= tswap32(lp
[1]);
6236 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
6237 contents
= (entry_2
>> 10) & 3;
6238 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
6239 seg_32bit
= (entry_2
>> 22) & 1;
6240 limit_in_pages
= (entry_2
>> 23) & 1;
6241 useable
= (entry_2
>> 20) & 1;
6245 lm
= (entry_2
>> 21) & 1;
6247 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
6248 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
6249 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
6250 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
6251 base_addr
= (entry_1
>> 16) |
6252 (entry_2
& 0xff000000) |
6253 ((entry_2
& 0xff) << 16);
6254 target_ldt_info
->base_addr
= tswapal(base_addr
);
6255 target_ldt_info
->limit
= tswap32(limit
);
6256 target_ldt_info
->flags
= tswap32(flags
);
6257 unlock_user_struct(target_ldt_info
, ptr
, 1);
6260 #endif /* TARGET_I386 && TARGET_ABI32 */
6262 #ifndef TARGET_ABI32
6263 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
6270 case TARGET_ARCH_SET_GS
:
6271 case TARGET_ARCH_SET_FS
:
6272 if (code
== TARGET_ARCH_SET_GS
)
6276 cpu_x86_load_seg(env
, idx
, 0);
6277 env
->segs
[idx
].base
= addr
;
6279 case TARGET_ARCH_GET_GS
:
6280 case TARGET_ARCH_GET_FS
:
6281 if (code
== TARGET_ARCH_GET_GS
)
6285 val
= env
->segs
[idx
].base
;
6286 if (put_user(val
, addr
, abi_ulong
))
6287 ret
= -TARGET_EFAULT
;
6290 ret
= -TARGET_EINVAL
;
6297 #endif /* defined(TARGET_I386) */
6299 #define NEW_STACK_SIZE 0x40000
6302 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
6305 pthread_mutex_t mutex
;
6306 pthread_cond_t cond
;
6309 abi_ulong child_tidptr
;
6310 abi_ulong parent_tidptr
;
6314 static void *clone_func(void *arg
)
6316 new_thread_info
*info
= arg
;
6321 rcu_register_thread();
6322 tcg_register_thread();
6324 cpu
= ENV_GET_CPU(env
);
6326 ts
= (TaskState
*)cpu
->opaque
;
6327 info
->tid
= gettid();
6329 if (info
->child_tidptr
)
6330 put_user_u32(info
->tid
, info
->child_tidptr
);
6331 if (info
->parent_tidptr
)
6332 put_user_u32(info
->tid
, info
->parent_tidptr
);
6333 /* Enable signals. */
6334 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
6335 /* Signal to the parent that we're ready. */
6336 pthread_mutex_lock(&info
->mutex
);
6337 pthread_cond_broadcast(&info
->cond
);
6338 pthread_mutex_unlock(&info
->mutex
);
6339 /* Wait until the parent has finished initializing the tls state. */
6340 pthread_mutex_lock(&clone_lock
);
6341 pthread_mutex_unlock(&clone_lock
);
6347 /* do_fork() Must return host values and target errnos (unlike most
6348 do_*() functions). */
6349 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
6350 abi_ulong parent_tidptr
, target_ulong newtls
,
6351 abi_ulong child_tidptr
)
6353 CPUState
*cpu
= ENV_GET_CPU(env
);
6357 CPUArchState
*new_env
;
6360 flags
&= ~CLONE_IGNORED_FLAGS
;
6362 /* Emulate vfork() with fork() */
6363 if (flags
& CLONE_VFORK
)
6364 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
6366 if (flags
& CLONE_VM
) {
6367 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
6368 new_thread_info info
;
6369 pthread_attr_t attr
;
6371 if (((flags
& CLONE_THREAD_FLAGS
) != CLONE_THREAD_FLAGS
) ||
6372 (flags
& CLONE_INVALID_THREAD_FLAGS
)) {
6373 return -TARGET_EINVAL
;
6376 ts
= g_new0(TaskState
, 1);
6377 init_task_state(ts
);
6379 /* Grab a mutex so that thread setup appears atomic. */
6380 pthread_mutex_lock(&clone_lock
);
6382 /* we create a new CPU instance. */
6383 new_env
= cpu_copy(env
);
6384 /* Init regs that differ from the parent. */
6385 cpu_clone_regs(new_env
, newsp
);
6386 new_cpu
= ENV_GET_CPU(new_env
);
6387 new_cpu
->opaque
= ts
;
6388 ts
->bprm
= parent_ts
->bprm
;
6389 ts
->info
= parent_ts
->info
;
6390 ts
->signal_mask
= parent_ts
->signal_mask
;
6392 if (flags
& CLONE_CHILD_CLEARTID
) {
6393 ts
->child_tidptr
= child_tidptr
;
6396 if (flags
& CLONE_SETTLS
) {
6397 cpu_set_tls (new_env
, newtls
);
6400 memset(&info
, 0, sizeof(info
));
6401 pthread_mutex_init(&info
.mutex
, NULL
);
6402 pthread_mutex_lock(&info
.mutex
);
6403 pthread_cond_init(&info
.cond
, NULL
);
6405 if (flags
& CLONE_CHILD_SETTID
) {
6406 info
.child_tidptr
= child_tidptr
;
6408 if (flags
& CLONE_PARENT_SETTID
) {
6409 info
.parent_tidptr
= parent_tidptr
;
6412 ret
= pthread_attr_init(&attr
);
6413 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
6414 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
6415 /* It is not safe to deliver signals until the child has finished
6416 initializing, so temporarily block all signals. */
6417 sigfillset(&sigmask
);
6418 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
6420 /* If this is our first additional thread, we need to ensure we
6421 * generate code for parallel execution and flush old translations.
6423 if (!parallel_cpus
) {
6424 parallel_cpus
= true;
6428 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
6429 /* TODO: Free new CPU state if thread creation failed. */
6431 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
6432 pthread_attr_destroy(&attr
);
6434 /* Wait for the child to initialize. */
6435 pthread_cond_wait(&info
.cond
, &info
.mutex
);
6440 pthread_mutex_unlock(&info
.mutex
);
6441 pthread_cond_destroy(&info
.cond
);
6442 pthread_mutex_destroy(&info
.mutex
);
6443 pthread_mutex_unlock(&clone_lock
);
6445 /* if no CLONE_VM, we consider it is a fork */
6446 if (flags
& CLONE_INVALID_FORK_FLAGS
) {
6447 return -TARGET_EINVAL
;
6450 /* We can't support custom termination signals */
6451 if ((flags
& CSIGNAL
) != TARGET_SIGCHLD
) {
6452 return -TARGET_EINVAL
;
6455 if (block_signals()) {
6456 return -TARGET_ERESTARTSYS
;
6462 /* Child Process. */
6463 cpu_clone_regs(env
, newsp
);
6465 /* There is a race condition here. The parent process could
6466 theoretically read the TID in the child process before the child
6467 tid is set. This would require using either ptrace
6468 (not implemented) or having *_tidptr to point at a shared memory
6469 mapping. We can't repeat the spinlock hack used above because
6470 the child process gets its own copy of the lock. */
6471 if (flags
& CLONE_CHILD_SETTID
)
6472 put_user_u32(gettid(), child_tidptr
);
6473 if (flags
& CLONE_PARENT_SETTID
)
6474 put_user_u32(gettid(), parent_tidptr
);
6475 ts
= (TaskState
*)cpu
->opaque
;
6476 if (flags
& CLONE_SETTLS
)
6477 cpu_set_tls (env
, newtls
);
6478 if (flags
& CLONE_CHILD_CLEARTID
)
6479 ts
->child_tidptr
= child_tidptr
;
6487 /* warning : doesn't handle linux specific flags... */
6488 static int target_to_host_fcntl_cmd(int cmd
)
6491 case TARGET_F_DUPFD
:
6492 case TARGET_F_GETFD
:
6493 case TARGET_F_SETFD
:
6494 case TARGET_F_GETFL
:
6495 case TARGET_F_SETFL
:
6497 case TARGET_F_GETLK
:
6499 case TARGET_F_SETLK
:
6501 case TARGET_F_SETLKW
:
6503 case TARGET_F_GETOWN
:
6505 case TARGET_F_SETOWN
:
6507 case TARGET_F_GETSIG
:
6509 case TARGET_F_SETSIG
:
6511 #if TARGET_ABI_BITS == 32
6512 case TARGET_F_GETLK64
:
6514 case TARGET_F_SETLK64
:
6516 case TARGET_F_SETLKW64
:
6519 case TARGET_F_SETLEASE
:
6521 case TARGET_F_GETLEASE
:
6523 #ifdef F_DUPFD_CLOEXEC
6524 case TARGET_F_DUPFD_CLOEXEC
:
6525 return F_DUPFD_CLOEXEC
;
6527 case TARGET_F_NOTIFY
:
6530 case TARGET_F_GETOWN_EX
:
6534 case TARGET_F_SETOWN_EX
:
6538 case TARGET_F_SETPIPE_SZ
:
6539 return F_SETPIPE_SZ
;
6540 case TARGET_F_GETPIPE_SZ
:
6541 return F_GETPIPE_SZ
;
6544 return -TARGET_EINVAL
;
6546 return -TARGET_EINVAL
;
6549 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
6550 static const bitmask_transtbl flock_tbl
[] = {
6551 TRANSTBL_CONVERT(F_RDLCK
),
6552 TRANSTBL_CONVERT(F_WRLCK
),
6553 TRANSTBL_CONVERT(F_UNLCK
),
6554 TRANSTBL_CONVERT(F_EXLCK
),
6555 TRANSTBL_CONVERT(F_SHLCK
),
6559 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
6560 abi_ulong target_flock_addr
)
6562 struct target_flock
*target_fl
;
6565 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6566 return -TARGET_EFAULT
;
6569 __get_user(l_type
, &target_fl
->l_type
);
6570 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6571 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6572 __get_user(fl
->l_start
, &target_fl
->l_start
);
6573 __get_user(fl
->l_len
, &target_fl
->l_len
);
6574 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6575 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6579 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
6580 const struct flock64
*fl
)
6582 struct target_flock
*target_fl
;
6585 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6586 return -TARGET_EFAULT
;
6589 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6590 __put_user(l_type
, &target_fl
->l_type
);
6591 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6592 __put_user(fl
->l_start
, &target_fl
->l_start
);
6593 __put_user(fl
->l_len
, &target_fl
->l_len
);
6594 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6595 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6599 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
6600 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
6602 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6603 static inline abi_long
copy_from_user_eabi_flock64(struct flock64
*fl
,
6604 abi_ulong target_flock_addr
)
6606 struct target_eabi_flock64
*target_fl
;
6609 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6610 return -TARGET_EFAULT
;
6613 __get_user(l_type
, &target_fl
->l_type
);
6614 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6615 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6616 __get_user(fl
->l_start
, &target_fl
->l_start
);
6617 __get_user(fl
->l_len
, &target_fl
->l_len
);
6618 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6619 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6623 static inline abi_long
copy_to_user_eabi_flock64(abi_ulong target_flock_addr
,
6624 const struct flock64
*fl
)
6626 struct target_eabi_flock64
*target_fl
;
6629 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6630 return -TARGET_EFAULT
;
6633 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6634 __put_user(l_type
, &target_fl
->l_type
);
6635 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6636 __put_user(fl
->l_start
, &target_fl
->l_start
);
6637 __put_user(fl
->l_len
, &target_fl
->l_len
);
6638 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6639 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6644 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
6645 abi_ulong target_flock_addr
)
6647 struct target_flock64
*target_fl
;
6650 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6651 return -TARGET_EFAULT
;
6654 __get_user(l_type
, &target_fl
->l_type
);
6655 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6656 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6657 __get_user(fl
->l_start
, &target_fl
->l_start
);
6658 __get_user(fl
->l_len
, &target_fl
->l_len
);
6659 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6660 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6664 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
6665 const struct flock64
*fl
)
6667 struct target_flock64
*target_fl
;
6670 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6671 return -TARGET_EFAULT
;
6674 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6675 __put_user(l_type
, &target_fl
->l_type
);
6676 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6677 __put_user(fl
->l_start
, &target_fl
->l_start
);
6678 __put_user(fl
->l_len
, &target_fl
->l_len
);
6679 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6680 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6684 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
6686 struct flock64 fl64
;
6688 struct f_owner_ex fox
;
6689 struct target_f_owner_ex
*target_fox
;
6692 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
6694 if (host_cmd
== -TARGET_EINVAL
)
6698 case TARGET_F_GETLK
:
6699 ret
= copy_from_user_flock(&fl64
, arg
);
6703 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6705 ret
= copy_to_user_flock(arg
, &fl64
);
6709 case TARGET_F_SETLK
:
6710 case TARGET_F_SETLKW
:
6711 ret
= copy_from_user_flock(&fl64
, arg
);
6715 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6718 case TARGET_F_GETLK64
:
6719 ret
= copy_from_user_flock64(&fl64
, arg
);
6723 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6725 ret
= copy_to_user_flock64(arg
, &fl64
);
6728 case TARGET_F_SETLK64
:
6729 case TARGET_F_SETLKW64
:
6730 ret
= copy_from_user_flock64(&fl64
, arg
);
6734 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6737 case TARGET_F_GETFL
:
6738 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6740 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
6744 case TARGET_F_SETFL
:
6745 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
6746 target_to_host_bitmask(arg
,
6751 case TARGET_F_GETOWN_EX
:
6752 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6754 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
6755 return -TARGET_EFAULT
;
6756 target_fox
->type
= tswap32(fox
.type
);
6757 target_fox
->pid
= tswap32(fox
.pid
);
6758 unlock_user_struct(target_fox
, arg
, 1);
6764 case TARGET_F_SETOWN_EX
:
6765 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
6766 return -TARGET_EFAULT
;
6767 fox
.type
= tswap32(target_fox
->type
);
6768 fox
.pid
= tswap32(target_fox
->pid
);
6769 unlock_user_struct(target_fox
, arg
, 0);
6770 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6774 case TARGET_F_SETOWN
:
6775 case TARGET_F_GETOWN
:
6776 case TARGET_F_SETSIG
:
6777 case TARGET_F_GETSIG
:
6778 case TARGET_F_SETLEASE
:
6779 case TARGET_F_GETLEASE
:
6780 case TARGET_F_SETPIPE_SZ
:
6781 case TARGET_F_GETPIPE_SZ
:
6782 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6786 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
6794 static inline int high2lowuid(int uid
)
6802 static inline int high2lowgid(int gid
)
6810 static inline int low2highuid(int uid
)
6812 if ((int16_t)uid
== -1)
6818 static inline int low2highgid(int gid
)
6820 if ((int16_t)gid
== -1)
6825 static inline int tswapid(int id
)
6830 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6832 #else /* !USE_UID16 */
6833 static inline int high2lowuid(int uid
)
6837 static inline int high2lowgid(int gid
)
6841 static inline int low2highuid(int uid
)
6845 static inline int low2highgid(int gid
)
6849 static inline int tswapid(int id
)
6854 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6856 #endif /* USE_UID16 */
6858 /* We must do direct syscalls for setting UID/GID, because we want to
6859 * implement the Linux system call semantics of "change only for this thread",
6860 * not the libc/POSIX semantics of "change for all threads in process".
6861 * (See http://ewontfix.com/17/ for more details.)
6862 * We use the 32-bit version of the syscalls if present; if it is not
6863 * then either the host architecture supports 32-bit UIDs natively with
6864 * the standard syscall, or the 16-bit UID is the best we can do.
6866 #ifdef __NR_setuid32
6867 #define __NR_sys_setuid __NR_setuid32
6869 #define __NR_sys_setuid __NR_setuid
6871 #ifdef __NR_setgid32
6872 #define __NR_sys_setgid __NR_setgid32
6874 #define __NR_sys_setgid __NR_setgid
6876 #ifdef __NR_setresuid32
6877 #define __NR_sys_setresuid __NR_setresuid32
6879 #define __NR_sys_setresuid __NR_setresuid
6881 #ifdef __NR_setresgid32
6882 #define __NR_sys_setresgid __NR_setresgid32
6884 #define __NR_sys_setresgid __NR_setresgid
6887 _syscall1(int, sys_setuid
, uid_t
, uid
)
6888 _syscall1(int, sys_setgid
, gid_t
, gid
)
6889 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
6890 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
6892 void syscall_init(void)
6895 const argtype
*arg_type
;
6899 thunk_init(STRUCT_MAX
);
6901 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6902 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6903 #include "syscall_types.h"
6905 #undef STRUCT_SPECIAL
6907 /* Build target_to_host_errno_table[] table from
6908 * host_to_target_errno_table[]. */
6909 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
6910 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
6913 /* we patch the ioctl size if necessary. We rely on the fact that
6914 no ioctl has all the bits at '1' in the size field */
6916 while (ie
->target_cmd
!= 0) {
6917 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
6918 TARGET_IOC_SIZEMASK
) {
6919 arg_type
= ie
->arg_type
;
6920 if (arg_type
[0] != TYPE_PTR
) {
6921 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
6926 size
= thunk_type_size(arg_type
, 0);
6927 ie
->target_cmd
= (ie
->target_cmd
&
6928 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
6929 (size
<< TARGET_IOC_SIZESHIFT
);
6932 /* automatic consistency check if same arch */
6933 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6934 (defined(__x86_64__) && defined(TARGET_X86_64))
6935 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
6936 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6937 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#if TARGET_ABI_BITS == 32
/*
 * Reassemble a 64-bit file offset from the two 32-bit register halves
 * a 32-bit guest ABI passes.  The half carrying the high bits depends
 * on the guest's endianness.
 */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABIs pass the offset whole; the second word is unused. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
6960 #ifdef TARGET_NR_truncate64
6961 static inline abi_long
target_truncate64(void *cpu_env
, const char *arg1
,
6966 if (regpairs_aligned(cpu_env
, TARGET_NR_truncate64
)) {
6970 return get_errno(truncate64(arg1
, target_offset64(arg2
, arg3
)));
6974 #ifdef TARGET_NR_ftruncate64
6975 static inline abi_long
target_ftruncate64(void *cpu_env
, abi_long arg1
,
6980 if (regpairs_aligned(cpu_env
, TARGET_NR_ftruncate64
)) {
6984 return get_errno(ftruncate64(arg1
, target_offset64(arg2
, arg3
)));
6988 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
6989 abi_ulong target_addr
)
6991 struct target_timespec
*target_ts
;
6993 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
6994 return -TARGET_EFAULT
;
6995 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6996 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6997 unlock_user_struct(target_ts
, target_addr
, 0);
7001 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
7002 struct timespec
*host_ts
)
7004 struct target_timespec
*target_ts
;
7006 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
7007 return -TARGET_EFAULT
;
7008 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
7009 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
7010 unlock_user_struct(target_ts
, target_addr
, 1);
7014 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
7015 abi_ulong target_addr
)
7017 struct target_itimerspec
*target_itspec
;
7019 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
7020 return -TARGET_EFAULT
;
7023 host_itspec
->it_interval
.tv_sec
=
7024 tswapal(target_itspec
->it_interval
.tv_sec
);
7025 host_itspec
->it_interval
.tv_nsec
=
7026 tswapal(target_itspec
->it_interval
.tv_nsec
);
7027 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
7028 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
7030 unlock_user_struct(target_itspec
, target_addr
, 1);
7034 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
7035 struct itimerspec
*host_its
)
7037 struct target_itimerspec
*target_itspec
;
7039 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
7040 return -TARGET_EFAULT
;
7043 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
7044 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
7046 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
7047 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
7049 unlock_user_struct(target_itspec
, target_addr
, 0);
7053 static inline abi_long
target_to_host_timex(struct timex
*host_tx
,
7054 abi_long target_addr
)
7056 struct target_timex
*target_tx
;
7058 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
7059 return -TARGET_EFAULT
;
7062 __get_user(host_tx
->modes
, &target_tx
->modes
);
7063 __get_user(host_tx
->offset
, &target_tx
->offset
);
7064 __get_user(host_tx
->freq
, &target_tx
->freq
);
7065 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7066 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
7067 __get_user(host_tx
->status
, &target_tx
->status
);
7068 __get_user(host_tx
->constant
, &target_tx
->constant
);
7069 __get_user(host_tx
->precision
, &target_tx
->precision
);
7070 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7071 __get_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
7072 __get_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
7073 __get_user(host_tx
->tick
, &target_tx
->tick
);
7074 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7075 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
7076 __get_user(host_tx
->shift
, &target_tx
->shift
);
7077 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
7078 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7079 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7080 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7081 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7082 __get_user(host_tx
->tai
, &target_tx
->tai
);
7084 unlock_user_struct(target_tx
, target_addr
, 0);
7088 static inline abi_long
host_to_target_timex(abi_long target_addr
,
7089 struct timex
*host_tx
)
7091 struct target_timex
*target_tx
;
7093 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
7094 return -TARGET_EFAULT
;
7097 __put_user(host_tx
->modes
, &target_tx
->modes
);
7098 __put_user(host_tx
->offset
, &target_tx
->offset
);
7099 __put_user(host_tx
->freq
, &target_tx
->freq
);
7100 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7101 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
7102 __put_user(host_tx
->status
, &target_tx
->status
);
7103 __put_user(host_tx
->constant
, &target_tx
->constant
);
7104 __put_user(host_tx
->precision
, &target_tx
->precision
);
7105 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7106 __put_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
7107 __put_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
7108 __put_user(host_tx
->tick
, &target_tx
->tick
);
7109 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7110 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
7111 __put_user(host_tx
->shift
, &target_tx
->shift
);
7112 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
7113 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7114 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7115 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7116 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7117 __put_user(host_tx
->tai
, &target_tx
->tai
);
7119 unlock_user_struct(target_tx
, target_addr
, 1);
7124 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
7125 abi_ulong target_addr
)
7127 struct target_sigevent
*target_sevp
;
7129 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
7130 return -TARGET_EFAULT
;
7133 /* This union is awkward on 64 bit systems because it has a 32 bit
7134 * integer and a pointer in it; we follow the conversion approach
7135 * used for handling sigval types in signal.c so the guest should get
7136 * the correct value back even if we did a 64 bit byteswap and it's
7137 * using the 32 bit integer.
7139 host_sevp
->sigev_value
.sival_ptr
=
7140 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
7141 host_sevp
->sigev_signo
=
7142 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
7143 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
7144 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
7146 unlock_user_struct(target_sevp
, target_addr
, 1);
7150 #if defined(TARGET_NR_mlockall)
7151 static inline int target_to_host_mlockall_arg(int arg
)
7155 if (arg
& TARGET_MLOCKALL_MCL_CURRENT
) {
7156 result
|= MCL_CURRENT
;
7158 if (arg
& TARGET_MLOCKALL_MCL_FUTURE
) {
7159 result
|= MCL_FUTURE
;
7165 static inline abi_long
host_to_target_stat64(void *cpu_env
,
7166 abi_ulong target_addr
,
7167 struct stat
*host_st
)
7169 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7170 if (((CPUARMState
*)cpu_env
)->eabi
) {
7171 struct target_eabi_stat64
*target_st
;
7173 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
7174 return -TARGET_EFAULT
;
7175 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
7176 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
7177 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
7178 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7179 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
7181 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
7182 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
7183 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
7184 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
7185 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
7186 __put_user(host_st
->st_size
, &target_st
->st_size
);
7187 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
7188 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
7189 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
7190 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
7191 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
7192 unlock_user_struct(target_st
, target_addr
, 1);
7196 #if defined(TARGET_HAS_STRUCT_STAT64)
7197 struct target_stat64
*target_st
;
7199 struct target_stat
*target_st
;
7202 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
7203 return -TARGET_EFAULT
;
7204 memset(target_st
, 0, sizeof(*target_st
));
7205 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
7206 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
7207 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7208 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
7210 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
7211 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
7212 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
7213 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
7214 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
7215 /* XXX: better use of kernel struct */
7216 __put_user(host_st
->st_size
, &target_st
->st_size
);
7217 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
7218 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
7219 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
7220 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
7221 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
7222 unlock_user_struct(target_st
, target_addr
, 1);
7228 /* ??? Using host futex calls even when target atomic operations
7229 are not really atomic probably breaks things. However implementing
7230 futexes locally would make futexes shared between multiple processes
7231 tricky. However they're probably useless because guest atomic
7232 operations won't work either. */
7233 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
7234 target_ulong uaddr2
, int val3
)
7236 struct timespec ts
, *pts
;
7239 /* ??? We assume FUTEX_* constants are the same on both host
7241 #ifdef FUTEX_CMD_MASK
7242 base_op
= op
& FUTEX_CMD_MASK
;
7248 case FUTEX_WAIT_BITSET
:
7251 target_to_host_timespec(pts
, timeout
);
7255 return get_errno(safe_futex(g2h(uaddr
), op
, tswap32(val
),
7258 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
7260 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
7262 case FUTEX_CMP_REQUEUE
:
7264 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7265 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7266 But the prototype takes a `struct timespec *'; insert casts
7267 to satisfy the compiler. We do not need to tswap TIMEOUT
7268 since it's not compared to guest memory. */
7269 pts
= (struct timespec
*)(uintptr_t) timeout
;
7270 return get_errno(safe_futex(g2h(uaddr
), op
, val
, pts
,
7272 (base_op
== FUTEX_CMP_REQUEUE
7276 return -TARGET_ENOSYS
;
7279 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7280 static abi_long
do_name_to_handle_at(abi_long dirfd
, abi_long pathname
,
7281 abi_long handle
, abi_long mount_id
,
7284 struct file_handle
*target_fh
;
7285 struct file_handle
*fh
;
7289 unsigned int size
, total_size
;
7291 if (get_user_s32(size
, handle
)) {
7292 return -TARGET_EFAULT
;
7295 name
= lock_user_string(pathname
);
7297 return -TARGET_EFAULT
;
7300 total_size
= sizeof(struct file_handle
) + size
;
7301 target_fh
= lock_user(VERIFY_WRITE
, handle
, total_size
, 0);
7303 unlock_user(name
, pathname
, 0);
7304 return -TARGET_EFAULT
;
7307 fh
= g_malloc0(total_size
);
7308 fh
->handle_bytes
= size
;
7310 ret
= get_errno(name_to_handle_at(dirfd
, path(name
), fh
, &mid
, flags
));
7311 unlock_user(name
, pathname
, 0);
7313 /* man name_to_handle_at(2):
7314 * Other than the use of the handle_bytes field, the caller should treat
7315 * the file_handle structure as an opaque data type
7318 memcpy(target_fh
, fh
, total_size
);
7319 target_fh
->handle_bytes
= tswap32(fh
->handle_bytes
);
7320 target_fh
->handle_type
= tswap32(fh
->handle_type
);
7322 unlock_user(target_fh
, handle
, total_size
);
7324 if (put_user_s32(mid
, mount_id
)) {
7325 return -TARGET_EFAULT
;
7333 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7334 static abi_long
do_open_by_handle_at(abi_long mount_fd
, abi_long handle
,
7337 struct file_handle
*target_fh
;
7338 struct file_handle
*fh
;
7339 unsigned int size
, total_size
;
7342 if (get_user_s32(size
, handle
)) {
7343 return -TARGET_EFAULT
;
7346 total_size
= sizeof(struct file_handle
) + size
;
7347 target_fh
= lock_user(VERIFY_READ
, handle
, total_size
, 1);
7349 return -TARGET_EFAULT
;
7352 fh
= g_memdup(target_fh
, total_size
);
7353 fh
->handle_bytes
= size
;
7354 fh
->handle_type
= tswap32(target_fh
->handle_type
);
7356 ret
= get_errno(open_by_handle_at(mount_fd
, fh
,
7357 target_to_host_bitmask(flags
, fcntl_flags_tbl
)));
7361 unlock_user(target_fh
, handle
, total_size
);
7367 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7369 /* signalfd siginfo conversion */
/*
 * Convert a host signalfd_siginfo to guest byte order in place.
 * TINFO may alias INFO (the signalfd fd translator passes the same
 * buffer for both).  The signal number is also remapped from host to
 * target numbering.
 */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    if (tinfo->ssi_signo == SIGBUS &&
        (tinfo->ssi_code == BUS_MCEERR_AR ||
         tinfo->ssi_code == BUS_MCEERR_AO)) {
        /* ssi_addr_lsb lives immediately after ssi_addr in the kernel's
         * layout; access it through pointer arithmetic.  */
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    tinfo->ssi_errno = tswap32(tinfo->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
7409 static abi_long
host_to_target_data_signalfd(void *buf
, size_t len
)
7413 for (i
= 0; i
< len
; i
+= sizeof(struct signalfd_siginfo
)) {
7414 host_to_target_signalfd_siginfo(buf
+ i
, buf
+ i
);
7420 static TargetFdTrans target_signalfd_trans
= {
7421 .host_to_target_data
= host_to_target_data_signalfd
,
7424 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
7427 target_sigset_t
*target_mask
;
7431 if (flags
& ~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
)) {
7432 return -TARGET_EINVAL
;
7434 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
7435 return -TARGET_EFAULT
;
7438 target_to_host_sigset(&host_mask
, target_mask
);
7440 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
7442 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
7444 fd_trans_register(ret
, &target_signalfd_trans
);
7447 unlock_user_struct(target_mask
, mask
, 0);
7453 /* Map host to target signal numbers for the wait family of syscalls.
7454 Assume all other status bits are the same. */
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Low 7 bits carry the terminating signal; remap just those.  */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }

    if (WIFSTOPPED(status)) {
        /* Stop signal sits in bits 8..15.  */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }

    return status;
}
7467 static int open_self_cmdline(void *cpu_env
, int fd
)
7469 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7470 struct linux_binprm
*bprm
= ((TaskState
*)cpu
->opaque
)->bprm
;
7473 for (i
= 0; i
< bprm
->argc
; i
++) {
7474 size_t len
= strlen(bprm
->argv
[i
]) + 1;
7476 if (write(fd
, bprm
->argv
[i
], len
) != len
) {
7484 static int open_self_maps(void *cpu_env
, int fd
)
7486 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7487 TaskState
*ts
= cpu
->opaque
;
7493 fp
= fopen("/proc/self/maps", "r");
7498 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7499 int fields
, dev_maj
, dev_min
, inode
;
7500 uint64_t min
, max
, offset
;
7501 char flag_r
, flag_w
, flag_x
, flag_p
;
7502 char path
[512] = "";
7503 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
7504 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
7505 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
7507 if ((fields
< 10) || (fields
> 11)) {
7510 if (h2g_valid(min
)) {
7511 int flags
= page_get_flags(h2g(min
));
7512 max
= h2g_valid(max
- 1) ? max
: (uintptr_t)g2h(GUEST_ADDR_MAX
) + 1;
7513 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
7516 if (h2g(min
) == ts
->info
->stack_limit
) {
7517 pstrcpy(path
, sizeof(path
), " [stack]");
7519 dprintf(fd
, TARGET_ABI_FMT_lx
"-" TARGET_ABI_FMT_lx
7520 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
7521 h2g(min
), h2g(max
- 1) + 1, flag_r
, flag_w
,
7522 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
7523 path
[0] ? " " : "", path
);
7533 static int open_self_stat(void *cpu_env
, int fd
)
7535 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7536 TaskState
*ts
= cpu
->opaque
;
7537 abi_ulong start_stack
= ts
->info
->start_stack
;
7540 for (i
= 0; i
< 44; i
++) {
7548 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
7549 } else if (i
== 1) {
7551 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
7552 } else if (i
== 27) {
7555 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
7557 /* for the rest, there is MasterCard */
7558 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
7562 if (write(fd
, buf
, len
) != len
) {
7570 static int open_self_auxv(void *cpu_env
, int fd
)
7572 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7573 TaskState
*ts
= cpu
->opaque
;
7574 abi_ulong auxv
= ts
->info
->saved_auxv
;
7575 abi_ulong len
= ts
->info
->auxv_len
;
7579 * Auxiliary vector is stored in target process stack.
7580 * read in whole auxv vector and copy it to file
7582 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
7586 r
= write(fd
, ptr
, len
);
7593 lseek(fd
, 0, SEEK_SET
);
7594 unlock_user(ptr
, auxv
, len
);
/*
 * Return 1 if FILENAME names this process's own /proc entry ENTRY,
 * i.e. "/proc/self/ENTRY" or "/proc/<our pid>/ENTRY"; 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    static const char proc_prefix[] = "/proc/";
    static const char self_prefix[] = "self/";

    if (strncmp(filename, proc_prefix, strlen(proc_prefix)) != 0) {
        return 0;
    }
    filename += strlen(proc_prefix);

    if (strncmp(filename, self_prefix, strlen(self_prefix)) == 0) {
        filename += strlen(self_prefix);
    } else if (*filename >= '1' && *filename <= '9') {
        char myself[80];

        /* Numeric form only matches our own pid, slash included.  */
        snprintf(myself, sizeof(myself), "%d/", getpid());
        if (strncmp(filename, myself, strlen(myself)) != 0) {
            return 0;
        }
        filename += strlen(myself);
    } else {
        return 0;
    }

    return strcmp(filename, entry) == 0;
}
7624 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-path comparator used by the fake-/proc table below.  */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
7630 static int open_net_route(void *cpu_env
, int fd
)
7637 fp
= fopen("/proc/net/route", "r");
7644 read
= getline(&line
, &len
, fp
);
7645 dprintf(fd
, "%s", line
);
7649 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7651 uint32_t dest
, gw
, mask
;
7652 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
7653 sscanf(line
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7654 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
7655 &mask
, &mtu
, &window
, &irtt
);
7656 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7657 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
7658 metric
, tswap32(mask
), mtu
, window
, irtt
);
7668 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
7671 const char *filename
;
7672 int (*fill
)(void *cpu_env
, int fd
);
7673 int (*cmp
)(const char *s1
, const char *s2
);
7675 const struct fake_open
*fake_open
;
7676 static const struct fake_open fakes
[] = {
7677 { "maps", open_self_maps
, is_proc_myself
},
7678 { "stat", open_self_stat
, is_proc_myself
},
7679 { "auxv", open_self_auxv
, is_proc_myself
},
7680 { "cmdline", open_self_cmdline
, is_proc_myself
},
7681 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7682 { "/proc/net/route", open_net_route
, is_proc
},
7684 { NULL
, NULL
, NULL
}
7687 if (is_proc_myself(pathname
, "exe")) {
7688 int execfd
= qemu_getauxval(AT_EXECFD
);
7689 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
7692 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
7693 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
7698 if (fake_open
->filename
) {
7700 char filename
[PATH_MAX
];
7703 /* create temporary file to map stat to */
7704 tmpdir
= getenv("TMPDIR");
7707 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
7708 fd
= mkstemp(filename
);
7714 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
7720 lseek(fd
, 0, SEEK_SET
);
7725 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
7728 #define TIMER_MAGIC 0x0caf0000
7729 #define TIMER_MAGIC_MASK 0xffff0000
7731 /* Convert QEMU provided timer ID back to internal 16bit index format */
7732 static target_timer_t
get_timer_id(abi_long arg
)
7734 target_timer_t timerid
= arg
;
7736 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
7737 return -TARGET_EINVAL
;
7742 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
7743 return -TARGET_EINVAL
;
7749 static abi_long
swap_data_eventfd(void *buf
, size_t len
)
7751 uint64_t *counter
= buf
;
7754 if (len
< sizeof(uint64_t)) {
7758 for (i
= 0; i
< len
; i
+= sizeof(uint64_t)) {
7759 *counter
= tswap64(*counter
);
7766 static TargetFdTrans target_eventfd_trans
= {
7767 .host_to_target_data
= swap_data_eventfd
,
7768 .target_to_host_data
= swap_data_eventfd
,
7771 #if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
7772 (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
7773 defined(__NR_inotify_init1))
7774 static abi_long
host_to_target_data_inotify(void *buf
, size_t len
)
7776 struct inotify_event
*ev
;
7780 for (i
= 0; i
< len
; i
+= sizeof(struct inotify_event
) + name_len
) {
7781 ev
= (struct inotify_event
*)((char *)buf
+ i
);
7784 ev
->wd
= tswap32(ev
->wd
);
7785 ev
->mask
= tswap32(ev
->mask
);
7786 ev
->cookie
= tswap32(ev
->cookie
);
7787 ev
->len
= tswap32(name_len
);
7793 static TargetFdTrans target_inotify_trans
= {
7794 .host_to_target_data
= host_to_target_data_inotify
,
7798 static int target_to_host_cpu_mask(unsigned long *host_mask
,
7800 abi_ulong target_addr
,
7803 unsigned target_bits
= sizeof(abi_ulong
) * 8;
7804 unsigned host_bits
= sizeof(*host_mask
) * 8;
7805 abi_ulong
*target_mask
;
7808 assert(host_size
>= target_size
);
7810 target_mask
= lock_user(VERIFY_READ
, target_addr
, target_size
, 1);
7812 return -TARGET_EFAULT
;
7814 memset(host_mask
, 0, host_size
);
7816 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
7817 unsigned bit
= i
* target_bits
;
7820 __get_user(val
, &target_mask
[i
]);
7821 for (j
= 0; j
< target_bits
; j
++, bit
++) {
7822 if (val
& (1UL << j
)) {
7823 host_mask
[bit
/ host_bits
] |= 1UL << (bit
% host_bits
);
7828 unlock_user(target_mask
, target_addr
, 0);
7832 static int host_to_target_cpu_mask(const unsigned long *host_mask
,
7834 abi_ulong target_addr
,
7837 unsigned target_bits
= sizeof(abi_ulong
) * 8;
7838 unsigned host_bits
= sizeof(*host_mask
) * 8;
7839 abi_ulong
*target_mask
;
7842 assert(host_size
>= target_size
);
7844 target_mask
= lock_user(VERIFY_WRITE
, target_addr
, target_size
, 0);
7846 return -TARGET_EFAULT
;
7849 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
7850 unsigned bit
= i
* target_bits
;
7853 for (j
= 0; j
< target_bits
; j
++, bit
++) {
7854 if (host_mask
[bit
/ host_bits
] & (1UL << (bit
% host_bits
))) {
7858 __put_user(val
, &target_mask
[i
]);
7861 unlock_user(target_mask
, target_addr
, target_size
);
7865 /* do_syscall() should always have a single exit point at the end so
7866 that actions, such as logging of syscall results, can be performed.
7867 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7868 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
7869 abi_long arg2
, abi_long arg3
, abi_long arg4
,
7870 abi_long arg5
, abi_long arg6
, abi_long arg7
,
7873 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
7879 #if defined(DEBUG_ERESTARTSYS)
7880 /* Debug-only code for exercising the syscall-restart code paths
7881 * in the per-architecture cpu main loops: restart every syscall
7882 * the guest makes once before letting it through.
7889 return -TARGET_ERESTARTSYS
;
7895 gemu_log("syscall %d", num
);
7897 trace_guest_user_syscall(cpu
, num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
7899 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7902 case TARGET_NR_exit
:
7903 /* In old applications this may be used to implement _exit(2).
7904 However in threaded applictions it is used for thread termination,
7905 and _exit_group is used for application termination.
7906 Do thread termination if we have more then one thread. */
7908 if (block_signals()) {
7909 ret
= -TARGET_ERESTARTSYS
;
7915 if (CPU_NEXT(first_cpu
)) {
7918 /* Remove the CPU from the list. */
7919 QTAILQ_REMOVE(&cpus
, cpu
, node
);
7924 if (ts
->child_tidptr
) {
7925 put_user_u32(0, ts
->child_tidptr
);
7926 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
7930 object_unref(OBJECT(cpu
));
7932 rcu_unregister_thread();
7940 gdb_exit(cpu_env
, arg1
);
7942 ret
= 0; /* avoid warning */
7944 case TARGET_NR_read
:
7948 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7950 ret
= get_errno(safe_read(arg1
, p
, arg3
));
7952 fd_trans_host_to_target_data(arg1
)) {
7953 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
7955 unlock_user(p
, arg2
, ret
);
7958 case TARGET_NR_write
:
7959 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
7961 if (fd_trans_target_to_host_data(arg1
)) {
7962 void *copy
= g_malloc(arg3
);
7963 memcpy(copy
, p
, arg3
);
7964 ret
= fd_trans_target_to_host_data(arg1
)(copy
, arg3
);
7966 ret
= get_errno(safe_write(arg1
, copy
, ret
));
7970 ret
= get_errno(safe_write(arg1
, p
, arg3
));
7972 unlock_user(p
, arg2
, 0);
7974 #ifdef TARGET_NR_open
7975 case TARGET_NR_open
:
7976 if (!(p
= lock_user_string(arg1
)))
7978 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
7979 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
7981 fd_trans_unregister(ret
);
7982 unlock_user(p
, arg1
, 0);
7985 case TARGET_NR_openat
:
7986 if (!(p
= lock_user_string(arg2
)))
7988 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
7989 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
7991 fd_trans_unregister(ret
);
7992 unlock_user(p
, arg2
, 0);
7994 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7995 case TARGET_NR_name_to_handle_at
:
7996 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
7999 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8000 case TARGET_NR_open_by_handle_at
:
8001 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
8002 fd_trans_unregister(ret
);
8005 case TARGET_NR_close
:
8006 fd_trans_unregister(arg1
);
8007 ret
= get_errno(close(arg1
));
8012 #ifdef TARGET_NR_fork
8013 case TARGET_NR_fork
:
8014 ret
= get_errno(do_fork(cpu_env
, TARGET_SIGCHLD
, 0, 0, 0, 0));
8017 #ifdef TARGET_NR_waitpid
8018 case TARGET_NR_waitpid
:
8021 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
8022 if (!is_error(ret
) && arg2
&& ret
8023 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
8028 #ifdef TARGET_NR_waitid
8029 case TARGET_NR_waitid
:
8033 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
8034 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
8035 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
8037 host_to_target_siginfo(p
, &info
);
8038 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
8043 #ifdef TARGET_NR_creat /* not on alpha */
8044 case TARGET_NR_creat
:
8045 if (!(p
= lock_user_string(arg1
)))
8047 ret
= get_errno(creat(p
, arg2
));
8048 fd_trans_unregister(ret
);
8049 unlock_user(p
, arg1
, 0);
8052 #ifdef TARGET_NR_link
8053 case TARGET_NR_link
:
8056 p
= lock_user_string(arg1
);
8057 p2
= lock_user_string(arg2
);
8059 ret
= -TARGET_EFAULT
;
8061 ret
= get_errno(link(p
, p2
));
8062 unlock_user(p2
, arg2
, 0);
8063 unlock_user(p
, arg1
, 0);
8067 #if defined(TARGET_NR_linkat)
8068 case TARGET_NR_linkat
:
8073 p
= lock_user_string(arg2
);
8074 p2
= lock_user_string(arg4
);
8076 ret
= -TARGET_EFAULT
;
8078 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
8079 unlock_user(p
, arg2
, 0);
8080 unlock_user(p2
, arg4
, 0);
8084 #ifdef TARGET_NR_unlink
8085 case TARGET_NR_unlink
:
8086 if (!(p
= lock_user_string(arg1
)))
8088 ret
= get_errno(unlink(p
));
8089 unlock_user(p
, arg1
, 0);
8092 #if defined(TARGET_NR_unlinkat)
8093 case TARGET_NR_unlinkat
:
8094 if (!(p
= lock_user_string(arg2
)))
8096 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
8097 unlock_user(p
, arg2
, 0);
8100 case TARGET_NR_execve
:
8102 char **argp
, **envp
;
8105 abi_ulong guest_argp
;
8106 abi_ulong guest_envp
;
8113 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
8114 if (get_user_ual(addr
, gp
))
8122 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
8123 if (get_user_ual(addr
, gp
))
8130 argp
= g_new0(char *, argc
+ 1);
8131 envp
= g_new0(char *, envc
+ 1);
8133 for (gp
= guest_argp
, q
= argp
; gp
;
8134 gp
+= sizeof(abi_ulong
), q
++) {
8135 if (get_user_ual(addr
, gp
))
8139 if (!(*q
= lock_user_string(addr
)))
8141 total_size
+= strlen(*q
) + 1;
8145 for (gp
= guest_envp
, q
= envp
; gp
;
8146 gp
+= sizeof(abi_ulong
), q
++) {
8147 if (get_user_ual(addr
, gp
))
8151 if (!(*q
= lock_user_string(addr
)))
8153 total_size
+= strlen(*q
) + 1;
8157 if (!(p
= lock_user_string(arg1
)))
8159 /* Although execve() is not an interruptible syscall it is
8160 * a special case where we must use the safe_syscall wrapper:
8161 * if we allow a signal to happen before we make the host
8162 * syscall then we will 'lose' it, because at the point of
8163 * execve the process leaves QEMU's control. So we use the
8164 * safe syscall wrapper to ensure that we either take the
8165 * signal as a guest signal, or else it does not happen
8166 * before the execve completes and makes it the other
8167 * program's problem.
8169 ret
= get_errno(safe_execve(p
, argp
, envp
));
8170 unlock_user(p
, arg1
, 0);
8175 ret
= -TARGET_EFAULT
;
8178 for (gp
= guest_argp
, q
= argp
; *q
;
8179 gp
+= sizeof(abi_ulong
), q
++) {
8180 if (get_user_ual(addr
, gp
)
8183 unlock_user(*q
, addr
, 0);
8185 for (gp
= guest_envp
, q
= envp
; *q
;
8186 gp
+= sizeof(abi_ulong
), q
++) {
8187 if (get_user_ual(addr
, gp
)
8190 unlock_user(*q
, addr
, 0);
8197 case TARGET_NR_chdir
:
8198 if (!(p
= lock_user_string(arg1
)))
8200 ret
= get_errno(chdir(p
));
8201 unlock_user(p
, arg1
, 0);
8203 #ifdef TARGET_NR_time
8204 case TARGET_NR_time
:
8207 ret
= get_errno(time(&host_time
));
8210 && put_user_sal(host_time
, arg1
))
8215 #ifdef TARGET_NR_mknod
8216 case TARGET_NR_mknod
:
8217 if (!(p
= lock_user_string(arg1
)))
8219 ret
= get_errno(mknod(p
, arg2
, arg3
));
8220 unlock_user(p
, arg1
, 0);
8223 #if defined(TARGET_NR_mknodat)
8224 case TARGET_NR_mknodat
:
8225 if (!(p
= lock_user_string(arg2
)))
8227 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
8228 unlock_user(p
, arg2
, 0);
8231 #ifdef TARGET_NR_chmod
8232 case TARGET_NR_chmod
:
8233 if (!(p
= lock_user_string(arg1
)))
8235 ret
= get_errno(chmod(p
, arg2
));
8236 unlock_user(p
, arg1
, 0);
8239 #ifdef TARGET_NR_break
8240 case TARGET_NR_break
:
8243 #ifdef TARGET_NR_oldstat
8244 case TARGET_NR_oldstat
:
8247 case TARGET_NR_lseek
:
8248 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
8250 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8251 /* Alpha specific */
8252 case TARGET_NR_getxpid
:
8253 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
8254 ret
= get_errno(getpid());
8257 #ifdef TARGET_NR_getpid
8258 case TARGET_NR_getpid
:
8259 ret
= get_errno(getpid());
8262 case TARGET_NR_mount
:
8264 /* need to look at the data field */
8268 p
= lock_user_string(arg1
);
8276 p2
= lock_user_string(arg2
);
8279 unlock_user(p
, arg1
, 0);
8285 p3
= lock_user_string(arg3
);
8288 unlock_user(p
, arg1
, 0);
8290 unlock_user(p2
, arg2
, 0);
8297 /* FIXME - arg5 should be locked, but it isn't clear how to
8298 * do that since it's not guaranteed to be a NULL-terminated
8302 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
8304 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
8306 ret
= get_errno(ret
);
8309 unlock_user(p
, arg1
, 0);
8311 unlock_user(p2
, arg2
, 0);
8313 unlock_user(p3
, arg3
, 0);
8317 #ifdef TARGET_NR_umount
8318 case TARGET_NR_umount
:
8319 if (!(p
= lock_user_string(arg1
)))
8321 ret
= get_errno(umount(p
));
8322 unlock_user(p
, arg1
, 0);
8325 #ifdef TARGET_NR_stime /* not on alpha */
8326 case TARGET_NR_stime
:
8329 if (get_user_sal(host_time
, arg1
))
8331 ret
= get_errno(stime(&host_time
));
8335 case TARGET_NR_ptrace
:
8337 #ifdef TARGET_NR_alarm /* not on alpha */
8338 case TARGET_NR_alarm
:
8342 #ifdef TARGET_NR_oldfstat
8343 case TARGET_NR_oldfstat
:
8346 #ifdef TARGET_NR_pause /* not on alpha */
8347 case TARGET_NR_pause
:
8348 if (!block_signals()) {
8349 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
8351 ret
= -TARGET_EINTR
;
8354 #ifdef TARGET_NR_utime
8355 case TARGET_NR_utime
:
8357 struct utimbuf tbuf
, *host_tbuf
;
8358 struct target_utimbuf
*target_tbuf
;
8360 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
8362 tbuf
.actime
= tswapal(target_tbuf
->actime
);
8363 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
8364 unlock_user_struct(target_tbuf
, arg2
, 0);
8369 if (!(p
= lock_user_string(arg1
)))
8371 ret
= get_errno(utime(p
, host_tbuf
));
8372 unlock_user(p
, arg1
, 0);
8376 #ifdef TARGET_NR_utimes
8377 case TARGET_NR_utimes
:
8379 struct timeval
*tvp
, tv
[2];
8381 if (copy_from_user_timeval(&tv
[0], arg2
)
8382 || copy_from_user_timeval(&tv
[1],
8383 arg2
+ sizeof(struct target_timeval
)))
8389 if (!(p
= lock_user_string(arg1
)))
8391 ret
= get_errno(utimes(p
, tvp
));
8392 unlock_user(p
, arg1
, 0);
8396 #if defined(TARGET_NR_futimesat)
8397 case TARGET_NR_futimesat
:
8399 struct timeval
*tvp
, tv
[2];
8401 if (copy_from_user_timeval(&tv
[0], arg3
)
8402 || copy_from_user_timeval(&tv
[1],
8403 arg3
+ sizeof(struct target_timeval
)))
8409 if (!(p
= lock_user_string(arg2
)))
8411 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
8412 unlock_user(p
, arg2
, 0);
8416 #ifdef TARGET_NR_stty
8417 case TARGET_NR_stty
:
8420 #ifdef TARGET_NR_gtty
8421 case TARGET_NR_gtty
:
8424 #ifdef TARGET_NR_access
8425 case TARGET_NR_access
:
8426 if (!(p
= lock_user_string(arg1
)))
8428 ret
= get_errno(access(path(p
), arg2
));
8429 unlock_user(p
, arg1
, 0);
8432 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8433 case TARGET_NR_faccessat
:
8434 if (!(p
= lock_user_string(arg2
)))
8436 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
8437 unlock_user(p
, arg2
, 0);
8440 #ifdef TARGET_NR_nice /* not on alpha */
8441 case TARGET_NR_nice
:
8442 ret
= get_errno(nice(arg1
));
8445 #ifdef TARGET_NR_ftime
8446 case TARGET_NR_ftime
:
8449 case TARGET_NR_sync
:
8453 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8454 case TARGET_NR_syncfs
:
8455 ret
= get_errno(syncfs(arg1
));
8458 case TARGET_NR_kill
:
8459 ret
= get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
8461 #ifdef TARGET_NR_rename
8462 case TARGET_NR_rename
:
8465 p
= lock_user_string(arg1
);
8466 p2
= lock_user_string(arg2
);
8468 ret
= -TARGET_EFAULT
;
8470 ret
= get_errno(rename(p
, p2
));
8471 unlock_user(p2
, arg2
, 0);
8472 unlock_user(p
, arg1
, 0);
8476 #if defined(TARGET_NR_renameat)
8477 case TARGET_NR_renameat
:
8480 p
= lock_user_string(arg2
);
8481 p2
= lock_user_string(arg4
);
8483 ret
= -TARGET_EFAULT
;
8485 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
8486 unlock_user(p2
, arg4
, 0);
8487 unlock_user(p
, arg2
, 0);
8491 #if defined(TARGET_NR_renameat2)
8492 case TARGET_NR_renameat2
:
8495 p
= lock_user_string(arg2
);
8496 p2
= lock_user_string(arg4
);
8498 ret
= -TARGET_EFAULT
;
8500 ret
= get_errno(sys_renameat2(arg1
, p
, arg3
, p2
, arg5
));
8502 unlock_user(p2
, arg4
, 0);
8503 unlock_user(p
, arg2
, 0);
8507 #ifdef TARGET_NR_mkdir
8508 case TARGET_NR_mkdir
:
8509 if (!(p
= lock_user_string(arg1
)))
8511 ret
= get_errno(mkdir(p
, arg2
));
8512 unlock_user(p
, arg1
, 0);
8515 #if defined(TARGET_NR_mkdirat)
8516 case TARGET_NR_mkdirat
:
8517 if (!(p
= lock_user_string(arg2
)))
8519 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
8520 unlock_user(p
, arg2
, 0);
8523 #ifdef TARGET_NR_rmdir
8524 case TARGET_NR_rmdir
:
8525 if (!(p
= lock_user_string(arg1
)))
8527 ret
= get_errno(rmdir(p
));
8528 unlock_user(p
, arg1
, 0);
8532 ret
= get_errno(dup(arg1
));
8534 fd_trans_dup(arg1
, ret
);
8537 #ifdef TARGET_NR_pipe
8538 case TARGET_NR_pipe
:
8539 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
8542 #ifdef TARGET_NR_pipe2
8543 case TARGET_NR_pipe2
:
8544 ret
= do_pipe(cpu_env
, arg1
,
8545 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
8548 case TARGET_NR_times
:
8550 struct target_tms
*tmsp
;
8552 ret
= get_errno(times(&tms
));
8554 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
8557 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
8558 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
8559 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
8560 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
8563 ret
= host_to_target_clock_t(ret
);
8566 #ifdef TARGET_NR_prof
8567 case TARGET_NR_prof
:
8570 #ifdef TARGET_NR_signal
8571 case TARGET_NR_signal
:
8574 case TARGET_NR_acct
:
8576 ret
= get_errno(acct(NULL
));
8578 if (!(p
= lock_user_string(arg1
)))
8580 ret
= get_errno(acct(path(p
)));
8581 unlock_user(p
, arg1
, 0);
8584 #ifdef TARGET_NR_umount2
8585 case TARGET_NR_umount2
:
8586 if (!(p
= lock_user_string(arg1
)))
8588 ret
= get_errno(umount2(p
, arg2
));
8589 unlock_user(p
, arg1
, 0);
8592 #ifdef TARGET_NR_lock
8593 case TARGET_NR_lock
:
8596 case TARGET_NR_ioctl
:
8597 ret
= do_ioctl(arg1
, arg2
, arg3
);
8599 #ifdef TARGET_NR_fcntl
8600 case TARGET_NR_fcntl
:
8601 ret
= do_fcntl(arg1
, arg2
, arg3
);
8604 #ifdef TARGET_NR_mpx
8608 case TARGET_NR_setpgid
:
8609 ret
= get_errno(setpgid(arg1
, arg2
));
8611 #ifdef TARGET_NR_ulimit
8612 case TARGET_NR_ulimit
:
8615 #ifdef TARGET_NR_oldolduname
8616 case TARGET_NR_oldolduname
:
8619 case TARGET_NR_umask
:
8620 ret
= get_errno(umask(arg1
));
8622 case TARGET_NR_chroot
:
8623 if (!(p
= lock_user_string(arg1
)))
8625 ret
= get_errno(chroot(p
));
8626 unlock_user(p
, arg1
, 0);
8628 #ifdef TARGET_NR_ustat
8629 case TARGET_NR_ustat
:
8632 #ifdef TARGET_NR_dup2
8633 case TARGET_NR_dup2
:
8634 ret
= get_errno(dup2(arg1
, arg2
));
8636 fd_trans_dup(arg1
, arg2
);
8640 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8641 case TARGET_NR_dup3
:
8645 if ((arg3
& ~TARGET_O_CLOEXEC
) != 0) {
8648 host_flags
= target_to_host_bitmask(arg3
, fcntl_flags_tbl
);
8649 ret
= get_errno(dup3(arg1
, arg2
, host_flags
));
8651 fd_trans_dup(arg1
, arg2
);
8656 #ifdef TARGET_NR_getppid /* not on alpha */
8657 case TARGET_NR_getppid
:
8658 ret
= get_errno(getppid());
8661 #ifdef TARGET_NR_getpgrp
8662 case TARGET_NR_getpgrp
:
8663 ret
= get_errno(getpgrp());
8666 case TARGET_NR_setsid
:
8667 ret
= get_errno(setsid());
8669 #ifdef TARGET_NR_sigaction
8670 case TARGET_NR_sigaction
:
8672 #if defined(TARGET_ALPHA)
8673 struct target_sigaction act
, oact
, *pact
= 0;
8674 struct target_old_sigaction
*old_act
;
8676 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8678 act
._sa_handler
= old_act
->_sa_handler
;
8679 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8680 act
.sa_flags
= old_act
->sa_flags
;
8681 act
.sa_restorer
= 0;
8682 unlock_user_struct(old_act
, arg2
, 0);
8685 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8686 if (!is_error(ret
) && arg3
) {
8687 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8689 old_act
->_sa_handler
= oact
._sa_handler
;
8690 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8691 old_act
->sa_flags
= oact
.sa_flags
;
8692 unlock_user_struct(old_act
, arg3
, 1);
8694 #elif defined(TARGET_MIPS)
8695 struct target_sigaction act
, oact
, *pact
, *old_act
;
8698 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8700 act
._sa_handler
= old_act
->_sa_handler
;
8701 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
8702 act
.sa_flags
= old_act
->sa_flags
;
8703 unlock_user_struct(old_act
, arg2
, 0);
8709 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8711 if (!is_error(ret
) && arg3
) {
8712 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8714 old_act
->_sa_handler
= oact
._sa_handler
;
8715 old_act
->sa_flags
= oact
.sa_flags
;
8716 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
8717 old_act
->sa_mask
.sig
[1] = 0;
8718 old_act
->sa_mask
.sig
[2] = 0;
8719 old_act
->sa_mask
.sig
[3] = 0;
8720 unlock_user_struct(old_act
, arg3
, 1);
8723 struct target_old_sigaction
*old_act
;
8724 struct target_sigaction act
, oact
, *pact
;
8726 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8728 act
._sa_handler
= old_act
->_sa_handler
;
8729 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8730 act
.sa_flags
= old_act
->sa_flags
;
8731 act
.sa_restorer
= old_act
->sa_restorer
;
8732 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8733 act
.ka_restorer
= 0;
8735 unlock_user_struct(old_act
, arg2
, 0);
8740 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8741 if (!is_error(ret
) && arg3
) {
8742 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8744 old_act
->_sa_handler
= oact
._sa_handler
;
8745 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8746 old_act
->sa_flags
= oact
.sa_flags
;
8747 old_act
->sa_restorer
= oact
.sa_restorer
;
8748 unlock_user_struct(old_act
, arg3
, 1);
8754 case TARGET_NR_rt_sigaction
:
8756 #if defined(TARGET_ALPHA)
8757 /* For Alpha and SPARC this is a 5 argument syscall, with
8758 * a 'restorer' parameter which must be copied into the
8759 * sa_restorer field of the sigaction struct.
8760 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8761 * and arg5 is the sigsetsize.
8762 * Alpha also has a separate rt_sigaction struct that it uses
8763 * here; SPARC uses the usual sigaction struct.
8765 struct target_rt_sigaction
*rt_act
;
8766 struct target_sigaction act
, oact
, *pact
= 0;
8768 if (arg4
!= sizeof(target_sigset_t
)) {
8769 ret
= -TARGET_EINVAL
;
8773 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
8775 act
._sa_handler
= rt_act
->_sa_handler
;
8776 act
.sa_mask
= rt_act
->sa_mask
;
8777 act
.sa_flags
= rt_act
->sa_flags
;
8778 act
.sa_restorer
= arg5
;
8779 unlock_user_struct(rt_act
, arg2
, 0);
8782 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8783 if (!is_error(ret
) && arg3
) {
8784 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
8786 rt_act
->_sa_handler
= oact
._sa_handler
;
8787 rt_act
->sa_mask
= oact
.sa_mask
;
8788 rt_act
->sa_flags
= oact
.sa_flags
;
8789 unlock_user_struct(rt_act
, arg3
, 1);
8793 target_ulong restorer
= arg4
;
8794 target_ulong sigsetsize
= arg5
;
8796 target_ulong sigsetsize
= arg4
;
8798 struct target_sigaction
*act
;
8799 struct target_sigaction
*oact
;
8801 if (sigsetsize
!= sizeof(target_sigset_t
)) {
8802 ret
= -TARGET_EINVAL
;
8806 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1)) {
8809 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8810 act
->ka_restorer
= restorer
;
8816 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
8817 ret
= -TARGET_EFAULT
;
8818 goto rt_sigaction_fail
;
8822 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
8825 unlock_user_struct(act
, arg2
, 0);
8827 unlock_user_struct(oact
, arg3
, 1);
8831 #ifdef TARGET_NR_sgetmask /* not on alpha */
8832 case TARGET_NR_sgetmask
:
8835 abi_ulong target_set
;
8836 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8838 host_to_target_old_sigset(&target_set
, &cur_set
);
8844 #ifdef TARGET_NR_ssetmask /* not on alpha */
8845 case TARGET_NR_ssetmask
:
8848 abi_ulong target_set
= arg1
;
8849 target_to_host_old_sigset(&set
, &target_set
);
8850 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
8852 host_to_target_old_sigset(&target_set
, &oset
);
8858 #ifdef TARGET_NR_sigprocmask
8859 case TARGET_NR_sigprocmask
:
8861 #if defined(TARGET_ALPHA)
8862 sigset_t set
, oldset
;
8867 case TARGET_SIG_BLOCK
:
8870 case TARGET_SIG_UNBLOCK
:
8873 case TARGET_SIG_SETMASK
:
8877 ret
= -TARGET_EINVAL
;
8881 target_to_host_old_sigset(&set
, &mask
);
8883 ret
= do_sigprocmask(how
, &set
, &oldset
);
8884 if (!is_error(ret
)) {
8885 host_to_target_old_sigset(&mask
, &oldset
);
8887 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
8890 sigset_t set
, oldset
, *set_ptr
;
8895 case TARGET_SIG_BLOCK
:
8898 case TARGET_SIG_UNBLOCK
:
8901 case TARGET_SIG_SETMASK
:
8905 ret
= -TARGET_EINVAL
;
8908 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8910 target_to_host_old_sigset(&set
, p
);
8911 unlock_user(p
, arg2
, 0);
8917 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8918 if (!is_error(ret
) && arg3
) {
8919 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8921 host_to_target_old_sigset(p
, &oldset
);
8922 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8928 case TARGET_NR_rt_sigprocmask
:
8931 sigset_t set
, oldset
, *set_ptr
;
8933 if (arg4
!= sizeof(target_sigset_t
)) {
8934 ret
= -TARGET_EINVAL
;
8940 case TARGET_SIG_BLOCK
:
8943 case TARGET_SIG_UNBLOCK
:
8946 case TARGET_SIG_SETMASK
:
8950 ret
= -TARGET_EINVAL
;
8953 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8955 target_to_host_sigset(&set
, p
);
8956 unlock_user(p
, arg2
, 0);
8962 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8963 if (!is_error(ret
) && arg3
) {
8964 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8966 host_to_target_sigset(p
, &oldset
);
8967 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8971 #ifdef TARGET_NR_sigpending
8972 case TARGET_NR_sigpending
:
8975 ret
= get_errno(sigpending(&set
));
8976 if (!is_error(ret
)) {
8977 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8979 host_to_target_old_sigset(p
, &set
);
8980 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8985 case TARGET_NR_rt_sigpending
:
8989 /* Yes, this check is >, not != like most. We follow the kernel's
8990 * logic and it does it like this because it implements
8991 * NR_sigpending through the same code path, and in that case
8992 * the old_sigset_t is smaller in size.
8994 if (arg2
> sizeof(target_sigset_t
)) {
8995 ret
= -TARGET_EINVAL
;
8999 ret
= get_errno(sigpending(&set
));
9000 if (!is_error(ret
)) {
9001 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
9003 host_to_target_sigset(p
, &set
);
9004 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
9008 #ifdef TARGET_NR_sigsuspend
9009 case TARGET_NR_sigsuspend
:
9011 TaskState
*ts
= cpu
->opaque
;
9012 #if defined(TARGET_ALPHA)
9013 abi_ulong mask
= arg1
;
9014 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
9016 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
9018 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
9019 unlock_user(p
, arg1
, 0);
9021 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
9023 if (ret
!= -TARGET_ERESTARTSYS
) {
9024 ts
->in_sigsuspend
= 1;
9029 case TARGET_NR_rt_sigsuspend
:
9031 TaskState
*ts
= cpu
->opaque
;
9033 if (arg2
!= sizeof(target_sigset_t
)) {
9034 ret
= -TARGET_EINVAL
;
9037 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
9039 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
9040 unlock_user(p
, arg1
, 0);
9041 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
9043 if (ret
!= -TARGET_ERESTARTSYS
) {
9044 ts
->in_sigsuspend
= 1;
9048 case TARGET_NR_rt_sigtimedwait
:
9051 struct timespec uts
, *puts
;
9054 if (arg4
!= sizeof(target_sigset_t
)) {
9055 ret
= -TARGET_EINVAL
;
9059 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
9061 target_to_host_sigset(&set
, p
);
9062 unlock_user(p
, arg1
, 0);
9065 target_to_host_timespec(puts
, arg3
);
9069 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
9071 if (!is_error(ret
)) {
9073 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
9078 host_to_target_siginfo(p
, &uinfo
);
9079 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
9081 ret
= host_to_target_signal(ret
);
9085 case TARGET_NR_rt_sigqueueinfo
:
9089 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
9093 target_to_host_siginfo(&uinfo
, p
);
9094 unlock_user(p
, arg3
, 0);
9095 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
9098 case TARGET_NR_rt_tgsigqueueinfo
:
9102 p
= lock_user(VERIFY_READ
, arg4
, sizeof(target_siginfo_t
), 1);
9106 target_to_host_siginfo(&uinfo
, p
);
9107 unlock_user(p
, arg4
, 0);
9108 ret
= get_errno(sys_rt_tgsigqueueinfo(arg1
, arg2
, arg3
, &uinfo
));
9111 #ifdef TARGET_NR_sigreturn
9112 case TARGET_NR_sigreturn
:
9113 if (block_signals()) {
9114 ret
= -TARGET_ERESTARTSYS
;
9116 ret
= do_sigreturn(cpu_env
);
9120 case TARGET_NR_rt_sigreturn
:
9121 if (block_signals()) {
9122 ret
= -TARGET_ERESTARTSYS
;
9124 ret
= do_rt_sigreturn(cpu_env
);
9127 case TARGET_NR_sethostname
:
9128 if (!(p
= lock_user_string(arg1
)))
9130 ret
= get_errno(sethostname(p
, arg2
));
9131 unlock_user(p
, arg1
, 0);
9133 case TARGET_NR_setrlimit
:
9135 int resource
= target_to_host_resource(arg1
);
9136 struct target_rlimit
*target_rlim
;
9138 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
9140 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
9141 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
9142 unlock_user_struct(target_rlim
, arg2
, 0);
9143 ret
= get_errno(setrlimit(resource
, &rlim
));
9146 case TARGET_NR_getrlimit
:
9148 int resource
= target_to_host_resource(arg1
);
9149 struct target_rlimit
*target_rlim
;
9152 ret
= get_errno(getrlimit(resource
, &rlim
));
9153 if (!is_error(ret
)) {
9154 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
9156 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
9157 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
9158 unlock_user_struct(target_rlim
, arg2
, 1);
9162 case TARGET_NR_getrusage
:
9164 struct rusage rusage
;
9165 ret
= get_errno(getrusage(arg1
, &rusage
));
9166 if (!is_error(ret
)) {
9167 ret
= host_to_target_rusage(arg2
, &rusage
);
9171 case TARGET_NR_gettimeofday
:
9174 ret
= get_errno(gettimeofday(&tv
, NULL
));
9175 if (!is_error(ret
)) {
9176 if (copy_to_user_timeval(arg1
, &tv
))
9181 case TARGET_NR_settimeofday
:
9183 struct timeval tv
, *ptv
= NULL
;
9184 struct timezone tz
, *ptz
= NULL
;
9187 if (copy_from_user_timeval(&tv
, arg1
)) {
9194 if (copy_from_user_timezone(&tz
, arg2
)) {
9200 ret
= get_errno(settimeofday(ptv
, ptz
));
9203 #if defined(TARGET_NR_select)
9204 case TARGET_NR_select
:
9205 #if defined(TARGET_WANT_NI_OLD_SELECT)
9206 /* some architectures used to have old_select here
9207 * but now ENOSYS it.
9209 ret
= -TARGET_ENOSYS
;
9210 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9211 ret
= do_old_select(arg1
);
9213 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9217 #ifdef TARGET_NR_pselect6
9218 case TARGET_NR_pselect6
:
9220 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
9221 fd_set rfds
, wfds
, efds
;
9222 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
9223 struct timespec ts
, *ts_ptr
;
9226 * The 6th arg is actually two args smashed together,
9227 * so we cannot use the C library.
9235 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
9236 target_sigset_t
*target_sigset
;
9244 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
9248 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
9252 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
9258 * This takes a timespec, and not a timeval, so we cannot
9259 * use the do_select() helper ...
9262 if (target_to_host_timespec(&ts
, ts_addr
)) {
9270 /* Extract the two packed args for the sigset */
9273 sig
.size
= SIGSET_T_SIZE
;
9275 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
9279 arg_sigset
= tswapal(arg7
[0]);
9280 arg_sigsize
= tswapal(arg7
[1]);
9281 unlock_user(arg7
, arg6
, 0);
9285 if (arg_sigsize
!= sizeof(*target_sigset
)) {
9286 /* Like the kernel, we enforce correct size sigsets */
9287 ret
= -TARGET_EINVAL
;
9290 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
9291 sizeof(*target_sigset
), 1);
9292 if (!target_sigset
) {
9295 target_to_host_sigset(&set
, target_sigset
);
9296 unlock_user(target_sigset
, arg_sigset
, 0);
9304 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
9307 if (!is_error(ret
)) {
9308 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
9310 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
9312 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
9315 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
9321 #ifdef TARGET_NR_symlink
9322 case TARGET_NR_symlink
:
9325 p
= lock_user_string(arg1
);
9326 p2
= lock_user_string(arg2
);
9328 ret
= -TARGET_EFAULT
;
9330 ret
= get_errno(symlink(p
, p2
));
9331 unlock_user(p2
, arg2
, 0);
9332 unlock_user(p
, arg1
, 0);
9336 #if defined(TARGET_NR_symlinkat)
9337 case TARGET_NR_symlinkat
:
9340 p
= lock_user_string(arg1
);
9341 p2
= lock_user_string(arg3
);
9343 ret
= -TARGET_EFAULT
;
9345 ret
= get_errno(symlinkat(p
, arg2
, p2
));
9346 unlock_user(p2
, arg3
, 0);
9347 unlock_user(p
, arg1
, 0);
9351 #ifdef TARGET_NR_oldlstat
9352 case TARGET_NR_oldlstat
:
9355 #ifdef TARGET_NR_readlink
9356 case TARGET_NR_readlink
:
9359 p
= lock_user_string(arg1
);
9360 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9362 ret
= -TARGET_EFAULT
;
9364 /* Short circuit this for the magic exe check. */
9365 ret
= -TARGET_EINVAL
;
9366 } else if (is_proc_myself((const char *)p
, "exe")) {
9367 char real
[PATH_MAX
], *temp
;
9368 temp
= realpath(exec_path
, real
);
9369 /* Return value is # of bytes that we wrote to the buffer. */
9371 ret
= get_errno(-1);
9373 /* Don't worry about sign mismatch as earlier mapping
9374 * logic would have thrown a bad address error. */
9375 ret
= MIN(strlen(real
), arg3
);
9376 /* We cannot NUL terminate the string. */
9377 memcpy(p2
, real
, ret
);
9380 ret
= get_errno(readlink(path(p
), p2
, arg3
));
9382 unlock_user(p2
, arg2
, ret
);
9383 unlock_user(p
, arg1
, 0);
9387 #if defined(TARGET_NR_readlinkat)
9388 case TARGET_NR_readlinkat
:
9391 p
= lock_user_string(arg2
);
9392 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
9394 ret
= -TARGET_EFAULT
;
9395 } else if (is_proc_myself((const char *)p
, "exe")) {
9396 char real
[PATH_MAX
], *temp
;
9397 temp
= realpath(exec_path
, real
);
9398 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
9399 snprintf((char *)p2
, arg4
, "%s", real
);
9401 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
9403 unlock_user(p2
, arg3
, ret
);
9404 unlock_user(p
, arg2
, 0);
9408 #ifdef TARGET_NR_uselib
9409 case TARGET_NR_uselib
:
9412 #ifdef TARGET_NR_swapon
9413 case TARGET_NR_swapon
:
9414 if (!(p
= lock_user_string(arg1
)))
9416 ret
= get_errno(swapon(p
, arg2
));
9417 unlock_user(p
, arg1
, 0);
9420 case TARGET_NR_reboot
:
9421 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
9422 /* arg4 must be ignored in all other cases */
9423 p
= lock_user_string(arg4
);
9427 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
9428 unlock_user(p
, arg4
, 0);
9430 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
9433 #ifdef TARGET_NR_readdir
9434 case TARGET_NR_readdir
:
9437 #ifdef TARGET_NR_mmap
9438 case TARGET_NR_mmap
:
9439 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9440 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9441 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9442 || defined(TARGET_S390X)
9445 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
9446 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
9454 unlock_user(v
, arg1
, 0);
9455 ret
= get_errno(target_mmap(v1
, v2
, v3
,
9456 target_to_host_bitmask(v4
, mmap_flags_tbl
),
9460 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9461 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9467 #ifdef TARGET_NR_mmap2
9468 case TARGET_NR_mmap2
:
9470 #define MMAP_SHIFT 12
9472 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9473 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9475 arg6
<< MMAP_SHIFT
));
9478 case TARGET_NR_munmap
:
9479 ret
= get_errno(target_munmap(arg1
, arg2
));
9481 case TARGET_NR_mprotect
:
9483 TaskState
*ts
= cpu
->opaque
;
9484 /* Special hack to detect libc making the stack executable. */
9485 if ((arg3
& PROT_GROWSDOWN
)
9486 && arg1
>= ts
->info
->stack_limit
9487 && arg1
<= ts
->info
->start_stack
) {
9488 arg3
&= ~PROT_GROWSDOWN
;
9489 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
9490 arg1
= ts
->info
->stack_limit
;
9493 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
9495 #ifdef TARGET_NR_mremap
9496 case TARGET_NR_mremap
:
9497 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
9500 /* ??? msync/mlock/munlock are broken for softmmu. */
9501 #ifdef TARGET_NR_msync
9502 case TARGET_NR_msync
:
9503 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
9506 #ifdef TARGET_NR_mlock
9507 case TARGET_NR_mlock
:
9508 ret
= get_errno(mlock(g2h(arg1
), arg2
));
9511 #ifdef TARGET_NR_munlock
9512 case TARGET_NR_munlock
:
9513 ret
= get_errno(munlock(g2h(arg1
), arg2
));
9516 #ifdef TARGET_NR_mlockall
9517 case TARGET_NR_mlockall
:
9518 ret
= get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
9521 #ifdef TARGET_NR_munlockall
9522 case TARGET_NR_munlockall
:
9523 ret
= get_errno(munlockall());
9526 case TARGET_NR_truncate
:
9527 if (!(p
= lock_user_string(arg1
)))
9529 ret
= get_errno(truncate(p
, arg2
));
9530 unlock_user(p
, arg1
, 0);
9532 case TARGET_NR_ftruncate
:
9533 ret
= get_errno(ftruncate(arg1
, arg2
));
9535 case TARGET_NR_fchmod
:
9536 ret
= get_errno(fchmod(arg1
, arg2
));
9538 #if defined(TARGET_NR_fchmodat)
9539 case TARGET_NR_fchmodat
:
9540 if (!(p
= lock_user_string(arg2
)))
9542 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
9543 unlock_user(p
, arg2
, 0);
9546 case TARGET_NR_getpriority
:
9547 /* Note that negative values are valid for getpriority, so we must
9548 differentiate based on errno settings. */
9550 ret
= getpriority(arg1
, arg2
);
9551 if (ret
== -1 && errno
!= 0) {
9552 ret
= -host_to_target_errno(errno
);
9556 /* Return value is the unbiased priority. Signal no error. */
9557 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
9559 /* Return value is a biased priority to avoid negative numbers. */
9563 case TARGET_NR_setpriority
:
9564 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
9566 #ifdef TARGET_NR_profil
9567 case TARGET_NR_profil
:
9570 case TARGET_NR_statfs
:
9571 if (!(p
= lock_user_string(arg1
)))
9573 ret
= get_errno(statfs(path(p
), &stfs
));
9574 unlock_user(p
, arg1
, 0);
9576 if (!is_error(ret
)) {
9577 struct target_statfs
*target_stfs
;
9579 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
9581 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9582 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9583 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9584 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9585 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9586 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9587 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9588 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9589 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9590 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9591 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9592 #ifdef _STATFS_F_FLAGS
9593 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
9595 __put_user(0, &target_stfs
->f_flags
);
9597 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9598 unlock_user_struct(target_stfs
, arg2
, 1);
9601 case TARGET_NR_fstatfs
:
9602 ret
= get_errno(fstatfs(arg1
, &stfs
));
9603 goto convert_statfs
;
9604 #ifdef TARGET_NR_statfs64
9605 case TARGET_NR_statfs64
:
9606 if (!(p
= lock_user_string(arg1
)))
9608 ret
= get_errno(statfs(path(p
), &stfs
));
9609 unlock_user(p
, arg1
, 0);
9611 if (!is_error(ret
)) {
9612 struct target_statfs64
*target_stfs
;
9614 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
9616 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9617 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9618 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9619 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9620 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9621 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9622 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9623 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9624 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9625 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9626 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9627 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9628 unlock_user_struct(target_stfs
, arg3
, 1);
9631 case TARGET_NR_fstatfs64
:
9632 ret
= get_errno(fstatfs(arg1
, &stfs
));
9633 goto convert_statfs64
;
9635 #ifdef TARGET_NR_ioperm
9636 case TARGET_NR_ioperm
:
9639 #ifdef TARGET_NR_socketcall
9640 case TARGET_NR_socketcall
:
9641 ret
= do_socketcall(arg1
, arg2
);
9644 #ifdef TARGET_NR_accept
9645 case TARGET_NR_accept
:
9646 ret
= do_accept4(arg1
, arg2
, arg3
, 0);
9649 #ifdef TARGET_NR_accept4
9650 case TARGET_NR_accept4
:
9651 ret
= do_accept4(arg1
, arg2
, arg3
, arg4
);
9654 #ifdef TARGET_NR_bind
9655 case TARGET_NR_bind
:
9656 ret
= do_bind(arg1
, arg2
, arg3
);
9659 #ifdef TARGET_NR_connect
9660 case TARGET_NR_connect
:
9661 ret
= do_connect(arg1
, arg2
, arg3
);
9664 #ifdef TARGET_NR_getpeername
9665 case TARGET_NR_getpeername
:
9666 ret
= do_getpeername(arg1
, arg2
, arg3
);
9669 #ifdef TARGET_NR_getsockname
9670 case TARGET_NR_getsockname
:
9671 ret
= do_getsockname(arg1
, arg2
, arg3
);
9674 #ifdef TARGET_NR_getsockopt
9675 case TARGET_NR_getsockopt
:
9676 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
9679 #ifdef TARGET_NR_listen
9680 case TARGET_NR_listen
:
9681 ret
= get_errno(listen(arg1
, arg2
));
9684 #ifdef TARGET_NR_recv
9685 case TARGET_NR_recv
:
9686 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
9689 #ifdef TARGET_NR_recvfrom
9690 case TARGET_NR_recvfrom
:
9691 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9694 #ifdef TARGET_NR_recvmsg
9695 case TARGET_NR_recvmsg
:
9696 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
9699 #ifdef TARGET_NR_send
9700 case TARGET_NR_send
:
9701 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
9704 #ifdef TARGET_NR_sendmsg
9705 case TARGET_NR_sendmsg
:
9706 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
9709 #ifdef TARGET_NR_sendmmsg
9710 case TARGET_NR_sendmmsg
:
9711 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
9713 case TARGET_NR_recvmmsg
:
9714 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
9717 #ifdef TARGET_NR_sendto
9718 case TARGET_NR_sendto
:
9719 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9722 #ifdef TARGET_NR_shutdown
9723 case TARGET_NR_shutdown
:
9724 ret
= get_errno(shutdown(arg1
, arg2
));
9727 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9728 case TARGET_NR_getrandom
:
9729 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
9733 ret
= get_errno(getrandom(p
, arg2
, arg3
));
9734 unlock_user(p
, arg1
, ret
);
9737 #ifdef TARGET_NR_socket
9738 case TARGET_NR_socket
:
9739 ret
= do_socket(arg1
, arg2
, arg3
);
9742 #ifdef TARGET_NR_socketpair
9743 case TARGET_NR_socketpair
:
9744 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
9747 #ifdef TARGET_NR_setsockopt
9748 case TARGET_NR_setsockopt
:
9749 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
9752 #if defined(TARGET_NR_syslog)
9753 case TARGET_NR_syslog
:
9758 case TARGET_SYSLOG_ACTION_CLOSE
: /* Close log */
9759 case TARGET_SYSLOG_ACTION_OPEN
: /* Open log */
9760 case TARGET_SYSLOG_ACTION_CLEAR
: /* Clear ring buffer */
9761 case TARGET_SYSLOG_ACTION_CONSOLE_OFF
: /* Disable logging */
9762 case TARGET_SYSLOG_ACTION_CONSOLE_ON
: /* Enable logging */
9763 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL
: /* Set messages level */
9764 case TARGET_SYSLOG_ACTION_SIZE_UNREAD
: /* Number of chars */
9765 case TARGET_SYSLOG_ACTION_SIZE_BUFFER
: /* Size of the buffer */
9767 ret
= get_errno(sys_syslog((int)arg1
, NULL
, (int)arg3
));
9770 case TARGET_SYSLOG_ACTION_READ
: /* Read from log */
9771 case TARGET_SYSLOG_ACTION_READ_CLEAR
: /* Read/clear msgs */
9772 case TARGET_SYSLOG_ACTION_READ_ALL
: /* Read last messages */
9774 ret
= -TARGET_EINVAL
;
9782 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9784 ret
= -TARGET_EFAULT
;
9787 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
9788 unlock_user(p
, arg2
, arg3
);
9798 case TARGET_NR_setitimer
:
9800 struct itimerval value
, ovalue
, *pvalue
;
9804 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
9805 || copy_from_user_timeval(&pvalue
->it_value
,
9806 arg2
+ sizeof(struct target_timeval
)))
9811 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
9812 if (!is_error(ret
) && arg3
) {
9813 if (copy_to_user_timeval(arg3
,
9814 &ovalue
.it_interval
)
9815 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
9821 case TARGET_NR_getitimer
:
9823 struct itimerval value
;
9825 ret
= get_errno(getitimer(arg1
, &value
));
9826 if (!is_error(ret
) && arg2
) {
9827 if (copy_to_user_timeval(arg2
,
9829 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
9835 #ifdef TARGET_NR_stat
9836 case TARGET_NR_stat
:
9837 if (!(p
= lock_user_string(arg1
)))
9839 ret
= get_errno(stat(path(p
), &st
));
9840 unlock_user(p
, arg1
, 0);
9843 #ifdef TARGET_NR_lstat
9844 case TARGET_NR_lstat
:
9845 if (!(p
= lock_user_string(arg1
)))
9847 ret
= get_errno(lstat(path(p
), &st
));
9848 unlock_user(p
, arg1
, 0);
9851 case TARGET_NR_fstat
:
9853 ret
= get_errno(fstat(arg1
, &st
));
9854 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9857 if (!is_error(ret
)) {
9858 struct target_stat
*target_st
;
9860 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
9862 memset(target_st
, 0, sizeof(*target_st
));
9863 __put_user(st
.st_dev
, &target_st
->st_dev
);
9864 __put_user(st
.st_ino
, &target_st
->st_ino
);
9865 __put_user(st
.st_mode
, &target_st
->st_mode
);
9866 __put_user(st
.st_uid
, &target_st
->st_uid
);
9867 __put_user(st
.st_gid
, &target_st
->st_gid
);
9868 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
9869 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
9870 __put_user(st
.st_size
, &target_st
->st_size
);
9871 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
9872 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
9873 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
9874 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
9875 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
9876 unlock_user_struct(target_st
, arg2
, 1);
9880 #ifdef TARGET_NR_olduname
9881 case TARGET_NR_olduname
:
9884 #ifdef TARGET_NR_iopl
9885 case TARGET_NR_iopl
:
9888 case TARGET_NR_vhangup
:
9889 ret
= get_errno(vhangup());
9891 #ifdef TARGET_NR_idle
9892 case TARGET_NR_idle
:
9895 #ifdef TARGET_NR_syscall
9896 case TARGET_NR_syscall
:
9897 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
9898 arg6
, arg7
, arg8
, 0);
9901 case TARGET_NR_wait4
:
9904 abi_long status_ptr
= arg2
;
9905 struct rusage rusage
, *rusage_ptr
;
9906 abi_ulong target_rusage
= arg4
;
9907 abi_long rusage_err
;
9909 rusage_ptr
= &rusage
;
9912 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
9913 if (!is_error(ret
)) {
9914 if (status_ptr
&& ret
) {
9915 status
= host_to_target_waitstatus(status
);
9916 if (put_user_s32(status
, status_ptr
))
9919 if (target_rusage
) {
9920 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
9928 #ifdef TARGET_NR_swapoff
9929 case TARGET_NR_swapoff
:
9930 if (!(p
= lock_user_string(arg1
)))
9932 ret
= get_errno(swapoff(p
));
9933 unlock_user(p
, arg1
, 0);
9936 case TARGET_NR_sysinfo
:
9938 struct target_sysinfo
*target_value
;
9939 struct sysinfo value
;
9940 ret
= get_errno(sysinfo(&value
));
9941 if (!is_error(ret
) && arg1
)
9943 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
9945 __put_user(value
.uptime
, &target_value
->uptime
);
9946 __put_user(value
.loads
[0], &target_value
->loads
[0]);
9947 __put_user(value
.loads
[1], &target_value
->loads
[1]);
9948 __put_user(value
.loads
[2], &target_value
->loads
[2]);
9949 __put_user(value
.totalram
, &target_value
->totalram
);
9950 __put_user(value
.freeram
, &target_value
->freeram
);
9951 __put_user(value
.sharedram
, &target_value
->sharedram
);
9952 __put_user(value
.bufferram
, &target_value
->bufferram
);
9953 __put_user(value
.totalswap
, &target_value
->totalswap
);
9954 __put_user(value
.freeswap
, &target_value
->freeswap
);
9955 __put_user(value
.procs
, &target_value
->procs
);
9956 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
9957 __put_user(value
.freehigh
, &target_value
->freehigh
);
9958 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
9959 unlock_user_struct(target_value
, arg1
, 1);
9963 #ifdef TARGET_NR_ipc
9965 ret
= do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9968 #ifdef TARGET_NR_semget
9969 case TARGET_NR_semget
:
9970 ret
= get_errno(semget(arg1
, arg2
, arg3
));
9973 #ifdef TARGET_NR_semop
9974 case TARGET_NR_semop
:
9975 ret
= do_semop(arg1
, arg2
, arg3
);
9978 #ifdef TARGET_NR_semctl
9979 case TARGET_NR_semctl
:
9980 ret
= do_semctl(arg1
, arg2
, arg3
, arg4
);
9983 #ifdef TARGET_NR_msgctl
9984 case TARGET_NR_msgctl
:
9985 ret
= do_msgctl(arg1
, arg2
, arg3
);
9988 #ifdef TARGET_NR_msgget
9989 case TARGET_NR_msgget
:
9990 ret
= get_errno(msgget(arg1
, arg2
));
9993 #ifdef TARGET_NR_msgrcv
9994 case TARGET_NR_msgrcv
:
9995 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
9998 #ifdef TARGET_NR_msgsnd
9999 case TARGET_NR_msgsnd
:
10000 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
10003 #ifdef TARGET_NR_shmget
10004 case TARGET_NR_shmget
:
10005 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
10008 #ifdef TARGET_NR_shmctl
10009 case TARGET_NR_shmctl
:
10010 ret
= do_shmctl(arg1
, arg2
, arg3
);
10013 #ifdef TARGET_NR_shmat
10014 case TARGET_NR_shmat
:
10015 ret
= do_shmat(cpu_env
, arg1
, arg2
, arg3
);
10018 #ifdef TARGET_NR_shmdt
10019 case TARGET_NR_shmdt
:
10020 ret
= do_shmdt(arg1
);
10023 case TARGET_NR_fsync
:
10024 ret
= get_errno(fsync(arg1
));
10026 case TARGET_NR_clone
:
10027 /* Linux manages to have three different orderings for its
10028 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10029 * match the kernel's CONFIG_CLONE_* settings.
10030 * Microblaze is further special in that it uses a sixth
10031 * implicit argument to clone for the TLS pointer.
10033 #if defined(TARGET_MICROBLAZE)
10034 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
10035 #elif defined(TARGET_CLONE_BACKWARDS)
10036 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
10037 #elif defined(TARGET_CLONE_BACKWARDS2)
10038 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
10040 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
10043 #ifdef __NR_exit_group
10044 /* new thread calls */
10045 case TARGET_NR_exit_group
:
10046 #ifdef TARGET_GPROF
10049 gdb_exit(cpu_env
, arg1
);
10050 ret
= get_errno(exit_group(arg1
));
10053 case TARGET_NR_setdomainname
:
10054 if (!(p
= lock_user_string(arg1
)))
10056 ret
= get_errno(setdomainname(p
, arg2
));
10057 unlock_user(p
, arg1
, 0);
10059 case TARGET_NR_uname
:
10060 /* no need to transcode because we use the linux syscall */
10062 struct new_utsname
* buf
;
10064 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
10066 ret
= get_errno(sys_uname(buf
));
10067 if (!is_error(ret
)) {
10068 /* Overwrite the native machine name with whatever is being
10070 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
10071 /* Allow the user to override the reported release. */
10072 if (qemu_uname_release
&& *qemu_uname_release
) {
10073 g_strlcpy(buf
->release
, qemu_uname_release
,
10074 sizeof(buf
->release
));
10077 unlock_user_struct(buf
, arg1
, 1);
10081 case TARGET_NR_modify_ldt
:
10082 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
10084 #if !defined(TARGET_X86_64)
10085 case TARGET_NR_vm86old
:
10086 goto unimplemented
;
10087 case TARGET_NR_vm86
:
10088 ret
= do_vm86(cpu_env
, arg1
, arg2
);
10092 case TARGET_NR_adjtimex
:
10094 struct timex host_buf
;
10096 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
10099 ret
= get_errno(adjtimex(&host_buf
));
10100 if (!is_error(ret
)) {
10101 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
10107 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10108 case TARGET_NR_clock_adjtime
:
10110 struct timex htx
, *phtx
= &htx
;
10112 if (target_to_host_timex(phtx
, arg2
) != 0) {
10115 ret
= get_errno(clock_adjtime(arg1
, phtx
));
10116 if (!is_error(ret
) && phtx
) {
10117 if (host_to_target_timex(arg2
, phtx
) != 0) {
10124 #ifdef TARGET_NR_create_module
10125 case TARGET_NR_create_module
:
10127 case TARGET_NR_init_module
:
10128 case TARGET_NR_delete_module
:
10129 #ifdef TARGET_NR_get_kernel_syms
10130 case TARGET_NR_get_kernel_syms
:
10132 goto unimplemented
;
10133 case TARGET_NR_quotactl
:
10134 goto unimplemented
;
10135 case TARGET_NR_getpgid
:
10136 ret
= get_errno(getpgid(arg1
));
10138 case TARGET_NR_fchdir
:
10139 ret
= get_errno(fchdir(arg1
));
10141 #ifdef TARGET_NR_bdflush /* not on x86_64 */
10142 case TARGET_NR_bdflush
:
10143 goto unimplemented
;
10145 #ifdef TARGET_NR_sysfs
10146 case TARGET_NR_sysfs
:
10147 goto unimplemented
;
10149 case TARGET_NR_personality
:
10150 ret
= get_errno(personality(arg1
));
10152 #ifdef TARGET_NR_afs_syscall
10153 case TARGET_NR_afs_syscall
:
10154 goto unimplemented
;
10156 #ifdef TARGET_NR__llseek /* Not on alpha */
10157 case TARGET_NR__llseek
:
10160 #if !defined(__NR_llseek)
10161 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
10163 ret
= get_errno(res
);
10168 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
10170 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
10176 #ifdef TARGET_NR_getdents
10177 case TARGET_NR_getdents
:
10178 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10179 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10181 struct target_dirent
*target_dirp
;
10182 struct linux_dirent
*dirp
;
10183 abi_long count
= arg3
;
10185 dirp
= g_try_malloc(count
);
10187 ret
= -TARGET_ENOMEM
;
10191 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
10192 if (!is_error(ret
)) {
10193 struct linux_dirent
*de
;
10194 struct target_dirent
*tde
;
10196 int reclen
, treclen
;
10197 int count1
, tnamelen
;
10201 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10205 reclen
= de
->d_reclen
;
10206 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
10207 assert(tnamelen
>= 0);
10208 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
10209 assert(count1
+ treclen
<= count
);
10210 tde
->d_reclen
= tswap16(treclen
);
10211 tde
->d_ino
= tswapal(de
->d_ino
);
10212 tde
->d_off
= tswapal(de
->d_off
);
10213 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
10214 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
10216 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
10220 unlock_user(target_dirp
, arg2
, ret
);
10226 struct linux_dirent
*dirp
;
10227 abi_long count
= arg3
;
10229 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10231 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
10232 if (!is_error(ret
)) {
10233 struct linux_dirent
*de
;
10238 reclen
= de
->d_reclen
;
10241 de
->d_reclen
= tswap16(reclen
);
10242 tswapls(&de
->d_ino
);
10243 tswapls(&de
->d_off
);
10244 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
10248 unlock_user(dirp
, arg2
, ret
);
10252 /* Implement getdents in terms of getdents64 */
10254 struct linux_dirent64
*dirp
;
10255 abi_long count
= arg3
;
10257 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
10261 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
10262 if (!is_error(ret
)) {
10263 /* Convert the dirent64 structs to target dirent. We do this
10264 * in-place, since we can guarantee that a target_dirent is no
10265 * larger than a dirent64; however this means we have to be
10266 * careful to read everything before writing in the new format.
10268 struct linux_dirent64
*de
;
10269 struct target_dirent
*tde
;
10274 tde
= (struct target_dirent
*)dirp
;
10276 int namelen
, treclen
;
10277 int reclen
= de
->d_reclen
;
10278 uint64_t ino
= de
->d_ino
;
10279 int64_t off
= de
->d_off
;
10280 uint8_t type
= de
->d_type
;
10282 namelen
= strlen(de
->d_name
);
10283 treclen
= offsetof(struct target_dirent
, d_name
)
10285 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
10287 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
10288 tde
->d_ino
= tswapal(ino
);
10289 tde
->d_off
= tswapal(off
);
10290 tde
->d_reclen
= tswap16(treclen
);
10291 /* The target_dirent type is in what was formerly a padding
10292 * byte at the end of the structure:
10294 *(((char *)tde
) + treclen
- 1) = type
;
10296 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
10297 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
10303 unlock_user(dirp
, arg2
, ret
);
10307 #endif /* TARGET_NR_getdents */
10308 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10309 case TARGET_NR_getdents64
:
10311 struct linux_dirent64
*dirp
;
10312 abi_long count
= arg3
;
10313 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10315 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
10316 if (!is_error(ret
)) {
10317 struct linux_dirent64
*de
;
10322 reclen
= de
->d_reclen
;
10325 de
->d_reclen
= tswap16(reclen
);
10326 tswap64s((uint64_t *)&de
->d_ino
);
10327 tswap64s((uint64_t *)&de
->d_off
);
10328 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
10332 unlock_user(dirp
, arg2
, ret
);
10335 #endif /* TARGET_NR_getdents64 */
10336 #if defined(TARGET_NR__newselect)
10337 case TARGET_NR__newselect
:
10338 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
10341 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10342 # ifdef TARGET_NR_poll
10343 case TARGET_NR_poll
:
10345 # ifdef TARGET_NR_ppoll
10346 case TARGET_NR_ppoll
:
10349 struct target_pollfd
*target_pfd
;
10350 unsigned int nfds
= arg2
;
10351 struct pollfd
*pfd
;
10357 if (nfds
> (INT_MAX
/ sizeof(struct target_pollfd
))) {
10358 ret
= -TARGET_EINVAL
;
10362 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
10363 sizeof(struct target_pollfd
) * nfds
, 1);
10368 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
10369 for (i
= 0; i
< nfds
; i
++) {
10370 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
10371 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
10376 # ifdef TARGET_NR_ppoll
10377 case TARGET_NR_ppoll
:
10379 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
10380 target_sigset_t
*target_set
;
10381 sigset_t _set
, *set
= &_set
;
10384 if (target_to_host_timespec(timeout_ts
, arg3
)) {
10385 unlock_user(target_pfd
, arg1
, 0);
10393 if (arg5
!= sizeof(target_sigset_t
)) {
10394 unlock_user(target_pfd
, arg1
, 0);
10395 ret
= -TARGET_EINVAL
;
10399 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
10401 unlock_user(target_pfd
, arg1
, 0);
10404 target_to_host_sigset(set
, target_set
);
10409 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
10410 set
, SIGSET_T_SIZE
));
10412 if (!is_error(ret
) && arg3
) {
10413 host_to_target_timespec(arg3
, timeout_ts
);
10416 unlock_user(target_set
, arg4
, 0);
10421 # ifdef TARGET_NR_poll
10422 case TARGET_NR_poll
:
10424 struct timespec ts
, *pts
;
10427 /* Convert ms to secs, ns */
10428 ts
.tv_sec
= arg3
/ 1000;
10429 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
10432 /* -ve poll() timeout means "infinite" */
10435 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
10440 g_assert_not_reached();
10443 if (!is_error(ret
)) {
10444 for(i
= 0; i
< nfds
; i
++) {
10445 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
10448 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
10452 case TARGET_NR_flock
:
10453 /* NOTE: the flock constant seems to be the same for every
10455 ret
= get_errno(safe_flock(arg1
, arg2
));
10457 case TARGET_NR_readv
:
10459 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10461 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
10462 unlock_iovec(vec
, arg2
, arg3
, 1);
10464 ret
= -host_to_target_errno(errno
);
10468 case TARGET_NR_writev
:
10470 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10472 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
10473 unlock_iovec(vec
, arg2
, arg3
, 0);
10475 ret
= -host_to_target_errno(errno
);
10479 #if defined(TARGET_NR_preadv)
10480 case TARGET_NR_preadv
:
10482 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10484 unsigned long low
, high
;
10486 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10487 ret
= get_errno(safe_preadv(arg1
, vec
, arg3
, low
, high
));
10488 unlock_iovec(vec
, arg2
, arg3
, 1);
10490 ret
= -host_to_target_errno(errno
);
10495 #if defined(TARGET_NR_pwritev)
10496 case TARGET_NR_pwritev
:
10498 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10500 unsigned long low
, high
;
10502 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10503 ret
= get_errno(safe_pwritev(arg1
, vec
, arg3
, low
, high
));
10504 unlock_iovec(vec
, arg2
, arg3
, 0);
10506 ret
= -host_to_target_errno(errno
);
10511 case TARGET_NR_getsid
:
10512 ret
= get_errno(getsid(arg1
));
10514 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10515 case TARGET_NR_fdatasync
:
10516 ret
= get_errno(fdatasync(arg1
));
10519 #ifdef TARGET_NR__sysctl
10520 case TARGET_NR__sysctl
:
10521 /* We don't implement this, but ENOTDIR is always a safe
10523 ret
= -TARGET_ENOTDIR
;
10526 case TARGET_NR_sched_getaffinity
:
10528 unsigned int mask_size
;
10529 unsigned long *mask
;
10532 * sched_getaffinity needs multiples of ulong, so need to take
10533 * care of mismatches between target ulong and host ulong sizes.
10535 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10536 ret
= -TARGET_EINVAL
;
10539 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10541 mask
= alloca(mask_size
);
10542 memset(mask
, 0, mask_size
);
10543 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
10545 if (!is_error(ret
)) {
10547 /* More data returned than the caller's buffer will fit.
10548 * This only happens if sizeof(abi_long) < sizeof(long)
10549 * and the caller passed us a buffer holding an odd number
10550 * of abi_longs. If the host kernel is actually using the
10551 * extra 4 bytes then fail EINVAL; otherwise we can just
10552 * ignore them and only copy the interesting part.
10554 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
10555 if (numcpus
> arg2
* 8) {
10556 ret
= -TARGET_EINVAL
;
10562 if (host_to_target_cpu_mask(mask
, mask_size
, arg3
, ret
)) {
10568 case TARGET_NR_sched_setaffinity
:
10570 unsigned int mask_size
;
10571 unsigned long *mask
;
10574 * sched_setaffinity needs multiples of ulong, so need to take
10575 * care of mismatches between target ulong and host ulong sizes.
10577 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10578 ret
= -TARGET_EINVAL
;
10581 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10582 mask
= alloca(mask_size
);
10584 ret
= target_to_host_cpu_mask(mask
, mask_size
, arg3
, arg2
);
10589 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
10592 case TARGET_NR_getcpu
:
10594 unsigned cpu
, node
;
10595 ret
= get_errno(sys_getcpu(arg1
? &cpu
: NULL
,
10596 arg2
? &node
: NULL
,
10598 if (is_error(ret
)) {
10601 if (arg1
&& put_user_u32(cpu
, arg1
)) {
10604 if (arg2
&& put_user_u32(node
, arg2
)) {
10609 case TARGET_NR_sched_setparam
:
10611 struct sched_param
*target_schp
;
10612 struct sched_param schp
;
10615 return -TARGET_EINVAL
;
10617 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
10619 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10620 unlock_user_struct(target_schp
, arg2
, 0);
10621 ret
= get_errno(sched_setparam(arg1
, &schp
));
10624 case TARGET_NR_sched_getparam
:
10626 struct sched_param
*target_schp
;
10627 struct sched_param schp
;
10630 return -TARGET_EINVAL
;
10632 ret
= get_errno(sched_getparam(arg1
, &schp
));
10633 if (!is_error(ret
)) {
10634 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
10636 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
10637 unlock_user_struct(target_schp
, arg2
, 1);
10641 case TARGET_NR_sched_setscheduler
:
10643 struct sched_param
*target_schp
;
10644 struct sched_param schp
;
10646 return -TARGET_EINVAL
;
10648 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
10650 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10651 unlock_user_struct(target_schp
, arg3
, 0);
10652 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
10655 case TARGET_NR_sched_getscheduler
:
10656 ret
= get_errno(sched_getscheduler(arg1
));
10658 case TARGET_NR_sched_yield
:
10659 ret
= get_errno(sched_yield());
10661 case TARGET_NR_sched_get_priority_max
:
10662 ret
= get_errno(sched_get_priority_max(arg1
));
10664 case TARGET_NR_sched_get_priority_min
:
10665 ret
= get_errno(sched_get_priority_min(arg1
));
10667 case TARGET_NR_sched_rr_get_interval
:
10669 struct timespec ts
;
10670 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10671 if (!is_error(ret
)) {
10672 ret
= host_to_target_timespec(arg2
, &ts
);
10676 case TARGET_NR_nanosleep
:
10678 struct timespec req
, rem
;
10679 target_to_host_timespec(&req
, arg1
);
10680 ret
= get_errno(safe_nanosleep(&req
, &rem
));
10681 if (is_error(ret
) && arg2
) {
10682 host_to_target_timespec(arg2
, &rem
);
10686 #ifdef TARGET_NR_query_module
10687 case TARGET_NR_query_module
:
10688 goto unimplemented
;
10690 #ifdef TARGET_NR_nfsservctl
10691 case TARGET_NR_nfsservctl
:
10692 goto unimplemented
;
10694 case TARGET_NR_prctl
:
10696 case PR_GET_PDEATHSIG
:
10699 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
10700 if (!is_error(ret
) && arg2
10701 && put_user_ual(deathsig
, arg2
)) {
10709 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
10713 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10714 arg3
, arg4
, arg5
));
10715 unlock_user(name
, arg2
, 16);
10720 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
10724 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10725 arg3
, arg4
, arg5
));
10726 unlock_user(name
, arg2
, 0);
10730 #ifdef TARGET_AARCH64
10731 case TARGET_PR_SVE_SET_VL
:
10732 /* We cannot support either PR_SVE_SET_VL_ONEXEC
10733 or PR_SVE_VL_INHERIT. Therefore, anything above
10734 ARM_MAX_VQ results in EINVAL. */
10735 ret
= -TARGET_EINVAL
;
10736 if (arm_feature(cpu_env
, ARM_FEATURE_SVE
)
10737 && arg2
>= 0 && arg2
<= ARM_MAX_VQ
* 16 && !(arg2
& 15)) {
10738 CPUARMState
*env
= cpu_env
;
10739 int old_vq
= (env
->vfp
.zcr_el
[1] & 0xf) + 1;
10740 int vq
= MAX(arg2
/ 16, 1);
10743 aarch64_sve_narrow_vq(env
, vq
);
10745 env
->vfp
.zcr_el
[1] = vq
- 1;
10749 case TARGET_PR_SVE_GET_VL
:
10750 ret
= -TARGET_EINVAL
;
10751 if (arm_feature(cpu_env
, ARM_FEATURE_SVE
)) {
10752 CPUARMState
*env
= cpu_env
;
10753 ret
= ((env
->vfp
.zcr_el
[1] & 0xf) + 1) * 16;
10756 #endif /* AARCH64 */
10757 case PR_GET_SECCOMP
:
10758 case PR_SET_SECCOMP
:
10759 /* Disable seccomp to prevent the target disabling syscalls we
10761 ret
= -TARGET_EINVAL
;
10764 /* Most prctl options have no pointer arguments */
10765 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
10769 #ifdef TARGET_NR_arch_prctl
10770 case TARGET_NR_arch_prctl
:
10771 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10772 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
10775 goto unimplemented
;
10778 #ifdef TARGET_NR_pread64
10779 case TARGET_NR_pread64
:
10780 if (regpairs_aligned(cpu_env
, num
)) {
10784 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
10786 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10787 unlock_user(p
, arg2
, ret
);
10789 case TARGET_NR_pwrite64
:
10790 if (regpairs_aligned(cpu_env
, num
)) {
10794 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
10796 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10797 unlock_user(p
, arg2
, 0);
10800 case TARGET_NR_getcwd
:
10801 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
10803 ret
= get_errno(sys_getcwd1(p
, arg2
));
10804 unlock_user(p
, arg1
, ret
);
10806 case TARGET_NR_capget
:
10807 case TARGET_NR_capset
:
10809 struct target_user_cap_header
*target_header
;
10810 struct target_user_cap_data
*target_data
= NULL
;
10811 struct __user_cap_header_struct header
;
10812 struct __user_cap_data_struct data
[2];
10813 struct __user_cap_data_struct
*dataptr
= NULL
;
10814 int i
, target_datalen
;
10815 int data_items
= 1;
10817 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
10820 header
.version
= tswap32(target_header
->version
);
10821 header
.pid
= tswap32(target_header
->pid
);
10823 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
10824 /* Version 2 and up takes pointer to two user_data structs */
10828 target_datalen
= sizeof(*target_data
) * data_items
;
10831 if (num
== TARGET_NR_capget
) {
10832 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
10834 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
10836 if (!target_data
) {
10837 unlock_user_struct(target_header
, arg1
, 0);
10841 if (num
== TARGET_NR_capset
) {
10842 for (i
= 0; i
< data_items
; i
++) {
10843 data
[i
].effective
= tswap32(target_data
[i
].effective
);
10844 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
10845 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
10852 if (num
== TARGET_NR_capget
) {
10853 ret
= get_errno(capget(&header
, dataptr
));
10855 ret
= get_errno(capset(&header
, dataptr
));
10858 /* The kernel always updates version for both capget and capset */
10859 target_header
->version
= tswap32(header
.version
);
10860 unlock_user_struct(target_header
, arg1
, 1);
10863 if (num
== TARGET_NR_capget
) {
10864 for (i
= 0; i
< data_items
; i
++) {
10865 target_data
[i
].effective
= tswap32(data
[i
].effective
);
10866 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
10867 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
10869 unlock_user(target_data
, arg2
, target_datalen
);
10871 unlock_user(target_data
, arg2
, 0);
10876 case TARGET_NR_sigaltstack
:
10877 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
10880 #ifdef CONFIG_SENDFILE
10881 case TARGET_NR_sendfile
:
10883 off_t
*offp
= NULL
;
10886 ret
= get_user_sal(off
, arg3
);
10887 if (is_error(ret
)) {
10892 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10893 if (!is_error(ret
) && arg3
) {
10894 abi_long ret2
= put_user_sal(off
, arg3
);
10895 if (is_error(ret2
)) {
10901 #ifdef TARGET_NR_sendfile64
10902 case TARGET_NR_sendfile64
:
10904 off_t
*offp
= NULL
;
10907 ret
= get_user_s64(off
, arg3
);
10908 if (is_error(ret
)) {
10913 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10914 if (!is_error(ret
) && arg3
) {
10915 abi_long ret2
= put_user_s64(off
, arg3
);
10916 if (is_error(ret2
)) {
10924 case TARGET_NR_sendfile
:
10925 #ifdef TARGET_NR_sendfile64
10926 case TARGET_NR_sendfile64
:
10928 goto unimplemented
;
10931 #ifdef TARGET_NR_getpmsg
10932 case TARGET_NR_getpmsg
:
10933 goto unimplemented
;
10935 #ifdef TARGET_NR_putpmsg
10936 case TARGET_NR_putpmsg
:
10937 goto unimplemented
;
10939 #ifdef TARGET_NR_vfork
10940 case TARGET_NR_vfork
:
10941 ret
= get_errno(do_fork(cpu_env
,
10942 CLONE_VFORK
| CLONE_VM
| TARGET_SIGCHLD
,
10946 #ifdef TARGET_NR_ugetrlimit
10947 case TARGET_NR_ugetrlimit
:
10949 struct rlimit rlim
;
10950 int resource
= target_to_host_resource(arg1
);
10951 ret
= get_errno(getrlimit(resource
, &rlim
));
10952 if (!is_error(ret
)) {
10953 struct target_rlimit
*target_rlim
;
10954 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
10956 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
10957 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
10958 unlock_user_struct(target_rlim
, arg2
, 1);
10963 #ifdef TARGET_NR_truncate64
10964 case TARGET_NR_truncate64
:
10965 if (!(p
= lock_user_string(arg1
)))
10967 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
10968 unlock_user(p
, arg1
, 0);
10971 #ifdef TARGET_NR_ftruncate64
10972 case TARGET_NR_ftruncate64
:
10973 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
10976 #ifdef TARGET_NR_stat64
10977 case TARGET_NR_stat64
:
10978 if (!(p
= lock_user_string(arg1
)))
10980 ret
= get_errno(stat(path(p
), &st
));
10981 unlock_user(p
, arg1
, 0);
10982 if (!is_error(ret
))
10983 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10986 #ifdef TARGET_NR_lstat64
10987 case TARGET_NR_lstat64
:
10988 if (!(p
= lock_user_string(arg1
)))
10990 ret
= get_errno(lstat(path(p
), &st
));
10991 unlock_user(p
, arg1
, 0);
10992 if (!is_error(ret
))
10993 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10996 #ifdef TARGET_NR_fstat64
10997 case TARGET_NR_fstat64
:
10998 ret
= get_errno(fstat(arg1
, &st
));
10999 if (!is_error(ret
))
11000 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11003 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11004 #ifdef TARGET_NR_fstatat64
11005 case TARGET_NR_fstatat64
:
11007 #ifdef TARGET_NR_newfstatat
11008 case TARGET_NR_newfstatat
:
11010 if (!(p
= lock_user_string(arg2
)))
11012 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
11013 if (!is_error(ret
))
11014 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
11017 #ifdef TARGET_NR_lchown
11018 case TARGET_NR_lchown
:
11019 if (!(p
= lock_user_string(arg1
)))
11021 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
11022 unlock_user(p
, arg1
, 0);
11025 #ifdef TARGET_NR_getuid
11026 case TARGET_NR_getuid
:
11027 ret
= get_errno(high2lowuid(getuid()));
11030 #ifdef TARGET_NR_getgid
11031 case TARGET_NR_getgid
:
11032 ret
= get_errno(high2lowgid(getgid()));
11035 #ifdef TARGET_NR_geteuid
11036 case TARGET_NR_geteuid
:
11037 ret
= get_errno(high2lowuid(geteuid()));
11040 #ifdef TARGET_NR_getegid
11041 case TARGET_NR_getegid
:
11042 ret
= get_errno(high2lowgid(getegid()));
11045 case TARGET_NR_setreuid
:
11046 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
11048 case TARGET_NR_setregid
:
11049 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
11051 case TARGET_NR_getgroups
:
11053 int gidsetsize
= arg1
;
11054 target_id
*target_grouplist
;
11058 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11059 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11060 if (gidsetsize
== 0)
11062 if (!is_error(ret
)) {
11063 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
11064 if (!target_grouplist
)
11066 for(i
= 0;i
< ret
; i
++)
11067 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
11068 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
11072 case TARGET_NR_setgroups
:
11074 int gidsetsize
= arg1
;
11075 target_id
*target_grouplist
;
11076 gid_t
*grouplist
= NULL
;
11079 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11080 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
11081 if (!target_grouplist
) {
11082 ret
= -TARGET_EFAULT
;
11085 for (i
= 0; i
< gidsetsize
; i
++) {
11086 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
11088 unlock_user(target_grouplist
, arg2
, 0);
11090 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
11093 case TARGET_NR_fchown
:
11094 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
11096 #if defined(TARGET_NR_fchownat)
11097 case TARGET_NR_fchownat
:
11098 if (!(p
= lock_user_string(arg2
)))
11100 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
11101 low2highgid(arg4
), arg5
));
11102 unlock_user(p
, arg2
, 0);
11105 #ifdef TARGET_NR_setresuid
11106 case TARGET_NR_setresuid
:
11107 ret
= get_errno(sys_setresuid(low2highuid(arg1
),
11109 low2highuid(arg3
)));
11112 #ifdef TARGET_NR_getresuid
11113 case TARGET_NR_getresuid
:
11115 uid_t ruid
, euid
, suid
;
11116 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11117 if (!is_error(ret
)) {
11118 if (put_user_id(high2lowuid(ruid
), arg1
)
11119 || put_user_id(high2lowuid(euid
), arg2
)
11120 || put_user_id(high2lowuid(suid
), arg3
))
11126 #ifdef TARGET_NR_getresgid
11127 case TARGET_NR_setresgid
:
11128 ret
= get_errno(sys_setresgid(low2highgid(arg1
),
11130 low2highgid(arg3
)));
11133 #ifdef TARGET_NR_getresgid
11134 case TARGET_NR_getresgid
:
11136 gid_t rgid
, egid
, sgid
;
11137 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11138 if (!is_error(ret
)) {
11139 if (put_user_id(high2lowgid(rgid
), arg1
)
11140 || put_user_id(high2lowgid(egid
), arg2
)
11141 || put_user_id(high2lowgid(sgid
), arg3
))
11147 #ifdef TARGET_NR_chown
11148 case TARGET_NR_chown
:
11149 if (!(p
= lock_user_string(arg1
)))
11151 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
11152 unlock_user(p
, arg1
, 0);
11155 case TARGET_NR_setuid
:
11156 ret
= get_errno(sys_setuid(low2highuid(arg1
)));
11158 case TARGET_NR_setgid
:
11159 ret
= get_errno(sys_setgid(low2highgid(arg1
)));
11161 case TARGET_NR_setfsuid
:
11162 ret
= get_errno(setfsuid(arg1
));
11164 case TARGET_NR_setfsgid
:
11165 ret
= get_errno(setfsgid(arg1
));
11168 #ifdef TARGET_NR_lchown32
11169 case TARGET_NR_lchown32
:
11170 if (!(p
= lock_user_string(arg1
)))
11172 ret
= get_errno(lchown(p
, arg2
, arg3
));
11173 unlock_user(p
, arg1
, 0);
11176 #ifdef TARGET_NR_getuid32
11177 case TARGET_NR_getuid32
:
11178 ret
= get_errno(getuid());
11182 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11183 /* Alpha specific */
11184 case TARGET_NR_getxuid
:
11188 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
11190 ret
= get_errno(getuid());
11193 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11194 /* Alpha specific */
11195 case TARGET_NR_getxgid
:
11199 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
11201 ret
= get_errno(getgid());
11204 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11205 /* Alpha specific */
11206 case TARGET_NR_osf_getsysinfo
:
11207 ret
= -TARGET_EOPNOTSUPP
;
11209 case TARGET_GSI_IEEE_FP_CONTROL
:
11211 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
11213 /* Copied from linux ieee_fpcr_to_swcr. */
11214 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
11215 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
11216 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
11217 | SWCR_TRAP_ENABLE_DZE
11218 | SWCR_TRAP_ENABLE_OVF
);
11219 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
11220 | SWCR_TRAP_ENABLE_INE
);
11221 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
11222 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
11224 if (put_user_u64 (swcr
, arg2
))
11230 /* case GSI_IEEE_STATE_AT_SIGNAL:
11231 -- Not implemented in linux kernel.
11233 -- Retrieves current unaligned access state; not much used.
11234 case GSI_PROC_TYPE:
11235 -- Retrieves implver information; surely not used.
11236 case GSI_GET_HWRPB:
11237 -- Grabs a copy of the HWRPB; surely not used.
11242 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11243 /* Alpha specific */
11244 case TARGET_NR_osf_setsysinfo
:
11245 ret
= -TARGET_EOPNOTSUPP
;
11247 case TARGET_SSI_IEEE_FP_CONTROL
:
11249 uint64_t swcr
, fpcr
, orig_fpcr
;
11251 if (get_user_u64 (swcr
, arg2
)) {
11254 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11255 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
11257 /* Copied from linux ieee_swcr_to_fpcr. */
11258 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
11259 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
11260 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
11261 | SWCR_TRAP_ENABLE_DZE
11262 | SWCR_TRAP_ENABLE_OVF
)) << 48;
11263 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
11264 | SWCR_TRAP_ENABLE_INE
)) << 57;
11265 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
11266 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
11268 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11273 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
11275 uint64_t exc
, fpcr
, orig_fpcr
;
11278 if (get_user_u64(exc
, arg2
)) {
11282 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11284 /* We only add to the exception status here. */
11285 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
11287 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11290 /* Old exceptions are not signaled. */
11291 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
11293 /* If any exceptions set by this call,
11294 and are unmasked, send a signal. */
11296 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
11297 si_code
= TARGET_FPE_FLTRES
;
11299 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
11300 si_code
= TARGET_FPE_FLTUND
;
11302 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
11303 si_code
= TARGET_FPE_FLTOVF
;
11305 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
11306 si_code
= TARGET_FPE_FLTDIV
;
11308 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
11309 si_code
= TARGET_FPE_FLTINV
;
11311 if (si_code
!= 0) {
11312 target_siginfo_t info
;
11313 info
.si_signo
= SIGFPE
;
11315 info
.si_code
= si_code
;
11316 info
._sifields
._sigfault
._addr
11317 = ((CPUArchState
*)cpu_env
)->pc
;
11318 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
11319 QEMU_SI_FAULT
, &info
);
11324 /* case SSI_NVPAIRS:
11325 -- Used with SSIN_UACPROC to enable unaligned accesses.
11326 case SSI_IEEE_STATE_AT_SIGNAL:
11327 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11328 -- Not implemented in linux kernel
11333 #ifdef TARGET_NR_osf_sigprocmask
11334 /* Alpha specific. */
11335 case TARGET_NR_osf_sigprocmask
:
11339 sigset_t set
, oldset
;
11342 case TARGET_SIG_BLOCK
:
11345 case TARGET_SIG_UNBLOCK
:
11348 case TARGET_SIG_SETMASK
:
11352 ret
= -TARGET_EINVAL
;
11356 target_to_host_old_sigset(&set
, &mask
);
11357 ret
= do_sigprocmask(how
, &set
, &oldset
);
11359 host_to_target_old_sigset(&mask
, &oldset
);
11366 #ifdef TARGET_NR_getgid32
11367 case TARGET_NR_getgid32
:
11368 ret
= get_errno(getgid());
11371 #ifdef TARGET_NR_geteuid32
11372 case TARGET_NR_geteuid32
:
11373 ret
= get_errno(geteuid());
11376 #ifdef TARGET_NR_getegid32
11377 case TARGET_NR_getegid32
:
11378 ret
= get_errno(getegid());
11381 #ifdef TARGET_NR_setreuid32
11382 case TARGET_NR_setreuid32
:
11383 ret
= get_errno(setreuid(arg1
, arg2
));
11386 #ifdef TARGET_NR_setregid32
11387 case TARGET_NR_setregid32
:
11388 ret
= get_errno(setregid(arg1
, arg2
));
11391 #ifdef TARGET_NR_getgroups32
11392 case TARGET_NR_getgroups32
:
11394 int gidsetsize
= arg1
;
11395 uint32_t *target_grouplist
;
11399 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11400 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11401 if (gidsetsize
== 0)
11403 if (!is_error(ret
)) {
11404 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
11405 if (!target_grouplist
) {
11406 ret
= -TARGET_EFAULT
;
11409 for(i
= 0;i
< ret
; i
++)
11410 target_grouplist
[i
] = tswap32(grouplist
[i
]);
11411 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
11416 #ifdef TARGET_NR_setgroups32
11417 case TARGET_NR_setgroups32
:
11419 int gidsetsize
= arg1
;
11420 uint32_t *target_grouplist
;
11424 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11425 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
11426 if (!target_grouplist
) {
11427 ret
= -TARGET_EFAULT
;
11430 for(i
= 0;i
< gidsetsize
; i
++)
11431 grouplist
[i
] = tswap32(target_grouplist
[i
]);
11432 unlock_user(target_grouplist
, arg2
, 0);
11433 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
11437 #ifdef TARGET_NR_fchown32
11438 case TARGET_NR_fchown32
:
11439 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
11442 #ifdef TARGET_NR_setresuid32
11443 case TARGET_NR_setresuid32
:
11444 ret
= get_errno(sys_setresuid(arg1
, arg2
, arg3
));
11447 #ifdef TARGET_NR_getresuid32
11448 case TARGET_NR_getresuid32
:
11450 uid_t ruid
, euid
, suid
;
11451 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11452 if (!is_error(ret
)) {
11453 if (put_user_u32(ruid
, arg1
)
11454 || put_user_u32(euid
, arg2
)
11455 || put_user_u32(suid
, arg3
))
11461 #ifdef TARGET_NR_setresgid32
11462 case TARGET_NR_setresgid32
:
11463 ret
= get_errno(sys_setresgid(arg1
, arg2
, arg3
));
11466 #ifdef TARGET_NR_getresgid32
11467 case TARGET_NR_getresgid32
:
11469 gid_t rgid
, egid
, sgid
;
11470 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11471 if (!is_error(ret
)) {
11472 if (put_user_u32(rgid
, arg1
)
11473 || put_user_u32(egid
, arg2
)
11474 || put_user_u32(sgid
, arg3
))
11480 #ifdef TARGET_NR_chown32
11481 case TARGET_NR_chown32
:
11482 if (!(p
= lock_user_string(arg1
)))
11484 ret
= get_errno(chown(p
, arg2
, arg3
));
11485 unlock_user(p
, arg1
, 0);
11488 #ifdef TARGET_NR_setuid32
11489 case TARGET_NR_setuid32
:
11490 ret
= get_errno(sys_setuid(arg1
));
11493 #ifdef TARGET_NR_setgid32
11494 case TARGET_NR_setgid32
:
11495 ret
= get_errno(sys_setgid(arg1
));
11498 #ifdef TARGET_NR_setfsuid32
11499 case TARGET_NR_setfsuid32
:
11500 ret
= get_errno(setfsuid(arg1
));
11503 #ifdef TARGET_NR_setfsgid32
11504 case TARGET_NR_setfsgid32
:
11505 ret
= get_errno(setfsgid(arg1
));
11509 case TARGET_NR_pivot_root
:
11510 goto unimplemented
;
11511 #ifdef TARGET_NR_mincore
11512 case TARGET_NR_mincore
:
11515 ret
= -TARGET_ENOMEM
;
11516 a
= lock_user(VERIFY_READ
, arg1
, arg2
, 0);
11520 ret
= -TARGET_EFAULT
;
11521 p
= lock_user_string(arg3
);
11525 ret
= get_errno(mincore(a
, arg2
, p
));
11526 unlock_user(p
, arg3
, ret
);
11528 unlock_user(a
, arg1
, 0);
11532 #ifdef TARGET_NR_arm_fadvise64_64
11533 case TARGET_NR_arm_fadvise64_64
:
11534 /* arm_fadvise64_64 looks like fadvise64_64 but
11535 * with different argument order: fd, advice, offset, len
11536 * rather than the usual fd, offset, len, advice.
11537 * Note that offset and len are both 64-bit so appear as
11538 * pairs of 32-bit registers.
11540 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
11541 target_offset64(arg5
, arg6
), arg2
);
11542 ret
= -host_to_target_errno(ret
);
11546 #if TARGET_ABI_BITS == 32
11548 #ifdef TARGET_NR_fadvise64_64
11549 case TARGET_NR_fadvise64_64
:
11550 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11551 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11559 /* 6 args: fd, offset (high, low), len (high, low), advice */
11560 if (regpairs_aligned(cpu_env
, num
)) {
11561 /* offset is in (3,4), len in (5,6) and advice in 7 */
11569 ret
= -host_to_target_errno(posix_fadvise(arg1
,
11570 target_offset64(arg2
, arg3
),
11571 target_offset64(arg4
, arg5
),
11576 #ifdef TARGET_NR_fadvise64
11577 case TARGET_NR_fadvise64
:
11578 /* 5 args: fd, offset (high, low), len, advice */
11579 if (regpairs_aligned(cpu_env
, num
)) {
11580 /* offset is in (3,4), len in 5 and advice in 6 */
11586 ret
= -host_to_target_errno(posix_fadvise(arg1
,
11587 target_offset64(arg2
, arg3
),
11592 #else /* not a 32-bit ABI */
11593 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11594 #ifdef TARGET_NR_fadvise64_64
11595 case TARGET_NR_fadvise64_64
:
11597 #ifdef TARGET_NR_fadvise64
11598 case TARGET_NR_fadvise64
:
11600 #ifdef TARGET_S390X
11602 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
11603 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
11604 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
11605 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
11609 ret
= -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
11612 #endif /* end of 64-bit ABI fadvise handling */
11614 #ifdef TARGET_NR_madvise
11615 case TARGET_NR_madvise
:
11616 /* A straight passthrough may not be safe because qemu sometimes
11617 turns private file-backed mappings into anonymous mappings.
11618 This will break MADV_DONTNEED.
11619 This is a hint, so ignoring and returning success is ok. */
11620 ret
= get_errno(0);
11623 #if TARGET_ABI_BITS == 32
11624 case TARGET_NR_fcntl64
:
11628 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
11629 to_flock64_fn
*copyto
= copy_to_user_flock64
;
11632 if (((CPUARMState
*)cpu_env
)->eabi
) {
11633 copyfrom
= copy_from_user_eabi_flock64
;
11634 copyto
= copy_to_user_eabi_flock64
;
11638 cmd
= target_to_host_fcntl_cmd(arg2
);
11639 if (cmd
== -TARGET_EINVAL
) {
11645 case TARGET_F_GETLK64
:
11646 ret
= copyfrom(&fl
, arg3
);
11650 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
11652 ret
= copyto(arg3
, &fl
);
11656 case TARGET_F_SETLK64
:
11657 case TARGET_F_SETLKW64
:
11658 ret
= copyfrom(&fl
, arg3
);
11662 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11665 ret
= do_fcntl(arg1
, arg2
, arg3
);
11671 #ifdef TARGET_NR_cacheflush
11672 case TARGET_NR_cacheflush
:
11673 /* self-modifying code is handled automatically, so nothing needed */
11677 #ifdef TARGET_NR_security
11678 case TARGET_NR_security
:
11679 goto unimplemented
;
11681 #ifdef TARGET_NR_getpagesize
11682 case TARGET_NR_getpagesize
:
11683 ret
= TARGET_PAGE_SIZE
;
11686 case TARGET_NR_gettid
:
11687 ret
= get_errno(gettid());
11689 #ifdef TARGET_NR_readahead
11690 case TARGET_NR_readahead
:
11691 #if TARGET_ABI_BITS == 32
11692 if (regpairs_aligned(cpu_env
, num
)) {
11697 ret
= get_errno(readahead(arg1
, target_offset64(arg2
, arg3
) , arg4
));
11699 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
11704 #ifdef TARGET_NR_setxattr
11705 case TARGET_NR_listxattr
:
11706 case TARGET_NR_llistxattr
:
11710 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11712 ret
= -TARGET_EFAULT
;
11716 p
= lock_user_string(arg1
);
11718 if (num
== TARGET_NR_listxattr
) {
11719 ret
= get_errno(listxattr(p
, b
, arg3
));
11721 ret
= get_errno(llistxattr(p
, b
, arg3
));
11724 ret
= -TARGET_EFAULT
;
11726 unlock_user(p
, arg1
, 0);
11727 unlock_user(b
, arg2
, arg3
);
11730 case TARGET_NR_flistxattr
:
11734 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11736 ret
= -TARGET_EFAULT
;
11740 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
11741 unlock_user(b
, arg2
, arg3
);
11744 case TARGET_NR_setxattr
:
11745 case TARGET_NR_lsetxattr
:
11747 void *p
, *n
, *v
= 0;
11749 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11751 ret
= -TARGET_EFAULT
;
11755 p
= lock_user_string(arg1
);
11756 n
= lock_user_string(arg2
);
11758 if (num
== TARGET_NR_setxattr
) {
11759 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
11761 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
11764 ret
= -TARGET_EFAULT
;
11766 unlock_user(p
, arg1
, 0);
11767 unlock_user(n
, arg2
, 0);
11768 unlock_user(v
, arg3
, 0);
11771 case TARGET_NR_fsetxattr
:
11775 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11777 ret
= -TARGET_EFAULT
;
11781 n
= lock_user_string(arg2
);
11783 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
11785 ret
= -TARGET_EFAULT
;
11787 unlock_user(n
, arg2
, 0);
11788 unlock_user(v
, arg3
, 0);
11791 case TARGET_NR_getxattr
:
11792 case TARGET_NR_lgetxattr
:
11794 void *p
, *n
, *v
= 0;
11796 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11798 ret
= -TARGET_EFAULT
;
11802 p
= lock_user_string(arg1
);
11803 n
= lock_user_string(arg2
);
11805 if (num
== TARGET_NR_getxattr
) {
11806 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
11808 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
11811 ret
= -TARGET_EFAULT
;
11813 unlock_user(p
, arg1
, 0);
11814 unlock_user(n
, arg2
, 0);
11815 unlock_user(v
, arg3
, arg4
);
11818 case TARGET_NR_fgetxattr
:
11822 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11824 ret
= -TARGET_EFAULT
;
11828 n
= lock_user_string(arg2
);
11830 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
11832 ret
= -TARGET_EFAULT
;
11834 unlock_user(n
, arg2
, 0);
11835 unlock_user(v
, arg3
, arg4
);
11838 case TARGET_NR_removexattr
:
11839 case TARGET_NR_lremovexattr
:
11842 p
= lock_user_string(arg1
);
11843 n
= lock_user_string(arg2
);
11845 if (num
== TARGET_NR_removexattr
) {
11846 ret
= get_errno(removexattr(p
, n
));
11848 ret
= get_errno(lremovexattr(p
, n
));
11851 ret
= -TARGET_EFAULT
;
11853 unlock_user(p
, arg1
, 0);
11854 unlock_user(n
, arg2
, 0);
11857 case TARGET_NR_fremovexattr
:
11860 n
= lock_user_string(arg2
);
11862 ret
= get_errno(fremovexattr(arg1
, n
));
11864 ret
= -TARGET_EFAULT
;
11866 unlock_user(n
, arg2
, 0);
11870 #endif /* CONFIG_ATTR */
11871 #ifdef TARGET_NR_set_thread_area
11872 case TARGET_NR_set_thread_area
:
11873 #if defined(TARGET_MIPS)
11874 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
11877 #elif defined(TARGET_CRIS)
11879 ret
= -TARGET_EINVAL
;
11881 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
11885 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11886 ret
= do_set_thread_area(cpu_env
, arg1
);
11888 #elif defined(TARGET_M68K)
11890 TaskState
*ts
= cpu
->opaque
;
11891 ts
->tp_value
= arg1
;
11896 goto unimplemented_nowarn
;
11899 #ifdef TARGET_NR_get_thread_area
11900 case TARGET_NR_get_thread_area
:
11901 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11902 ret
= do_get_thread_area(cpu_env
, arg1
);
11904 #elif defined(TARGET_M68K)
11906 TaskState
*ts
= cpu
->opaque
;
11907 ret
= ts
->tp_value
;
11911 goto unimplemented_nowarn
;
11914 #ifdef TARGET_NR_getdomainname
11915 case TARGET_NR_getdomainname
:
11916 goto unimplemented_nowarn
;
11919 #ifdef TARGET_NR_clock_settime
11920 case TARGET_NR_clock_settime
:
11922 struct timespec ts
;
11924 ret
= target_to_host_timespec(&ts
, arg2
);
11925 if (!is_error(ret
)) {
11926 ret
= get_errno(clock_settime(arg1
, &ts
));
11931 #ifdef TARGET_NR_clock_gettime
11932 case TARGET_NR_clock_gettime
:
11934 struct timespec ts
;
11935 ret
= get_errno(clock_gettime(arg1
, &ts
));
11936 if (!is_error(ret
)) {
11937 ret
= host_to_target_timespec(arg2
, &ts
);
11942 #ifdef TARGET_NR_clock_getres
11943 case TARGET_NR_clock_getres
:
11945 struct timespec ts
;
11946 ret
= get_errno(clock_getres(arg1
, &ts
));
11947 if (!is_error(ret
)) {
11948 host_to_target_timespec(arg2
, &ts
);
11953 #ifdef TARGET_NR_clock_nanosleep
11954 case TARGET_NR_clock_nanosleep
:
11956 struct timespec ts
;
11957 target_to_host_timespec(&ts
, arg3
);
11958 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
11959 &ts
, arg4
? &ts
: NULL
));
11961 host_to_target_timespec(arg4
, &ts
);
11963 #if defined(TARGET_PPC)
11964 /* clock_nanosleep is odd in that it returns positive errno values.
11965 * On PPC, CR0 bit 3 should be set in such a situation. */
11966 if (ret
&& ret
!= -TARGET_ERESTARTSYS
) {
11967 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
11974 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11975 case TARGET_NR_set_tid_address
:
11976 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
11980 case TARGET_NR_tkill
:
11981 ret
= get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
11984 case TARGET_NR_tgkill
:
11985 ret
= get_errno(safe_tgkill((int)arg1
, (int)arg2
,
11986 target_to_host_signal(arg3
)));
11989 #ifdef TARGET_NR_set_robust_list
11990 case TARGET_NR_set_robust_list
:
11991 case TARGET_NR_get_robust_list
:
11992 /* The ABI for supporting robust futexes has userspace pass
11993 * the kernel a pointer to a linked list which is updated by
11994 * userspace after the syscall; the list is walked by the kernel
11995 * when the thread exits. Since the linked list in QEMU guest
11996 * memory isn't a valid linked list for the host and we have
11997 * no way to reliably intercept the thread-death event, we can't
11998 * support these. Silently return ENOSYS so that guest userspace
11999 * falls back to a non-robust futex implementation (which should
12000 * be OK except in the corner case of the guest crashing while
12001 * holding a mutex that is shared with another process via
12004 goto unimplemented_nowarn
;
12007 #if defined(TARGET_NR_utimensat)
12008 case TARGET_NR_utimensat
:
12010 struct timespec
*tsp
, ts
[2];
12014 target_to_host_timespec(ts
, arg3
);
12015 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
12019 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
12021 if (!(p
= lock_user_string(arg2
))) {
12022 ret
= -TARGET_EFAULT
;
12025 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
12026 unlock_user(p
, arg2
, 0);
12031 case TARGET_NR_futex
:
12032 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12034 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12035 case TARGET_NR_inotify_init
:
12036 ret
= get_errno(sys_inotify_init());
12038 fd_trans_register(ret
, &target_inotify_trans
);
12042 #ifdef CONFIG_INOTIFY1
12043 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12044 case TARGET_NR_inotify_init1
:
12045 ret
= get_errno(sys_inotify_init1(target_to_host_bitmask(arg1
,
12046 fcntl_flags_tbl
)));
12048 fd_trans_register(ret
, &target_inotify_trans
);
12053 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12054 case TARGET_NR_inotify_add_watch
:
12055 p
= lock_user_string(arg2
);
12056 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
12057 unlock_user(p
, arg2
, 0);
12060 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12061 case TARGET_NR_inotify_rm_watch
:
12062 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
12066 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12067 case TARGET_NR_mq_open
:
12069 struct mq_attr posix_mq_attr
;
12070 struct mq_attr
*pposix_mq_attr
;
12073 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
12074 pposix_mq_attr
= NULL
;
12076 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
12079 pposix_mq_attr
= &posix_mq_attr
;
12081 p
= lock_user_string(arg1
- 1);
12085 ret
= get_errno(mq_open(p
, host_flags
, arg3
, pposix_mq_attr
));
12086 unlock_user (p
, arg1
, 0);
12090 case TARGET_NR_mq_unlink
:
12091 p
= lock_user_string(arg1
- 1);
12093 ret
= -TARGET_EFAULT
;
12096 ret
= get_errno(mq_unlink(p
));
12097 unlock_user (p
, arg1
, 0);
12100 case TARGET_NR_mq_timedsend
:
12102 struct timespec ts
;
12104 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
12106 target_to_host_timespec(&ts
, arg5
);
12107 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
12108 host_to_target_timespec(arg5
, &ts
);
12110 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
12112 unlock_user (p
, arg2
, arg3
);
12116 case TARGET_NR_mq_timedreceive
:
12118 struct timespec ts
;
12121 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
12123 target_to_host_timespec(&ts
, arg5
);
12124 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12126 host_to_target_timespec(arg5
, &ts
);
12128 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12131 unlock_user (p
, arg2
, arg3
);
12133 put_user_u32(prio
, arg4
);
12137 /* Not implemented for now... */
12138 /* case TARGET_NR_mq_notify: */
12141 case TARGET_NR_mq_getsetattr
:
12143 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
12146 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
12147 ret
= get_errno(mq_setattr(arg1
, &posix_mq_attr_in
,
12148 &posix_mq_attr_out
));
12149 } else if (arg3
!= 0) {
12150 ret
= get_errno(mq_getattr(arg1
, &posix_mq_attr_out
));
12152 if (ret
== 0 && arg3
!= 0) {
12153 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
12159 #ifdef CONFIG_SPLICE
12160 #ifdef TARGET_NR_tee
12161 case TARGET_NR_tee
:
12163 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
12167 #ifdef TARGET_NR_splice
12168 case TARGET_NR_splice
:
12170 loff_t loff_in
, loff_out
;
12171 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
12173 if (get_user_u64(loff_in
, arg2
)) {
12176 ploff_in
= &loff_in
;
12179 if (get_user_u64(loff_out
, arg4
)) {
12182 ploff_out
= &loff_out
;
12184 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
12186 if (put_user_u64(loff_in
, arg2
)) {
12191 if (put_user_u64(loff_out
, arg4
)) {
12198 #ifdef TARGET_NR_vmsplice
12199 case TARGET_NR_vmsplice
:
12201 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
12203 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
12204 unlock_iovec(vec
, arg2
, arg3
, 0);
12206 ret
= -host_to_target_errno(errno
);
12211 #endif /* CONFIG_SPLICE */
12212 #ifdef CONFIG_EVENTFD
12213 #if defined(TARGET_NR_eventfd)
12214 case TARGET_NR_eventfd
:
12215 ret
= get_errno(eventfd(arg1
, 0));
12217 fd_trans_register(ret
, &target_eventfd_trans
);
12221 #if defined(TARGET_NR_eventfd2)
12222 case TARGET_NR_eventfd2
:
12224 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
12225 if (arg2
& TARGET_O_NONBLOCK
) {
12226 host_flags
|= O_NONBLOCK
;
12228 if (arg2
& TARGET_O_CLOEXEC
) {
12229 host_flags
|= O_CLOEXEC
;
12231 ret
= get_errno(eventfd(arg1
, host_flags
));
12233 fd_trans_register(ret
, &target_eventfd_trans
);
12238 #endif /* CONFIG_EVENTFD */
12239 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12240 case TARGET_NR_fallocate
:
12241 #if TARGET_ABI_BITS == 32
12242 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
12243 target_offset64(arg5
, arg6
)));
12245 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
12249 #if defined(CONFIG_SYNC_FILE_RANGE)
12250 #if defined(TARGET_NR_sync_file_range)
12251 case TARGET_NR_sync_file_range
:
12252 #if TARGET_ABI_BITS == 32
12253 #if defined(TARGET_MIPS)
12254 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12255 target_offset64(arg5
, arg6
), arg7
));
12257 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
12258 target_offset64(arg4
, arg5
), arg6
));
12259 #endif /* !TARGET_MIPS */
12261 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
12265 #if defined(TARGET_NR_sync_file_range2)
12266 case TARGET_NR_sync_file_range2
:
12267 /* This is like sync_file_range but the arguments are reordered */
12268 #if TARGET_ABI_BITS == 32
12269 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12270 target_offset64(arg5
, arg6
), arg2
));
12272 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
12277 #if defined(TARGET_NR_signalfd4)
12278 case TARGET_NR_signalfd4
:
12279 ret
= do_signalfd4(arg1
, arg2
, arg4
);
12282 #if defined(TARGET_NR_signalfd)
12283 case TARGET_NR_signalfd
:
12284 ret
= do_signalfd4(arg1
, arg2
, 0);
12287 #if defined(CONFIG_EPOLL)
12288 #if defined(TARGET_NR_epoll_create)
12289 case TARGET_NR_epoll_create
:
12290 ret
= get_errno(epoll_create(arg1
));
12293 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12294 case TARGET_NR_epoll_create1
:
12295 ret
= get_errno(epoll_create1(arg1
));
12298 #if defined(TARGET_NR_epoll_ctl)
12299 case TARGET_NR_epoll_ctl
:
12301 struct epoll_event ep
;
12302 struct epoll_event
*epp
= 0;
12304 struct target_epoll_event
*target_ep
;
12305 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
12308 ep
.events
= tswap32(target_ep
->events
);
12309 /* The epoll_data_t union is just opaque data to the kernel,
12310 * so we transfer all 64 bits across and need not worry what
12311 * actual data type it is.
12313 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
12314 unlock_user_struct(target_ep
, arg4
, 0);
12317 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
12322 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12323 #if defined(TARGET_NR_epoll_wait)
12324 case TARGET_NR_epoll_wait
:
12326 #if defined(TARGET_NR_epoll_pwait)
12327 case TARGET_NR_epoll_pwait
:
12330 struct target_epoll_event
*target_ep
;
12331 struct epoll_event
*ep
;
12333 int maxevents
= arg3
;
12334 int timeout
= arg4
;
12336 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
12337 ret
= -TARGET_EINVAL
;
12341 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
12342 maxevents
* sizeof(struct target_epoll_event
), 1);
12347 ep
= g_try_new(struct epoll_event
, maxevents
);
12349 unlock_user(target_ep
, arg2
, 0);
12350 ret
= -TARGET_ENOMEM
;
12355 #if defined(TARGET_NR_epoll_pwait)
12356 case TARGET_NR_epoll_pwait
:
12358 target_sigset_t
*target_set
;
12359 sigset_t _set
, *set
= &_set
;
12362 if (arg6
!= sizeof(target_sigset_t
)) {
12363 ret
= -TARGET_EINVAL
;
12367 target_set
= lock_user(VERIFY_READ
, arg5
,
12368 sizeof(target_sigset_t
), 1);
12370 ret
= -TARGET_EFAULT
;
12373 target_to_host_sigset(set
, target_set
);
12374 unlock_user(target_set
, arg5
, 0);
12379 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12380 set
, SIGSET_T_SIZE
));
12384 #if defined(TARGET_NR_epoll_wait)
12385 case TARGET_NR_epoll_wait
:
12386 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12391 ret
= -TARGET_ENOSYS
;
12393 if (!is_error(ret
)) {
12395 for (i
= 0; i
< ret
; i
++) {
12396 target_ep
[i
].events
= tswap32(ep
[i
].events
);
12397 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
12399 unlock_user(target_ep
, arg2
,
12400 ret
* sizeof(struct target_epoll_event
));
12402 unlock_user(target_ep
, arg2
, 0);
12409 #ifdef TARGET_NR_prlimit64
12410 case TARGET_NR_prlimit64
:
12412 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12413 struct target_rlimit64
*target_rnew
, *target_rold
;
12414 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
12415 int resource
= target_to_host_resource(arg2
);
12417 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
12420 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
12421 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
12422 unlock_user_struct(target_rnew
, arg3
, 0);
12426 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
12427 if (!is_error(ret
) && arg4
) {
12428 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
12431 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
12432 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
12433 unlock_user_struct(target_rold
, arg4
, 1);
12438 #ifdef TARGET_NR_gethostname
12439 case TARGET_NR_gethostname
:
12441 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
12443 ret
= get_errno(gethostname(name
, arg2
));
12444 unlock_user(name
, arg1
, arg2
);
12446 ret
= -TARGET_EFAULT
;
12451 #ifdef TARGET_NR_atomic_cmpxchg_32
12452 case TARGET_NR_atomic_cmpxchg_32
:
12454 /* should use start_exclusive from main.c */
12455 abi_ulong mem_value
;
12456 if (get_user_u32(mem_value
, arg6
)) {
12457 target_siginfo_t info
;
12458 info
.si_signo
= SIGSEGV
;
12460 info
.si_code
= TARGET_SEGV_MAPERR
;
12461 info
._sifields
._sigfault
._addr
= arg6
;
12462 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
12463 QEMU_SI_FAULT
, &info
);
12467 if (mem_value
== arg2
)
12468 put_user_u32(arg1
, arg6
);
12473 #ifdef TARGET_NR_atomic_barrier
12474 case TARGET_NR_atomic_barrier
:
12476 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
12482 #ifdef TARGET_NR_timer_create
12483 case TARGET_NR_timer_create
:
12485 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12487 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
12490 int timer_index
= next_free_host_timer();
12492 if (timer_index
< 0) {
12493 ret
= -TARGET_EAGAIN
;
12495 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
12498 phost_sevp
= &host_sevp
;
12499 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
12505 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
12509 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
12518 #ifdef TARGET_NR_timer_settime
12519 case TARGET_NR_timer_settime
:
12521 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12522 * struct itimerspec * old_value */
12523 target_timer_t timerid
= get_timer_id(arg1
);
12527 } else if (arg3
== 0) {
12528 ret
= -TARGET_EINVAL
;
12530 timer_t htimer
= g_posix_timers
[timerid
];
12531 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12533 if (target_to_host_itimerspec(&hspec_new
, arg3
)) {
12537 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12538 if (arg4
&& host_to_target_itimerspec(arg4
, &hspec_old
)) {
12546 #ifdef TARGET_NR_timer_gettime
12547 case TARGET_NR_timer_gettime
:
12549 /* args: timer_t timerid, struct itimerspec *curr_value */
12550 target_timer_t timerid
= get_timer_id(arg1
);
12554 } else if (!arg2
) {
12555 ret
= -TARGET_EFAULT
;
12557 timer_t htimer
= g_posix_timers
[timerid
];
12558 struct itimerspec hspec
;
12559 ret
= get_errno(timer_gettime(htimer
, &hspec
));
12561 if (host_to_target_itimerspec(arg2
, &hspec
)) {
12562 ret
= -TARGET_EFAULT
;
12569 #ifdef TARGET_NR_timer_getoverrun
12570 case TARGET_NR_timer_getoverrun
:
12572 /* args: timer_t timerid */
12573 target_timer_t timerid
= get_timer_id(arg1
);
12578 timer_t htimer
= g_posix_timers
[timerid
];
12579 ret
= get_errno(timer_getoverrun(htimer
));
12581 fd_trans_unregister(ret
);
12586 #ifdef TARGET_NR_timer_delete
12587 case TARGET_NR_timer_delete
:
12589 /* args: timer_t timerid */
12590 target_timer_t timerid
= get_timer_id(arg1
);
12595 timer_t htimer
= g_posix_timers
[timerid
];
12596 ret
= get_errno(timer_delete(htimer
));
12597 g_posix_timers
[timerid
] = 0;
12603 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12604 case TARGET_NR_timerfd_create
:
12605 ret
= get_errno(timerfd_create(arg1
,
12606 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
12610 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12611 case TARGET_NR_timerfd_gettime
:
12613 struct itimerspec its_curr
;
12615 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
12617 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
12624 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12625 case TARGET_NR_timerfd_settime
:
12627 struct itimerspec its_new
, its_old
, *p_new
;
12630 if (target_to_host_itimerspec(&its_new
, arg3
)) {
12638 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
12640 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
12647 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12648 case TARGET_NR_ioprio_get
:
12649 ret
= get_errno(ioprio_get(arg1
, arg2
));
12653 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12654 case TARGET_NR_ioprio_set
:
12655 ret
= get_errno(ioprio_set(arg1
, arg2
, arg3
));
12659 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12660 case TARGET_NR_setns
:
12661 ret
= get_errno(setns(arg1
, arg2
));
12664 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12665 case TARGET_NR_unshare
:
12666 ret
= get_errno(unshare(arg1
));
12669 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12670 case TARGET_NR_kcmp
:
12671 ret
= get_errno(kcmp(arg1
, arg2
, arg3
, arg4
, arg5
));
12677 gemu_log("qemu: Unsupported syscall: %d\n", num
);
12678 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
12679 unimplemented_nowarn
:
12681 ret
= -TARGET_ENOSYS
;
12686 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
12689 print_syscall_ret(num
, ret
);
12690 trace_guest_user_syscall_ret(cpu
, num
, ret
);
12693 ret
= -TARGET_EFAULT
;