4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
36 #include <linux/capability.h>
38 #include <sys/timex.h>
40 int __clone2(int (*fn
)(void *), void *child_stack_base
,
41 size_t stack_size
, int flags
, void *arg
, ...);
43 #include <sys/socket.h>
47 #include <sys/times.h>
50 #include <sys/statfs.h>
53 #include <sys/sysinfo.h>
54 #include <sys/signalfd.h>
55 //#include <sys/user.h>
56 #include <netinet/ip.h>
57 #include <netinet/tcp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include "qemu-common.h"
62 #include <sys/timerfd.h>
68 #include <sys/eventfd.h>
71 #include <sys/epoll.h>
74 #include "qemu/xattr.h"
76 #ifdef CONFIG_SENDFILE
77 #include <sys/sendfile.h>
80 #define termios host_termios
81 #define winsize host_winsize
82 #define termio host_termio
83 #define sgttyb host_sgttyb /* same as target */
84 #define tchars host_tchars /* same as target */
85 #define ltchars host_ltchars /* same as target */
87 #include <linux/termios.h>
88 #include <linux/unistd.h>
89 #include <linux/cdrom.h>
90 #include <linux/hdreg.h>
91 #include <linux/soundcard.h>
93 #include <linux/mtio.h>
95 #if defined(CONFIG_FIEMAP)
96 #include <linux/fiemap.h>
100 #include <linux/dm-ioctl.h>
101 #include <linux/reboot.h>
102 #include <linux/route.h>
103 #include <linux/filter.h>
104 #include <linux/blkpg.h>
105 #include <netpacket/packet.h>
106 #include <linux/netlink.h>
107 #ifdef CONFIG_RTNETLINK
108 #include <linux/rtnetlink.h>
109 #include <linux/if_bridge.h>
111 #include <linux/audit.h>
112 #include "linux_loop.h"
118 #define CLONE_IO 0x80000000 /* Clone io context */
121 /* We can't directly call the host clone syscall, because this will
122 * badly confuse libc (breaking mutexes, for example). So we must
123 * divide clone flags into:
124 * * flag combinations that look like pthread_create()
125 * * flag combinations that look like fork()
126 * * flags we can implement within QEMU itself
127 * * flags we can't support and will return an error for
129 /* For thread creation, all these flags must be present; for
130 * fork, none must be present.
132 #define CLONE_THREAD_FLAGS \
133 (CLONE_VM | CLONE_FS | CLONE_FILES | \
134 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
136 /* These flags are ignored:
137 * CLONE_DETACHED is now ignored by the kernel;
138 * CLONE_IO is just an optimisation hint to the I/O scheduler
140 #define CLONE_IGNORED_FLAGS \
141 (CLONE_DETACHED | CLONE_IO)
143 /* Flags for fork which we can implement within QEMU itself */
144 #define CLONE_OPTIONAL_FORK_FLAGS \
145 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
146 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
148 /* Flags for thread creation which we can implement within QEMU itself */
149 #define CLONE_OPTIONAL_THREAD_FLAGS \
150 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
151 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
153 #define CLONE_INVALID_FORK_FLAGS \
154 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
156 #define CLONE_INVALID_THREAD_FLAGS \
157 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
158 CLONE_IGNORED_FLAGS))
160 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
161 * have almost all been allocated. We cannot support any of
162 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
163 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
164 * The checks against the invalid thread masks above will catch these.
165 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
169 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
170 * once. This exercises the codepaths for restart.
172 //#define DEBUG_ERESTARTSYS
174 //#include <linux/msdos_fs.h>
175 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
176 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
/* Macros that define a small static wrapper function invoking a raw host
 * syscall by number (__NR_<name>), for 0..6 arguments.  Used for syscalls
 * that glibc does not expose (or exposes with different semantics).
 * Restored brace lines lost in the mangled paste.
 */
#define _syscall0(type,name)		\
static type name (void)			\
{					\
	return syscall(__NR_##name);	\
}

#define _syscall1(type,name,type1,arg1)		\
static type name (type1 arg1)			\
{						\
	return syscall(__NR_##name, arg1);	\
}

#define _syscall2(type,name,type1,arg1,type2,arg2)	\
static type name (type1 arg1,type2 arg2)		\
{							\
	return syscall(__NR_##name, arg1, arg2);	\
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
static type name (type1 arg1,type2 arg2,type3 arg3)		\
{								\
	return syscall(__NR_##name, arg1, arg2, arg3);		\
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
{										\
	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
		  type5,arg5)							\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
{										\
	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
		  type5,arg5,type6,arg6)					\
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
		  type6 arg6)							\
{										\
	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
}
233 #define __NR_sys_uname __NR_uname
234 #define __NR_sys_getcwd1 __NR_getcwd
235 #define __NR_sys_getdents __NR_getdents
236 #define __NR_sys_getdents64 __NR_getdents64
237 #define __NR_sys_getpriority __NR_getpriority
238 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
239 #define __NR_sys_syslog __NR_syslog
240 #define __NR_sys_futex __NR_futex
241 #define __NR_sys_inotify_init __NR_inotify_init
242 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
243 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
245 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
247 #define __NR__llseek __NR_lseek
250 /* Newer kernel ports have llseek() instead of _llseek() */
251 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
252 #define TARGET_NR__llseek TARGET_NR_llseek
#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    /* No host gettid syscall available: report "not implemented". */
    return -ENOSYS;
}
#endif
264 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
265 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
267 #if !defined(__NR_getdents) || \
268 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
269 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
271 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
272 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
273 loff_t
*, res
, uint
, wh
);
275 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
276 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
277 #ifdef __NR_exit_group
278 _syscall1(int,exit_group
,int,error_code
)
280 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
281 _syscall1(int,set_tid_address
,int *,tidptr
)
283 #if defined(TARGET_NR_futex) && defined(__NR_futex)
284 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
285 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
287 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
288 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
289 unsigned long *, user_mask_ptr
);
290 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
291 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
292 unsigned long *, user_mask_ptr
);
293 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
295 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
296 struct __user_cap_data_struct
*, data
);
297 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
298 struct __user_cap_data_struct
*, data
);
299 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
300 _syscall2(int, ioprio_get
, int, which
, int, who
)
302 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
303 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
305 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
306 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
309 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
310 _syscall5(int, kcmp
, pid_t
, pid1
, pid_t
, pid2
, int, type
,
311 unsigned long, idx1
, unsigned long, idx2
)
314 static bitmask_transtbl fcntl_flags_tbl
[] = {
315 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
316 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
317 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
318 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
319 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
320 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
321 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
322 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
323 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
324 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
325 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
326 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
327 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
328 #if defined(O_DIRECT)
329 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
331 #if defined(O_NOATIME)
332 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
334 #if defined(O_CLOEXEC)
335 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
338 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
340 /* Don't terminate the list prematurely on 64-bit host+guest. */
341 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
342 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
349 QEMU_IFLA_BR_FORWARD_DELAY
,
350 QEMU_IFLA_BR_HELLO_TIME
,
351 QEMU_IFLA_BR_MAX_AGE
,
352 QEMU_IFLA_BR_AGEING_TIME
,
353 QEMU_IFLA_BR_STP_STATE
,
354 QEMU_IFLA_BR_PRIORITY
,
355 QEMU_IFLA_BR_VLAN_FILTERING
,
356 QEMU_IFLA_BR_VLAN_PROTOCOL
,
357 QEMU_IFLA_BR_GROUP_FWD_MASK
,
358 QEMU_IFLA_BR_ROOT_ID
,
359 QEMU_IFLA_BR_BRIDGE_ID
,
360 QEMU_IFLA_BR_ROOT_PORT
,
361 QEMU_IFLA_BR_ROOT_PATH_COST
,
362 QEMU_IFLA_BR_TOPOLOGY_CHANGE
,
363 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED
,
364 QEMU_IFLA_BR_HELLO_TIMER
,
365 QEMU_IFLA_BR_TCN_TIMER
,
366 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER
,
367 QEMU_IFLA_BR_GC_TIMER
,
368 QEMU_IFLA_BR_GROUP_ADDR
,
369 QEMU_IFLA_BR_FDB_FLUSH
,
370 QEMU_IFLA_BR_MCAST_ROUTER
,
371 QEMU_IFLA_BR_MCAST_SNOOPING
,
372 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR
,
373 QEMU_IFLA_BR_MCAST_QUERIER
,
374 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY
,
375 QEMU_IFLA_BR_MCAST_HASH_MAX
,
376 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT
,
377 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT
,
378 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL
,
379 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL
,
380 QEMU_IFLA_BR_MCAST_QUERIER_INTVL
,
381 QEMU_IFLA_BR_MCAST_QUERY_INTVL
,
382 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
,
383 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL
,
384 QEMU_IFLA_BR_NF_CALL_IPTABLES
,
385 QEMU_IFLA_BR_NF_CALL_IP6TABLES
,
386 QEMU_IFLA_BR_NF_CALL_ARPTABLES
,
387 QEMU_IFLA_BR_VLAN_DEFAULT_PVID
,
389 QEMU_IFLA_BR_VLAN_STATS_ENABLED
,
390 QEMU_IFLA_BR_MCAST_STATS_ENABLED
,
414 QEMU_IFLA_NET_NS_PID
,
417 QEMU_IFLA_VFINFO_LIST
,
425 QEMU_IFLA_PROMISCUITY
,
426 QEMU_IFLA_NUM_TX_QUEUES
,
427 QEMU_IFLA_NUM_RX_QUEUES
,
429 QEMU_IFLA_PHYS_PORT_ID
,
430 QEMU_IFLA_CARRIER_CHANGES
,
431 QEMU_IFLA_PHYS_SWITCH_ID
,
432 QEMU_IFLA_LINK_NETNSID
,
433 QEMU_IFLA_PHYS_PORT_NAME
,
434 QEMU_IFLA_PROTO_DOWN
,
435 QEMU_IFLA_GSO_MAX_SEGS
,
436 QEMU_IFLA_GSO_MAX_SIZE
,
443 QEMU_IFLA_BRPORT_UNSPEC
,
444 QEMU_IFLA_BRPORT_STATE
,
445 QEMU_IFLA_BRPORT_PRIORITY
,
446 QEMU_IFLA_BRPORT_COST
,
447 QEMU_IFLA_BRPORT_MODE
,
448 QEMU_IFLA_BRPORT_GUARD
,
449 QEMU_IFLA_BRPORT_PROTECT
,
450 QEMU_IFLA_BRPORT_FAST_LEAVE
,
451 QEMU_IFLA_BRPORT_LEARNING
,
452 QEMU_IFLA_BRPORT_UNICAST_FLOOD
,
453 QEMU_IFLA_BRPORT_PROXYARP
,
454 QEMU_IFLA_BRPORT_LEARNING_SYNC
,
455 QEMU_IFLA_BRPORT_PROXYARP_WIFI
,
456 QEMU_IFLA_BRPORT_ROOT_ID
,
457 QEMU_IFLA_BRPORT_BRIDGE_ID
,
458 QEMU_IFLA_BRPORT_DESIGNATED_PORT
,
459 QEMU_IFLA_BRPORT_DESIGNATED_COST
,
462 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
,
463 QEMU_IFLA_BRPORT_CONFIG_PENDING
,
464 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER
,
465 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER
,
466 QEMU_IFLA_BRPORT_HOLD_TIMER
,
467 QEMU_IFLA_BRPORT_FLUSH
,
468 QEMU_IFLA_BRPORT_MULTICAST_ROUTER
,
469 QEMU_IFLA_BRPORT_PAD
,
470 QEMU___IFLA_BRPORT_MAX
474 QEMU_IFLA_INFO_UNSPEC
,
477 QEMU_IFLA_INFO_XSTATS
,
478 QEMU_IFLA_INFO_SLAVE_KIND
,
479 QEMU_IFLA_INFO_SLAVE_DATA
,
480 QEMU___IFLA_INFO_MAX
,
484 QEMU_IFLA_INET_UNSPEC
,
486 QEMU___IFLA_INET_MAX
,
490 QEMU_IFLA_INET6_UNSPEC
,
491 QEMU_IFLA_INET6_FLAGS
,
492 QEMU_IFLA_INET6_CONF
,
493 QEMU_IFLA_INET6_STATS
,
494 QEMU_IFLA_INET6_MCAST
,
495 QEMU_IFLA_INET6_CACHEINFO
,
496 QEMU_IFLA_INET6_ICMP6STATS
,
497 QEMU_IFLA_INET6_TOKEN
,
498 QEMU_IFLA_INET6_ADDR_GEN_MODE
,
499 QEMU___IFLA_INET6_MAX
502 typedef abi_long (*TargetFdDataFunc
)(void *, size_t);
503 typedef abi_long (*TargetFdAddrFunc
)(void *, abi_ulong
, socklen_t
);
504 typedef struct TargetFdTrans
{
505 TargetFdDataFunc host_to_target_data
;
506 TargetFdDataFunc target_to_host_data
;
507 TargetFdAddrFunc target_to_host_addr
;
/* Per-fd table of translation hooks; a NULL entry means "no translation".
 * Grown on demand by fd_trans_register(). */
static TargetFdTrans **target_fd_trans;

/* Number of entries currently allocated in target_fd_trans. */
static unsigned int target_fd_max;
514 static TargetFdDataFunc
fd_trans_target_to_host_data(int fd
)
516 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
517 return target_fd_trans
[fd
]->target_to_host_data
;
522 static TargetFdDataFunc
fd_trans_host_to_target_data(int fd
)
524 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
525 return target_fd_trans
[fd
]->host_to_target_data
;
530 static TargetFdAddrFunc
fd_trans_target_to_host_addr(int fd
)
532 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
533 return target_fd_trans
[fd
]->target_to_host_addr
;
538 static void fd_trans_register(int fd
, TargetFdTrans
*trans
)
542 if (fd
>= target_fd_max
) {
543 oldmax
= target_fd_max
;
544 target_fd_max
= ((fd
>> 6) + 1) << 6; /* by slice of 64 entries */
545 target_fd_trans
= g_renew(TargetFdTrans
*,
546 target_fd_trans
, target_fd_max
);
547 memset((void *)(target_fd_trans
+ oldmax
), 0,
548 (target_fd_max
- oldmax
) * sizeof(TargetFdTrans
*));
550 target_fd_trans
[fd
] = trans
;
553 static void fd_trans_unregister(int fd
)
555 if (fd
>= 0 && fd
< target_fd_max
) {
556 target_fd_trans
[fd
] = NULL
;
560 static void fd_trans_dup(int oldfd
, int newfd
)
562 fd_trans_unregister(newfd
);
563 if (oldfd
< target_fd_max
&& target_fd_trans
[oldfd
]) {
564 fd_trans_register(newfd
, target_fd_trans
[oldfd
]);
/* Emulate the getcwd syscall's return convention on top of libc getcwd():
 * on success return the length of the path INCLUDING the trailing NUL
 * (what the kernel syscall returns); on failure return -1 with errno set
 * by getcwd() itself.
 */
static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}
#ifdef TARGET_NR_utimensat
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Host kernel has no utimensat: fail the emulated syscall with ENOSYS
 * via the usual "-1 + errno" convention.
 */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper so the syscall table can call a uniform sys_* name. */
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY  */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
/* NOTE(review): members reconstructed as the kernel's fixed 64-bit layout —
 * confirm against upstream. */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};

_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maxiumum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

/* Claim the first free slot in g_posix_timers (marking it in use with the
 * placeholder value 1) and return its index, or -1 if all slots are taken.
 */
static inline int next_free_host_timer(void)
{
    int k ;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
/* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
679 #define ERRNO_TABLE_SIZE 1200
681 /* target_to_host_errno_table[] is initialized from
682 * host_to_target_errno_table[] in syscall_init(). */
683 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
687 * This list is the union of errno values overridden in asm-<arch>/errno.h
688 * minus the errnos that are not actually generic to all archs.
690 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
691 [EAGAIN
] = TARGET_EAGAIN
,
692 [EIDRM
] = TARGET_EIDRM
,
693 [ECHRNG
] = TARGET_ECHRNG
,
694 [EL2NSYNC
] = TARGET_EL2NSYNC
,
695 [EL3HLT
] = TARGET_EL3HLT
,
696 [EL3RST
] = TARGET_EL3RST
,
697 [ELNRNG
] = TARGET_ELNRNG
,
698 [EUNATCH
] = TARGET_EUNATCH
,
699 [ENOCSI
] = TARGET_ENOCSI
,
700 [EL2HLT
] = TARGET_EL2HLT
,
701 [EDEADLK
] = TARGET_EDEADLK
,
702 [ENOLCK
] = TARGET_ENOLCK
,
703 [EBADE
] = TARGET_EBADE
,
704 [EBADR
] = TARGET_EBADR
,
705 [EXFULL
] = TARGET_EXFULL
,
706 [ENOANO
] = TARGET_ENOANO
,
707 [EBADRQC
] = TARGET_EBADRQC
,
708 [EBADSLT
] = TARGET_EBADSLT
,
709 [EBFONT
] = TARGET_EBFONT
,
710 [ENOSTR
] = TARGET_ENOSTR
,
711 [ENODATA
] = TARGET_ENODATA
,
712 [ETIME
] = TARGET_ETIME
,
713 [ENOSR
] = TARGET_ENOSR
,
714 [ENONET
] = TARGET_ENONET
,
715 [ENOPKG
] = TARGET_ENOPKG
,
716 [EREMOTE
] = TARGET_EREMOTE
,
717 [ENOLINK
] = TARGET_ENOLINK
,
718 [EADV
] = TARGET_EADV
,
719 [ESRMNT
] = TARGET_ESRMNT
,
720 [ECOMM
] = TARGET_ECOMM
,
721 [EPROTO
] = TARGET_EPROTO
,
722 [EDOTDOT
] = TARGET_EDOTDOT
,
723 [EMULTIHOP
] = TARGET_EMULTIHOP
,
724 [EBADMSG
] = TARGET_EBADMSG
,
725 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
726 [EOVERFLOW
] = TARGET_EOVERFLOW
,
727 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
728 [EBADFD
] = TARGET_EBADFD
,
729 [EREMCHG
] = TARGET_EREMCHG
,
730 [ELIBACC
] = TARGET_ELIBACC
,
731 [ELIBBAD
] = TARGET_ELIBBAD
,
732 [ELIBSCN
] = TARGET_ELIBSCN
,
733 [ELIBMAX
] = TARGET_ELIBMAX
,
734 [ELIBEXEC
] = TARGET_ELIBEXEC
,
735 [EILSEQ
] = TARGET_EILSEQ
,
736 [ENOSYS
] = TARGET_ENOSYS
,
737 [ELOOP
] = TARGET_ELOOP
,
738 [ERESTART
] = TARGET_ERESTART
,
739 [ESTRPIPE
] = TARGET_ESTRPIPE
,
740 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
741 [EUSERS
] = TARGET_EUSERS
,
742 [ENOTSOCK
] = TARGET_ENOTSOCK
,
743 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
744 [EMSGSIZE
] = TARGET_EMSGSIZE
,
745 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
746 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
747 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
748 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
749 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
750 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
751 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
752 [EADDRINUSE
] = TARGET_EADDRINUSE
,
753 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
754 [ENETDOWN
] = TARGET_ENETDOWN
,
755 [ENETUNREACH
] = TARGET_ENETUNREACH
,
756 [ENETRESET
] = TARGET_ENETRESET
,
757 [ECONNABORTED
] = TARGET_ECONNABORTED
,
758 [ECONNRESET
] = TARGET_ECONNRESET
,
759 [ENOBUFS
] = TARGET_ENOBUFS
,
760 [EISCONN
] = TARGET_EISCONN
,
761 [ENOTCONN
] = TARGET_ENOTCONN
,
762 [EUCLEAN
] = TARGET_EUCLEAN
,
763 [ENOTNAM
] = TARGET_ENOTNAM
,
764 [ENAVAIL
] = TARGET_ENAVAIL
,
765 [EISNAM
] = TARGET_EISNAM
,
766 [EREMOTEIO
] = TARGET_EREMOTEIO
,
767 [EDQUOT
] = TARGET_EDQUOT
,
768 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
769 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
770 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
771 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
772 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
773 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
774 [EALREADY
] = TARGET_EALREADY
,
775 [EINPROGRESS
] = TARGET_EINPROGRESS
,
776 [ESTALE
] = TARGET_ESTALE
,
777 [ECANCELED
] = TARGET_ECANCELED
,
778 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
779 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
781 [ENOKEY
] = TARGET_ENOKEY
,
784 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
787 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
790 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
793 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
795 #ifdef ENOTRECOVERABLE
796 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
799 [ENOMSG
] = TARGET_ENOMSG
,
802 [ERFKILL
] = TARGET_ERFKILL
,
805 [EHWPOISON
] = TARGET_EHWPOISON
,
809 static inline int host_to_target_errno(int err
)
811 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
812 host_to_target_errno_table
[err
]) {
813 return host_to_target_errno_table
[err
];
818 static inline int target_to_host_errno(int err
)
820 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
821 target_to_host_errno_table
[err
]) {
822 return target_to_host_errno_table
[err
];
827 static inline abi_long
get_errno(abi_long ret
)
830 return -host_to_target_errno(errno
);
835 static inline int is_error(abi_long ret
)
837 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
840 const char *target_strerror(int err
)
842 if (err
== TARGET_ERESTARTSYS
) {
843 return "To be restarted";
845 if (err
== TARGET_QEMU_ESIGRETURN
) {
846 return "Successful exit from sigreturn";
849 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
852 return strerror(target_to_host_errno(err
));
/* Like _syscallN but routed through safe_syscall(), which guarantees the
 * syscall can be interrupted by a guest signal without losing the signal
 * (the restart/ERESTARTSYS handling).  Restored brace lines lost in the
 * mangled paste.
 */
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
902 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
903 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
904 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
905 int, flags
, mode_t
, mode
)
906 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
907 struct rusage
*, rusage
)
908 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
909 int, options
, struct rusage
*, rusage
)
910 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
911 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
912 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
913 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
914 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
916 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
917 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
919 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
920 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
921 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
922 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
923 safe_syscall2(int, tkill
, int, tid
, int, sig
)
924 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
925 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
926 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
927 safe_syscall5(ssize_t
, preadv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
928 unsigned long, pos_l
, unsigned long, pos_h
)
929 safe_syscall5(ssize_t
, pwritev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
930 unsigned long, pos_l
, unsigned long, pos_h
)
931 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
933 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
934 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
935 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
936 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
937 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
938 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
939 safe_syscall2(int, flock
, int, fd
, int, operation
)
940 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
941 const struct timespec
*, uts
, size_t, sigsetsize
)
942 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
944 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
945 struct timespec
*, rem
)
946 #ifdef TARGET_NR_clock_nanosleep
947 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
948 const struct timespec
*, req
, struct timespec
*, rem
)
951 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
953 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
954 long, msgtype
, int, flags
)
955 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
956 unsigned, nsops
, const struct timespec
*, timeout
)
958 /* This host kernel architecture uses a single ipc syscall; fake up
959 * wrappers for the sub-operations to hide this implementation detail.
960 * Annoyingly we can't include linux/ipc.h to get the constant definitions
961 * for the call parameter because some structs in there conflict with the
962 * sys/ipc.h ones. So we just define them here, and rely on them being
963 * the same for all host architectures.
965 #define Q_SEMTIMEDOP 4
968 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
970 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
971 void *, ptr
, long, fifth
)
972 static int safe_msgsnd(int msgid
, const void *msgp
, size_t sz
, int flags
)
974 return safe_ipc(Q_IPCCALL(0, Q_MSGSND
), msgid
, sz
, flags
, (void *)msgp
, 0);
976 static int safe_msgrcv(int msgid
, void *msgp
, size_t sz
, long type
, int flags
)
978 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV
), msgid
, sz
, flags
, msgp
, type
);
980 static int safe_semtimedop(int semid
, struct sembuf
*tsops
, unsigned nsops
,
981 const struct timespec
*timeout
)
983 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP
), semid
, nsops
, 0, tsops
,
987 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
988 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
989 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
990 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
991 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
993 /* We do ioctl like this rather than via safe_syscall3 to preserve the
994 * "third argument might be integer or pointer or not present" behaviour of
997 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
998 /* Similarly for fcntl. Note that callers must always:
999 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
1000 * use the flock64 struct rather than unsuffixed flock
1001 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
1004 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
1006 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
1009 static inline int host_to_target_sock_type(int host_type
)
1013 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
1015 target_type
= TARGET_SOCK_DGRAM
;
1018 target_type
= TARGET_SOCK_STREAM
;
1021 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
1025 #if defined(SOCK_CLOEXEC)
1026 if (host_type
& SOCK_CLOEXEC
) {
1027 target_type
|= TARGET_SOCK_CLOEXEC
;
1031 #if defined(SOCK_NONBLOCK)
1032 if (host_type
& SOCK_NONBLOCK
) {
1033 target_type
|= TARGET_SOCK_NONBLOCK
;
/* Current guest program break. */
static abi_ulong target_brk;
/* Initial break set by target_set_brk(); requests below this are refused. */
static abi_ulong target_original_brk;
/* First address past the highest host page reserved for the guest heap. */
static abi_ulong brk_page;
1044 void target_set_brk(abi_ulong new_brk
)
1046 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
1047 brk_page
= HOST_PAGE_ALIGN(target_brk
);
1050 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
1051 #define DEBUGF_BRK(message, args...)
1053 /* do_brk() must return target values and target errnos. */
1054 abi_long
do_brk(abi_ulong new_brk
)
1056 abi_long mapped_addr
;
1057 abi_ulong new_alloc_size
;
1059 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
1062 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
1065 if (new_brk
< target_original_brk
) {
1066 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
1071 /* If the new brk is less than the highest page reserved to the
1072 * target heap allocation, set it and we're almost done... */
1073 if (new_brk
<= brk_page
) {
1074 /* Heap contents are initialized to zero, as for anonymous
1076 if (new_brk
> target_brk
) {
1077 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
1079 target_brk
= new_brk
;
1080 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
1084 /* We need to allocate more memory after the brk... Note that
1085 * we don't use MAP_FIXED because that will map over the top of
1086 * any existing mapping (like the one with the host libc or qemu
1087 * itself); instead we treat "mapped but at wrong address" as
1088 * a failure and unmap again.
1090 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
1091 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
1092 PROT_READ
|PROT_WRITE
,
1093 MAP_ANON
|MAP_PRIVATE
, 0, 0));
1095 if (mapped_addr
== brk_page
) {
1096 /* Heap contents are initialized to zero, as for anonymous
1097 * mapped pages. Technically the new pages are already
1098 * initialized to zero since they *are* anonymous mapped
1099 * pages, however we have to take care with the contents that
1100 * come from the remaining part of the previous page: it may
1101 * contains garbage data due to a previous heap usage (grown
1102 * then shrunken). */
1103 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
1105 target_brk
= new_brk
;
1106 brk_page
= HOST_PAGE_ALIGN(target_brk
);
1107 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
1110 } else if (mapped_addr
!= -1) {
1111 /* Mapped but at wrong address, meaning there wasn't actually
1112 * enough space for this brk.
1114 target_munmap(mapped_addr
, new_alloc_size
);
1116 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
1119 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
1122 #if defined(TARGET_ALPHA)
1123 /* We (partially) emulate OSF/1 on Alpha, which requires we
1124 return a proper errno, not an unchanged brk value. */
1125 return -TARGET_ENOMEM
;
1127 /* For everything else, return the previous break. */
1131 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
1132 abi_ulong target_fds_addr
,
1136 abi_ulong b
, *target_fds
;
1138 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1139 if (!(target_fds
= lock_user(VERIFY_READ
,
1141 sizeof(abi_ulong
) * nw
,
1143 return -TARGET_EFAULT
;
1147 for (i
= 0; i
< nw
; i
++) {
1148 /* grab the abi_ulong */
1149 __get_user(b
, &target_fds
[i
]);
1150 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1151 /* check the bit inside the abi_ulong */
1158 unlock_user(target_fds
, target_fds_addr
, 0);
1163 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
1164 abi_ulong target_fds_addr
,
1167 if (target_fds_addr
) {
1168 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
1169 return -TARGET_EFAULT
;
1177 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
1183 abi_ulong
*target_fds
;
1185 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1186 if (!(target_fds
= lock_user(VERIFY_WRITE
,
1188 sizeof(abi_ulong
) * nw
,
1190 return -TARGET_EFAULT
;
1193 for (i
= 0; i
< nw
; i
++) {
1195 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1196 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
1199 __put_user(v
, &target_fds
[i
]);
1202 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
1207 #if defined(__alpha__)
1208 #define HOST_HZ 1024
1213 static inline abi_long
host_to_target_clock_t(long ticks
)
1215 #if HOST_HZ == TARGET_HZ
1218 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1222 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1223 const struct rusage
*rusage
)
1225 struct target_rusage
*target_rusage
;
1227 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1228 return -TARGET_EFAULT
;
1229 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1230 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1231 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1232 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1233 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1234 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1235 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1236 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1237 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1238 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1239 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1240 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1241 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1242 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1243 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1244 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1245 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1246 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1247 unlock_user_struct(target_rusage
, target_addr
, 1);
1252 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1254 abi_ulong target_rlim_swap
;
1257 target_rlim_swap
= tswapal(target_rlim
);
1258 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1259 return RLIM_INFINITY
;
1261 result
= target_rlim_swap
;
1262 if (target_rlim_swap
!= (rlim_t
)result
)
1263 return RLIM_INFINITY
;
1268 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1270 abi_ulong target_rlim_swap
;
1273 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1274 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1276 target_rlim_swap
= rlim
;
1277 result
= tswapal(target_rlim_swap
);
1282 static inline int target_to_host_resource(int code
)
1285 case TARGET_RLIMIT_AS
:
1287 case TARGET_RLIMIT_CORE
:
1289 case TARGET_RLIMIT_CPU
:
1291 case TARGET_RLIMIT_DATA
:
1293 case TARGET_RLIMIT_FSIZE
:
1294 return RLIMIT_FSIZE
;
1295 case TARGET_RLIMIT_LOCKS
:
1296 return RLIMIT_LOCKS
;
1297 case TARGET_RLIMIT_MEMLOCK
:
1298 return RLIMIT_MEMLOCK
;
1299 case TARGET_RLIMIT_MSGQUEUE
:
1300 return RLIMIT_MSGQUEUE
;
1301 case TARGET_RLIMIT_NICE
:
1303 case TARGET_RLIMIT_NOFILE
:
1304 return RLIMIT_NOFILE
;
1305 case TARGET_RLIMIT_NPROC
:
1306 return RLIMIT_NPROC
;
1307 case TARGET_RLIMIT_RSS
:
1309 case TARGET_RLIMIT_RTPRIO
:
1310 return RLIMIT_RTPRIO
;
1311 case TARGET_RLIMIT_SIGPENDING
:
1312 return RLIMIT_SIGPENDING
;
1313 case TARGET_RLIMIT_STACK
:
1314 return RLIMIT_STACK
;
1320 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1321 abi_ulong target_tv_addr
)
1323 struct target_timeval
*target_tv
;
1325 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
1326 return -TARGET_EFAULT
;
1328 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1329 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1331 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1336 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1337 const struct timeval
*tv
)
1339 struct target_timeval
*target_tv
;
1341 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
1342 return -TARGET_EFAULT
;
1344 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1345 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1347 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1352 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1353 abi_ulong target_tz_addr
)
1355 struct target_timezone
*target_tz
;
1357 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1358 return -TARGET_EFAULT
;
1361 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1362 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1364 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1369 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1372 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1373 abi_ulong target_mq_attr_addr
)
1375 struct target_mq_attr
*target_mq_attr
;
1377 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1378 target_mq_attr_addr
, 1))
1379 return -TARGET_EFAULT
;
1381 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1382 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1383 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1384 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1386 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1391 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1392 const struct mq_attr
*attr
)
1394 struct target_mq_attr
*target_mq_attr
;
1396 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1397 target_mq_attr_addr
, 0))
1398 return -TARGET_EFAULT
;
1400 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1401 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1402 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1403 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1405 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1411 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1412 /* do_select() must return target values and target errnos. */
1413 static abi_long
do_select(int n
,
1414 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1415 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1417 fd_set rfds
, wfds
, efds
;
1418 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1420 struct timespec ts
, *ts_ptr
;
1423 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1427 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1431 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1436 if (target_tv_addr
) {
1437 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1438 return -TARGET_EFAULT
;
1439 ts
.tv_sec
= tv
.tv_sec
;
1440 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1446 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1449 if (!is_error(ret
)) {
1450 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1451 return -TARGET_EFAULT
;
1452 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1453 return -TARGET_EFAULT
;
1454 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1455 return -TARGET_EFAULT
;
1457 if (target_tv_addr
) {
1458 tv
.tv_sec
= ts
.tv_sec
;
1459 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1460 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1461 return -TARGET_EFAULT
;
1469 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1470 static abi_long
do_old_select(abi_ulong arg1
)
1472 struct target_sel_arg_struct
*sel
;
1473 abi_ulong inp
, outp
, exp
, tvp
;
1476 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1)) {
1477 return -TARGET_EFAULT
;
1480 nsel
= tswapal(sel
->n
);
1481 inp
= tswapal(sel
->inp
);
1482 outp
= tswapal(sel
->outp
);
1483 exp
= tswapal(sel
->exp
);
1484 tvp
= tswapal(sel
->tvp
);
1486 unlock_user_struct(sel
, arg1
, 0);
1488 return do_select(nsel
, inp
, outp
, exp
, tvp
);
1493 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1496 return pipe2(host_pipe
, flags
);
1502 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1503 int flags
, int is_pipe2
)
1507 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1510 return get_errno(ret
);
1512 /* Several targets have special calling conventions for the original
1513 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1515 #if defined(TARGET_ALPHA)
1516 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1517 return host_pipe
[0];
1518 #elif defined(TARGET_MIPS)
1519 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1520 return host_pipe
[0];
1521 #elif defined(TARGET_SH4)
1522 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1523 return host_pipe
[0];
1524 #elif defined(TARGET_SPARC)
1525 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1526 return host_pipe
[0];
1530 if (put_user_s32(host_pipe
[0], pipedes
)
1531 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1532 return -TARGET_EFAULT
;
1533 return get_errno(ret
);
1536 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1537 abi_ulong target_addr
,
1540 struct target_ip_mreqn
*target_smreqn
;
1542 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1544 return -TARGET_EFAULT
;
1545 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1546 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1547 if (len
== sizeof(struct target_ip_mreqn
))
1548 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1549 unlock_user(target_smreqn
, target_addr
, 0);
1554 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1555 abi_ulong target_addr
,
1558 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1559 sa_family_t sa_family
;
1560 struct target_sockaddr
*target_saddr
;
1562 if (fd_trans_target_to_host_addr(fd
)) {
1563 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1566 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1568 return -TARGET_EFAULT
;
1570 sa_family
= tswap16(target_saddr
->sa_family
);
1572 /* Oops. The caller might send a incomplete sun_path; sun_path
1573 * must be terminated by \0 (see the manual page), but
1574 * unfortunately it is quite common to specify sockaddr_un
1575 * length as "strlen(x->sun_path)" while it should be
1576 * "strlen(...) + 1". We'll fix that here if needed.
1577 * Linux kernel has a similar feature.
1580 if (sa_family
== AF_UNIX
) {
1581 if (len
< unix_maxlen
&& len
> 0) {
1582 char *cp
= (char*)target_saddr
;
1584 if ( cp
[len
-1] && !cp
[len
] )
1587 if (len
> unix_maxlen
)
1591 memcpy(addr
, target_saddr
, len
);
1592 addr
->sa_family
= sa_family
;
1593 if (sa_family
== AF_NETLINK
) {
1594 struct sockaddr_nl
*nladdr
;
1596 nladdr
= (struct sockaddr_nl
*)addr
;
1597 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1598 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1599 } else if (sa_family
== AF_PACKET
) {
1600 struct target_sockaddr_ll
*lladdr
;
1602 lladdr
= (struct target_sockaddr_ll
*)addr
;
1603 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1604 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1606 unlock_user(target_saddr
, target_addr
, 0);
1611 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1612 struct sockaddr
*addr
,
1615 struct target_sockaddr
*target_saddr
;
1621 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1623 return -TARGET_EFAULT
;
1624 memcpy(target_saddr
, addr
, len
);
1625 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1626 sizeof(target_saddr
->sa_family
)) {
1627 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1629 if (addr
->sa_family
== AF_NETLINK
&& len
>= sizeof(struct sockaddr_nl
)) {
1630 struct sockaddr_nl
*target_nl
= (struct sockaddr_nl
*)target_saddr
;
1631 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1632 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1633 } else if (addr
->sa_family
== AF_PACKET
) {
1634 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1635 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1636 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1638 unlock_user(target_saddr
, target_addr
, len
);
1643 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1644 struct target_msghdr
*target_msgh
)
1646 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1647 abi_long msg_controllen
;
1648 abi_ulong target_cmsg_addr
;
1649 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1650 socklen_t space
= 0;
1652 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1653 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1655 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1656 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1657 target_cmsg_start
= target_cmsg
;
1659 return -TARGET_EFAULT
;
1661 while (cmsg
&& target_cmsg
) {
1662 void *data
= CMSG_DATA(cmsg
);
1663 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1665 int len
= tswapal(target_cmsg
->cmsg_len
)
1666 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1668 space
+= CMSG_SPACE(len
);
1669 if (space
> msgh
->msg_controllen
) {
1670 space
-= CMSG_SPACE(len
);
1671 /* This is a QEMU bug, since we allocated the payload
1672 * area ourselves (unlike overflow in host-to-target
1673 * conversion, which is just the guest giving us a buffer
1674 * that's too small). It can't happen for the payload types
1675 * we currently support; if it becomes an issue in future
1676 * we would need to improve our allocation strategy to
1677 * something more intelligent than "twice the size of the
1678 * target buffer we're reading from".
1680 gemu_log("Host cmsg overflow\n");
1684 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1685 cmsg
->cmsg_level
= SOL_SOCKET
;
1687 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1689 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1690 cmsg
->cmsg_len
= CMSG_LEN(len
);
1692 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1693 int *fd
= (int *)data
;
1694 int *target_fd
= (int *)target_data
;
1695 int i
, numfds
= len
/ sizeof(int);
1697 for (i
= 0; i
< numfds
; i
++) {
1698 __get_user(fd
[i
], target_fd
+ i
);
1700 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1701 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1702 struct ucred
*cred
= (struct ucred
*)data
;
1703 struct target_ucred
*target_cred
=
1704 (struct target_ucred
*)target_data
;
1706 __get_user(cred
->pid
, &target_cred
->pid
);
1707 __get_user(cred
->uid
, &target_cred
->uid
);
1708 __get_user(cred
->gid
, &target_cred
->gid
);
1710 gemu_log("Unsupported ancillary data: %d/%d\n",
1711 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1712 memcpy(data
, target_data
, len
);
1715 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1716 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1719 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1721 msgh
->msg_controllen
= space
;
1725 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1726 struct msghdr
*msgh
)
1728 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1729 abi_long msg_controllen
;
1730 abi_ulong target_cmsg_addr
;
1731 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1732 socklen_t space
= 0;
1734 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1735 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1737 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1738 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1739 target_cmsg_start
= target_cmsg
;
1741 return -TARGET_EFAULT
;
1743 while (cmsg
&& target_cmsg
) {
1744 void *data
= CMSG_DATA(cmsg
);
1745 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1747 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1748 int tgt_len
, tgt_space
;
1750 /* We never copy a half-header but may copy half-data;
1751 * this is Linux's behaviour in put_cmsg(). Note that
1752 * truncation here is a guest problem (which we report
1753 * to the guest via the CTRUNC bit), unlike truncation
1754 * in target_to_host_cmsg, which is a QEMU bug.
1756 if (msg_controllen
< sizeof(struct cmsghdr
)) {
1757 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1761 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1762 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1764 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1766 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1768 tgt_len
= TARGET_CMSG_LEN(len
);
1770 /* Payload types which need a different size of payload on
1771 * the target must adjust tgt_len here.
1773 switch (cmsg
->cmsg_level
) {
1775 switch (cmsg
->cmsg_type
) {
1777 tgt_len
= sizeof(struct target_timeval
);
1786 if (msg_controllen
< tgt_len
) {
1787 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1788 tgt_len
= msg_controllen
;
1791 /* We must now copy-and-convert len bytes of payload
1792 * into tgt_len bytes of destination space. Bear in mind
1793 * that in both source and destination we may be dealing
1794 * with a truncated value!
1796 switch (cmsg
->cmsg_level
) {
1798 switch (cmsg
->cmsg_type
) {
1801 int *fd
= (int *)data
;
1802 int *target_fd
= (int *)target_data
;
1803 int i
, numfds
= tgt_len
/ sizeof(int);
1805 for (i
= 0; i
< numfds
; i
++) {
1806 __put_user(fd
[i
], target_fd
+ i
);
1812 struct timeval
*tv
= (struct timeval
*)data
;
1813 struct target_timeval
*target_tv
=
1814 (struct target_timeval
*)target_data
;
1816 if (len
!= sizeof(struct timeval
) ||
1817 tgt_len
!= sizeof(struct target_timeval
)) {
1821 /* copy struct timeval to target */
1822 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1823 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1826 case SCM_CREDENTIALS
:
1828 struct ucred
*cred
= (struct ucred
*)data
;
1829 struct target_ucred
*target_cred
=
1830 (struct target_ucred
*)target_data
;
1832 __put_user(cred
->pid
, &target_cred
->pid
);
1833 __put_user(cred
->uid
, &target_cred
->uid
);
1834 __put_user(cred
->gid
, &target_cred
->gid
);
1844 gemu_log("Unsupported ancillary data: %d/%d\n",
1845 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1846 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1847 if (tgt_len
> len
) {
1848 memset(target_data
+ len
, 0, tgt_len
- len
);
1852 target_cmsg
->cmsg_len
= tswapal(tgt_len
);
1853 tgt_space
= TARGET_CMSG_SPACE(len
);
1854 if (msg_controllen
< tgt_space
) {
1855 tgt_space
= msg_controllen
;
1857 msg_controllen
-= tgt_space
;
1859 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1860 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1863 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1865 target_msgh
->msg_controllen
= tswapal(space
);
/* Byte-swap every field of a netlink message header in place.
 * The swap is its own inverse, so this works in both directions
 * (host->target and target->host).
 */
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    nlh->nlmsg_len   = tswap32(nlh->nlmsg_len);
    nlh->nlmsg_type  = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
    nlh->nlmsg_seq   = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_pid   = tswap32(nlh->nlmsg_pid);
}
1878 static abi_long
host_to_target_for_each_nlmsg(struct nlmsghdr
*nlh
,
1880 abi_long (*host_to_target_nlmsg
)
1881 (struct nlmsghdr
*))
1886 while (len
> sizeof(struct nlmsghdr
)) {
1888 nlmsg_len
= nlh
->nlmsg_len
;
1889 if (nlmsg_len
< sizeof(struct nlmsghdr
) ||
1894 switch (nlh
->nlmsg_type
) {
1896 tswap_nlmsghdr(nlh
);
1902 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
1903 e
->error
= tswap32(e
->error
);
1904 tswap_nlmsghdr(&e
->msg
);
1905 tswap_nlmsghdr(nlh
);
1909 ret
= host_to_target_nlmsg(nlh
);
1911 tswap_nlmsghdr(nlh
);
1916 tswap_nlmsghdr(nlh
);
1917 len
-= NLMSG_ALIGN(nlmsg_len
);
1918 nlh
= (struct nlmsghdr
*)(((char*)nlh
) + NLMSG_ALIGN(nlmsg_len
));
1923 static abi_long
target_to_host_for_each_nlmsg(struct nlmsghdr
*nlh
,
1925 abi_long (*target_to_host_nlmsg
)
1926 (struct nlmsghdr
*))
1930 while (len
> sizeof(struct nlmsghdr
)) {
1931 if (tswap32(nlh
->nlmsg_len
) < sizeof(struct nlmsghdr
) ||
1932 tswap32(nlh
->nlmsg_len
) > len
) {
1935 tswap_nlmsghdr(nlh
);
1936 switch (nlh
->nlmsg_type
) {
1943 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
1944 e
->error
= tswap32(e
->error
);
1945 tswap_nlmsghdr(&e
->msg
);
1949 ret
= target_to_host_nlmsg(nlh
);
1954 len
-= NLMSG_ALIGN(nlh
->nlmsg_len
);
1955 nlh
= (struct nlmsghdr
*)(((char *)nlh
) + NLMSG_ALIGN(nlh
->nlmsg_len
));
1960 #ifdef CONFIG_RTNETLINK
1961 static abi_long
host_to_target_for_each_nlattr(struct nlattr
*nlattr
,
1962 size_t len
, void *context
,
1963 abi_long (*host_to_target_nlattr
)
1967 unsigned short nla_len
;
1970 while (len
> sizeof(struct nlattr
)) {
1971 nla_len
= nlattr
->nla_len
;
1972 if (nla_len
< sizeof(struct nlattr
) ||
1976 ret
= host_to_target_nlattr(nlattr
, context
);
1977 nlattr
->nla_len
= tswap16(nlattr
->nla_len
);
1978 nlattr
->nla_type
= tswap16(nlattr
->nla_type
);
1982 len
-= NLA_ALIGN(nla_len
);
1983 nlattr
= (struct nlattr
*)(((char *)nlattr
) + NLA_ALIGN(nla_len
));
1988 static abi_long
host_to_target_for_each_rtattr(struct rtattr
*rtattr
,
1990 abi_long (*host_to_target_rtattr
)
1993 unsigned short rta_len
;
1996 while (len
> sizeof(struct rtattr
)) {
1997 rta_len
= rtattr
->rta_len
;
1998 if (rta_len
< sizeof(struct rtattr
) ||
2002 ret
= host_to_target_rtattr(rtattr
);
2003 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
2004 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
2008 len
-= RTA_ALIGN(rta_len
);
2009 rtattr
= (struct rtattr
*)(((char *)rtattr
) + RTA_ALIGN(rta_len
));
2014 #define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
2016 static abi_long
host_to_target_data_bridge_nlattr(struct nlattr
*nlattr
,
2023 switch (nlattr
->nla_type
) {
2025 case QEMU_IFLA_BR_FDB_FLUSH
:
2028 case QEMU_IFLA_BR_GROUP_ADDR
:
2031 case QEMU_IFLA_BR_VLAN_FILTERING
:
2032 case QEMU_IFLA_BR_TOPOLOGY_CHANGE
:
2033 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED
:
2034 case QEMU_IFLA_BR_MCAST_ROUTER
:
2035 case QEMU_IFLA_BR_MCAST_SNOOPING
:
2036 case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR
:
2037 case QEMU_IFLA_BR_MCAST_QUERIER
:
2038 case QEMU_IFLA_BR_NF_CALL_IPTABLES
:
2039 case QEMU_IFLA_BR_NF_CALL_IP6TABLES
:
2040 case QEMU_IFLA_BR_NF_CALL_ARPTABLES
:
2043 case QEMU_IFLA_BR_PRIORITY
:
2044 case QEMU_IFLA_BR_VLAN_PROTOCOL
:
2045 case QEMU_IFLA_BR_GROUP_FWD_MASK
:
2046 case QEMU_IFLA_BR_ROOT_PORT
:
2047 case QEMU_IFLA_BR_VLAN_DEFAULT_PVID
:
2048 u16
= NLA_DATA(nlattr
);
2049 *u16
= tswap16(*u16
);
2052 case QEMU_IFLA_BR_FORWARD_DELAY
:
2053 case QEMU_IFLA_BR_HELLO_TIME
:
2054 case QEMU_IFLA_BR_MAX_AGE
:
2055 case QEMU_IFLA_BR_AGEING_TIME
:
2056 case QEMU_IFLA_BR_STP_STATE
:
2057 case QEMU_IFLA_BR_ROOT_PATH_COST
:
2058 case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY
:
2059 case QEMU_IFLA_BR_MCAST_HASH_MAX
:
2060 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT
:
2061 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT
:
2062 u32
= NLA_DATA(nlattr
);
2063 *u32
= tswap32(*u32
);
2066 case QEMU_IFLA_BR_HELLO_TIMER
:
2067 case QEMU_IFLA_BR_TCN_TIMER
:
2068 case QEMU_IFLA_BR_GC_TIMER
:
2069 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER
:
2070 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL
:
2071 case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL
:
2072 case QEMU_IFLA_BR_MCAST_QUERIER_INTVL
:
2073 case QEMU_IFLA_BR_MCAST_QUERY_INTVL
:
2074 case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
:
2075 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL
:
2076 u64
= NLA_DATA(nlattr
);
2077 *u64
= tswap64(*u64
);
2079 /* ifla_bridge_id: uin8_t[] */
2080 case QEMU_IFLA_BR_ROOT_ID
:
2081 case QEMU_IFLA_BR_BRIDGE_ID
:
2084 gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr
->nla_type
);
2090 static abi_long
host_to_target_slave_data_bridge_nlattr(struct nlattr
*nlattr
,
2097 switch (nlattr
->nla_type
) {
2099 case QEMU_IFLA_BRPORT_STATE
:
2100 case QEMU_IFLA_BRPORT_MODE
:
2101 case QEMU_IFLA_BRPORT_GUARD
:
2102 case QEMU_IFLA_BRPORT_PROTECT
:
2103 case QEMU_IFLA_BRPORT_FAST_LEAVE
:
2104 case QEMU_IFLA_BRPORT_LEARNING
:
2105 case QEMU_IFLA_BRPORT_UNICAST_FLOOD
:
2106 case QEMU_IFLA_BRPORT_PROXYARP
:
2107 case QEMU_IFLA_BRPORT_LEARNING_SYNC
:
2108 case QEMU_IFLA_BRPORT_PROXYARP_WIFI
:
2109 case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
:
2110 case QEMU_IFLA_BRPORT_CONFIG_PENDING
:
2111 case QEMU_IFLA_BRPORT_MULTICAST_ROUTER
:
2114 case QEMU_IFLA_BRPORT_PRIORITY
:
2115 case QEMU_IFLA_BRPORT_DESIGNATED_PORT
:
2116 case QEMU_IFLA_BRPORT_DESIGNATED_COST
:
2117 case QEMU_IFLA_BRPORT_ID
:
2118 case QEMU_IFLA_BRPORT_NO
:
2119 u16
= NLA_DATA(nlattr
);
2120 *u16
= tswap16(*u16
);
2123 case QEMU_IFLA_BRPORT_COST
:
2124 u32
= NLA_DATA(nlattr
);
2125 *u32
= tswap32(*u32
);
2128 case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER
:
2129 case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER
:
2130 case QEMU_IFLA_BRPORT_HOLD_TIMER
:
2131 u64
= NLA_DATA(nlattr
);
2132 *u64
= tswap64(*u64
);
2134 /* ifla_bridge_id: uint8_t[] */
2135 case QEMU_IFLA_BRPORT_ROOT_ID
:
2136 case QEMU_IFLA_BRPORT_BRIDGE_ID
:
2139 gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr
->nla_type
);
2145 struct linkinfo_context
{
2152 static abi_long
host_to_target_data_linkinfo_nlattr(struct nlattr
*nlattr
,
2155 struct linkinfo_context
*li_context
= context
;
2157 switch (nlattr
->nla_type
) {
2159 case QEMU_IFLA_INFO_KIND
:
2160 li_context
->name
= NLA_DATA(nlattr
);
2161 li_context
->len
= nlattr
->nla_len
- NLA_HDRLEN
;
2163 case QEMU_IFLA_INFO_SLAVE_KIND
:
2164 li_context
->slave_name
= NLA_DATA(nlattr
);
2165 li_context
->slave_len
= nlattr
->nla_len
- NLA_HDRLEN
;
2168 case QEMU_IFLA_INFO_XSTATS
:
2169 /* FIXME: only used by CAN */
2172 case QEMU_IFLA_INFO_DATA
:
2173 if (strncmp(li_context
->name
, "bridge",
2174 li_context
->len
) == 0) {
2175 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
2178 host_to_target_data_bridge_nlattr
);
2180 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context
->name
);
2183 case QEMU_IFLA_INFO_SLAVE_DATA
:
2184 if (strncmp(li_context
->slave_name
, "bridge",
2185 li_context
->slave_len
) == 0) {
2186 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
2189 host_to_target_slave_data_bridge_nlattr
);
2191 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2192 li_context
->slave_name
);
2196 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr
->nla_type
);
2203 static abi_long
host_to_target_data_inet_nlattr(struct nlattr
*nlattr
,
2209 switch (nlattr
->nla_type
) {
2210 case QEMU_IFLA_INET_CONF
:
2211 u32
= NLA_DATA(nlattr
);
2212 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u32
);
2214 u32
[i
] = tswap32(u32
[i
]);
2218 gemu_log("Unknown host AF_INET type: %d\n", nlattr
->nla_type
);
2223 static abi_long
host_to_target_data_inet6_nlattr(struct nlattr
*nlattr
,
2228 struct ifla_cacheinfo
*ci
;
2231 switch (nlattr
->nla_type
) {
2233 case QEMU_IFLA_INET6_TOKEN
:
2236 case QEMU_IFLA_INET6_ADDR_GEN_MODE
:
2239 case QEMU_IFLA_INET6_FLAGS
:
2240 u32
= NLA_DATA(nlattr
);
2241 *u32
= tswap32(*u32
);
2244 case QEMU_IFLA_INET6_CONF
:
2245 u32
= NLA_DATA(nlattr
);
2246 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u32
);
2248 u32
[i
] = tswap32(u32
[i
]);
2251 /* ifla_cacheinfo */
2252 case QEMU_IFLA_INET6_CACHEINFO
:
2253 ci
= NLA_DATA(nlattr
);
2254 ci
->max_reasm_len
= tswap32(ci
->max_reasm_len
);
2255 ci
->tstamp
= tswap32(ci
->tstamp
);
2256 ci
->reachable_time
= tswap32(ci
->reachable_time
);
2257 ci
->retrans_time
= tswap32(ci
->retrans_time
);
2260 case QEMU_IFLA_INET6_STATS
:
2261 case QEMU_IFLA_INET6_ICMP6STATS
:
2262 u64
= NLA_DATA(nlattr
);
2263 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u64
);
2265 u64
[i
] = tswap64(u64
[i
]);
2269 gemu_log("Unknown host AF_INET6 type: %d\n", nlattr
->nla_type
);
2274 static abi_long
host_to_target_data_spec_nlattr(struct nlattr
*nlattr
,
2277 switch (nlattr
->nla_type
) {
2279 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2281 host_to_target_data_inet_nlattr
);
2283 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2285 host_to_target_data_inet6_nlattr
);
2287 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr
->nla_type
);
2293 static abi_long
host_to_target_data_link_rtattr(struct rtattr
*rtattr
)
2296 struct rtnl_link_stats
*st
;
2297 struct rtnl_link_stats64
*st64
;
2298 struct rtnl_link_ifmap
*map
;
2299 struct linkinfo_context li_context
;
2301 switch (rtattr
->rta_type
) {
2303 case QEMU_IFLA_ADDRESS
:
2304 case QEMU_IFLA_BROADCAST
:
2306 case QEMU_IFLA_IFNAME
:
2307 case QEMU_IFLA_QDISC
:
2310 case QEMU_IFLA_OPERSTATE
:
2311 case QEMU_IFLA_LINKMODE
:
2312 case QEMU_IFLA_CARRIER
:
2313 case QEMU_IFLA_PROTO_DOWN
:
2317 case QEMU_IFLA_LINK
:
2318 case QEMU_IFLA_WEIGHT
:
2319 case QEMU_IFLA_TXQLEN
:
2320 case QEMU_IFLA_CARRIER_CHANGES
:
2321 case QEMU_IFLA_NUM_RX_QUEUES
:
2322 case QEMU_IFLA_NUM_TX_QUEUES
:
2323 case QEMU_IFLA_PROMISCUITY
:
2324 case QEMU_IFLA_EXT_MASK
:
2325 case QEMU_IFLA_LINK_NETNSID
:
2326 case QEMU_IFLA_GROUP
:
2327 case QEMU_IFLA_MASTER
:
2328 case QEMU_IFLA_NUM_VF
:
2329 u32
= RTA_DATA(rtattr
);
2330 *u32
= tswap32(*u32
);
2332 /* struct rtnl_link_stats */
2333 case QEMU_IFLA_STATS
:
2334 st
= RTA_DATA(rtattr
);
2335 st
->rx_packets
= tswap32(st
->rx_packets
);
2336 st
->tx_packets
= tswap32(st
->tx_packets
);
2337 st
->rx_bytes
= tswap32(st
->rx_bytes
);
2338 st
->tx_bytes
= tswap32(st
->tx_bytes
);
2339 st
->rx_errors
= tswap32(st
->rx_errors
);
2340 st
->tx_errors
= tswap32(st
->tx_errors
);
2341 st
->rx_dropped
= tswap32(st
->rx_dropped
);
2342 st
->tx_dropped
= tswap32(st
->tx_dropped
);
2343 st
->multicast
= tswap32(st
->multicast
);
2344 st
->collisions
= tswap32(st
->collisions
);
2346 /* detailed rx_errors: */
2347 st
->rx_length_errors
= tswap32(st
->rx_length_errors
);
2348 st
->rx_over_errors
= tswap32(st
->rx_over_errors
);
2349 st
->rx_crc_errors
= tswap32(st
->rx_crc_errors
);
2350 st
->rx_frame_errors
= tswap32(st
->rx_frame_errors
);
2351 st
->rx_fifo_errors
= tswap32(st
->rx_fifo_errors
);
2352 st
->rx_missed_errors
= tswap32(st
->rx_missed_errors
);
2354 /* detailed tx_errors */
2355 st
->tx_aborted_errors
= tswap32(st
->tx_aborted_errors
);
2356 st
->tx_carrier_errors
= tswap32(st
->tx_carrier_errors
);
2357 st
->tx_fifo_errors
= tswap32(st
->tx_fifo_errors
);
2358 st
->tx_heartbeat_errors
= tswap32(st
->tx_heartbeat_errors
);
2359 st
->tx_window_errors
= tswap32(st
->tx_window_errors
);
2362 st
->rx_compressed
= tswap32(st
->rx_compressed
);
2363 st
->tx_compressed
= tswap32(st
->tx_compressed
);
2365 /* struct rtnl_link_stats64 */
2366 case QEMU_IFLA_STATS64
:
2367 st64
= RTA_DATA(rtattr
);
2368 st64
->rx_packets
= tswap64(st64
->rx_packets
);
2369 st64
->tx_packets
= tswap64(st64
->tx_packets
);
2370 st64
->rx_bytes
= tswap64(st64
->rx_bytes
);
2371 st64
->tx_bytes
= tswap64(st64
->tx_bytes
);
2372 st64
->rx_errors
= tswap64(st64
->rx_errors
);
2373 st64
->tx_errors
= tswap64(st64
->tx_errors
);
2374 st64
->rx_dropped
= tswap64(st64
->rx_dropped
);
2375 st64
->tx_dropped
= tswap64(st64
->tx_dropped
);
2376 st64
->multicast
= tswap64(st64
->multicast
);
2377 st64
->collisions
= tswap64(st64
->collisions
);
2379 /* detailed rx_errors: */
2380 st64
->rx_length_errors
= tswap64(st64
->rx_length_errors
);
2381 st64
->rx_over_errors
= tswap64(st64
->rx_over_errors
);
2382 st64
->rx_crc_errors
= tswap64(st64
->rx_crc_errors
);
2383 st64
->rx_frame_errors
= tswap64(st64
->rx_frame_errors
);
2384 st64
->rx_fifo_errors
= tswap64(st64
->rx_fifo_errors
);
2385 st64
->rx_missed_errors
= tswap64(st64
->rx_missed_errors
);
2387 /* detailed tx_errors */
2388 st64
->tx_aborted_errors
= tswap64(st64
->tx_aborted_errors
);
2389 st64
->tx_carrier_errors
= tswap64(st64
->tx_carrier_errors
);
2390 st64
->tx_fifo_errors
= tswap64(st64
->tx_fifo_errors
);
2391 st64
->tx_heartbeat_errors
= tswap64(st64
->tx_heartbeat_errors
);
2392 st64
->tx_window_errors
= tswap64(st64
->tx_window_errors
);
2395 st64
->rx_compressed
= tswap64(st64
->rx_compressed
);
2396 st64
->tx_compressed
= tswap64(st64
->tx_compressed
);
2398 /* struct rtnl_link_ifmap */
2400 map
= RTA_DATA(rtattr
);
2401 map
->mem_start
= tswap64(map
->mem_start
);
2402 map
->mem_end
= tswap64(map
->mem_end
);
2403 map
->base_addr
= tswap64(map
->base_addr
);
2404 map
->irq
= tswap16(map
->irq
);
2407 case QEMU_IFLA_LINKINFO
:
2408 memset(&li_context
, 0, sizeof(li_context
));
2409 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2411 host_to_target_data_linkinfo_nlattr
);
2412 case QEMU_IFLA_AF_SPEC
:
2413 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2415 host_to_target_data_spec_nlattr
);
2417 gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr
->rta_type
);
2423 static abi_long
host_to_target_data_addr_rtattr(struct rtattr
*rtattr
)
2426 struct ifa_cacheinfo
*ci
;
2428 switch (rtattr
->rta_type
) {
2429 /* binary: depends on family type */
2439 u32
= RTA_DATA(rtattr
);
2440 *u32
= tswap32(*u32
);
2442 /* struct ifa_cacheinfo */
2444 ci
= RTA_DATA(rtattr
);
2445 ci
->ifa_prefered
= tswap32(ci
->ifa_prefered
);
2446 ci
->ifa_valid
= tswap32(ci
->ifa_valid
);
2447 ci
->cstamp
= tswap32(ci
->cstamp
);
2448 ci
->tstamp
= tswap32(ci
->tstamp
);
2451 gemu_log("Unknown host IFA type: %d\n", rtattr
->rta_type
);
2457 static abi_long
host_to_target_data_route_rtattr(struct rtattr
*rtattr
)
2460 switch (rtattr
->rta_type
) {
2461 /* binary: depends on family type */
2470 u32
= RTA_DATA(rtattr
);
2471 *u32
= tswap32(*u32
);
2474 gemu_log("Unknown host RTA type: %d\n", rtattr
->rta_type
);
2480 static abi_long
host_to_target_link_rtattr(struct rtattr
*rtattr
,
2481 uint32_t rtattr_len
)
2483 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2484 host_to_target_data_link_rtattr
);
2487 static abi_long
host_to_target_addr_rtattr(struct rtattr
*rtattr
,
2488 uint32_t rtattr_len
)
2490 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2491 host_to_target_data_addr_rtattr
);
2494 static abi_long
host_to_target_route_rtattr(struct rtattr
*rtattr
,
2495 uint32_t rtattr_len
)
2497 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2498 host_to_target_data_route_rtattr
);
2501 static abi_long
host_to_target_data_route(struct nlmsghdr
*nlh
)
2504 struct ifinfomsg
*ifi
;
2505 struct ifaddrmsg
*ifa
;
2508 nlmsg_len
= nlh
->nlmsg_len
;
2509 switch (nlh
->nlmsg_type
) {
2513 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2514 ifi
= NLMSG_DATA(nlh
);
2515 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2516 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2517 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2518 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2519 host_to_target_link_rtattr(IFLA_RTA(ifi
),
2520 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifi
)));
2526 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2527 ifa
= NLMSG_DATA(nlh
);
2528 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2529 host_to_target_addr_rtattr(IFA_RTA(ifa
),
2530 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifa
)));
2536 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2537 rtm
= NLMSG_DATA(nlh
);
2538 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2539 host_to_target_route_rtattr(RTM_RTA(rtm
),
2540 nlmsg_len
- NLMSG_LENGTH(sizeof(*rtm
)));
2544 return -TARGET_EINVAL
;
/*
 * Convert a buffer of netlink route messages from host to target
 * byte order by dispatching each nlmsghdr to host_to_target_data_route().
 *
 * NOTE(review): the line declaring the second parameter is garbled in
 * this extract; its type is reconstructed as size_t to match the
 * sibling target_to_host_nlmsg_route() — confirm against upstream.
 */
static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
                                                  size_t len)
{
    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
}
2555 static abi_long
target_to_host_for_each_rtattr(struct rtattr
*rtattr
,
2557 abi_long (*target_to_host_rtattr
)
2562 while (len
>= sizeof(struct rtattr
)) {
2563 if (tswap16(rtattr
->rta_len
) < sizeof(struct rtattr
) ||
2564 tswap16(rtattr
->rta_len
) > len
) {
2567 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
2568 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
2569 ret
= target_to_host_rtattr(rtattr
);
2573 len
-= RTA_ALIGN(rtattr
->rta_len
);
2574 rtattr
= (struct rtattr
*)(((char *)rtattr
) +
2575 RTA_ALIGN(rtattr
->rta_len
));
2580 static abi_long
target_to_host_data_link_rtattr(struct rtattr
*rtattr
)
2582 switch (rtattr
->rta_type
) {
2584 gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr
->rta_type
);
2590 static abi_long
target_to_host_data_addr_rtattr(struct rtattr
*rtattr
)
2592 switch (rtattr
->rta_type
) {
2593 /* binary: depends on family type */
2598 gemu_log("Unknown target IFA type: %d\n", rtattr
->rta_type
);
2604 static abi_long
target_to_host_data_route_rtattr(struct rtattr
*rtattr
)
2607 switch (rtattr
->rta_type
) {
2608 /* binary: depends on family type */
2616 u32
= RTA_DATA(rtattr
);
2617 *u32
= tswap32(*u32
);
2620 gemu_log("Unknown target RTA type: %d\n", rtattr
->rta_type
);
2626 static void target_to_host_link_rtattr(struct rtattr
*rtattr
,
2627 uint32_t rtattr_len
)
2629 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2630 target_to_host_data_link_rtattr
);
2633 static void target_to_host_addr_rtattr(struct rtattr
*rtattr
,
2634 uint32_t rtattr_len
)
2636 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2637 target_to_host_data_addr_rtattr
);
2640 static void target_to_host_route_rtattr(struct rtattr
*rtattr
,
2641 uint32_t rtattr_len
)
2643 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2644 target_to_host_data_route_rtattr
);
2647 static abi_long
target_to_host_data_route(struct nlmsghdr
*nlh
)
2649 struct ifinfomsg
*ifi
;
2650 struct ifaddrmsg
*ifa
;
2653 switch (nlh
->nlmsg_type
) {
2658 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2659 ifi
= NLMSG_DATA(nlh
);
2660 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2661 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2662 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2663 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2664 target_to_host_link_rtattr(IFLA_RTA(ifi
), nlh
->nlmsg_len
-
2665 NLMSG_LENGTH(sizeof(*ifi
)));
2671 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2672 ifa
= NLMSG_DATA(nlh
);
2673 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2674 target_to_host_addr_rtattr(IFA_RTA(ifa
), nlh
->nlmsg_len
-
2675 NLMSG_LENGTH(sizeof(*ifa
)));
2682 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2683 rtm
= NLMSG_DATA(nlh
);
2684 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2685 target_to_host_route_rtattr(RTM_RTA(rtm
), nlh
->nlmsg_len
-
2686 NLMSG_LENGTH(sizeof(*rtm
)));
2690 return -TARGET_EOPNOTSUPP
;
2695 static abi_long
target_to_host_nlmsg_route(struct nlmsghdr
*nlh
, size_t len
)
2697 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_route
);
2699 #endif /* CONFIG_RTNETLINK */
2701 static abi_long
host_to_target_data_audit(struct nlmsghdr
*nlh
)
2703 switch (nlh
->nlmsg_type
) {
2705 gemu_log("Unknown host audit message type %d\n",
2707 return -TARGET_EINVAL
;
/*
 * Convert a buffer of netlink audit messages from host to target
 * byte order by dispatching each nlmsghdr to host_to_target_data_audit().
 *
 * NOTE(review): the line declaring the second parameter is garbled in
 * this extract; its type is reconstructed as size_t to match the
 * sibling target_to_host_nlmsg_audit() — confirm against upstream.
 */
static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
                                                  size_t len)
{
    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
}
2718 static abi_long
target_to_host_data_audit(struct nlmsghdr
*nlh
)
2720 switch (nlh
->nlmsg_type
) {
2722 case AUDIT_FIRST_USER_MSG
... AUDIT_LAST_USER_MSG
:
2723 case AUDIT_FIRST_USER_MSG2
... AUDIT_LAST_USER_MSG2
:
2726 gemu_log("Unknown target audit message type %d\n",
2728 return -TARGET_EINVAL
;
2734 static abi_long
target_to_host_nlmsg_audit(struct nlmsghdr
*nlh
, size_t len
)
2736 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_audit
);
2739 /* do_setsockopt() Must return target values and target errnos. */
2740 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
2741 abi_ulong optval_addr
, socklen_t optlen
)
2745 struct ip_mreqn
*ip_mreq
;
2746 struct ip_mreq_source
*ip_mreq_source
;
2750 /* TCP options all take an 'int' value. */
2751 if (optlen
< sizeof(uint32_t))
2752 return -TARGET_EINVAL
;
2754 if (get_user_u32(val
, optval_addr
))
2755 return -TARGET_EFAULT
;
2756 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2763 case IP_ROUTER_ALERT
:
2767 case IP_MTU_DISCOVER
:
2773 case IP_MULTICAST_TTL
:
2774 case IP_MULTICAST_LOOP
:
2776 if (optlen
>= sizeof(uint32_t)) {
2777 if (get_user_u32(val
, optval_addr
))
2778 return -TARGET_EFAULT
;
2779 } else if (optlen
>= 1) {
2780 if (get_user_u8(val
, optval_addr
))
2781 return -TARGET_EFAULT
;
2783 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2785 case IP_ADD_MEMBERSHIP
:
2786 case IP_DROP_MEMBERSHIP
:
2787 if (optlen
< sizeof (struct target_ip_mreq
) ||
2788 optlen
> sizeof (struct target_ip_mreqn
))
2789 return -TARGET_EINVAL
;
2791 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
2792 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
2793 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
2796 case IP_BLOCK_SOURCE
:
2797 case IP_UNBLOCK_SOURCE
:
2798 case IP_ADD_SOURCE_MEMBERSHIP
:
2799 case IP_DROP_SOURCE_MEMBERSHIP
:
2800 if (optlen
!= sizeof (struct target_ip_mreq_source
))
2801 return -TARGET_EINVAL
;
2803 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2804 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2805 unlock_user (ip_mreq_source
, optval_addr
, 0);
2814 case IPV6_MTU_DISCOVER
:
2817 case IPV6_RECVPKTINFO
:
2819 if (optlen
< sizeof(uint32_t)) {
2820 return -TARGET_EINVAL
;
2822 if (get_user_u32(val
, optval_addr
)) {
2823 return -TARGET_EFAULT
;
2825 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2826 &val
, sizeof(val
)));
2835 /* struct icmp_filter takes an u32 value */
2836 if (optlen
< sizeof(uint32_t)) {
2837 return -TARGET_EINVAL
;
2840 if (get_user_u32(val
, optval_addr
)) {
2841 return -TARGET_EFAULT
;
2843 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2844 &val
, sizeof(val
)));
2851 case TARGET_SOL_SOCKET
:
2853 case TARGET_SO_RCVTIMEO
:
2857 optname
= SO_RCVTIMEO
;
2860 if (optlen
!= sizeof(struct target_timeval
)) {
2861 return -TARGET_EINVAL
;
2864 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2865 return -TARGET_EFAULT
;
2868 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2872 case TARGET_SO_SNDTIMEO
:
2873 optname
= SO_SNDTIMEO
;
2875 case TARGET_SO_ATTACH_FILTER
:
2877 struct target_sock_fprog
*tfprog
;
2878 struct target_sock_filter
*tfilter
;
2879 struct sock_fprog fprog
;
2880 struct sock_filter
*filter
;
2883 if (optlen
!= sizeof(*tfprog
)) {
2884 return -TARGET_EINVAL
;
2886 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2887 return -TARGET_EFAULT
;
2889 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2890 tswapal(tfprog
->filter
), 0)) {
2891 unlock_user_struct(tfprog
, optval_addr
, 1);
2892 return -TARGET_EFAULT
;
2895 fprog
.len
= tswap16(tfprog
->len
);
2896 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2897 if (filter
== NULL
) {
2898 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2899 unlock_user_struct(tfprog
, optval_addr
, 1);
2900 return -TARGET_ENOMEM
;
2902 for (i
= 0; i
< fprog
.len
; i
++) {
2903 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2904 filter
[i
].jt
= tfilter
[i
].jt
;
2905 filter
[i
].jf
= tfilter
[i
].jf
;
2906 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2908 fprog
.filter
= filter
;
2910 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2911 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2914 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2915 unlock_user_struct(tfprog
, optval_addr
, 1);
2918 case TARGET_SO_BINDTODEVICE
:
2920 char *dev_ifname
, *addr_ifname
;
2922 if (optlen
> IFNAMSIZ
- 1) {
2923 optlen
= IFNAMSIZ
- 1;
2925 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2927 return -TARGET_EFAULT
;
2929 optname
= SO_BINDTODEVICE
;
2930 addr_ifname
= alloca(IFNAMSIZ
);
2931 memcpy(addr_ifname
, dev_ifname
, optlen
);
2932 addr_ifname
[optlen
] = 0;
2933 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2934 addr_ifname
, optlen
));
2935 unlock_user (dev_ifname
, optval_addr
, 0);
2938 /* Options with 'int' argument. */
2939 case TARGET_SO_DEBUG
:
2942 case TARGET_SO_REUSEADDR
:
2943 optname
= SO_REUSEADDR
;
2945 case TARGET_SO_TYPE
:
2948 case TARGET_SO_ERROR
:
2951 case TARGET_SO_DONTROUTE
:
2952 optname
= SO_DONTROUTE
;
2954 case TARGET_SO_BROADCAST
:
2955 optname
= SO_BROADCAST
;
2957 case TARGET_SO_SNDBUF
:
2958 optname
= SO_SNDBUF
;
2960 case TARGET_SO_SNDBUFFORCE
:
2961 optname
= SO_SNDBUFFORCE
;
2963 case TARGET_SO_RCVBUF
:
2964 optname
= SO_RCVBUF
;
2966 case TARGET_SO_RCVBUFFORCE
:
2967 optname
= SO_RCVBUFFORCE
;
2969 case TARGET_SO_KEEPALIVE
:
2970 optname
= SO_KEEPALIVE
;
2972 case TARGET_SO_OOBINLINE
:
2973 optname
= SO_OOBINLINE
;
2975 case TARGET_SO_NO_CHECK
:
2976 optname
= SO_NO_CHECK
;
2978 case TARGET_SO_PRIORITY
:
2979 optname
= SO_PRIORITY
;
2982 case TARGET_SO_BSDCOMPAT
:
2983 optname
= SO_BSDCOMPAT
;
2986 case TARGET_SO_PASSCRED
:
2987 optname
= SO_PASSCRED
;
2989 case TARGET_SO_PASSSEC
:
2990 optname
= SO_PASSSEC
;
2992 case TARGET_SO_TIMESTAMP
:
2993 optname
= SO_TIMESTAMP
;
2995 case TARGET_SO_RCVLOWAT
:
2996 optname
= SO_RCVLOWAT
;
3002 if (optlen
< sizeof(uint32_t))
3003 return -TARGET_EINVAL
;
3005 if (get_user_u32(val
, optval_addr
))
3006 return -TARGET_EFAULT
;
3007 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
3011 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
3012 ret
= -TARGET_ENOPROTOOPT
;
3017 /* do_getsockopt() Must return target values and target errnos. */
3018 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
3019 abi_ulong optval_addr
, abi_ulong optlen
)
3026 case TARGET_SOL_SOCKET
:
3029 /* These don't just return a single integer */
3030 case TARGET_SO_LINGER
:
3031 case TARGET_SO_RCVTIMEO
:
3032 case TARGET_SO_SNDTIMEO
:
3033 case TARGET_SO_PEERNAME
:
3035 case TARGET_SO_PEERCRED
: {
3038 struct target_ucred
*tcr
;
3040 if (get_user_u32(len
, optlen
)) {
3041 return -TARGET_EFAULT
;
3044 return -TARGET_EINVAL
;
3048 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
3056 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
3057 return -TARGET_EFAULT
;
3059 __put_user(cr
.pid
, &tcr
->pid
);
3060 __put_user(cr
.uid
, &tcr
->uid
);
3061 __put_user(cr
.gid
, &tcr
->gid
);
3062 unlock_user_struct(tcr
, optval_addr
, 1);
3063 if (put_user_u32(len
, optlen
)) {
3064 return -TARGET_EFAULT
;
3068 /* Options with 'int' argument. */
3069 case TARGET_SO_DEBUG
:
3072 case TARGET_SO_REUSEADDR
:
3073 optname
= SO_REUSEADDR
;
3075 case TARGET_SO_TYPE
:
3078 case TARGET_SO_ERROR
:
3081 case TARGET_SO_DONTROUTE
:
3082 optname
= SO_DONTROUTE
;
3084 case TARGET_SO_BROADCAST
:
3085 optname
= SO_BROADCAST
;
3087 case TARGET_SO_SNDBUF
:
3088 optname
= SO_SNDBUF
;
3090 case TARGET_SO_RCVBUF
:
3091 optname
= SO_RCVBUF
;
3093 case TARGET_SO_KEEPALIVE
:
3094 optname
= SO_KEEPALIVE
;
3096 case TARGET_SO_OOBINLINE
:
3097 optname
= SO_OOBINLINE
;
3099 case TARGET_SO_NO_CHECK
:
3100 optname
= SO_NO_CHECK
;
3102 case TARGET_SO_PRIORITY
:
3103 optname
= SO_PRIORITY
;
3106 case TARGET_SO_BSDCOMPAT
:
3107 optname
= SO_BSDCOMPAT
;
3110 case TARGET_SO_PASSCRED
:
3111 optname
= SO_PASSCRED
;
3113 case TARGET_SO_TIMESTAMP
:
3114 optname
= SO_TIMESTAMP
;
3116 case TARGET_SO_RCVLOWAT
:
3117 optname
= SO_RCVLOWAT
;
3119 case TARGET_SO_ACCEPTCONN
:
3120 optname
= SO_ACCEPTCONN
;
3127 /* TCP options all take an 'int' value. */
3129 if (get_user_u32(len
, optlen
))
3130 return -TARGET_EFAULT
;
3132 return -TARGET_EINVAL
;
3134 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3137 if (optname
== SO_TYPE
) {
3138 val
= host_to_target_sock_type(val
);
3143 if (put_user_u32(val
, optval_addr
))
3144 return -TARGET_EFAULT
;
3146 if (put_user_u8(val
, optval_addr
))
3147 return -TARGET_EFAULT
;
3149 if (put_user_u32(len
, optlen
))
3150 return -TARGET_EFAULT
;
3157 case IP_ROUTER_ALERT
:
3161 case IP_MTU_DISCOVER
:
3167 case IP_MULTICAST_TTL
:
3168 case IP_MULTICAST_LOOP
:
3169 if (get_user_u32(len
, optlen
))
3170 return -TARGET_EFAULT
;
3172 return -TARGET_EINVAL
;
3174 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3177 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
3179 if (put_user_u32(len
, optlen
)
3180 || put_user_u8(val
, optval_addr
))
3181 return -TARGET_EFAULT
;
3183 if (len
> sizeof(int))
3185 if (put_user_u32(len
, optlen
)
3186 || put_user_u32(val
, optval_addr
))
3187 return -TARGET_EFAULT
;
3191 ret
= -TARGET_ENOPROTOOPT
;
3197 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
3199 ret
= -TARGET_EOPNOTSUPP
;
3205 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
3206 abi_ulong count
, int copy
)
3208 struct target_iovec
*target_vec
;
3210 abi_ulong total_len
, max_len
;
3213 bool bad_address
= false;
3219 if (count
> IOV_MAX
) {
3224 vec
= g_try_new0(struct iovec
, count
);
3230 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3231 count
* sizeof(struct target_iovec
), 1);
3232 if (target_vec
== NULL
) {
3237 /* ??? If host page size > target page size, this will result in a
3238 value larger than what we can actually support. */
3239 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
3242 for (i
= 0; i
< count
; i
++) {
3243 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3244 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3249 } else if (len
== 0) {
3250 /* Zero length pointer is ignored. */
3251 vec
[i
].iov_base
= 0;
3253 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
3254 /* If the first buffer pointer is bad, this is a fault. But
3255 * subsequent bad buffers will result in a partial write; this
3256 * is realized by filling the vector with null pointers and
3258 if (!vec
[i
].iov_base
) {
3269 if (len
> max_len
- total_len
) {
3270 len
= max_len
- total_len
;
3273 vec
[i
].iov_len
= len
;
3277 unlock_user(target_vec
, target_addr
, 0);
3282 if (tswapal(target_vec
[i
].iov_len
) > 0) {
3283 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
3286 unlock_user(target_vec
, target_addr
, 0);
3293 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
3294 abi_ulong count
, int copy
)
3296 struct target_iovec
*target_vec
;
3299 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3300 count
* sizeof(struct target_iovec
), 1);
3302 for (i
= 0; i
< count
; i
++) {
3303 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3304 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3308 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
3310 unlock_user(target_vec
, target_addr
, 0);
3316 static inline int target_to_host_sock_type(int *type
)
3319 int target_type
= *type
;
3321 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
3322 case TARGET_SOCK_DGRAM
:
3323 host_type
= SOCK_DGRAM
;
3325 case TARGET_SOCK_STREAM
:
3326 host_type
= SOCK_STREAM
;
3329 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
3332 if (target_type
& TARGET_SOCK_CLOEXEC
) {
3333 #if defined(SOCK_CLOEXEC)
3334 host_type
|= SOCK_CLOEXEC
;
3336 return -TARGET_EINVAL
;
3339 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3340 #if defined(SOCK_NONBLOCK)
3341 host_type
|= SOCK_NONBLOCK
;
3342 #elif !defined(O_NONBLOCK)
3343 return -TARGET_EINVAL
;
3350 /* Try to emulate socket type flags after socket creation. */
3351 static int sock_flags_fixup(int fd
, int target_type
)
3353 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3354 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3355 int flags
= fcntl(fd
, F_GETFL
);
3356 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
3358 return -TARGET_EINVAL
;
3365 static abi_long
packet_target_to_host_sockaddr(void *host_addr
,
3366 abi_ulong target_addr
,
3369 struct sockaddr
*addr
= host_addr
;
3370 struct target_sockaddr
*target_saddr
;
3372 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
3373 if (!target_saddr
) {
3374 return -TARGET_EFAULT
;
3377 memcpy(addr
, target_saddr
, len
);
3378 addr
->sa_family
= tswap16(target_saddr
->sa_family
);
3379 /* spkt_protocol is big-endian */
3381 unlock_user(target_saddr
, target_addr
, 0);
3385 static TargetFdTrans target_packet_trans
= {
3386 .target_to_host_addr
= packet_target_to_host_sockaddr
,
3389 #ifdef CONFIG_RTNETLINK
3390 static abi_long
netlink_route_target_to_host(void *buf
, size_t len
)
3394 ret
= target_to_host_nlmsg_route(buf
, len
);
3402 static abi_long
netlink_route_host_to_target(void *buf
, size_t len
)
3406 ret
= host_to_target_nlmsg_route(buf
, len
);
3414 static TargetFdTrans target_netlink_route_trans
= {
3415 .target_to_host_data
= netlink_route_target_to_host
,
3416 .host_to_target_data
= netlink_route_host_to_target
,
3418 #endif /* CONFIG_RTNETLINK */
3420 static abi_long
netlink_audit_target_to_host(void *buf
, size_t len
)
3424 ret
= target_to_host_nlmsg_audit(buf
, len
);
3432 static abi_long
netlink_audit_host_to_target(void *buf
, size_t len
)
3436 ret
= host_to_target_nlmsg_audit(buf
, len
);
3444 static TargetFdTrans target_netlink_audit_trans
= {
3445 .target_to_host_data
= netlink_audit_target_to_host
,
3446 .host_to_target_data
= netlink_audit_host_to_target
,
3449 /* do_socket() Must return target values and target errnos. */
3450 static abi_long
do_socket(int domain
, int type
, int protocol
)
3452 int target_type
= type
;
3455 ret
= target_to_host_sock_type(&type
);
3460 if (domain
== PF_NETLINK
&& !(
3461 #ifdef CONFIG_RTNETLINK
3462 protocol
== NETLINK_ROUTE
||
3464 protocol
== NETLINK_KOBJECT_UEVENT
||
3465 protocol
== NETLINK_AUDIT
)) {
3466 return -EPFNOSUPPORT
;
3469 if (domain
== AF_PACKET
||
3470 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
3471 protocol
= tswap16(protocol
);
3474 ret
= get_errno(socket(domain
, type
, protocol
));
3476 ret
= sock_flags_fixup(ret
, target_type
);
3477 if (type
== SOCK_PACKET
) {
3478 /* Manage an obsolete case :
3479 * if socket type is SOCK_PACKET, bind by name
3481 fd_trans_register(ret
, &target_packet_trans
);
3482 } else if (domain
== PF_NETLINK
) {
3484 #ifdef CONFIG_RTNETLINK
3486 fd_trans_register(ret
, &target_netlink_route_trans
);
3489 case NETLINK_KOBJECT_UEVENT
:
3490 /* nothing to do: messages are strings */
3493 fd_trans_register(ret
, &target_netlink_audit_trans
);
3496 g_assert_not_reached();
3503 /* do_bind() Must return target values and target errnos. */
3504 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
3510 if ((int)addrlen
< 0) {
3511 return -TARGET_EINVAL
;
3514 addr
= alloca(addrlen
+1);
3516 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3520 return get_errno(bind(sockfd
, addr
, addrlen
));
3523 /* do_connect() Must return target values and target errnos. */
3524 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
3530 if ((int)addrlen
< 0) {
3531 return -TARGET_EINVAL
;
3534 addr
= alloca(addrlen
+1);
3536 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3540 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
3543 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3544 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
3545 int flags
, int send
)
3551 abi_ulong target_vec
;
3553 if (msgp
->msg_name
) {
3554 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
3555 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
3556 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
3557 tswapal(msgp
->msg_name
),
3559 if (ret
== -TARGET_EFAULT
) {
3560 /* For connected sockets msg_name and msg_namelen must
3561 * be ignored, so returning EFAULT immediately is wrong.
3562 * Instead, pass a bad msg_name to the host kernel, and
3563 * let it decide whether to return EFAULT or not.
3565 msg
.msg_name
= (void *)-1;
3570 msg
.msg_name
= NULL
;
3571 msg
.msg_namelen
= 0;
3573 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3574 msg
.msg_control
= alloca(msg
.msg_controllen
);
3575 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3577 count
= tswapal(msgp
->msg_iovlen
);
3578 target_vec
= tswapal(msgp
->msg_iov
);
3580 if (count
> IOV_MAX
) {
3581 /* sendrcvmsg returns a different errno for this condition than
3582 * readv/writev, so we must catch it here before lock_iovec() does.
3584 ret
= -TARGET_EMSGSIZE
;
3588 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3589 target_vec
, count
, send
);
3591 ret
= -host_to_target_errno(errno
);
3594 msg
.msg_iovlen
= count
;
3598 if (fd_trans_target_to_host_data(fd
)) {
3601 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3602 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3603 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3604 msg
.msg_iov
->iov_len
);
3606 msg
.msg_iov
->iov_base
= host_msg
;
3607 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3611 ret
= target_to_host_cmsg(&msg
, msgp
);
3613 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3617 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3618 if (!is_error(ret
)) {
3620 if (fd_trans_host_to_target_data(fd
)) {
3621 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3624 ret
= host_to_target_cmsg(msgp
, &msg
);
3626 if (!is_error(ret
)) {
3627 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3628 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
3629 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3630 msg
.msg_name
, msg
.msg_namelen
);
3642 unlock_iovec(vec
, target_vec
, count
, !send
);
3647 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3648 int flags
, int send
)
3651 struct target_msghdr
*msgp
;
3653 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3657 return -TARGET_EFAULT
;
3659 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3660 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3664 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3665 * so it might not have this *mmsg-specific flag either.
3667 #ifndef MSG_WAITFORONE
3668 #define MSG_WAITFORONE 0x10000
3671 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3672 unsigned int vlen
, unsigned int flags
,
3675 struct target_mmsghdr
*mmsgp
;
3679 if (vlen
> UIO_MAXIOV
) {
3683 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3685 return -TARGET_EFAULT
;
3688 for (i
= 0; i
< vlen
; i
++) {
3689 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3690 if (is_error(ret
)) {
3693 mmsgp
[i
].msg_len
= tswap32(ret
);
3694 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3695 if (flags
& MSG_WAITFORONE
) {
3696 flags
|= MSG_DONTWAIT
;
3700 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3702 /* Return number of datagrams sent if we sent any at all;
3703 * otherwise return the error.
3711 /* do_accept4() Must return target values and target errnos. */
3712 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3713 abi_ulong target_addrlen_addr
, int flags
)
3720 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3722 if (target_addr
== 0) {
3723 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3726 /* linux returns EINVAL if addrlen pointer is invalid */
3727 if (get_user_u32(addrlen
, target_addrlen_addr
))
3728 return -TARGET_EINVAL
;
3730 if ((int)addrlen
< 0) {
3731 return -TARGET_EINVAL
;
3734 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3735 return -TARGET_EINVAL
;
3737 addr
= alloca(addrlen
);
3739 ret
= get_errno(safe_accept4(fd
, addr
, &addrlen
, host_flags
));
3740 if (!is_error(ret
)) {
3741 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3742 if (put_user_u32(addrlen
, target_addrlen_addr
))
3743 ret
= -TARGET_EFAULT
;
3748 /* do_getpeername() Must return target values and target errnos. */
3749 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3750 abi_ulong target_addrlen_addr
)
3756 if (get_user_u32(addrlen
, target_addrlen_addr
))
3757 return -TARGET_EFAULT
;
3759 if ((int)addrlen
< 0) {
3760 return -TARGET_EINVAL
;
3763 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3764 return -TARGET_EFAULT
;
3766 addr
= alloca(addrlen
);
3768 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
3769 if (!is_error(ret
)) {
3770 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3771 if (put_user_u32(addrlen
, target_addrlen_addr
))
3772 ret
= -TARGET_EFAULT
;
3777 /* do_getsockname() Must return target values and target errnos. */
3778 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3779 abi_ulong target_addrlen_addr
)
3785 if (get_user_u32(addrlen
, target_addrlen_addr
))
3786 return -TARGET_EFAULT
;
3788 if ((int)addrlen
< 0) {
3789 return -TARGET_EINVAL
;
3792 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3793 return -TARGET_EFAULT
;
3795 addr
= alloca(addrlen
);
3797 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
3798 if (!is_error(ret
)) {
3799 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3800 if (put_user_u32(addrlen
, target_addrlen_addr
))
3801 ret
= -TARGET_EFAULT
;
3806 /* do_socketpair() Must return target values and target errnos. */
3807 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3808 abi_ulong target_tab_addr
)
3813 target_to_host_sock_type(&type
);
3815 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3816 if (!is_error(ret
)) {
3817 if (put_user_s32(tab
[0], target_tab_addr
)
3818 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3819 ret
= -TARGET_EFAULT
;
3824 /* do_sendto() Must return target values and target errnos. */
3825 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3826 abi_ulong target_addr
, socklen_t addrlen
)
3830 void *copy_msg
= NULL
;
3833 if ((int)addrlen
< 0) {
3834 return -TARGET_EINVAL
;
3837 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3839 return -TARGET_EFAULT
;
3840 if (fd_trans_target_to_host_data(fd
)) {
3841 copy_msg
= host_msg
;
3842 host_msg
= g_malloc(len
);
3843 memcpy(host_msg
, copy_msg
, len
);
3844 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3850 addr
= alloca(addrlen
+1);
3851 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3855 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3857 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3862 host_msg
= copy_msg
;
3864 unlock_user(host_msg
, msg
, 0);
3868 /* do_recvfrom() Must return target values and target errnos. */
3869 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3870 abi_ulong target_addr
,
3871 abi_ulong target_addrlen
)
3878 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3880 return -TARGET_EFAULT
;
3882 if (get_user_u32(addrlen
, target_addrlen
)) {
3883 ret
= -TARGET_EFAULT
;
3886 if ((int)addrlen
< 0) {
3887 ret
= -TARGET_EINVAL
;
3890 addr
= alloca(addrlen
);
3891 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
3894 addr
= NULL
; /* To keep compiler quiet. */
3895 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
3897 if (!is_error(ret
)) {
3898 if (fd_trans_host_to_target_data(fd
)) {
3899 ret
= fd_trans_host_to_target_data(fd
)(host_msg
, ret
);
3902 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3903 if (put_user_u32(addrlen
, target_addrlen
)) {
3904 ret
= -TARGET_EFAULT
;
3908 unlock_user(host_msg
, msg
, len
);
3911 unlock_user(host_msg
, msg
, 0);
3916 #ifdef TARGET_NR_socketcall
3917 /* do_socketcall() must return target values and target errnos. */
3918 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
3920 static const unsigned nargs
[] = { /* number of arguments per operation */
3921 [TARGET_SYS_SOCKET
] = 3, /* domain, type, protocol */
3922 [TARGET_SYS_BIND
] = 3, /* fd, addr, addrlen */
3923 [TARGET_SYS_CONNECT
] = 3, /* fd, addr, addrlen */
3924 [TARGET_SYS_LISTEN
] = 2, /* fd, backlog */
3925 [TARGET_SYS_ACCEPT
] = 3, /* fd, addr, addrlen */
3926 [TARGET_SYS_GETSOCKNAME
] = 3, /* fd, addr, addrlen */
3927 [TARGET_SYS_GETPEERNAME
] = 3, /* fd, addr, addrlen */
3928 [TARGET_SYS_SOCKETPAIR
] = 4, /* domain, type, protocol, tab */
3929 [TARGET_SYS_SEND
] = 4, /* fd, msg, len, flags */
3930 [TARGET_SYS_RECV
] = 4, /* fd, msg, len, flags */
3931 [TARGET_SYS_SENDTO
] = 6, /* fd, msg, len, flags, addr, addrlen */
3932 [TARGET_SYS_RECVFROM
] = 6, /* fd, msg, len, flags, addr, addrlen */
3933 [TARGET_SYS_SHUTDOWN
] = 2, /* fd, how */
3934 [TARGET_SYS_SETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
3935 [TARGET_SYS_GETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
3936 [TARGET_SYS_SENDMSG
] = 3, /* fd, msg, flags */
3937 [TARGET_SYS_RECVMSG
] = 3, /* fd, msg, flags */
3938 [TARGET_SYS_ACCEPT4
] = 4, /* fd, addr, addrlen, flags */
3939 [TARGET_SYS_RECVMMSG
] = 4, /* fd, msgvec, vlen, flags */
3940 [TARGET_SYS_SENDMMSG
] = 4, /* fd, msgvec, vlen, flags */
3942 abi_long a
[6]; /* max 6 args */
3945 /* check the range of the first argument num */
3946 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3947 if (num
< 1 || num
> TARGET_SYS_SENDMMSG
) {
3948 return -TARGET_EINVAL
;
3950 /* ensure we have space for args */
3951 if (nargs
[num
] > ARRAY_SIZE(a
)) {
3952 return -TARGET_EINVAL
;
3954 /* collect the arguments in a[] according to nargs[] */
3955 for (i
= 0; i
< nargs
[num
]; ++i
) {
3956 if (get_user_ual(a
[i
], vptr
+ i
* sizeof(abi_long
)) != 0) {
3957 return -TARGET_EFAULT
;
3960 /* now when we have the args, invoke the appropriate underlying function */
3962 case TARGET_SYS_SOCKET
: /* domain, type, protocol */
3963 return do_socket(a
[0], a
[1], a
[2]);
3964 case TARGET_SYS_BIND
: /* sockfd, addr, addrlen */
3965 return do_bind(a
[0], a
[1], a
[2]);
3966 case TARGET_SYS_CONNECT
: /* sockfd, addr, addrlen */
3967 return do_connect(a
[0], a
[1], a
[2]);
3968 case TARGET_SYS_LISTEN
: /* sockfd, backlog */
3969 return get_errno(listen(a
[0], a
[1]));
3970 case TARGET_SYS_ACCEPT
: /* sockfd, addr, addrlen */
3971 return do_accept4(a
[0], a
[1], a
[2], 0);
3972 case TARGET_SYS_GETSOCKNAME
: /* sockfd, addr, addrlen */
3973 return do_getsockname(a
[0], a
[1], a
[2]);
3974 case TARGET_SYS_GETPEERNAME
: /* sockfd, addr, addrlen */
3975 return do_getpeername(a
[0], a
[1], a
[2]);
3976 case TARGET_SYS_SOCKETPAIR
: /* domain, type, protocol, tab */
3977 return do_socketpair(a
[0], a
[1], a
[2], a
[3]);
3978 case TARGET_SYS_SEND
: /* sockfd, msg, len, flags */
3979 return do_sendto(a
[0], a
[1], a
[2], a
[3], 0, 0);
3980 case TARGET_SYS_RECV
: /* sockfd, msg, len, flags */
3981 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], 0, 0);
3982 case TARGET_SYS_SENDTO
: /* sockfd, msg, len, flags, addr, addrlen */
3983 return do_sendto(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3984 case TARGET_SYS_RECVFROM
: /* sockfd, msg, len, flags, addr, addrlen */
3985 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3986 case TARGET_SYS_SHUTDOWN
: /* sockfd, how */
3987 return get_errno(shutdown(a
[0], a
[1]));
3988 case TARGET_SYS_SETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
3989 return do_setsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3990 case TARGET_SYS_GETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
3991 return do_getsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3992 case TARGET_SYS_SENDMSG
: /* sockfd, msg, flags */
3993 return do_sendrecvmsg(a
[0], a
[1], a
[2], 1);
3994 case TARGET_SYS_RECVMSG
: /* sockfd, msg, flags */
3995 return do_sendrecvmsg(a
[0], a
[1], a
[2], 0);
3996 case TARGET_SYS_ACCEPT4
: /* sockfd, addr, addrlen, flags */
3997 return do_accept4(a
[0], a
[1], a
[2], a
[3]);
3998 case TARGET_SYS_RECVMMSG
: /* sockfd, msgvec, vlen, flags */
3999 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 0);
4000 case TARGET_SYS_SENDMMSG
: /* sockfd, msgvec, vlen, flags */
4001 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 1);
4003 gemu_log("Unsupported socketcall: %d\n", num
);
4004 return -TARGET_EINVAL
;
4009 #define N_SHM_REGIONS 32
4011 static struct shm_region
{
4015 } shm_regions
[N_SHM_REGIONS
];
4017 #ifndef TARGET_SEMID64_DS
4018 /* asm-generic version of this struct */
4019 struct target_semid64_ds
4021 struct target_ipc_perm sem_perm
;
4022 abi_ulong sem_otime
;
4023 #if TARGET_ABI_BITS == 32
4024 abi_ulong __unused1
;
4026 abi_ulong sem_ctime
;
4027 #if TARGET_ABI_BITS == 32
4028 abi_ulong __unused2
;
4030 abi_ulong sem_nsems
;
4031 abi_ulong __unused3
;
4032 abi_ulong __unused4
;
4036 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
4037 abi_ulong target_addr
)
4039 struct target_ipc_perm
*target_ip
;
4040 struct target_semid64_ds
*target_sd
;
4042 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4043 return -TARGET_EFAULT
;
4044 target_ip
= &(target_sd
->sem_perm
);
4045 host_ip
->__key
= tswap32(target_ip
->__key
);
4046 host_ip
->uid
= tswap32(target_ip
->uid
);
4047 host_ip
->gid
= tswap32(target_ip
->gid
);
4048 host_ip
->cuid
= tswap32(target_ip
->cuid
);
4049 host_ip
->cgid
= tswap32(target_ip
->cgid
);
4050 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4051 host_ip
->mode
= tswap32(target_ip
->mode
);
4053 host_ip
->mode
= tswap16(target_ip
->mode
);
4055 #if defined(TARGET_PPC)
4056 host_ip
->__seq
= tswap32(target_ip
->__seq
);
4058 host_ip
->__seq
= tswap16(target_ip
->__seq
);
4060 unlock_user_struct(target_sd
, target_addr
, 0);
4064 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
4065 struct ipc_perm
*host_ip
)
4067 struct target_ipc_perm
*target_ip
;
4068 struct target_semid64_ds
*target_sd
;
4070 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4071 return -TARGET_EFAULT
;
4072 target_ip
= &(target_sd
->sem_perm
);
4073 target_ip
->__key
= tswap32(host_ip
->__key
);
4074 target_ip
->uid
= tswap32(host_ip
->uid
);
4075 target_ip
->gid
= tswap32(host_ip
->gid
);
4076 target_ip
->cuid
= tswap32(host_ip
->cuid
);
4077 target_ip
->cgid
= tswap32(host_ip
->cgid
);
4078 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4079 target_ip
->mode
= tswap32(host_ip
->mode
);
4081 target_ip
->mode
= tswap16(host_ip
->mode
);
4083 #if defined(TARGET_PPC)
4084 target_ip
->__seq
= tswap32(host_ip
->__seq
);
4086 target_ip
->__seq
= tswap16(host_ip
->__seq
);
4088 unlock_user_struct(target_sd
, target_addr
, 1);
4092 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
4093 abi_ulong target_addr
)
4095 struct target_semid64_ds
*target_sd
;
4097 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4098 return -TARGET_EFAULT
;
4099 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
4100 return -TARGET_EFAULT
;
4101 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
4102 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
4103 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
4104 unlock_user_struct(target_sd
, target_addr
, 0);
4108 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
4109 struct semid_ds
*host_sd
)
4111 struct target_semid64_ds
*target_sd
;
4113 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4114 return -TARGET_EFAULT
;
4115 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
4116 return -TARGET_EFAULT
;
4117 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
4118 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
4119 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
4120 unlock_user_struct(target_sd
, target_addr
, 1);
4124 struct target_seminfo
{
4137 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
4138 struct seminfo
*host_seminfo
)
4140 struct target_seminfo
*target_seminfo
;
4141 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
4142 return -TARGET_EFAULT
;
4143 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
4144 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
4145 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
4146 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
4147 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
4148 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
4149 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
4150 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
4151 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
4152 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
4153 unlock_user_struct(target_seminfo
, target_addr
, 1);
4159 struct semid_ds
*buf
;
4160 unsigned short *array
;
4161 struct seminfo
*__buf
;
4164 union target_semun
{
4171 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
4172 abi_ulong target_addr
)
4175 unsigned short *array
;
4177 struct semid_ds semid_ds
;
4180 semun
.buf
= &semid_ds
;
4182 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4184 return get_errno(ret
);
4186 nsems
= semid_ds
.sem_nsems
;
4188 *host_array
= g_try_new(unsigned short, nsems
);
4190 return -TARGET_ENOMEM
;
4192 array
= lock_user(VERIFY_READ
, target_addr
,
4193 nsems
*sizeof(unsigned short), 1);
4195 g_free(*host_array
);
4196 return -TARGET_EFAULT
;
4199 for(i
=0; i
<nsems
; i
++) {
4200 __get_user((*host_array
)[i
], &array
[i
]);
4202 unlock_user(array
, target_addr
, 0);
4207 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
4208 unsigned short **host_array
)
4211 unsigned short *array
;
4213 struct semid_ds semid_ds
;
4216 semun
.buf
= &semid_ds
;
4218 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4220 return get_errno(ret
);
4222 nsems
= semid_ds
.sem_nsems
;
4224 array
= lock_user(VERIFY_WRITE
, target_addr
,
4225 nsems
*sizeof(unsigned short), 0);
4227 return -TARGET_EFAULT
;
4229 for(i
=0; i
<nsems
; i
++) {
4230 __put_user((*host_array
)[i
], &array
[i
]);
4232 g_free(*host_array
);
4233 unlock_user(array
, target_addr
, 1);
4238 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
4239 abi_ulong target_arg
)
4241 union target_semun target_su
= { .buf
= target_arg
};
4243 struct semid_ds dsarg
;
4244 unsigned short *array
= NULL
;
4245 struct seminfo seminfo
;
4246 abi_long ret
= -TARGET_EINVAL
;
4253 /* In 64 bit cross-endian situations, we will erroneously pick up
4254 * the wrong half of the union for the "val" element. To rectify
4255 * this, the entire 8-byte structure is byteswapped, followed by
4256 * a swap of the 4 byte val field. In other cases, the data is
4257 * already in proper host byte order. */
4258 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
4259 target_su
.buf
= tswapal(target_su
.buf
);
4260 arg
.val
= tswap32(target_su
.val
);
4262 arg
.val
= target_su
.val
;
4264 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4268 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
4272 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4273 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
4280 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
4284 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4285 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
4291 arg
.__buf
= &seminfo
;
4292 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4293 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
4301 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
4308 struct target_sembuf
{
4309 unsigned short sem_num
;
4314 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
4315 abi_ulong target_addr
,
4318 struct target_sembuf
*target_sembuf
;
4321 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
4322 nsops
*sizeof(struct target_sembuf
), 1);
4324 return -TARGET_EFAULT
;
4326 for(i
=0; i
<nsops
; i
++) {
4327 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
4328 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
4329 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
4332 unlock_user(target_sembuf
, target_addr
, 0);
4337 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
4339 struct sembuf sops
[nsops
];
4341 if (target_to_host_sembuf(sops
, ptr
, nsops
))
4342 return -TARGET_EFAULT
;
4344 return get_errno(safe_semtimedop(semid
, sops
, nsops
, NULL
));
4347 struct target_msqid_ds
4349 struct target_ipc_perm msg_perm
;
4350 abi_ulong msg_stime
;
4351 #if TARGET_ABI_BITS == 32
4352 abi_ulong __unused1
;
4354 abi_ulong msg_rtime
;
4355 #if TARGET_ABI_BITS == 32
4356 abi_ulong __unused2
;
4358 abi_ulong msg_ctime
;
4359 #if TARGET_ABI_BITS == 32
4360 abi_ulong __unused3
;
4362 abi_ulong __msg_cbytes
;
4364 abi_ulong msg_qbytes
;
4365 abi_ulong msg_lspid
;
4366 abi_ulong msg_lrpid
;
4367 abi_ulong __unused4
;
4368 abi_ulong __unused5
;
4371 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
4372 abi_ulong target_addr
)
4374 struct target_msqid_ds
*target_md
;
4376 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
4377 return -TARGET_EFAULT
;
4378 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
4379 return -TARGET_EFAULT
;
4380 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
4381 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
4382 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
4383 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
4384 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
4385 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
4386 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
4387 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
4388 unlock_user_struct(target_md
, target_addr
, 0);
4392 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
4393 struct msqid_ds
*host_md
)
4395 struct target_msqid_ds
*target_md
;
4397 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
4398 return -TARGET_EFAULT
;
4399 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
4400 return -TARGET_EFAULT
;
4401 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
4402 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
4403 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
4404 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
4405 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
4406 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
4407 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
4408 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
4409 unlock_user_struct(target_md
, target_addr
, 1);
4413 struct target_msginfo
{
4421 unsigned short int msgseg
;
4424 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
4425 struct msginfo
*host_msginfo
)
4427 struct target_msginfo
*target_msginfo
;
4428 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
4429 return -TARGET_EFAULT
;
4430 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
4431 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
4432 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
4433 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
4434 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
4435 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
4436 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
4437 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
4438 unlock_user_struct(target_msginfo
, target_addr
, 1);
4442 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
4444 struct msqid_ds dsarg
;
4445 struct msginfo msginfo
;
4446 abi_long ret
= -TARGET_EINVAL
;
4454 if (target_to_host_msqid_ds(&dsarg
,ptr
))
4455 return -TARGET_EFAULT
;
4456 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
4457 if (host_to_target_msqid_ds(ptr
,&dsarg
))
4458 return -TARGET_EFAULT
;
4461 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
4465 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
4466 if (host_to_target_msginfo(ptr
, &msginfo
))
4467 return -TARGET_EFAULT
;
4474 struct target_msgbuf
{
4479 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
4480 ssize_t msgsz
, int msgflg
)
4482 struct target_msgbuf
*target_mb
;
4483 struct msgbuf
*host_mb
;
4487 return -TARGET_EINVAL
;
4490 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
4491 return -TARGET_EFAULT
;
4492 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4494 unlock_user_struct(target_mb
, msgp
, 0);
4495 return -TARGET_ENOMEM
;
4497 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
4498 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
4499 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
4501 unlock_user_struct(target_mb
, msgp
, 0);
4506 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
4507 ssize_t msgsz
, abi_long msgtyp
,
4510 struct target_msgbuf
*target_mb
;
4512 struct msgbuf
*host_mb
;
4516 return -TARGET_EINVAL
;
4519 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
4520 return -TARGET_EFAULT
;
4522 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4524 ret
= -TARGET_ENOMEM
;
4527 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
4530 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
4531 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
4532 if (!target_mtext
) {
4533 ret
= -TARGET_EFAULT
;
4536 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
4537 unlock_user(target_mtext
, target_mtext_addr
, ret
);
4540 target_mb
->mtype
= tswapal(host_mb
->mtype
);
4544 unlock_user_struct(target_mb
, msgp
, 1);
4549 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
4550 abi_ulong target_addr
)
4552 struct target_shmid_ds
*target_sd
;
4554 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4555 return -TARGET_EFAULT
;
4556 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
4557 return -TARGET_EFAULT
;
4558 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4559 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4560 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4561 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4562 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4563 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4564 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4565 unlock_user_struct(target_sd
, target_addr
, 0);
4569 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
4570 struct shmid_ds
*host_sd
)
4572 struct target_shmid_ds
*target_sd
;
4574 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4575 return -TARGET_EFAULT
;
4576 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
4577 return -TARGET_EFAULT
;
4578 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4579 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4580 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4581 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4582 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4583 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4584 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4585 unlock_user_struct(target_sd
, target_addr
, 1);
4589 struct target_shminfo
{
4597 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
4598 struct shminfo
*host_shminfo
)
4600 struct target_shminfo
*target_shminfo
;
4601 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
4602 return -TARGET_EFAULT
;
4603 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
4604 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
4605 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
4606 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
4607 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
4608 unlock_user_struct(target_shminfo
, target_addr
, 1);
4612 struct target_shm_info
{
4617 abi_ulong swap_attempts
;
4618 abi_ulong swap_successes
;
4621 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
4622 struct shm_info
*host_shm_info
)
4624 struct target_shm_info
*target_shm_info
;
4625 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
4626 return -TARGET_EFAULT
;
4627 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
4628 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
4629 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
4630 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
4631 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
4632 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
4633 unlock_user_struct(target_shm_info
, target_addr
, 1);
4637 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
4639 struct shmid_ds dsarg
;
4640 struct shminfo shminfo
;
4641 struct shm_info shm_info
;
4642 abi_long ret
= -TARGET_EINVAL
;
4650 if (target_to_host_shmid_ds(&dsarg
, buf
))
4651 return -TARGET_EFAULT
;
4652 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
4653 if (host_to_target_shmid_ds(buf
, &dsarg
))
4654 return -TARGET_EFAULT
;
4657 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
4658 if (host_to_target_shminfo(buf
, &shminfo
))
4659 return -TARGET_EFAULT
;
4662 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
4663 if (host_to_target_shm_info(buf
, &shm_info
))
4664 return -TARGET_EFAULT
;
4669 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
4676 #ifndef TARGET_FORCE_SHMLBA
4677 /* For most architectures, SHMLBA is the same as the page size;
4678 * some architectures have larger values, in which case they should
4679 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4680 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4681 * and defining its own value for SHMLBA.
4683 * The kernel also permits SHMLBA to be set by the architecture to a
4684 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4685 * this means that addresses are rounded to the large size if
4686 * SHM_RND is set but addresses not aligned to that size are not rejected
4687 * as long as they are at least page-aligned. Since the only architecture
4688 * which uses this is ia64 this code doesn't provide for that oddity.
4690 static inline abi_ulong
target_shmlba(CPUArchState
*cpu_env
)
4692 return TARGET_PAGE_SIZE
;
4696 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
4697 int shmid
, abi_ulong shmaddr
, int shmflg
)
4701 struct shmid_ds shm_info
;
4705 /* find out the length of the shared memory segment */
4706 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4707 if (is_error(ret
)) {
4708 /* can't get length, bail out */
4712 shmlba
= target_shmlba(cpu_env
);
4714 if (shmaddr
& (shmlba
- 1)) {
4715 if (shmflg
& SHM_RND
) {
4716 shmaddr
&= ~(shmlba
- 1);
4718 return -TARGET_EINVAL
;
4725 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
4727 abi_ulong mmap_start
;
4729 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
4731 if (mmap_start
== -1) {
4733 host_raddr
= (void *)-1;
4735 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
4738 if (host_raddr
== (void *)-1) {
4740 return get_errno((long)host_raddr
);
4742 raddr
=h2g((unsigned long)host_raddr
);
4744 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
4745 PAGE_VALID
| PAGE_READ
|
4746 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
4748 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4749 if (!shm_regions
[i
].in_use
) {
4750 shm_regions
[i
].in_use
= true;
4751 shm_regions
[i
].start
= raddr
;
4752 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4762 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4766 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4767 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4768 shm_regions
[i
].in_use
= false;
4769 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
4774 return get_errno(shmdt(g2h(shmaddr
)));
4777 #ifdef TARGET_NR_ipc
4778 /* ??? This only works with linear mappings. */
4779 /* do_ipc() must return target values and target errnos. */
4780 static abi_long
do_ipc(CPUArchState
*cpu_env
,
4781 unsigned int call
, abi_long first
,
4782 abi_long second
, abi_long third
,
4783 abi_long ptr
, abi_long fifth
)
4788 version
= call
>> 16;
4793 ret
= do_semop(first
, ptr
, second
);
4797 ret
= get_errno(semget(first
, second
, third
));
4800 case IPCOP_semctl
: {
4801 /* The semun argument to semctl is passed by value, so dereference the
4804 get_user_ual(atptr
, ptr
);
4805 ret
= do_semctl(first
, second
, third
, atptr
);
4810 ret
= get_errno(msgget(first
, second
));
4814 ret
= do_msgsnd(first
, ptr
, second
, third
);
4818 ret
= do_msgctl(first
, second
, ptr
);
4825 struct target_ipc_kludge
{
4830 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
4831 ret
= -TARGET_EFAULT
;
4835 ret
= do_msgrcv(first
, tswapal(tmp
->msgp
), second
, tswapal(tmp
->msgtyp
), third
);
4837 unlock_user_struct(tmp
, ptr
, 0);
4841 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
4850 raddr
= do_shmat(cpu_env
, first
, ptr
, second
);
4851 if (is_error(raddr
))
4852 return get_errno(raddr
);
4853 if (put_user_ual(raddr
, third
))
4854 return -TARGET_EFAULT
;
4858 ret
= -TARGET_EINVAL
;
4863 ret
= do_shmdt(ptr
);
4867 /* IPC_* flag values are the same on all linux platforms */
4868 ret
= get_errno(shmget(first
, second
, third
));
4871 /* IPC_* and SHM_* command values are the same on all linux platforms */
4873 ret
= do_shmctl(first
, second
, ptr
);
4876 gemu_log("Unsupported ipc call: %d (version %d)\n", call
, version
);
4877 ret
= -TARGET_ENOSYS
;
4884 /* kernel structure types definitions */
4886 #define STRUCT(name, ...) STRUCT_ ## name,
4887 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4889 #include "syscall_types.h"
4893 #undef STRUCT_SPECIAL
4895 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4896 #define STRUCT_SPECIAL(name)
4897 #include "syscall_types.h"
4899 #undef STRUCT_SPECIAL
4901 typedef struct IOCTLEntry IOCTLEntry
;
4903 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4904 int fd
, int cmd
, abi_long arg
);
4908 unsigned int host_cmd
;
4911 do_ioctl_fn
*do_ioctl
;
4912 const argtype arg_type
[5];
4915 #define IOC_R 0x0001
4916 #define IOC_W 0x0002
4917 #define IOC_RW (IOC_R | IOC_W)
4919 #define MAX_STRUCT_SIZE 4096
4921 #ifdef CONFIG_FIEMAP
4922 /* So fiemap access checks don't overflow on 32 bit systems.
4923 * This is very slightly smaller than the limit imposed by
4924 * the underlying kernel.
4926 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4927 / sizeof(struct fiemap_extent))
4929 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4930 int fd
, int cmd
, abi_long arg
)
4932 /* The parameter for this ioctl is a struct fiemap followed
4933 * by an array of struct fiemap_extent whose size is set
4934 * in fiemap->fm_extent_count. The array is filled in by the
4937 int target_size_in
, target_size_out
;
4939 const argtype
*arg_type
= ie
->arg_type
;
4940 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
4943 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
4947 assert(arg_type
[0] == TYPE_PTR
);
4948 assert(ie
->access
== IOC_RW
);
4950 target_size_in
= thunk_type_size(arg_type
, 0);
4951 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
4953 return -TARGET_EFAULT
;
4955 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4956 unlock_user(argptr
, arg
, 0);
4957 fm
= (struct fiemap
*)buf_temp
;
4958 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
4959 return -TARGET_EINVAL
;
4962 outbufsz
= sizeof (*fm
) +
4963 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
4965 if (outbufsz
> MAX_STRUCT_SIZE
) {
4966 /* We can't fit all the extents into the fixed size buffer.
4967 * Allocate one that is large enough and use it instead.
4969 fm
= g_try_malloc(outbufsz
);
4971 return -TARGET_ENOMEM
;
4973 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
4976 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, fm
));
4977 if (!is_error(ret
)) {
4978 target_size_out
= target_size_in
;
4979 /* An extent_count of 0 means we were only counting the extents
4980 * so there are no structs to copy
4982 if (fm
->fm_extent_count
!= 0) {
4983 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
4985 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
4987 ret
= -TARGET_EFAULT
;
4989 /* Convert the struct fiemap */
4990 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
4991 if (fm
->fm_extent_count
!= 0) {
4992 p
= argptr
+ target_size_in
;
4993 /* ...and then all the struct fiemap_extents */
4994 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
4995 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
5000 unlock_user(argptr
, arg
, target_size_out
);
5010 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5011 int fd
, int cmd
, abi_long arg
)
5013 const argtype
*arg_type
= ie
->arg_type
;
5017 struct ifconf
*host_ifconf
;
5019 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
5020 int target_ifreq_size
;
5025 abi_long target_ifc_buf
;
5029 assert(arg_type
[0] == TYPE_PTR
);
5030 assert(ie
->access
== IOC_RW
);
5033 target_size
= thunk_type_size(arg_type
, 0);
5035 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5037 return -TARGET_EFAULT
;
5038 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5039 unlock_user(argptr
, arg
, 0);
5041 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
5042 target_ifc_len
= host_ifconf
->ifc_len
;
5043 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
5045 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
5046 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
5047 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
5049 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
5050 if (outbufsz
> MAX_STRUCT_SIZE
) {
5051 /* We can't fit all the extents into the fixed size buffer.
5052 * Allocate one that is large enough and use it instead.
5054 host_ifconf
= malloc(outbufsz
);
5056 return -TARGET_ENOMEM
;
5058 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
5061 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
5063 host_ifconf
->ifc_len
= host_ifc_len
;
5064 host_ifconf
->ifc_buf
= host_ifc_buf
;
5066 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
5067 if (!is_error(ret
)) {
5068 /* convert host ifc_len to target ifc_len */
5070 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
5071 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
5072 host_ifconf
->ifc_len
= target_ifc_len
;
5074 /* restore target ifc_buf */
5076 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
5078 /* copy struct ifconf to target user */
5080 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5082 return -TARGET_EFAULT
;
5083 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
5084 unlock_user(argptr
, arg
, target_size
);
5086 /* copy ifreq[] to target user */
5088 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
5089 for (i
= 0; i
< nb_ifreq
; i
++) {
5090 thunk_convert(argptr
+ i
* target_ifreq_size
,
5091 host_ifc_buf
+ i
* sizeof(struct ifreq
),
5092 ifreq_arg_type
, THUNK_TARGET
);
5094 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
5104 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5105 int cmd
, abi_long arg
)
5108 struct dm_ioctl
*host_dm
;
5109 abi_long guest_data
;
5110 uint32_t guest_data_size
;
5112 const argtype
*arg_type
= ie
->arg_type
;
5114 void *big_buf
= NULL
;
5118 target_size
= thunk_type_size(arg_type
, 0);
5119 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5121 ret
= -TARGET_EFAULT
;
5124 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5125 unlock_user(argptr
, arg
, 0);
5127 /* buf_temp is too small, so fetch things into a bigger buffer */
5128 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
5129 memcpy(big_buf
, buf_temp
, target_size
);
5133 guest_data
= arg
+ host_dm
->data_start
;
5134 if ((guest_data
- arg
) < 0) {
5135 ret
= -TARGET_EINVAL
;
5138 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5139 host_data
= (char*)host_dm
+ host_dm
->data_start
;
5141 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
5143 ret
= -TARGET_EFAULT
;
5147 switch (ie
->host_cmd
) {
5149 case DM_LIST_DEVICES
:
5152 case DM_DEV_SUSPEND
:
5155 case DM_TABLE_STATUS
:
5156 case DM_TABLE_CLEAR
:
5158 case DM_LIST_VERSIONS
:
5162 case DM_DEV_SET_GEOMETRY
:
5163 /* data contains only strings */
5164 memcpy(host_data
, argptr
, guest_data_size
);
5167 memcpy(host_data
, argptr
, guest_data_size
);
5168 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
5172 void *gspec
= argptr
;
5173 void *cur_data
= host_data
;
5174 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5175 int spec_size
= thunk_type_size(arg_type
, 0);
5178 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5179 struct dm_target_spec
*spec
= cur_data
;
5183 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
5184 slen
= strlen((char*)gspec
+ spec_size
) + 1;
5186 spec
->next
= sizeof(*spec
) + slen
;
5187 strcpy((char*)&spec
[1], gspec
+ spec_size
);
5189 cur_data
+= spec
->next
;
5194 ret
= -TARGET_EINVAL
;
5195 unlock_user(argptr
, guest_data
, 0);
5198 unlock_user(argptr
, guest_data
, 0);
5200 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5201 if (!is_error(ret
)) {
5202 guest_data
= arg
+ host_dm
->data_start
;
5203 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5204 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
5205 switch (ie
->host_cmd
) {
5210 case DM_DEV_SUSPEND
:
5213 case DM_TABLE_CLEAR
:
5215 case DM_DEV_SET_GEOMETRY
:
5216 /* no return data */
5218 case DM_LIST_DEVICES
:
5220 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
5221 uint32_t remaining_data
= guest_data_size
;
5222 void *cur_data
= argptr
;
5223 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
5224 int nl_size
= 12; /* can't use thunk_size due to alignment */
5227 uint32_t next
= nl
->next
;
5229 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
5231 if (remaining_data
< nl
->next
) {
5232 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5235 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
5236 strcpy(cur_data
+ nl_size
, nl
->name
);
5237 cur_data
+= nl
->next
;
5238 remaining_data
-= nl
->next
;
5242 nl
= (void*)nl
+ next
;
5247 case DM_TABLE_STATUS
:
5249 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
5250 void *cur_data
= argptr
;
5251 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5252 int spec_size
= thunk_type_size(arg_type
, 0);
5255 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5256 uint32_t next
= spec
->next
;
5257 int slen
= strlen((char*)&spec
[1]) + 1;
5258 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
5259 if (guest_data_size
< spec
->next
) {
5260 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5263 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
5264 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
5265 cur_data
= argptr
+ spec
->next
;
5266 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
5272 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
5273 int count
= *(uint32_t*)hdata
;
5274 uint64_t *hdev
= hdata
+ 8;
5275 uint64_t *gdev
= argptr
+ 8;
5278 *(uint32_t*)argptr
= tswap32(count
);
5279 for (i
= 0; i
< count
; i
++) {
5280 *gdev
= tswap64(*hdev
);
5286 case DM_LIST_VERSIONS
:
5288 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
5289 uint32_t remaining_data
= guest_data_size
;
5290 void *cur_data
= argptr
;
5291 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
5292 int vers_size
= thunk_type_size(arg_type
, 0);
5295 uint32_t next
= vers
->next
;
5297 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
5299 if (remaining_data
< vers
->next
) {
5300 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5303 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
5304 strcpy(cur_data
+ vers_size
, vers
->name
);
5305 cur_data
+= vers
->next
;
5306 remaining_data
-= vers
->next
;
5310 vers
= (void*)vers
+ next
;
5315 unlock_user(argptr
, guest_data
, 0);
5316 ret
= -TARGET_EINVAL
;
5319 unlock_user(argptr
, guest_data
, guest_data_size
);
5321 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5323 ret
= -TARGET_EFAULT
;
5326 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5327 unlock_user(argptr
, arg
, target_size
);
5334 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5335 int cmd
, abi_long arg
)
5339 const argtype
*arg_type
= ie
->arg_type
;
5340 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
5343 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
5344 struct blkpg_partition host_part
;
5346 /* Read and convert blkpg */
5348 target_size
= thunk_type_size(arg_type
, 0);
5349 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5351 ret
= -TARGET_EFAULT
;
5354 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5355 unlock_user(argptr
, arg
, 0);
5357 switch (host_blkpg
->op
) {
5358 case BLKPG_ADD_PARTITION
:
5359 case BLKPG_DEL_PARTITION
:
5360 /* payload is struct blkpg_partition */
5363 /* Unknown opcode */
5364 ret
= -TARGET_EINVAL
;
5368 /* Read and convert blkpg->data */
5369 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
5370 target_size
= thunk_type_size(part_arg_type
, 0);
5371 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5373 ret
= -TARGET_EFAULT
;
5376 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
5377 unlock_user(argptr
, arg
, 0);
5379 /* Swizzle the data pointer to our local copy and call! */
5380 host_blkpg
->data
= &host_part
;
5381 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
5387 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5388 int fd
, int cmd
, abi_long arg
)
5390 const argtype
*arg_type
= ie
->arg_type
;
5391 const StructEntry
*se
;
5392 const argtype
*field_types
;
5393 const int *dst_offsets
, *src_offsets
;
5396 abi_ulong
*target_rt_dev_ptr
;
5397 unsigned long *host_rt_dev_ptr
;
5401 assert(ie
->access
== IOC_W
);
5402 assert(*arg_type
== TYPE_PTR
);
5404 assert(*arg_type
== TYPE_STRUCT
);
5405 target_size
= thunk_type_size(arg_type
, 0);
5406 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5408 return -TARGET_EFAULT
;
5411 assert(*arg_type
== (int)STRUCT_rtentry
);
5412 se
= struct_entries
+ *arg_type
++;
5413 assert(se
->convert
[0] == NULL
);
5414 /* convert struct here to be able to catch rt_dev string */
5415 field_types
= se
->field_types
;
5416 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
5417 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
5418 for (i
= 0; i
< se
->nb_fields
; i
++) {
5419 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
5420 assert(*field_types
== TYPE_PTRVOID
);
5421 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
5422 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
5423 if (*target_rt_dev_ptr
!= 0) {
5424 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
5425 tswapal(*target_rt_dev_ptr
));
5426 if (!*host_rt_dev_ptr
) {
5427 unlock_user(argptr
, arg
, 0);
5428 return -TARGET_EFAULT
;
5431 *host_rt_dev_ptr
= 0;
5436 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
5437 argptr
+ src_offsets
[i
],
5438 field_types
, THUNK_HOST
);
5440 unlock_user(argptr
, arg
, 0);
5442 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5443 if (*host_rt_dev_ptr
!= 0) {
5444 unlock_user((void *)*host_rt_dev_ptr
,
5445 *target_rt_dev_ptr
, 0);
5450 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5451 int fd
, int cmd
, abi_long arg
)
5453 int sig
= target_to_host_signal(arg
);
5454 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
5457 static IOCTLEntry ioctl_entries
[] = {
5458 #define IOCTL(cmd, access, ...) \
5459 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5460 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5461 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5462 #define IOCTL_IGNORE(cmd) \
5463 { TARGET_ ## cmd, 0, #cmd },
5468 /* ??? Implement proper locking for ioctls. */
5469 /* do_ioctl() Must return target values and target errnos. */
5470 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
5472 const IOCTLEntry
*ie
;
5473 const argtype
*arg_type
;
5475 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
5481 if (ie
->target_cmd
== 0) {
5482 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
5483 return -TARGET_ENOSYS
;
5485 if (ie
->target_cmd
== cmd
)
5489 arg_type
= ie
->arg_type
;
5491 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
5494 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
5495 } else if (!ie
->host_cmd
) {
5496 /* Some architectures define BSD ioctls in their headers
5497 that are not implemented in Linux. */
5498 return -TARGET_ENOSYS
;
5501 switch(arg_type
[0]) {
5504 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
5508 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
5512 target_size
= thunk_type_size(arg_type
, 0);
5513 switch(ie
->access
) {
5515 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5516 if (!is_error(ret
)) {
5517 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5519 return -TARGET_EFAULT
;
5520 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5521 unlock_user(argptr
, arg
, target_size
);
5525 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5527 return -TARGET_EFAULT
;
5528 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5529 unlock_user(argptr
, arg
, 0);
5530 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5534 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5536 return -TARGET_EFAULT
;
5537 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5538 unlock_user(argptr
, arg
, 0);
5539 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5540 if (!is_error(ret
)) {
5541 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5543 return -TARGET_EFAULT
;
5544 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5545 unlock_user(argptr
, arg
, target_size
);
5551 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5552 (long)cmd
, arg_type
[0]);
5553 ret
= -TARGET_ENOSYS
;
5559 static const bitmask_transtbl iflag_tbl
[] = {
5560 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
5561 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
5562 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
5563 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
5564 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
5565 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
5566 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
5567 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
5568 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
5569 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
5570 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
5571 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
5572 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
5573 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
5577 static const bitmask_transtbl oflag_tbl
[] = {
5578 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
5579 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
5580 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
5581 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
5582 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
5583 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
5584 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
5585 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
5586 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
5587 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
5588 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
5589 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
5590 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
5591 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
5592 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
5593 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
5594 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
5595 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
5596 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
5597 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
5598 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
5599 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
5600 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
5601 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
5605 static const bitmask_transtbl cflag_tbl
[] = {
5606 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
5607 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
5608 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
5609 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
5610 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
5611 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
5612 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
5613 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
5614 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
5615 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
5616 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
5617 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
5618 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
5619 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
5620 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
5621 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
5622 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
5623 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
5624 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
5625 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
5626 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
5627 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
5628 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
5629 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
5630 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
5631 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
5632 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
5633 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
5634 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
5635 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
5636 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
5640 static const bitmask_transtbl lflag_tbl
[] = {
5641 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
5642 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
5643 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
5644 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
5645 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
5646 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
5647 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
5648 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
5649 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
5650 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
5651 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
5652 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
5653 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
5654 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
5655 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5659 static void target_to_host_termios (void *dst
, const void *src
)
5661 struct host_termios
*host
= dst
;
5662 const struct target_termios
*target
= src
;
5665 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
5667 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
5669 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
5671 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
5672 host
->c_line
= target
->c_line
;
5674 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
5675 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
5676 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
5677 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
5678 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
5679 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
5680 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
5681 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
5682 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
5683 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
5684 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
5685 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
5686 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
5687 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
5688 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
5689 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
5690 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
5691 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
5694 static void host_to_target_termios (void *dst
, const void *src
)
5696 struct target_termios
*target
= dst
;
5697 const struct host_termios
*host
= src
;
5700 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
5702 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
5704 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
5706 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
5707 target
->c_line
= host
->c_line
;
5709 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
5710 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
5711 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
5712 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
5713 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
5714 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
5715 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
5716 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
5717 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
5718 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
5719 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
5720 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
5721 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
5722 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
5723 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
5724 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
5725 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
5726 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
5729 static const StructEntry struct_termios_def
= {
5730 .convert
= { host_to_target_termios
, target_to_host_termios
},
5731 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
5732 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
5735 static bitmask_transtbl mmap_flags_tbl
[] = {
5736 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
5737 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
5738 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
5739 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
5740 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
5741 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
5742 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
5743 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
5744 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
, MAP_NORESERVE
,
5749 #if defined(TARGET_I386)
5751 /* NOTE: there is really one LDT for all the threads */
5752 static uint8_t *ldt_table
;
5754 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
5761 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
5762 if (size
> bytecount
)
5764 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
5766 return -TARGET_EFAULT
;
5767 /* ??? Should this by byteswapped? */
5768 memcpy(p
, ldt_table
, size
);
5769 unlock_user(p
, ptr
, size
);
5773 /* XXX: add locking support */
5774 static abi_long
write_ldt(CPUX86State
*env
,
5775 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
5777 struct target_modify_ldt_ldt_s ldt_info
;
5778 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5779 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5780 int seg_not_present
, useable
, lm
;
5781 uint32_t *lp
, entry_1
, entry_2
;
5783 if (bytecount
!= sizeof(ldt_info
))
5784 return -TARGET_EINVAL
;
5785 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
5786 return -TARGET_EFAULT
;
5787 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5788 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5789 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5790 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5791 unlock_user_struct(target_ldt_info
, ptr
, 0);
5793 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
5794 return -TARGET_EINVAL
;
5795 seg_32bit
= ldt_info
.flags
& 1;
5796 contents
= (ldt_info
.flags
>> 1) & 3;
5797 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5798 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5799 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5800 useable
= (ldt_info
.flags
>> 6) & 1;
5804 lm
= (ldt_info
.flags
>> 7) & 1;
5806 if (contents
== 3) {
5808 return -TARGET_EINVAL
;
5809 if (seg_not_present
== 0)
5810 return -TARGET_EINVAL
;
5812 /* allocate the LDT */
5814 env
->ldt
.base
= target_mmap(0,
5815 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
5816 PROT_READ
|PROT_WRITE
,
5817 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
5818 if (env
->ldt
.base
== -1)
5819 return -TARGET_ENOMEM
;
5820 memset(g2h(env
->ldt
.base
), 0,
5821 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
5822 env
->ldt
.limit
= 0xffff;
5823 ldt_table
= g2h(env
->ldt
.base
);
5826 /* NOTE: same code as Linux kernel */
5827 /* Allow LDTs to be cleared by the user. */
5828 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5831 read_exec_only
== 1 &&
5833 limit_in_pages
== 0 &&
5834 seg_not_present
== 1 &&
5842 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5843 (ldt_info
.limit
& 0x0ffff);
5844 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5845 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5846 (ldt_info
.limit
& 0xf0000) |
5847 ((read_exec_only
^ 1) << 9) |
5849 ((seg_not_present
^ 1) << 15) |
5851 (limit_in_pages
<< 23) |
5855 entry_2
|= (useable
<< 20);
5857 /* Install the new entry ... */
5859 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
5860 lp
[0] = tswap32(entry_1
);
5861 lp
[1] = tswap32(entry_2
);
5865 /* specific and weird i386 syscalls */
5866 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
5867 unsigned long bytecount
)
5873 ret
= read_ldt(ptr
, bytecount
);
5876 ret
= write_ldt(env
, ptr
, bytecount
, 1);
5879 ret
= write_ldt(env
, ptr
, bytecount
, 0);
5882 ret
= -TARGET_ENOSYS
;
5888 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5889 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5891 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5892 struct target_modify_ldt_ldt_s ldt_info
;
5893 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5894 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5895 int seg_not_present
, useable
, lm
;
5896 uint32_t *lp
, entry_1
, entry_2
;
5899 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5900 if (!target_ldt_info
)
5901 return -TARGET_EFAULT
;
5902 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5903 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5904 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5905 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5906 if (ldt_info
.entry_number
== -1) {
5907 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
5908 if (gdt_table
[i
] == 0) {
5909 ldt_info
.entry_number
= i
;
5910 target_ldt_info
->entry_number
= tswap32(i
);
5915 unlock_user_struct(target_ldt_info
, ptr
, 1);
5917 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
5918 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
5919 return -TARGET_EINVAL
;
5920 seg_32bit
= ldt_info
.flags
& 1;
5921 contents
= (ldt_info
.flags
>> 1) & 3;
5922 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5923 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5924 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5925 useable
= (ldt_info
.flags
>> 6) & 1;
5929 lm
= (ldt_info
.flags
>> 7) & 1;
5932 if (contents
== 3) {
5933 if (seg_not_present
== 0)
5934 return -TARGET_EINVAL
;
5937 /* NOTE: same code as Linux kernel */
5938 /* Allow LDTs to be cleared by the user. */
5939 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5940 if ((contents
== 0 &&
5941 read_exec_only
== 1 &&
5943 limit_in_pages
== 0 &&
5944 seg_not_present
== 1 &&
5952 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5953 (ldt_info
.limit
& 0x0ffff);
5954 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5955 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5956 (ldt_info
.limit
& 0xf0000) |
5957 ((read_exec_only
^ 1) << 9) |
5959 ((seg_not_present
^ 1) << 15) |
5961 (limit_in_pages
<< 23) |
5966 /* Install the new entry ... */
5968 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
5969 lp
[0] = tswap32(entry_1
);
5970 lp
[1] = tswap32(entry_2
);
5974 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5976 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5977 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5978 uint32_t base_addr
, limit
, flags
;
5979 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
5980 int seg_not_present
, useable
, lm
;
5981 uint32_t *lp
, entry_1
, entry_2
;
5983 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5984 if (!target_ldt_info
)
5985 return -TARGET_EFAULT
;
5986 idx
= tswap32(target_ldt_info
->entry_number
);
5987 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
5988 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
5989 unlock_user_struct(target_ldt_info
, ptr
, 1);
5990 return -TARGET_EINVAL
;
5992 lp
= (uint32_t *)(gdt_table
+ idx
);
5993 entry_1
= tswap32(lp
[0]);
5994 entry_2
= tswap32(lp
[1]);
5996 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
5997 contents
= (entry_2
>> 10) & 3;
5998 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
5999 seg_32bit
= (entry_2
>> 22) & 1;
6000 limit_in_pages
= (entry_2
>> 23) & 1;
6001 useable
= (entry_2
>> 20) & 1;
6005 lm
= (entry_2
>> 21) & 1;
6007 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
6008 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
6009 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
6010 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
6011 base_addr
= (entry_1
>> 16) |
6012 (entry_2
& 0xff000000) |
6013 ((entry_2
& 0xff) << 16);
6014 target_ldt_info
->base_addr
= tswapal(base_addr
);
6015 target_ldt_info
->limit
= tswap32(limit
);
6016 target_ldt_info
->flags
= tswap32(flags
);
6017 unlock_user_struct(target_ldt_info
, ptr
, 1);
6020 #endif /* TARGET_I386 && TARGET_ABI32 */
6022 #ifndef TARGET_ABI32
6023 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
6030 case TARGET_ARCH_SET_GS
:
6031 case TARGET_ARCH_SET_FS
:
6032 if (code
== TARGET_ARCH_SET_GS
)
6036 cpu_x86_load_seg(env
, idx
, 0);
6037 env
->segs
[idx
].base
= addr
;
6039 case TARGET_ARCH_GET_GS
:
6040 case TARGET_ARCH_GET_FS
:
6041 if (code
== TARGET_ARCH_GET_GS
)
6045 val
= env
->segs
[idx
].base
;
6046 if (put_user(val
, addr
, abi_ulong
))
6047 ret
= -TARGET_EFAULT
;
6050 ret
= -TARGET_EINVAL
;
6057 #endif /* defined(TARGET_I386) */
6059 #define NEW_STACK_SIZE 0x40000
6062 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
6065 pthread_mutex_t mutex
;
6066 pthread_cond_t cond
;
6069 abi_ulong child_tidptr
;
6070 abi_ulong parent_tidptr
;
6074 static void *clone_func(void *arg
)
6076 new_thread_info
*info
= arg
;
6081 rcu_register_thread();
6083 cpu
= ENV_GET_CPU(env
);
6085 ts
= (TaskState
*)cpu
->opaque
;
6086 info
->tid
= gettid();
6087 cpu
->host_tid
= info
->tid
;
6089 if (info
->child_tidptr
)
6090 put_user_u32(info
->tid
, info
->child_tidptr
);
6091 if (info
->parent_tidptr
)
6092 put_user_u32(info
->tid
, info
->parent_tidptr
);
6093 /* Enable signals. */
6094 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
6095 /* Signal to the parent that we're ready. */
6096 pthread_mutex_lock(&info
->mutex
);
6097 pthread_cond_broadcast(&info
->cond
);
6098 pthread_mutex_unlock(&info
->mutex
);
6099 /* Wait until the parent has finshed initializing the tls state. */
6100 pthread_mutex_lock(&clone_lock
);
6101 pthread_mutex_unlock(&clone_lock
);
6107 /* do_fork() Must return host values and target errnos (unlike most
6108 do_*() functions). */
6109 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
6110 abi_ulong parent_tidptr
, target_ulong newtls
,
6111 abi_ulong child_tidptr
)
6113 CPUState
*cpu
= ENV_GET_CPU(env
);
6117 CPUArchState
*new_env
;
6120 flags
&= ~CLONE_IGNORED_FLAGS
;
6122 /* Emulate vfork() with fork() */
6123 if (flags
& CLONE_VFORK
)
6124 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
6126 if (flags
& CLONE_VM
) {
6127 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
6128 new_thread_info info
;
6129 pthread_attr_t attr
;
6131 if (((flags
& CLONE_THREAD_FLAGS
) != CLONE_THREAD_FLAGS
) ||
6132 (flags
& CLONE_INVALID_THREAD_FLAGS
)) {
6133 return -TARGET_EINVAL
;
6136 ts
= g_new0(TaskState
, 1);
6137 init_task_state(ts
);
6138 /* we create a new CPU instance. */
6139 new_env
= cpu_copy(env
);
6140 /* Init regs that differ from the parent. */
6141 cpu_clone_regs(new_env
, newsp
);
6142 new_cpu
= ENV_GET_CPU(new_env
);
6143 new_cpu
->opaque
= ts
;
6144 ts
->bprm
= parent_ts
->bprm
;
6145 ts
->info
= parent_ts
->info
;
6146 ts
->signal_mask
= parent_ts
->signal_mask
;
6148 if (flags
& CLONE_CHILD_CLEARTID
) {
6149 ts
->child_tidptr
= child_tidptr
;
6152 if (flags
& CLONE_SETTLS
) {
6153 cpu_set_tls (new_env
, newtls
);
6156 /* Grab a mutex so that thread setup appears atomic. */
6157 pthread_mutex_lock(&clone_lock
);
6159 memset(&info
, 0, sizeof(info
));
6160 pthread_mutex_init(&info
.mutex
, NULL
);
6161 pthread_mutex_lock(&info
.mutex
);
6162 pthread_cond_init(&info
.cond
, NULL
);
6164 if (flags
& CLONE_CHILD_SETTID
) {
6165 info
.child_tidptr
= child_tidptr
;
6167 if (flags
& CLONE_PARENT_SETTID
) {
6168 info
.parent_tidptr
= parent_tidptr
;
6171 ret
= pthread_attr_init(&attr
);
6172 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
6173 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
6174 /* It is not safe to deliver signals until the child has finished
6175 initializing, so temporarily block all signals. */
6176 sigfillset(&sigmask
);
6177 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
6179 /* If this is our first additional thread, we need to ensure we
6180 * generate code for parallel execution and flush old translations.
6182 if (!parallel_cpus
) {
6183 parallel_cpus
= true;
6187 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
6188 /* TODO: Free new CPU state if thread creation failed. */
6190 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
6191 pthread_attr_destroy(&attr
);
6193 /* Wait for the child to initialize. */
6194 pthread_cond_wait(&info
.cond
, &info
.mutex
);
6199 pthread_mutex_unlock(&info
.mutex
);
6200 pthread_cond_destroy(&info
.cond
);
6201 pthread_mutex_destroy(&info
.mutex
);
6202 pthread_mutex_unlock(&clone_lock
);
6204 /* if no CLONE_VM, we consider it is a fork */
6205 if (flags
& CLONE_INVALID_FORK_FLAGS
) {
6206 return -TARGET_EINVAL
;
6209 /* We can't support custom termination signals */
6210 if ((flags
& CSIGNAL
) != TARGET_SIGCHLD
) {
6211 return -TARGET_EINVAL
;
6214 if (block_signals()) {
6215 return -TARGET_ERESTARTSYS
;
6221 /* Child Process. */
6223 cpu_clone_regs(env
, newsp
);
6225 /* There is a race condition here. The parent process could
6226 theoretically read the TID in the child process before the child
6227 tid is set. This would require using either ptrace
6228 (not implemented) or having *_tidptr to point at a shared memory
6229 mapping. We can't repeat the spinlock hack used above because
6230 the child process gets its own copy of the lock. */
6231 if (flags
& CLONE_CHILD_SETTID
)
6232 put_user_u32(gettid(), child_tidptr
);
6233 if (flags
& CLONE_PARENT_SETTID
)
6234 put_user_u32(gettid(), parent_tidptr
);
6235 ts
= (TaskState
*)cpu
->opaque
;
6236 if (flags
& CLONE_SETTLS
)
6237 cpu_set_tls (env
, newtls
);
6238 if (flags
& CLONE_CHILD_CLEARTID
)
6239 ts
->child_tidptr
= child_tidptr
;
6247 /* warning : doesn't handle linux specific flags... */
6248 static int target_to_host_fcntl_cmd(int cmd
)
6251 case TARGET_F_DUPFD
:
6252 case TARGET_F_GETFD
:
6253 case TARGET_F_SETFD
:
6254 case TARGET_F_GETFL
:
6255 case TARGET_F_SETFL
:
6257 case TARGET_F_GETLK
:
6259 case TARGET_F_SETLK
:
6261 case TARGET_F_SETLKW
:
6263 case TARGET_F_GETOWN
:
6265 case TARGET_F_SETOWN
:
6267 case TARGET_F_GETSIG
:
6269 case TARGET_F_SETSIG
:
6271 #if TARGET_ABI_BITS == 32
6272 case TARGET_F_GETLK64
:
6274 case TARGET_F_SETLK64
:
6276 case TARGET_F_SETLKW64
:
6279 case TARGET_F_SETLEASE
:
6281 case TARGET_F_GETLEASE
:
6283 #ifdef F_DUPFD_CLOEXEC
6284 case TARGET_F_DUPFD_CLOEXEC
:
6285 return F_DUPFD_CLOEXEC
;
6287 case TARGET_F_NOTIFY
:
6290 case TARGET_F_GETOWN_EX
:
6294 case TARGET_F_SETOWN_EX
:
6298 case TARGET_F_SETPIPE_SZ
:
6299 return F_SETPIPE_SZ
;
6300 case TARGET_F_GETPIPE_SZ
:
6301 return F_GETPIPE_SZ
;
6304 return -TARGET_EINVAL
;
6306 return -TARGET_EINVAL
;
6309 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
6310 static const bitmask_transtbl flock_tbl
[] = {
6311 TRANSTBL_CONVERT(F_RDLCK
),
6312 TRANSTBL_CONVERT(F_WRLCK
),
6313 TRANSTBL_CONVERT(F_UNLCK
),
6314 TRANSTBL_CONVERT(F_EXLCK
),
6315 TRANSTBL_CONVERT(F_SHLCK
),
6319 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
6320 abi_ulong target_flock_addr
)
6322 struct target_flock
*target_fl
;
6325 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6326 return -TARGET_EFAULT
;
6329 __get_user(l_type
, &target_fl
->l_type
);
6330 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6331 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6332 __get_user(fl
->l_start
, &target_fl
->l_start
);
6333 __get_user(fl
->l_len
, &target_fl
->l_len
);
6334 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6335 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6339 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
6340 const struct flock64
*fl
)
6342 struct target_flock
*target_fl
;
6345 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6346 return -TARGET_EFAULT
;
6349 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6350 __put_user(l_type
, &target_fl
->l_type
);
6351 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6352 __put_user(fl
->l_start
, &target_fl
->l_start
);
6353 __put_user(fl
->l_len
, &target_fl
->l_len
);
6354 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6355 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6359 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
6360 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
6362 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6363 static inline abi_long
copy_from_user_eabi_flock64(struct flock64
*fl
,
6364 abi_ulong target_flock_addr
)
6366 struct target_eabi_flock64
*target_fl
;
6369 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6370 return -TARGET_EFAULT
;
6373 __get_user(l_type
, &target_fl
->l_type
);
6374 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6375 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6376 __get_user(fl
->l_start
, &target_fl
->l_start
);
6377 __get_user(fl
->l_len
, &target_fl
->l_len
);
6378 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6379 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6383 static inline abi_long
copy_to_user_eabi_flock64(abi_ulong target_flock_addr
,
6384 const struct flock64
*fl
)
6386 struct target_eabi_flock64
*target_fl
;
6389 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6390 return -TARGET_EFAULT
;
6393 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6394 __put_user(l_type
, &target_fl
->l_type
);
6395 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6396 __put_user(fl
->l_start
, &target_fl
->l_start
);
6397 __put_user(fl
->l_len
, &target_fl
->l_len
);
6398 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6399 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6404 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
6405 abi_ulong target_flock_addr
)
6407 struct target_flock64
*target_fl
;
6410 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6411 return -TARGET_EFAULT
;
6414 __get_user(l_type
, &target_fl
->l_type
);
6415 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6416 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6417 __get_user(fl
->l_start
, &target_fl
->l_start
);
6418 __get_user(fl
->l_len
, &target_fl
->l_len
);
6419 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6420 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6424 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
6425 const struct flock64
*fl
)
6427 struct target_flock64
*target_fl
;
6430 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6431 return -TARGET_EFAULT
;
6434 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6435 __put_user(l_type
, &target_fl
->l_type
);
6436 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6437 __put_user(fl
->l_start
, &target_fl
->l_start
);
6438 __put_user(fl
->l_len
, &target_fl
->l_len
);
6439 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6440 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6444 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
6446 struct flock64 fl64
;
6448 struct f_owner_ex fox
;
6449 struct target_f_owner_ex
*target_fox
;
6452 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
6454 if (host_cmd
== -TARGET_EINVAL
)
6458 case TARGET_F_GETLK
:
6459 ret
= copy_from_user_flock(&fl64
, arg
);
6463 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6465 ret
= copy_to_user_flock(arg
, &fl64
);
6469 case TARGET_F_SETLK
:
6470 case TARGET_F_SETLKW
:
6471 ret
= copy_from_user_flock(&fl64
, arg
);
6475 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6478 case TARGET_F_GETLK64
:
6479 ret
= copy_from_user_flock64(&fl64
, arg
);
6483 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6485 ret
= copy_to_user_flock64(arg
, &fl64
);
6488 case TARGET_F_SETLK64
:
6489 case TARGET_F_SETLKW64
:
6490 ret
= copy_from_user_flock64(&fl64
, arg
);
6494 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6497 case TARGET_F_GETFL
:
6498 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6500 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
6504 case TARGET_F_SETFL
:
6505 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
6506 target_to_host_bitmask(arg
,
6511 case TARGET_F_GETOWN_EX
:
6512 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6514 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
6515 return -TARGET_EFAULT
;
6516 target_fox
->type
= tswap32(fox
.type
);
6517 target_fox
->pid
= tswap32(fox
.pid
);
6518 unlock_user_struct(target_fox
, arg
, 1);
6524 case TARGET_F_SETOWN_EX
:
6525 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
6526 return -TARGET_EFAULT
;
6527 fox
.type
= tswap32(target_fox
->type
);
6528 fox
.pid
= tswap32(target_fox
->pid
);
6529 unlock_user_struct(target_fox
, arg
, 0);
6530 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6534 case TARGET_F_SETOWN
:
6535 case TARGET_F_GETOWN
:
6536 case TARGET_F_SETSIG
:
6537 case TARGET_F_GETSIG
:
6538 case TARGET_F_SETLEASE
:
6539 case TARGET_F_GETLEASE
:
6540 case TARGET_F_SETPIPE_SZ
:
6541 case TARGET_F_GETPIPE_SZ
:
6542 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6546 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
#ifdef USE_UID16

/* Helpers for guests with 16-bit uid_t/gid_t: clamp 32-bit host ids
 * into the 16-bit range and widen 16-bit guest ids, treating the
 * (uid16_t)-1 "no change" sentinel specially.
 */
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}

static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */

/* 32-bit uid guests: ids pass through unchanged. */
static inline int high2lowuid(int uid)
{
    return uid;
}

static inline int high2lowgid(int gid)
{
    return gid;
}

static inline int low2highuid(int uid)
{
    return uid;
}

static inline int low2highgid(int gid)
{
    return gid;
}

static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
6618 /* We must do direct syscalls for setting UID/GID, because we want to
6619 * implement the Linux system call semantics of "change only for this thread",
6620 * not the libc/POSIX semantics of "change for all threads in process".
6621 * (See http://ewontfix.com/17/ for more details.)
6622 * We use the 32-bit version of the syscalls if present; if it is not
6623 * then either the host architecture supports 32-bit UIDs natively with
6624 * the standard syscall, or the 16-bit UID is the best we can do.
6626 #ifdef __NR_setuid32
6627 #define __NR_sys_setuid __NR_setuid32
6629 #define __NR_sys_setuid __NR_setuid
6631 #ifdef __NR_setgid32
6632 #define __NR_sys_setgid __NR_setgid32
6634 #define __NR_sys_setgid __NR_setgid
6636 #ifdef __NR_setresuid32
6637 #define __NR_sys_setresuid __NR_setresuid32
6639 #define __NR_sys_setresuid __NR_setresuid
6641 #ifdef __NR_setresgid32
6642 #define __NR_sys_setresgid __NR_setresgid32
6644 #define __NR_sys_setresgid __NR_setresgid
6647 _syscall1(int, sys_setuid
, uid_t
, uid
)
6648 _syscall1(int, sys_setgid
, gid_t
, gid
)
6649 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
6650 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
6652 void syscall_init(void)
6655 const argtype
*arg_type
;
6659 thunk_init(STRUCT_MAX
);
6661 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6662 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6663 #include "syscall_types.h"
6665 #undef STRUCT_SPECIAL
6667 /* Build target_to_host_errno_table[] table from
6668 * host_to_target_errno_table[]. */
6669 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
6670 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
6673 /* we patch the ioctl size if necessary. We rely on the fact that
6674 no ioctl has all the bits at '1' in the size field */
6676 while (ie
->target_cmd
!= 0) {
6677 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
6678 TARGET_IOC_SIZEMASK
) {
6679 arg_type
= ie
->arg_type
;
6680 if (arg_type
[0] != TYPE_PTR
) {
6681 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
6686 size
= thunk_type_size(arg_type
, 0);
6687 ie
->target_cmd
= (ie
->target_cmd
&
6688 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
6689 (size
<< TARGET_IOC_SIZESHIFT
);
6692 /* automatic consistency check if same arch */
6693 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6694 (defined(__x86_64__) && defined(TARGET_X86_64))
6695 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
6696 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6697 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#if TARGET_ABI_BITS == 32
/* Assemble a 64-bit file offset from the two 32-bit register halves a
 * 32-bit guest passes; which half is high depends on guest endianness.
 */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit guests pass the offset in a single register; the second
 * argument is ignored. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* truncate64: on ABIs that align 64-bit register pairs, the pair is
 * shifted up one argument slot, so re-align before combining.
 */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair re-alignment as target_truncate64. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
6748 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
6749 abi_ulong target_addr
)
6751 struct target_timespec
*target_ts
;
6753 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
6754 return -TARGET_EFAULT
;
6755 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6756 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6757 unlock_user_struct(target_ts
, target_addr
, 0);
6761 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
6762 struct timespec
*host_ts
)
6764 struct target_timespec
*target_ts
;
6766 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
6767 return -TARGET_EFAULT
;
6768 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6769 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6770 unlock_user_struct(target_ts
, target_addr
, 1);
6774 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
6775 abi_ulong target_addr
)
6777 struct target_itimerspec
*target_itspec
;
6779 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
6780 return -TARGET_EFAULT
;
6783 host_itspec
->it_interval
.tv_sec
=
6784 tswapal(target_itspec
->it_interval
.tv_sec
);
6785 host_itspec
->it_interval
.tv_nsec
=
6786 tswapal(target_itspec
->it_interval
.tv_nsec
);
6787 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
6788 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
6790 unlock_user_struct(target_itspec
, target_addr
, 1);
6794 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
6795 struct itimerspec
*host_its
)
6797 struct target_itimerspec
*target_itspec
;
6799 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
6800 return -TARGET_EFAULT
;
6803 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
6804 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
6806 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
6807 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
6809 unlock_user_struct(target_itspec
, target_addr
, 0);
6813 static inline abi_long
target_to_host_timex(struct timex
*host_tx
,
6814 abi_long target_addr
)
6816 struct target_timex
*target_tx
;
6818 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
6819 return -TARGET_EFAULT
;
6822 __get_user(host_tx
->modes
, &target_tx
->modes
);
6823 __get_user(host_tx
->offset
, &target_tx
->offset
);
6824 __get_user(host_tx
->freq
, &target_tx
->freq
);
6825 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6826 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
6827 __get_user(host_tx
->status
, &target_tx
->status
);
6828 __get_user(host_tx
->constant
, &target_tx
->constant
);
6829 __get_user(host_tx
->precision
, &target_tx
->precision
);
6830 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6831 __get_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6832 __get_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6833 __get_user(host_tx
->tick
, &target_tx
->tick
);
6834 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6835 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
6836 __get_user(host_tx
->shift
, &target_tx
->shift
);
6837 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
6838 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
6839 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
6840 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6841 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6842 __get_user(host_tx
->tai
, &target_tx
->tai
);
6844 unlock_user_struct(target_tx
, target_addr
, 0);
6848 static inline abi_long
host_to_target_timex(abi_long target_addr
,
6849 struct timex
*host_tx
)
6851 struct target_timex
*target_tx
;
6853 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
6854 return -TARGET_EFAULT
;
6857 __put_user(host_tx
->modes
, &target_tx
->modes
);
6858 __put_user(host_tx
->offset
, &target_tx
->offset
);
6859 __put_user(host_tx
->freq
, &target_tx
->freq
);
6860 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6861 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
6862 __put_user(host_tx
->status
, &target_tx
->status
);
6863 __put_user(host_tx
->constant
, &target_tx
->constant
);
6864 __put_user(host_tx
->precision
, &target_tx
->precision
);
6865 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6866 __put_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6867 __put_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6868 __put_user(host_tx
->tick
, &target_tx
->tick
);
6869 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6870 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
6871 __put_user(host_tx
->shift
, &target_tx
->shift
);
6872 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
6873 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
6874 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
6875 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6876 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6877 __put_user(host_tx
->tai
, &target_tx
->tai
);
6879 unlock_user_struct(target_tx
, target_addr
, 1);
6884 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
6885 abi_ulong target_addr
)
6887 struct target_sigevent
*target_sevp
;
6889 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
6890 return -TARGET_EFAULT
;
6893 /* This union is awkward on 64 bit systems because it has a 32 bit
6894 * integer and a pointer in it; we follow the conversion approach
6895 * used for handling sigval types in signal.c so the guest should get
6896 * the correct value back even if we did a 64 bit byteswap and it's
6897 * using the 32 bit integer.
6899 host_sevp
->sigev_value
.sival_ptr
=
6900 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
6901 host_sevp
->sigev_signo
=
6902 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
6903 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
6904 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
6906 unlock_user_struct(target_sevp
, target_addr
, 1);
#if defined(TARGET_NR_mlockall)
/* Translate the guest's mlockall() flag bits to the host's MCL_* bits. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }
    return result;
}
#endif
6925 static inline abi_long
host_to_target_stat64(void *cpu_env
,
6926 abi_ulong target_addr
,
6927 struct stat
*host_st
)
6929 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6930 if (((CPUARMState
*)cpu_env
)->eabi
) {
6931 struct target_eabi_stat64
*target_st
;
6933 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
6934 return -TARGET_EFAULT
;
6935 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
6936 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
6937 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
6938 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6939 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
6941 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
6942 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
6943 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
6944 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
6945 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
6946 __put_user(host_st
->st_size
, &target_st
->st_size
);
6947 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
6948 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
6949 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
6950 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
6951 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
6952 unlock_user_struct(target_st
, target_addr
, 1);
6956 #if defined(TARGET_HAS_STRUCT_STAT64)
6957 struct target_stat64
*target_st
;
6959 struct target_stat
*target_st
;
6962 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
6963 return -TARGET_EFAULT
;
6964 memset(target_st
, 0, sizeof(*target_st
));
6965 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
6966 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
6967 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6968 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
6970 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
6971 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
6972 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
6973 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
6974 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
6975 /* XXX: better use of kernel struct */
6976 __put_user(host_st
->st_size
, &target_st
->st_size
);
6977 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
6978 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
6979 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
6980 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
6981 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
6982 unlock_user_struct(target_st
, target_addr
, 1);
6988 /* ??? Using host futex calls even when target atomic operations
6989 are not really atomic probably breaks things. However implementing
6990 futexes locally would make futexes shared between multiple processes
6991 tricky. However they're probably useless because guest atomic
6992 operations won't work either. */
6993 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
6994 target_ulong uaddr2
, int val3
)
6996 struct timespec ts
, *pts
;
6999 /* ??? We assume FUTEX_* constants are the same on both host
7001 #ifdef FUTEX_CMD_MASK
7002 base_op
= op
& FUTEX_CMD_MASK
;
7008 case FUTEX_WAIT_BITSET
:
7011 target_to_host_timespec(pts
, timeout
);
7015 return get_errno(safe_futex(g2h(uaddr
), op
, tswap32(val
),
7018 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
7020 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
7022 case FUTEX_CMP_REQUEUE
:
7024 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7025 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7026 But the prototype takes a `struct timespec *'; insert casts
7027 to satisfy the compiler. We do not need to tswap TIMEOUT
7028 since it's not compared to guest memory. */
7029 pts
= (struct timespec
*)(uintptr_t) timeout
;
7030 return get_errno(safe_futex(g2h(uaddr
), op
, val
, pts
,
7032 (base_op
== FUTEX_CMP_REQUEUE
7036 return -TARGET_ENOSYS
;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate name_to_handle_at(2): read the guest's handle_bytes, call the
 * host syscall into a scratch handle, and copy the (opaque) result back
 * with the header fields byteswapped.  Returns host result / -TARGET_EFAULT.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate open_by_handle_at(2): duplicate the guest handle, fix up the
 * header fields for the host, and translate the open flags.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
7127 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7129 /* signalfd siginfo conversion */
7132 host_to_target_signalfd_siginfo(struct signalfd_siginfo
*tinfo
,
7133 const struct signalfd_siginfo
*info
)
7135 int sig
= host_to_target_signal(info
->ssi_signo
);
7137 /* linux/signalfd.h defines a ssi_addr_lsb
7138 * not defined in sys/signalfd.h but used by some kernels
7141 #ifdef BUS_MCEERR_AO
7142 if (tinfo
->ssi_signo
== SIGBUS
&&
7143 (tinfo
->ssi_code
== BUS_MCEERR_AR
||
7144 tinfo
->ssi_code
== BUS_MCEERR_AO
)) {
7145 uint16_t *ssi_addr_lsb
= (uint16_t *)(&info
->ssi_addr
+ 1);
7146 uint16_t *tssi_addr_lsb
= (uint16_t *)(&tinfo
->ssi_addr
+ 1);
7147 *tssi_addr_lsb
= tswap16(*ssi_addr_lsb
);
7151 tinfo
->ssi_signo
= tswap32(sig
);
7152 tinfo
->ssi_errno
= tswap32(tinfo
->ssi_errno
);
7153 tinfo
->ssi_code
= tswap32(info
->ssi_code
);
7154 tinfo
->ssi_pid
= tswap32(info
->ssi_pid
);
7155 tinfo
->ssi_uid
= tswap32(info
->ssi_uid
);
7156 tinfo
->ssi_fd
= tswap32(info
->ssi_fd
);
7157 tinfo
->ssi_tid
= tswap32(info
->ssi_tid
);
7158 tinfo
->ssi_band
= tswap32(info
->ssi_band
);
7159 tinfo
->ssi_overrun
= tswap32(info
->ssi_overrun
);
7160 tinfo
->ssi_trapno
= tswap32(info
->ssi_trapno
);
7161 tinfo
->ssi_status
= tswap32(info
->ssi_status
);
7162 tinfo
->ssi_int
= tswap32(info
->ssi_int
);
7163 tinfo
->ssi_ptr
= tswap64(info
->ssi_ptr
);
7164 tinfo
->ssi_utime
= tswap64(info
->ssi_utime
);
7165 tinfo
->ssi_stime
= tswap64(info
->ssi_stime
);
7166 tinfo
->ssi_addr
= tswap64(info
->ssi_addr
);
7169 static abi_long
host_to_target_data_signalfd(void *buf
, size_t len
)
7173 for (i
= 0; i
< len
; i
+= sizeof(struct signalfd_siginfo
)) {
7174 host_to_target_signalfd_siginfo(buf
+ i
, buf
+ i
);
7180 static TargetFdTrans target_signalfd_trans
= {
7181 .host_to_target_data
= host_to_target_data_signalfd
,
7184 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
7187 target_sigset_t
*target_mask
;
7191 if (flags
& ~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
)) {
7192 return -TARGET_EINVAL
;
7194 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
7195 return -TARGET_EFAULT
;
7198 target_to_host_sigset(&host_mask
, target_mask
);
7200 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
7202 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
7204 fd_trans_register(ret
, &target_signalfd_trans
);
7207 unlock_user_struct(target_mask
, mask
, 0);
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
/* Emulate reading /proc/self/cmdline for the guest: stream the host
 * file into fd, dropping the first NUL-terminated word (the qemu
 * binary path) so the guest sees its own argv.
 * NOTE(review): restored from mangled source; verify error paths
 * against upstream.
 */
static int open_self_cmdline(void *cpu_env, int fd)
{
    int fd_orig = -1;
    bool word_skipped = false;

    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;
    }

    while (true) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            int e = errno;
            fd_orig = close(fd_orig);
            errno = e;
            return -1;
        } else if (nb_read == 0) {
            break;
        }

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command. */
            cp_buf = memchr(buf, 0, nb_read);
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;
            }
        }

        if (word_skipped) {
            if (write(fd, cp_buf, nb_read) != nb_read) {
                int e = errno;
                close(fd_orig);
                errno = e;
                return -1;
            }
        }
    }

    return close(fd_orig);
}
7277 static int open_self_maps(void *cpu_env
, int fd
)
7279 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7280 TaskState
*ts
= cpu
->opaque
;
7286 fp
= fopen("/proc/self/maps", "r");
7291 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7292 int fields
, dev_maj
, dev_min
, inode
;
7293 uint64_t min
, max
, offset
;
7294 char flag_r
, flag_w
, flag_x
, flag_p
;
7295 char path
[512] = "";
7296 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
7297 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
7298 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
7300 if ((fields
< 10) || (fields
> 11)) {
7303 if (h2g_valid(min
)) {
7304 int flags
= page_get_flags(h2g(min
));
7305 max
= h2g_valid(max
- 1) ? max
: (uintptr_t)g2h(GUEST_ADDR_MAX
);
7306 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
7309 if (h2g(min
) == ts
->info
->stack_limit
) {
7310 pstrcpy(path
, sizeof(path
), " [stack]");
7312 dprintf(fd
, TARGET_ABI_FMT_lx
"-" TARGET_ABI_FMT_lx
7313 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
7314 h2g(min
), h2g(max
- 1) + 1, flag_r
, flag_w
,
7315 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
7316 path
[0] ? " " : "", path
);
7326 static int open_self_stat(void *cpu_env
, int fd
)
7328 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7329 TaskState
*ts
= cpu
->opaque
;
7330 abi_ulong start_stack
= ts
->info
->start_stack
;
7333 for (i
= 0; i
< 44; i
++) {
7341 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
7342 } else if (i
== 1) {
7344 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
7345 } else if (i
== 27) {
7348 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
7350 /* for the rest, there is MasterCard */
7351 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
7355 if (write(fd
, buf
, len
) != len
) {
7363 static int open_self_auxv(void *cpu_env
, int fd
)
7365 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7366 TaskState
*ts
= cpu
->opaque
;
7367 abi_ulong auxv
= ts
->info
->saved_auxv
;
7368 abi_ulong len
= ts
->info
->auxv_len
;
7372 * Auxiliary vector is stored in target process stack.
7373 * read in whole auxv vector and copy it to file
7375 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
7379 r
= write(fd
, ptr
, len
);
7386 lseek(fd
, 0, SEEK_SET
);
7387 unlock_user(ptr
, auxv
, len
);
7393 static int is_proc_myself(const char *filename
, const char *entry
)
7395 if (!strncmp(filename
, "/proc/", strlen("/proc/"))) {
7396 filename
+= strlen("/proc/");
7397 if (!strncmp(filename
, "self/", strlen("self/"))) {
7398 filename
+= strlen("self/");
7399 } else if (*filename
>= '1' && *filename
<= '9') {
7401 snprintf(myself
, sizeof(myself
), "%d/", getpid());
7402 if (!strncmp(filename
, myself
, strlen(myself
))) {
7403 filename
+= strlen(myself
);
7410 if (!strcmp(filename
, entry
)) {
7417 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7418 static int is_proc(const char *filename
, const char *entry
)
7420 return strcmp(filename
, entry
) == 0;
7423 static int open_net_route(void *cpu_env
, int fd
)
7430 fp
= fopen("/proc/net/route", "r");
7437 read
= getline(&line
, &len
, fp
);
7438 dprintf(fd
, "%s", line
);
7442 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7444 uint32_t dest
, gw
, mask
;
7445 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
7446 sscanf(line
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7447 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
7448 &mask
, &mtu
, &window
, &irtt
);
7449 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7450 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
7451 metric
, tswap32(mask
), mtu
, window
, irtt
);
7461 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
7464 const char *filename
;
7465 int (*fill
)(void *cpu_env
, int fd
);
7466 int (*cmp
)(const char *s1
, const char *s2
);
7468 const struct fake_open
*fake_open
;
7469 static const struct fake_open fakes
[] = {
7470 { "maps", open_self_maps
, is_proc_myself
},
7471 { "stat", open_self_stat
, is_proc_myself
},
7472 { "auxv", open_self_auxv
, is_proc_myself
},
7473 { "cmdline", open_self_cmdline
, is_proc_myself
},
7474 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7475 { "/proc/net/route", open_net_route
, is_proc
},
7477 { NULL
, NULL
, NULL
}
7480 if (is_proc_myself(pathname
, "exe")) {
7481 int execfd
= qemu_getauxval(AT_EXECFD
);
7482 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
7485 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
7486 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
7491 if (fake_open
->filename
) {
7493 char filename
[PATH_MAX
];
7496 /* create temporary file to map stat to */
7497 tmpdir
= getenv("TMPDIR");
7500 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
7501 fd
= mkstemp(filename
);
7507 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
7513 lseek(fd
, 0, SEEK_SET
);
7518 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
7521 #define TIMER_MAGIC 0x0caf0000
7522 #define TIMER_MAGIC_MASK 0xffff0000
7524 /* Convert QEMU provided timer ID back to internal 16bit index format */
7525 static target_timer_t
get_timer_id(abi_long arg
)
7527 target_timer_t timerid
= arg
;
7529 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
7530 return -TARGET_EINVAL
;
7535 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
7536 return -TARGET_EINVAL
;
7542 /* do_syscall() should always have a single exit point at the end so
7543 that actions, such as logging of syscall results, can be performed.
7544 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7545 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
7546 abi_long arg2
, abi_long arg3
, abi_long arg4
,
7547 abi_long arg5
, abi_long arg6
, abi_long arg7
,
7550 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
7556 #if defined(DEBUG_ERESTARTSYS)
7557 /* Debug-only code for exercising the syscall-restart code paths
7558 * in the per-architecture cpu main loops: restart every syscall
7559 * the guest makes once before letting it through.
7566 return -TARGET_ERESTARTSYS
;
7572 gemu_log("syscall %d", num
);
7574 trace_guest_user_syscall(cpu
, num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
7576 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7579 case TARGET_NR_exit
:
7580 /* In old applications this may be used to implement _exit(2).
7581 However in threaded applictions it is used for thread termination,
7582 and _exit_group is used for application termination.
7583 Do thread termination if we have more then one thread. */
7585 if (block_signals()) {
7586 ret
= -TARGET_ERESTARTSYS
;
7592 if (CPU_NEXT(first_cpu
)) {
7595 /* Remove the CPU from the list. */
7596 QTAILQ_REMOVE(&cpus
, cpu
, node
);
7601 if (ts
->child_tidptr
) {
7602 put_user_u32(0, ts
->child_tidptr
);
7603 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
7607 object_unref(OBJECT(cpu
));
7609 rcu_unregister_thread();
7617 gdb_exit(cpu_env
, arg1
);
7619 ret
= 0; /* avoid warning */
7621 case TARGET_NR_read
:
7625 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7627 ret
= get_errno(safe_read(arg1
, p
, arg3
));
7629 fd_trans_host_to_target_data(arg1
)) {
7630 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
7632 unlock_user(p
, arg2
, ret
);
7635 case TARGET_NR_write
:
7636 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
7638 ret
= get_errno(safe_write(arg1
, p
, arg3
));
7639 unlock_user(p
, arg2
, 0);
7641 #ifdef TARGET_NR_open
7642 case TARGET_NR_open
:
7643 if (!(p
= lock_user_string(arg1
)))
7645 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
7646 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
7648 fd_trans_unregister(ret
);
7649 unlock_user(p
, arg1
, 0);
7652 case TARGET_NR_openat
:
7653 if (!(p
= lock_user_string(arg2
)))
7655 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
7656 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
7658 fd_trans_unregister(ret
);
7659 unlock_user(p
, arg2
, 0);
7661 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7662 case TARGET_NR_name_to_handle_at
:
7663 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
7666 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7667 case TARGET_NR_open_by_handle_at
:
7668 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
7669 fd_trans_unregister(ret
);
7672 case TARGET_NR_close
:
7673 fd_trans_unregister(arg1
);
7674 ret
= get_errno(close(arg1
));
7679 #ifdef TARGET_NR_fork
7680 case TARGET_NR_fork
:
7681 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
7684 #ifdef TARGET_NR_waitpid
7685 case TARGET_NR_waitpid
:
7688 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
7689 if (!is_error(ret
) && arg2
&& ret
7690 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
7695 #ifdef TARGET_NR_waitid
7696 case TARGET_NR_waitid
:
7700 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
7701 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
7702 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
7704 host_to_target_siginfo(p
, &info
);
7705 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
7710 #ifdef TARGET_NR_creat /* not on alpha */
7711 case TARGET_NR_creat
:
7712 if (!(p
= lock_user_string(arg1
)))
7714 ret
= get_errno(creat(p
, arg2
));
7715 fd_trans_unregister(ret
);
7716 unlock_user(p
, arg1
, 0);
7719 #ifdef TARGET_NR_link
7720 case TARGET_NR_link
:
7723 p
= lock_user_string(arg1
);
7724 p2
= lock_user_string(arg2
);
7726 ret
= -TARGET_EFAULT
;
7728 ret
= get_errno(link(p
, p2
));
7729 unlock_user(p2
, arg2
, 0);
7730 unlock_user(p
, arg1
, 0);
7734 #if defined(TARGET_NR_linkat)
7735 case TARGET_NR_linkat
:
7740 p
= lock_user_string(arg2
);
7741 p2
= lock_user_string(arg4
);
7743 ret
= -TARGET_EFAULT
;
7745 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
7746 unlock_user(p
, arg2
, 0);
7747 unlock_user(p2
, arg4
, 0);
7751 #ifdef TARGET_NR_unlink
7752 case TARGET_NR_unlink
:
7753 if (!(p
= lock_user_string(arg1
)))
7755 ret
= get_errno(unlink(p
));
7756 unlock_user(p
, arg1
, 0);
7759 #if defined(TARGET_NR_unlinkat)
7760 case TARGET_NR_unlinkat
:
7761 if (!(p
= lock_user_string(arg2
)))
7763 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
7764 unlock_user(p
, arg2
, 0);
7767 case TARGET_NR_execve
:
7769 char **argp
, **envp
;
7772 abi_ulong guest_argp
;
7773 abi_ulong guest_envp
;
7780 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
7781 if (get_user_ual(addr
, gp
))
7789 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
7790 if (get_user_ual(addr
, gp
))
7797 argp
= alloca((argc
+ 1) * sizeof(void *));
7798 envp
= alloca((envc
+ 1) * sizeof(void *));
7800 for (gp
= guest_argp
, q
= argp
; gp
;
7801 gp
+= sizeof(abi_ulong
), q
++) {
7802 if (get_user_ual(addr
, gp
))
7806 if (!(*q
= lock_user_string(addr
)))
7808 total_size
+= strlen(*q
) + 1;
7812 for (gp
= guest_envp
, q
= envp
; gp
;
7813 gp
+= sizeof(abi_ulong
), q
++) {
7814 if (get_user_ual(addr
, gp
))
7818 if (!(*q
= lock_user_string(addr
)))
7820 total_size
+= strlen(*q
) + 1;
7824 if (!(p
= lock_user_string(arg1
)))
7826 /* Although execve() is not an interruptible syscall it is
7827 * a special case where we must use the safe_syscall wrapper:
7828 * if we allow a signal to happen before we make the host
7829 * syscall then we will 'lose' it, because at the point of
7830 * execve the process leaves QEMU's control. So we use the
7831 * safe syscall wrapper to ensure that we either take the
7832 * signal as a guest signal, or else it does not happen
7833 * before the execve completes and makes it the other
7834 * program's problem.
7836 ret
= get_errno(safe_execve(p
, argp
, envp
));
7837 unlock_user(p
, arg1
, 0);
7842 ret
= -TARGET_EFAULT
;
7845 for (gp
= guest_argp
, q
= argp
; *q
;
7846 gp
+= sizeof(abi_ulong
), q
++) {
7847 if (get_user_ual(addr
, gp
)
7850 unlock_user(*q
, addr
, 0);
7852 for (gp
= guest_envp
, q
= envp
; *q
;
7853 gp
+= sizeof(abi_ulong
), q
++) {
7854 if (get_user_ual(addr
, gp
)
7857 unlock_user(*q
, addr
, 0);
7861 case TARGET_NR_chdir
:
7862 if (!(p
= lock_user_string(arg1
)))
7864 ret
= get_errno(chdir(p
));
7865 unlock_user(p
, arg1
, 0);
7867 #ifdef TARGET_NR_time
7868 case TARGET_NR_time
:
7871 ret
= get_errno(time(&host_time
));
7874 && put_user_sal(host_time
, arg1
))
7879 #ifdef TARGET_NR_mknod
7880 case TARGET_NR_mknod
:
7881 if (!(p
= lock_user_string(arg1
)))
7883 ret
= get_errno(mknod(p
, arg2
, arg3
));
7884 unlock_user(p
, arg1
, 0);
7887 #if defined(TARGET_NR_mknodat)
7888 case TARGET_NR_mknodat
:
7889 if (!(p
= lock_user_string(arg2
)))
7891 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
7892 unlock_user(p
, arg2
, 0);
7895 #ifdef TARGET_NR_chmod
7896 case TARGET_NR_chmod
:
7897 if (!(p
= lock_user_string(arg1
)))
7899 ret
= get_errno(chmod(p
, arg2
));
7900 unlock_user(p
, arg1
, 0);
7903 #ifdef TARGET_NR_break
7904 case TARGET_NR_break
:
7907 #ifdef TARGET_NR_oldstat
7908 case TARGET_NR_oldstat
:
7911 case TARGET_NR_lseek
:
7912 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
7914 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7915 /* Alpha specific */
7916 case TARGET_NR_getxpid
:
7917 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
7918 ret
= get_errno(getpid());
7921 #ifdef TARGET_NR_getpid
7922 case TARGET_NR_getpid
:
7923 ret
= get_errno(getpid());
7926 case TARGET_NR_mount
:
7928 /* need to look at the data field */
7932 p
= lock_user_string(arg1
);
7940 p2
= lock_user_string(arg2
);
7943 unlock_user(p
, arg1
, 0);
7949 p3
= lock_user_string(arg3
);
7952 unlock_user(p
, arg1
, 0);
7954 unlock_user(p2
, arg2
, 0);
7961 /* FIXME - arg5 should be locked, but it isn't clear how to
7962 * do that since it's not guaranteed to be a NULL-terminated
7966 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
7968 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
7970 ret
= get_errno(ret
);
7973 unlock_user(p
, arg1
, 0);
7975 unlock_user(p2
, arg2
, 0);
7977 unlock_user(p3
, arg3
, 0);
7981 #ifdef TARGET_NR_umount
7982 case TARGET_NR_umount
:
7983 if (!(p
= lock_user_string(arg1
)))
7985 ret
= get_errno(umount(p
));
7986 unlock_user(p
, arg1
, 0);
7989 #ifdef TARGET_NR_stime /* not on alpha */
7990 case TARGET_NR_stime
:
7993 if (get_user_sal(host_time
, arg1
))
7995 ret
= get_errno(stime(&host_time
));
7999 case TARGET_NR_ptrace
:
8001 #ifdef TARGET_NR_alarm /* not on alpha */
8002 case TARGET_NR_alarm
:
8006 #ifdef TARGET_NR_oldfstat
8007 case TARGET_NR_oldfstat
:
8010 #ifdef TARGET_NR_pause /* not on alpha */
8011 case TARGET_NR_pause
:
8012 if (!block_signals()) {
8013 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
8015 ret
= -TARGET_EINTR
;
8018 #ifdef TARGET_NR_utime
8019 case TARGET_NR_utime
:
8021 struct utimbuf tbuf
, *host_tbuf
;
8022 struct target_utimbuf
*target_tbuf
;
8024 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
8026 tbuf
.actime
= tswapal(target_tbuf
->actime
);
8027 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
8028 unlock_user_struct(target_tbuf
, arg2
, 0);
8033 if (!(p
= lock_user_string(arg1
)))
8035 ret
= get_errno(utime(p
, host_tbuf
));
8036 unlock_user(p
, arg1
, 0);
8040 #ifdef TARGET_NR_utimes
8041 case TARGET_NR_utimes
:
8043 struct timeval
*tvp
, tv
[2];
8045 if (copy_from_user_timeval(&tv
[0], arg2
)
8046 || copy_from_user_timeval(&tv
[1],
8047 arg2
+ sizeof(struct target_timeval
)))
8053 if (!(p
= lock_user_string(arg1
)))
8055 ret
= get_errno(utimes(p
, tvp
));
8056 unlock_user(p
, arg1
, 0);
8060 #if defined(TARGET_NR_futimesat)
8061 case TARGET_NR_futimesat
:
8063 struct timeval
*tvp
, tv
[2];
8065 if (copy_from_user_timeval(&tv
[0], arg3
)
8066 || copy_from_user_timeval(&tv
[1],
8067 arg3
+ sizeof(struct target_timeval
)))
8073 if (!(p
= lock_user_string(arg2
)))
8075 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
8076 unlock_user(p
, arg2
, 0);
8080 #ifdef TARGET_NR_stty
8081 case TARGET_NR_stty
:
8084 #ifdef TARGET_NR_gtty
8085 case TARGET_NR_gtty
:
8088 #ifdef TARGET_NR_access
8089 case TARGET_NR_access
:
8090 if (!(p
= lock_user_string(arg1
)))
8092 ret
= get_errno(access(path(p
), arg2
));
8093 unlock_user(p
, arg1
, 0);
8096 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8097 case TARGET_NR_faccessat
:
8098 if (!(p
= lock_user_string(arg2
)))
8100 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
8101 unlock_user(p
, arg2
, 0);
8104 #ifdef TARGET_NR_nice /* not on alpha */
8105 case TARGET_NR_nice
:
8106 ret
= get_errno(nice(arg1
));
8109 #ifdef TARGET_NR_ftime
8110 case TARGET_NR_ftime
:
8113 case TARGET_NR_sync
:
8117 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8118 case TARGET_NR_syncfs
:
8119 ret
= get_errno(syncfs(arg1
));
8122 case TARGET_NR_kill
:
8123 ret
= get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
8125 #ifdef TARGET_NR_rename
8126 case TARGET_NR_rename
:
8129 p
= lock_user_string(arg1
);
8130 p2
= lock_user_string(arg2
);
8132 ret
= -TARGET_EFAULT
;
8134 ret
= get_errno(rename(p
, p2
));
8135 unlock_user(p2
, arg2
, 0);
8136 unlock_user(p
, arg1
, 0);
8140 #if defined(TARGET_NR_renameat)
8141 case TARGET_NR_renameat
:
8144 p
= lock_user_string(arg2
);
8145 p2
= lock_user_string(arg4
);
8147 ret
= -TARGET_EFAULT
;
8149 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
8150 unlock_user(p2
, arg4
, 0);
8151 unlock_user(p
, arg2
, 0);
8155 #ifdef TARGET_NR_mkdir
8156 case TARGET_NR_mkdir
:
8157 if (!(p
= lock_user_string(arg1
)))
8159 ret
= get_errno(mkdir(p
, arg2
));
8160 unlock_user(p
, arg1
, 0);
8163 #if defined(TARGET_NR_mkdirat)
8164 case TARGET_NR_mkdirat
:
8165 if (!(p
= lock_user_string(arg2
)))
8167 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
8168 unlock_user(p
, arg2
, 0);
8171 #ifdef TARGET_NR_rmdir
8172 case TARGET_NR_rmdir
:
8173 if (!(p
= lock_user_string(arg1
)))
8175 ret
= get_errno(rmdir(p
));
8176 unlock_user(p
, arg1
, 0);
8180 ret
= get_errno(dup(arg1
));
8182 fd_trans_dup(arg1
, ret
);
8185 #ifdef TARGET_NR_pipe
8186 case TARGET_NR_pipe
:
8187 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
8190 #ifdef TARGET_NR_pipe2
8191 case TARGET_NR_pipe2
:
8192 ret
= do_pipe(cpu_env
, arg1
,
8193 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
8196 case TARGET_NR_times
:
8198 struct target_tms
*tmsp
;
8200 ret
= get_errno(times(&tms
));
8202 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
8205 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
8206 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
8207 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
8208 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
8211 ret
= host_to_target_clock_t(ret
);
8214 #ifdef TARGET_NR_prof
8215 case TARGET_NR_prof
:
8218 #ifdef TARGET_NR_signal
8219 case TARGET_NR_signal
:
8222 case TARGET_NR_acct
:
8224 ret
= get_errno(acct(NULL
));
8226 if (!(p
= lock_user_string(arg1
)))
8228 ret
= get_errno(acct(path(p
)));
8229 unlock_user(p
, arg1
, 0);
8232 #ifdef TARGET_NR_umount2
8233 case TARGET_NR_umount2
:
8234 if (!(p
= lock_user_string(arg1
)))
8236 ret
= get_errno(umount2(p
, arg2
));
8237 unlock_user(p
, arg1
, 0);
8240 #ifdef TARGET_NR_lock
8241 case TARGET_NR_lock
:
8244 case TARGET_NR_ioctl
:
8245 ret
= do_ioctl(arg1
, arg2
, arg3
);
8247 case TARGET_NR_fcntl
:
8248 ret
= do_fcntl(arg1
, arg2
, arg3
);
8250 #ifdef TARGET_NR_mpx
8254 case TARGET_NR_setpgid
:
8255 ret
= get_errno(setpgid(arg1
, arg2
));
8257 #ifdef TARGET_NR_ulimit
8258 case TARGET_NR_ulimit
:
8261 #ifdef TARGET_NR_oldolduname
8262 case TARGET_NR_oldolduname
:
8265 case TARGET_NR_umask
:
8266 ret
= get_errno(umask(arg1
));
8268 case TARGET_NR_chroot
:
8269 if (!(p
= lock_user_string(arg1
)))
8271 ret
= get_errno(chroot(p
));
8272 unlock_user(p
, arg1
, 0);
8274 #ifdef TARGET_NR_ustat
8275 case TARGET_NR_ustat
:
8278 #ifdef TARGET_NR_dup2
8279 case TARGET_NR_dup2
:
8280 ret
= get_errno(dup2(arg1
, arg2
));
8282 fd_trans_dup(arg1
, arg2
);
8286 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8287 case TARGET_NR_dup3
:
8288 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
8290 fd_trans_dup(arg1
, arg2
);
8294 #ifdef TARGET_NR_getppid /* not on alpha */
8295 case TARGET_NR_getppid
:
8296 ret
= get_errno(getppid());
8299 #ifdef TARGET_NR_getpgrp
8300 case TARGET_NR_getpgrp
:
8301 ret
= get_errno(getpgrp());
8304 case TARGET_NR_setsid
:
8305 ret
= get_errno(setsid());
8307 #ifdef TARGET_NR_sigaction
8308 case TARGET_NR_sigaction
:
8310 #if defined(TARGET_ALPHA)
8311 struct target_sigaction act
, oact
, *pact
= 0;
8312 struct target_old_sigaction
*old_act
;
8314 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8316 act
._sa_handler
= old_act
->_sa_handler
;
8317 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8318 act
.sa_flags
= old_act
->sa_flags
;
8319 act
.sa_restorer
= 0;
8320 unlock_user_struct(old_act
, arg2
, 0);
8323 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8324 if (!is_error(ret
) && arg3
) {
8325 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8327 old_act
->_sa_handler
= oact
._sa_handler
;
8328 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8329 old_act
->sa_flags
= oact
.sa_flags
;
8330 unlock_user_struct(old_act
, arg3
, 1);
8332 #elif defined(TARGET_MIPS)
8333 struct target_sigaction act
, oact
, *pact
, *old_act
;
8336 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8338 act
._sa_handler
= old_act
->_sa_handler
;
8339 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
8340 act
.sa_flags
= old_act
->sa_flags
;
8341 unlock_user_struct(old_act
, arg2
, 0);
8347 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8349 if (!is_error(ret
) && arg3
) {
8350 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8352 old_act
->_sa_handler
= oact
._sa_handler
;
8353 old_act
->sa_flags
= oact
.sa_flags
;
8354 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
8355 old_act
->sa_mask
.sig
[1] = 0;
8356 old_act
->sa_mask
.sig
[2] = 0;
8357 old_act
->sa_mask
.sig
[3] = 0;
8358 unlock_user_struct(old_act
, arg3
, 1);
8361 struct target_old_sigaction
*old_act
;
8362 struct target_sigaction act
, oact
, *pact
;
8364 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8366 act
._sa_handler
= old_act
->_sa_handler
;
8367 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8368 act
.sa_flags
= old_act
->sa_flags
;
8369 act
.sa_restorer
= old_act
->sa_restorer
;
8370 unlock_user_struct(old_act
, arg2
, 0);
8375 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8376 if (!is_error(ret
) && arg3
) {
8377 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8379 old_act
->_sa_handler
= oact
._sa_handler
;
8380 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8381 old_act
->sa_flags
= oact
.sa_flags
;
8382 old_act
->sa_restorer
= oact
.sa_restorer
;
8383 unlock_user_struct(old_act
, arg3
, 1);
8389 case TARGET_NR_rt_sigaction
:
8391 #if defined(TARGET_ALPHA)
8392 struct target_sigaction act
, oact
, *pact
= 0;
8393 struct target_rt_sigaction
*rt_act
;
8395 if (arg4
!= sizeof(target_sigset_t
)) {
8396 ret
= -TARGET_EINVAL
;
8400 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
8402 act
._sa_handler
= rt_act
->_sa_handler
;
8403 act
.sa_mask
= rt_act
->sa_mask
;
8404 act
.sa_flags
= rt_act
->sa_flags
;
8405 act
.sa_restorer
= arg5
;
8406 unlock_user_struct(rt_act
, arg2
, 0);
8409 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8410 if (!is_error(ret
) && arg3
) {
8411 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
8413 rt_act
->_sa_handler
= oact
._sa_handler
;
8414 rt_act
->sa_mask
= oact
.sa_mask
;
8415 rt_act
->sa_flags
= oact
.sa_flags
;
8416 unlock_user_struct(rt_act
, arg3
, 1);
8419 struct target_sigaction
*act
;
8420 struct target_sigaction
*oact
;
8422 if (arg4
!= sizeof(target_sigset_t
)) {
8423 ret
= -TARGET_EINVAL
;
8427 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
8432 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
8433 ret
= -TARGET_EFAULT
;
8434 goto rt_sigaction_fail
;
8438 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
8441 unlock_user_struct(act
, arg2
, 0);
8443 unlock_user_struct(oact
, arg3
, 1);
8447 #ifdef TARGET_NR_sgetmask /* not on alpha */
8448 case TARGET_NR_sgetmask
:
8451 abi_ulong target_set
;
8452 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8454 host_to_target_old_sigset(&target_set
, &cur_set
);
8460 #ifdef TARGET_NR_ssetmask /* not on alpha */
8461 case TARGET_NR_ssetmask
:
8463 sigset_t set
, oset
, cur_set
;
8464 abi_ulong target_set
= arg1
;
8465 /* We only have one word of the new mask so we must read
8466 * the rest of it with do_sigprocmask() and OR in this word.
8467 * We are guaranteed that a do_sigprocmask() that only queries
8468 * the signal mask will not fail.
8470 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8472 target_to_host_old_sigset(&set
, &target_set
);
8473 sigorset(&set
, &set
, &cur_set
);
8474 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
8476 host_to_target_old_sigset(&target_set
, &oset
);
8482 #ifdef TARGET_NR_sigprocmask
8483 case TARGET_NR_sigprocmask
:
8485 #if defined(TARGET_ALPHA)
8486 sigset_t set
, oldset
;
8491 case TARGET_SIG_BLOCK
:
8494 case TARGET_SIG_UNBLOCK
:
8497 case TARGET_SIG_SETMASK
:
8501 ret
= -TARGET_EINVAL
;
8505 target_to_host_old_sigset(&set
, &mask
);
8507 ret
= do_sigprocmask(how
, &set
, &oldset
);
8508 if (!is_error(ret
)) {
8509 host_to_target_old_sigset(&mask
, &oldset
);
8511 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
8514 sigset_t set
, oldset
, *set_ptr
;
8519 case TARGET_SIG_BLOCK
:
8522 case TARGET_SIG_UNBLOCK
:
8525 case TARGET_SIG_SETMASK
:
8529 ret
= -TARGET_EINVAL
;
8532 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8534 target_to_host_old_sigset(&set
, p
);
8535 unlock_user(p
, arg2
, 0);
8541 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8542 if (!is_error(ret
) && arg3
) {
8543 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8545 host_to_target_old_sigset(p
, &oldset
);
8546 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8552 case TARGET_NR_rt_sigprocmask
:
8555 sigset_t set
, oldset
, *set_ptr
;
8557 if (arg4
!= sizeof(target_sigset_t
)) {
8558 ret
= -TARGET_EINVAL
;
8564 case TARGET_SIG_BLOCK
:
8567 case TARGET_SIG_UNBLOCK
:
8570 case TARGET_SIG_SETMASK
:
8574 ret
= -TARGET_EINVAL
;
8577 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8579 target_to_host_sigset(&set
, p
);
8580 unlock_user(p
, arg2
, 0);
8586 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8587 if (!is_error(ret
) && arg3
) {
8588 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8590 host_to_target_sigset(p
, &oldset
);
8591 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8595 #ifdef TARGET_NR_sigpending
8596 case TARGET_NR_sigpending
:
8599 ret
= get_errno(sigpending(&set
));
8600 if (!is_error(ret
)) {
8601 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8603 host_to_target_old_sigset(p
, &set
);
8604 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8609 case TARGET_NR_rt_sigpending
:
8613 /* Yes, this check is >, not != like most. We follow the kernel's
8614 * logic and it does it like this because it implements
8615 * NR_sigpending through the same code path, and in that case
8616 * the old_sigset_t is smaller in size.
8618 if (arg2
> sizeof(target_sigset_t
)) {
8619 ret
= -TARGET_EINVAL
;
8623 ret
= get_errno(sigpending(&set
));
8624 if (!is_error(ret
)) {
8625 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8627 host_to_target_sigset(p
, &set
);
8628 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8632 #ifdef TARGET_NR_sigsuspend
8633 case TARGET_NR_sigsuspend
:
8635 TaskState
*ts
= cpu
->opaque
;
8636 #if defined(TARGET_ALPHA)
8637 abi_ulong mask
= arg1
;
8638 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
8640 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8642 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
8643 unlock_user(p
, arg1
, 0);
8645 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8647 if (ret
!= -TARGET_ERESTARTSYS
) {
8648 ts
->in_sigsuspend
= 1;
8653 case TARGET_NR_rt_sigsuspend
:
8655 TaskState
*ts
= cpu
->opaque
;
8657 if (arg2
!= sizeof(target_sigset_t
)) {
8658 ret
= -TARGET_EINVAL
;
8661 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8663 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
8664 unlock_user(p
, arg1
, 0);
8665 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8667 if (ret
!= -TARGET_ERESTARTSYS
) {
8668 ts
->in_sigsuspend
= 1;
8672 case TARGET_NR_rt_sigtimedwait
:
8675 struct timespec uts
, *puts
;
8678 if (arg4
!= sizeof(target_sigset_t
)) {
8679 ret
= -TARGET_EINVAL
;
8683 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8685 target_to_host_sigset(&set
, p
);
8686 unlock_user(p
, arg1
, 0);
8689 target_to_host_timespec(puts
, arg3
);
8693 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
8695 if (!is_error(ret
)) {
8697 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
8702 host_to_target_siginfo(p
, &uinfo
);
8703 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
8705 ret
= host_to_target_signal(ret
);
8709 case TARGET_NR_rt_sigqueueinfo
:
8713 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
8717 target_to_host_siginfo(&uinfo
, p
);
8718 unlock_user(p
, arg1
, 0);
8719 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
8722 #ifdef TARGET_NR_sigreturn
8723 case TARGET_NR_sigreturn
:
8724 if (block_signals()) {
8725 ret
= -TARGET_ERESTARTSYS
;
8727 ret
= do_sigreturn(cpu_env
);
8731 case TARGET_NR_rt_sigreturn
:
8732 if (block_signals()) {
8733 ret
= -TARGET_ERESTARTSYS
;
8735 ret
= do_rt_sigreturn(cpu_env
);
8738 case TARGET_NR_sethostname
:
8739 if (!(p
= lock_user_string(arg1
)))
8741 ret
= get_errno(sethostname(p
, arg2
));
8742 unlock_user(p
, arg1
, 0);
8744 case TARGET_NR_setrlimit
:
8746 int resource
= target_to_host_resource(arg1
);
8747 struct target_rlimit
*target_rlim
;
8749 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
8751 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
8752 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
8753 unlock_user_struct(target_rlim
, arg2
, 0);
8754 ret
= get_errno(setrlimit(resource
, &rlim
));
8757 case TARGET_NR_getrlimit
:
8759 int resource
= target_to_host_resource(arg1
);
8760 struct target_rlimit
*target_rlim
;
8763 ret
= get_errno(getrlimit(resource
, &rlim
));
8764 if (!is_error(ret
)) {
8765 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
8767 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
8768 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
8769 unlock_user_struct(target_rlim
, arg2
, 1);
8773 case TARGET_NR_getrusage
:
8775 struct rusage rusage
;
8776 ret
= get_errno(getrusage(arg1
, &rusage
));
8777 if (!is_error(ret
)) {
8778 ret
= host_to_target_rusage(arg2
, &rusage
);
8782 case TARGET_NR_gettimeofday
:
8785 ret
= get_errno(gettimeofday(&tv
, NULL
));
8786 if (!is_error(ret
)) {
8787 if (copy_to_user_timeval(arg1
, &tv
))
8792 case TARGET_NR_settimeofday
:
8794 struct timeval tv
, *ptv
= NULL
;
8795 struct timezone tz
, *ptz
= NULL
;
8798 if (copy_from_user_timeval(&tv
, arg1
)) {
8805 if (copy_from_user_timezone(&tz
, arg2
)) {
8811 ret
= get_errno(settimeofday(ptv
, ptz
));
8814 #if defined(TARGET_NR_select)
8815 case TARGET_NR_select
:
8816 #if defined(TARGET_WANT_NI_OLD_SELECT)
8817 /* some architectures used to have old_select here
8818 * but now ENOSYS it.
8820 ret
= -TARGET_ENOSYS
;
8821 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8822 ret
= do_old_select(arg1
);
8824 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
8828 #ifdef TARGET_NR_pselect6
8829 case TARGET_NR_pselect6
:
8831 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
8832 fd_set rfds
, wfds
, efds
;
8833 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
8834 struct timespec ts
, *ts_ptr
;
8837 * The 6th arg is actually two args smashed together,
8838 * so we cannot use the C library.
8846 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
8847 target_sigset_t
*target_sigset
;
8855 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
8859 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
8863 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
8869 * This takes a timespec, and not a timeval, so we cannot
8870 * use the do_select() helper ...
8873 if (target_to_host_timespec(&ts
, ts_addr
)) {
8881 /* Extract the two packed args for the sigset */
8884 sig
.size
= SIGSET_T_SIZE
;
8886 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
8890 arg_sigset
= tswapal(arg7
[0]);
8891 arg_sigsize
= tswapal(arg7
[1]);
8892 unlock_user(arg7
, arg6
, 0);
8896 if (arg_sigsize
!= sizeof(*target_sigset
)) {
8897 /* Like the kernel, we enforce correct size sigsets */
8898 ret
= -TARGET_EINVAL
;
8901 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
8902 sizeof(*target_sigset
), 1);
8903 if (!target_sigset
) {
8906 target_to_host_sigset(&set
, target_sigset
);
8907 unlock_user(target_sigset
, arg_sigset
, 0);
8915 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
8918 if (!is_error(ret
)) {
8919 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
8921 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
8923 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
8926 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
8932 #ifdef TARGET_NR_symlink
8933 case TARGET_NR_symlink
:
8936 p
= lock_user_string(arg1
);
8937 p2
= lock_user_string(arg2
);
8939 ret
= -TARGET_EFAULT
;
8941 ret
= get_errno(symlink(p
, p2
));
8942 unlock_user(p2
, arg2
, 0);
8943 unlock_user(p
, arg1
, 0);
8947 #if defined(TARGET_NR_symlinkat)
8948 case TARGET_NR_symlinkat
:
8951 p
= lock_user_string(arg1
);
8952 p2
= lock_user_string(arg3
);
8954 ret
= -TARGET_EFAULT
;
8956 ret
= get_errno(symlinkat(p
, arg2
, p2
));
8957 unlock_user(p2
, arg3
, 0);
8958 unlock_user(p
, arg1
, 0);
8962 #ifdef TARGET_NR_oldlstat
8963 case TARGET_NR_oldlstat
:
8966 #ifdef TARGET_NR_readlink
8967 case TARGET_NR_readlink
:
8970 p
= lock_user_string(arg1
);
8971 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8973 ret
= -TARGET_EFAULT
;
8975 /* Short circuit this for the magic exe check. */
8976 ret
= -TARGET_EINVAL
;
8977 } else if (is_proc_myself((const char *)p
, "exe")) {
8978 char real
[PATH_MAX
], *temp
;
8979 temp
= realpath(exec_path
, real
);
8980 /* Return value is # of bytes that we wrote to the buffer. */
8982 ret
= get_errno(-1);
8984 /* Don't worry about sign mismatch as earlier mapping
8985 * logic would have thrown a bad address error. */
8986 ret
= MIN(strlen(real
), arg3
);
8987 /* We cannot NUL terminate the string. */
8988 memcpy(p2
, real
, ret
);
8991 ret
= get_errno(readlink(path(p
), p2
, arg3
));
8993 unlock_user(p2
, arg2
, ret
);
8994 unlock_user(p
, arg1
, 0);
8998 #if defined(TARGET_NR_readlinkat)
8999 case TARGET_NR_readlinkat
:
9002 p
= lock_user_string(arg2
);
9003 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
9005 ret
= -TARGET_EFAULT
;
9006 } else if (is_proc_myself((const char *)p
, "exe")) {
9007 char real
[PATH_MAX
], *temp
;
9008 temp
= realpath(exec_path
, real
);
9009 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
9010 snprintf((char *)p2
, arg4
, "%s", real
);
9012 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
9014 unlock_user(p2
, arg3
, ret
);
9015 unlock_user(p
, arg2
, 0);
9019 #ifdef TARGET_NR_uselib
9020 case TARGET_NR_uselib
:
9023 #ifdef TARGET_NR_swapon
9024 case TARGET_NR_swapon
:
9025 if (!(p
= lock_user_string(arg1
)))
9027 ret
= get_errno(swapon(p
, arg2
));
9028 unlock_user(p
, arg1
, 0);
9031 case TARGET_NR_reboot
:
9032 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
9033 /* arg4 must be ignored in all other cases */
9034 p
= lock_user_string(arg4
);
9038 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
9039 unlock_user(p
, arg4
, 0);
9041 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
9044 #ifdef TARGET_NR_readdir
9045 case TARGET_NR_readdir
:
9048 #ifdef TARGET_NR_mmap
9049 case TARGET_NR_mmap
:
9050 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9051 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9052 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9053 || defined(TARGET_S390X)
9056 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
9057 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
9065 unlock_user(v
, arg1
, 0);
9066 ret
= get_errno(target_mmap(v1
, v2
, v3
,
9067 target_to_host_bitmask(v4
, mmap_flags_tbl
),
9071 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9072 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9078 #ifdef TARGET_NR_mmap2
9079 case TARGET_NR_mmap2
:
9081 #define MMAP_SHIFT 12
9083 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9084 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9086 arg6
<< MMAP_SHIFT
));
9089 case TARGET_NR_munmap
:
9090 ret
= get_errno(target_munmap(arg1
, arg2
));
9092 case TARGET_NR_mprotect
:
9094 TaskState
*ts
= cpu
->opaque
;
9095 /* Special hack to detect libc making the stack executable. */
9096 if ((arg3
& PROT_GROWSDOWN
)
9097 && arg1
>= ts
->info
->stack_limit
9098 && arg1
<= ts
->info
->start_stack
) {
9099 arg3
&= ~PROT_GROWSDOWN
;
9100 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
9101 arg1
= ts
->info
->stack_limit
;
9104 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
9106 #ifdef TARGET_NR_mremap
9107 case TARGET_NR_mremap
:
9108 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
9111 /* ??? msync/mlock/munlock are broken for softmmu. */
9112 #ifdef TARGET_NR_msync
9113 case TARGET_NR_msync
:
9114 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
9117 #ifdef TARGET_NR_mlock
9118 case TARGET_NR_mlock
:
9119 ret
= get_errno(mlock(g2h(arg1
), arg2
));
9122 #ifdef TARGET_NR_munlock
9123 case TARGET_NR_munlock
:
9124 ret
= get_errno(munlock(g2h(arg1
), arg2
));
9127 #ifdef TARGET_NR_mlockall
9128 case TARGET_NR_mlockall
:
9129 ret
= get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
9132 #ifdef TARGET_NR_munlockall
9133 case TARGET_NR_munlockall
:
9134 ret
= get_errno(munlockall());
9137 case TARGET_NR_truncate
:
9138 if (!(p
= lock_user_string(arg1
)))
9140 ret
= get_errno(truncate(p
, arg2
));
9141 unlock_user(p
, arg1
, 0);
9143 case TARGET_NR_ftruncate
:
9144 ret
= get_errno(ftruncate(arg1
, arg2
));
9146 case TARGET_NR_fchmod
:
9147 ret
= get_errno(fchmod(arg1
, arg2
));
9149 #if defined(TARGET_NR_fchmodat)
9150 case TARGET_NR_fchmodat
:
9151 if (!(p
= lock_user_string(arg2
)))
9153 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
9154 unlock_user(p
, arg2
, 0);
9157 case TARGET_NR_getpriority
:
9158 /* Note that negative values are valid for getpriority, so we must
9159 differentiate based on errno settings. */
9161 ret
= getpriority(arg1
, arg2
);
9162 if (ret
== -1 && errno
!= 0) {
9163 ret
= -host_to_target_errno(errno
);
9167 /* Return value is the unbiased priority. Signal no error. */
9168 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
9170 /* Return value is a biased priority to avoid negative numbers. */
9174 case TARGET_NR_setpriority
:
9175 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
9177 #ifdef TARGET_NR_profil
9178 case TARGET_NR_profil
:
9181 case TARGET_NR_statfs
:
9182 if (!(p
= lock_user_string(arg1
)))
9184 ret
= get_errno(statfs(path(p
), &stfs
));
9185 unlock_user(p
, arg1
, 0);
9187 if (!is_error(ret
)) {
9188 struct target_statfs
*target_stfs
;
9190 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
9192 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9193 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9194 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9195 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9196 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9197 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9198 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9199 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9200 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9201 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9202 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9203 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9204 unlock_user_struct(target_stfs
, arg2
, 1);
9207 case TARGET_NR_fstatfs
:
9208 ret
= get_errno(fstatfs(arg1
, &stfs
));
9209 goto convert_statfs
;
9210 #ifdef TARGET_NR_statfs64
9211 case TARGET_NR_statfs64
:
9212 if (!(p
= lock_user_string(arg1
)))
9214 ret
= get_errno(statfs(path(p
), &stfs
));
9215 unlock_user(p
, arg1
, 0);
9217 if (!is_error(ret
)) {
9218 struct target_statfs64
*target_stfs
;
9220 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
9222 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9223 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9224 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9225 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9226 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9227 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9228 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9229 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9230 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9231 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9232 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9233 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9234 unlock_user_struct(target_stfs
, arg3
, 1);
9237 case TARGET_NR_fstatfs64
:
9238 ret
= get_errno(fstatfs(arg1
, &stfs
));
9239 goto convert_statfs64
;
9241 #ifdef TARGET_NR_ioperm
9242 case TARGET_NR_ioperm
:
9245 #ifdef TARGET_NR_socketcall
9246 case TARGET_NR_socketcall
:
9247 ret
= do_socketcall(arg1
, arg2
);
9250 #ifdef TARGET_NR_accept
9251 case TARGET_NR_accept
:
9252 ret
= do_accept4(arg1
, arg2
, arg3
, 0);
9255 #ifdef TARGET_NR_accept4
9256 case TARGET_NR_accept4
:
9257 ret
= do_accept4(arg1
, arg2
, arg3
, arg4
);
9260 #ifdef TARGET_NR_bind
9261 case TARGET_NR_bind
:
9262 ret
= do_bind(arg1
, arg2
, arg3
);
9265 #ifdef TARGET_NR_connect
9266 case TARGET_NR_connect
:
9267 ret
= do_connect(arg1
, arg2
, arg3
);
9270 #ifdef TARGET_NR_getpeername
9271 case TARGET_NR_getpeername
:
9272 ret
= do_getpeername(arg1
, arg2
, arg3
);
9275 #ifdef TARGET_NR_getsockname
9276 case TARGET_NR_getsockname
:
9277 ret
= do_getsockname(arg1
, arg2
, arg3
);
9280 #ifdef TARGET_NR_getsockopt
9281 case TARGET_NR_getsockopt
:
9282 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
9285 #ifdef TARGET_NR_listen
9286 case TARGET_NR_listen
:
9287 ret
= get_errno(listen(arg1
, arg2
));
9290 #ifdef TARGET_NR_recv
9291 case TARGET_NR_recv
:
9292 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
9295 #ifdef TARGET_NR_recvfrom
9296 case TARGET_NR_recvfrom
:
9297 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9300 #ifdef TARGET_NR_recvmsg
9301 case TARGET_NR_recvmsg
:
9302 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
9305 #ifdef TARGET_NR_send
9306 case TARGET_NR_send
:
9307 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
9310 #ifdef TARGET_NR_sendmsg
9311 case TARGET_NR_sendmsg
:
9312 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
9315 #ifdef TARGET_NR_sendmmsg
9316 case TARGET_NR_sendmmsg
:
9317 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
9319 case TARGET_NR_recvmmsg
:
9320 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
9323 #ifdef TARGET_NR_sendto
9324 case TARGET_NR_sendto
:
9325 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9328 #ifdef TARGET_NR_shutdown
9329 case TARGET_NR_shutdown
:
9330 ret
= get_errno(shutdown(arg1
, arg2
));
9333 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9334 case TARGET_NR_getrandom
:
9335 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
9339 ret
= get_errno(getrandom(p
, arg2
, arg3
));
9340 unlock_user(p
, arg1
, ret
);
9343 #ifdef TARGET_NR_socket
9344 case TARGET_NR_socket
:
9345 ret
= do_socket(arg1
, arg2
, arg3
);
9346 fd_trans_unregister(ret
);
9349 #ifdef TARGET_NR_socketpair
9350 case TARGET_NR_socketpair
:
9351 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
9354 #ifdef TARGET_NR_setsockopt
9355 case TARGET_NR_setsockopt
:
9356 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
9359 #if defined(TARGET_NR_syslog)
9360 case TARGET_NR_syslog
:
9365 case TARGET_SYSLOG_ACTION_CLOSE
: /* Close log */
9366 case TARGET_SYSLOG_ACTION_OPEN
: /* Open log */
9367 case TARGET_SYSLOG_ACTION_CLEAR
: /* Clear ring buffer */
9368 case TARGET_SYSLOG_ACTION_CONSOLE_OFF
: /* Disable logging */
9369 case TARGET_SYSLOG_ACTION_CONSOLE_ON
: /* Enable logging */
9370 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL
: /* Set messages level */
9371 case TARGET_SYSLOG_ACTION_SIZE_UNREAD
: /* Number of chars */
9372 case TARGET_SYSLOG_ACTION_SIZE_BUFFER
: /* Size of the buffer */
9374 ret
= get_errno(sys_syslog((int)arg1
, NULL
, (int)arg3
));
9377 case TARGET_SYSLOG_ACTION_READ
: /* Read from log */
9378 case TARGET_SYSLOG_ACTION_READ_CLEAR
: /* Read/clear msgs */
9379 case TARGET_SYSLOG_ACTION_READ_ALL
: /* Read last messages */
9381 ret
= -TARGET_EINVAL
;
9389 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9391 ret
= -TARGET_EFAULT
;
9394 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
9395 unlock_user(p
, arg2
, arg3
);
9405 case TARGET_NR_setitimer
:
9407 struct itimerval value
, ovalue
, *pvalue
;
9411 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
9412 || copy_from_user_timeval(&pvalue
->it_value
,
9413 arg2
+ sizeof(struct target_timeval
)))
9418 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
9419 if (!is_error(ret
) && arg3
) {
9420 if (copy_to_user_timeval(arg3
,
9421 &ovalue
.it_interval
)
9422 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
9428 case TARGET_NR_getitimer
:
9430 struct itimerval value
;
9432 ret
= get_errno(getitimer(arg1
, &value
));
9433 if (!is_error(ret
) && arg2
) {
9434 if (copy_to_user_timeval(arg2
,
9436 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
9442 #ifdef TARGET_NR_stat
9443 case TARGET_NR_stat
:
9444 if (!(p
= lock_user_string(arg1
)))
9446 ret
= get_errno(stat(path(p
), &st
));
9447 unlock_user(p
, arg1
, 0);
9450 #ifdef TARGET_NR_lstat
9451 case TARGET_NR_lstat
:
9452 if (!(p
= lock_user_string(arg1
)))
9454 ret
= get_errno(lstat(path(p
), &st
));
9455 unlock_user(p
, arg1
, 0);
9458 case TARGET_NR_fstat
:
9460 ret
= get_errno(fstat(arg1
, &st
));
9461 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9464 if (!is_error(ret
)) {
9465 struct target_stat
*target_st
;
9467 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
9469 memset(target_st
, 0, sizeof(*target_st
));
9470 __put_user(st
.st_dev
, &target_st
->st_dev
);
9471 __put_user(st
.st_ino
, &target_st
->st_ino
);
9472 __put_user(st
.st_mode
, &target_st
->st_mode
);
9473 __put_user(st
.st_uid
, &target_st
->st_uid
);
9474 __put_user(st
.st_gid
, &target_st
->st_gid
);
9475 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
9476 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
9477 __put_user(st
.st_size
, &target_st
->st_size
);
9478 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
9479 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
9480 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
9481 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
9482 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
9483 unlock_user_struct(target_st
, arg2
, 1);
9487 #ifdef TARGET_NR_olduname
9488 case TARGET_NR_olduname
:
9491 #ifdef TARGET_NR_iopl
9492 case TARGET_NR_iopl
:
9495 case TARGET_NR_vhangup
:
9496 ret
= get_errno(vhangup());
9498 #ifdef TARGET_NR_idle
9499 case TARGET_NR_idle
:
9502 #ifdef TARGET_NR_syscall
9503 case TARGET_NR_syscall
:
9504 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
9505 arg6
, arg7
, arg8
, 0);
9508 case TARGET_NR_wait4
:
9511 abi_long status_ptr
= arg2
;
9512 struct rusage rusage
, *rusage_ptr
;
9513 abi_ulong target_rusage
= arg4
;
9514 abi_long rusage_err
;
9516 rusage_ptr
= &rusage
;
9519 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
9520 if (!is_error(ret
)) {
9521 if (status_ptr
&& ret
) {
9522 status
= host_to_target_waitstatus(status
);
9523 if (put_user_s32(status
, status_ptr
))
9526 if (target_rusage
) {
9527 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
9535 #ifdef TARGET_NR_swapoff
9536 case TARGET_NR_swapoff
:
9537 if (!(p
= lock_user_string(arg1
)))
9539 ret
= get_errno(swapoff(p
));
9540 unlock_user(p
, arg1
, 0);
9543 case TARGET_NR_sysinfo
:
9545 struct target_sysinfo
*target_value
;
9546 struct sysinfo value
;
9547 ret
= get_errno(sysinfo(&value
));
9548 if (!is_error(ret
) && arg1
)
9550 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
9552 __put_user(value
.uptime
, &target_value
->uptime
);
9553 __put_user(value
.loads
[0], &target_value
->loads
[0]);
9554 __put_user(value
.loads
[1], &target_value
->loads
[1]);
9555 __put_user(value
.loads
[2], &target_value
->loads
[2]);
9556 __put_user(value
.totalram
, &target_value
->totalram
);
9557 __put_user(value
.freeram
, &target_value
->freeram
);
9558 __put_user(value
.sharedram
, &target_value
->sharedram
);
9559 __put_user(value
.bufferram
, &target_value
->bufferram
);
9560 __put_user(value
.totalswap
, &target_value
->totalswap
);
9561 __put_user(value
.freeswap
, &target_value
->freeswap
);
9562 __put_user(value
.procs
, &target_value
->procs
);
9563 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
9564 __put_user(value
.freehigh
, &target_value
->freehigh
);
9565 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
9566 unlock_user_struct(target_value
, arg1
, 1);
9570 #ifdef TARGET_NR_ipc
9572 ret
= do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9575 #ifdef TARGET_NR_semget
9576 case TARGET_NR_semget
:
9577 ret
= get_errno(semget(arg1
, arg2
, arg3
));
9580 #ifdef TARGET_NR_semop
9581 case TARGET_NR_semop
:
9582 ret
= do_semop(arg1
, arg2
, arg3
);
9585 #ifdef TARGET_NR_semctl
9586 case TARGET_NR_semctl
:
9587 ret
= do_semctl(arg1
, arg2
, arg3
, arg4
);
9590 #ifdef TARGET_NR_msgctl
9591 case TARGET_NR_msgctl
:
9592 ret
= do_msgctl(arg1
, arg2
, arg3
);
9595 #ifdef TARGET_NR_msgget
9596 case TARGET_NR_msgget
:
9597 ret
= get_errno(msgget(arg1
, arg2
));
9600 #ifdef TARGET_NR_msgrcv
9601 case TARGET_NR_msgrcv
:
9602 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
9605 #ifdef TARGET_NR_msgsnd
9606 case TARGET_NR_msgsnd
:
9607 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
9610 #ifdef TARGET_NR_shmget
9611 case TARGET_NR_shmget
:
9612 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
9615 #ifdef TARGET_NR_shmctl
9616 case TARGET_NR_shmctl
:
9617 ret
= do_shmctl(arg1
, arg2
, arg3
);
9620 #ifdef TARGET_NR_shmat
9621 case TARGET_NR_shmat
:
9622 ret
= do_shmat(cpu_env
, arg1
, arg2
, arg3
);
9625 #ifdef TARGET_NR_shmdt
9626 case TARGET_NR_shmdt
:
9627 ret
= do_shmdt(arg1
);
9630 case TARGET_NR_fsync
:
9631 ret
= get_errno(fsync(arg1
));
9633 case TARGET_NR_clone
:
9634 /* Linux manages to have three different orderings for its
9635 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9636 * match the kernel's CONFIG_CLONE_* settings.
9637 * Microblaze is further special in that it uses a sixth
9638 * implicit argument to clone for the TLS pointer.
9640 #if defined(TARGET_MICROBLAZE)
9641 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
9642 #elif defined(TARGET_CLONE_BACKWARDS)
9643 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
9644 #elif defined(TARGET_CLONE_BACKWARDS2)
9645 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
9647 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
9650 #ifdef __NR_exit_group
9651 /* new thread calls */
9652 case TARGET_NR_exit_group
:
9656 gdb_exit(cpu_env
, arg1
);
9657 ret
= get_errno(exit_group(arg1
));
9660 case TARGET_NR_setdomainname
:
9661 if (!(p
= lock_user_string(arg1
)))
9663 ret
= get_errno(setdomainname(p
, arg2
));
9664 unlock_user(p
, arg1
, 0);
9666 case TARGET_NR_uname
:
9667 /* no need to transcode because we use the linux syscall */
9669 struct new_utsname
* buf
;
9671 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
9673 ret
= get_errno(sys_uname(buf
));
9674 if (!is_error(ret
)) {
9675 /* Overwrite the native machine name with whatever is being
9677 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
9678 /* Allow the user to override the reported release. */
9679 if (qemu_uname_release
&& *qemu_uname_release
) {
9680 g_strlcpy(buf
->release
, qemu_uname_release
,
9681 sizeof(buf
->release
));
9684 unlock_user_struct(buf
, arg1
, 1);
9688 case TARGET_NR_modify_ldt
:
9689 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
9691 #if !defined(TARGET_X86_64)
9692 case TARGET_NR_vm86old
:
9694 case TARGET_NR_vm86
:
9695 ret
= do_vm86(cpu_env
, arg1
, arg2
);
9699 case TARGET_NR_adjtimex
:
9701 struct timex host_buf
;
9703 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
9706 ret
= get_errno(adjtimex(&host_buf
));
9707 if (!is_error(ret
)) {
9708 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
9714 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9715 case TARGET_NR_clock_adjtime
:
9717 struct timex htx
, *phtx
= &htx
;
9719 if (target_to_host_timex(phtx
, arg2
) != 0) {
9722 ret
= get_errno(clock_adjtime(arg1
, phtx
));
9723 if (!is_error(ret
) && phtx
) {
9724 if (host_to_target_timex(arg2
, phtx
) != 0) {
9731 #ifdef TARGET_NR_create_module
9732 case TARGET_NR_create_module
:
9734 case TARGET_NR_init_module
:
9735 case TARGET_NR_delete_module
:
9736 #ifdef TARGET_NR_get_kernel_syms
9737 case TARGET_NR_get_kernel_syms
:
9740 case TARGET_NR_quotactl
:
9742 case TARGET_NR_getpgid
:
9743 ret
= get_errno(getpgid(arg1
));
9745 case TARGET_NR_fchdir
:
9746 ret
= get_errno(fchdir(arg1
));
9748 #ifdef TARGET_NR_bdflush /* not on x86_64 */
9749 case TARGET_NR_bdflush
:
9752 #ifdef TARGET_NR_sysfs
9753 case TARGET_NR_sysfs
:
9756 case TARGET_NR_personality
:
9757 ret
= get_errno(personality(arg1
));
9759 #ifdef TARGET_NR_afs_syscall
9760 case TARGET_NR_afs_syscall
:
9763 #ifdef TARGET_NR__llseek /* Not on alpha */
9764 case TARGET_NR__llseek
:
9767 #if !defined(__NR_llseek)
9768 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
9770 ret
= get_errno(res
);
9775 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
9777 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
9783 #ifdef TARGET_NR_getdents
9784 case TARGET_NR_getdents
:
9785 #ifdef __NR_getdents
9786 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9788 struct target_dirent
*target_dirp
;
9789 struct linux_dirent
*dirp
;
9790 abi_long count
= arg3
;
9792 dirp
= g_try_malloc(count
);
9794 ret
= -TARGET_ENOMEM
;
9798 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9799 if (!is_error(ret
)) {
9800 struct linux_dirent
*de
;
9801 struct target_dirent
*tde
;
9803 int reclen
, treclen
;
9804 int count1
, tnamelen
;
9808 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9812 reclen
= de
->d_reclen
;
9813 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
9814 assert(tnamelen
>= 0);
9815 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
9816 assert(count1
+ treclen
<= count
);
9817 tde
->d_reclen
= tswap16(treclen
);
9818 tde
->d_ino
= tswapal(de
->d_ino
);
9819 tde
->d_off
= tswapal(de
->d_off
);
9820 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
9821 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9823 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9827 unlock_user(target_dirp
, arg2
, ret
);
9833 struct linux_dirent
*dirp
;
9834 abi_long count
= arg3
;
9836 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9838 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9839 if (!is_error(ret
)) {
9840 struct linux_dirent
*de
;
9845 reclen
= de
->d_reclen
;
9848 de
->d_reclen
= tswap16(reclen
);
9849 tswapls(&de
->d_ino
);
9850 tswapls(&de
->d_off
);
9851 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9855 unlock_user(dirp
, arg2
, ret
);
9859 /* Implement getdents in terms of getdents64 */
9861 struct linux_dirent64
*dirp
;
9862 abi_long count
= arg3
;
9864 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
9868 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9869 if (!is_error(ret
)) {
9870 /* Convert the dirent64 structs to target dirent. We do this
9871 * in-place, since we can guarantee that a target_dirent is no
9872 * larger than a dirent64; however this means we have to be
9873 * careful to read everything before writing in the new format.
9875 struct linux_dirent64
*de
;
9876 struct target_dirent
*tde
;
9881 tde
= (struct target_dirent
*)dirp
;
9883 int namelen
, treclen
;
9884 int reclen
= de
->d_reclen
;
9885 uint64_t ino
= de
->d_ino
;
9886 int64_t off
= de
->d_off
;
9887 uint8_t type
= de
->d_type
;
9889 namelen
= strlen(de
->d_name
);
9890 treclen
= offsetof(struct target_dirent
, d_name
)
9892 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
9894 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
9895 tde
->d_ino
= tswapal(ino
);
9896 tde
->d_off
= tswapal(off
);
9897 tde
->d_reclen
= tswap16(treclen
);
9898 /* The target_dirent type is in what was formerly a padding
9899 * byte at the end of the structure:
9901 *(((char *)tde
) + treclen
- 1) = type
;
9903 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9904 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9910 unlock_user(dirp
, arg2
, ret
);
9914 #endif /* TARGET_NR_getdents */
9915 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9916 case TARGET_NR_getdents64
:
9918 struct linux_dirent64
*dirp
;
9919 abi_long count
= arg3
;
9920 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9922 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9923 if (!is_error(ret
)) {
9924 struct linux_dirent64
*de
;
9929 reclen
= de
->d_reclen
;
9932 de
->d_reclen
= tswap16(reclen
);
9933 tswap64s((uint64_t *)&de
->d_ino
);
9934 tswap64s((uint64_t *)&de
->d_off
);
9935 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9939 unlock_user(dirp
, arg2
, ret
);
9942 #endif /* TARGET_NR_getdents64 */
9943 #if defined(TARGET_NR__newselect)
9944 case TARGET_NR__newselect
:
9945 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9948 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9949 # ifdef TARGET_NR_poll
9950 case TARGET_NR_poll
:
9952 # ifdef TARGET_NR_ppoll
9953 case TARGET_NR_ppoll
:
9956 struct target_pollfd
*target_pfd
;
9957 unsigned int nfds
= arg2
;
9964 if (nfds
> (INT_MAX
/ sizeof(struct target_pollfd
))) {
9965 ret
= -TARGET_EINVAL
;
9969 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
9970 sizeof(struct target_pollfd
) * nfds
, 1);
9975 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
9976 for (i
= 0; i
< nfds
; i
++) {
9977 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
9978 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
9983 # ifdef TARGET_NR_ppoll
9984 case TARGET_NR_ppoll
:
9986 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
9987 target_sigset_t
*target_set
;
9988 sigset_t _set
, *set
= &_set
;
9991 if (target_to_host_timespec(timeout_ts
, arg3
)) {
9992 unlock_user(target_pfd
, arg1
, 0);
10000 if (arg5
!= sizeof(target_sigset_t
)) {
10001 unlock_user(target_pfd
, arg1
, 0);
10002 ret
= -TARGET_EINVAL
;
10006 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
10008 unlock_user(target_pfd
, arg1
, 0);
10011 target_to_host_sigset(set
, target_set
);
10016 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
10017 set
, SIGSET_T_SIZE
));
10019 if (!is_error(ret
) && arg3
) {
10020 host_to_target_timespec(arg3
, timeout_ts
);
10023 unlock_user(target_set
, arg4
, 0);
10028 # ifdef TARGET_NR_poll
10029 case TARGET_NR_poll
:
10031 struct timespec ts
, *pts
;
10034 /* Convert ms to secs, ns */
10035 ts
.tv_sec
= arg3
/ 1000;
10036 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
10039 /* -ve poll() timeout means "infinite" */
10042 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
10047 g_assert_not_reached();
10050 if (!is_error(ret
)) {
10051 for(i
= 0; i
< nfds
; i
++) {
10052 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
10055 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
10059 case TARGET_NR_flock
:
10060 /* NOTE: the flock constant seems to be the same for every
10062 ret
= get_errno(safe_flock(arg1
, arg2
));
10064 case TARGET_NR_readv
:
10066 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10068 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
10069 unlock_iovec(vec
, arg2
, arg3
, 1);
10071 ret
= -host_to_target_errno(errno
);
10075 case TARGET_NR_writev
:
10077 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10079 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
10080 unlock_iovec(vec
, arg2
, arg3
, 0);
10082 ret
= -host_to_target_errno(errno
);
10086 #if defined(TARGET_NR_preadv)
10087 case TARGET_NR_preadv
:
10089 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10091 ret
= get_errno(safe_preadv(arg1
, vec
, arg3
, arg4
, arg5
));
10092 unlock_iovec(vec
, arg2
, arg3
, 1);
10094 ret
= -host_to_target_errno(errno
);
10099 #if defined(TARGET_NR_pwritev)
10100 case TARGET_NR_pwritev
:
10102 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10104 ret
= get_errno(safe_pwritev(arg1
, vec
, arg3
, arg4
, arg5
));
10105 unlock_iovec(vec
, arg2
, arg3
, 0);
10107 ret
= -host_to_target_errno(errno
);
10112 case TARGET_NR_getsid
:
10113 ret
= get_errno(getsid(arg1
));
10115 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10116 case TARGET_NR_fdatasync
:
10117 ret
= get_errno(fdatasync(arg1
));
10120 #ifdef TARGET_NR__sysctl
10121 case TARGET_NR__sysctl
:
10122 /* We don't implement this, but ENOTDIR is always a safe
10124 ret
= -TARGET_ENOTDIR
;
10127 case TARGET_NR_sched_getaffinity
:
10129 unsigned int mask_size
;
10130 unsigned long *mask
;
10133 * sched_getaffinity needs multiples of ulong, so need to take
10134 * care of mismatches between target ulong and host ulong sizes.
10136 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10137 ret
= -TARGET_EINVAL
;
10140 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10142 mask
= alloca(mask_size
);
10143 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
10145 if (!is_error(ret
)) {
10147 /* More data returned than the caller's buffer will fit.
10148 * This only happens if sizeof(abi_long) < sizeof(long)
10149 * and the caller passed us a buffer holding an odd number
10150 * of abi_longs. If the host kernel is actually using the
10151 * extra 4 bytes then fail EINVAL; otherwise we can just
10152 * ignore them and only copy the interesting part.
10154 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
10155 if (numcpus
> arg2
* 8) {
10156 ret
= -TARGET_EINVAL
;
10162 if (copy_to_user(arg3
, mask
, ret
)) {
10168 case TARGET_NR_sched_setaffinity
:
10170 unsigned int mask_size
;
10171 unsigned long *mask
;
10174 * sched_setaffinity needs multiples of ulong, so need to take
10175 * care of mismatches between target ulong and host ulong sizes.
10177 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10178 ret
= -TARGET_EINVAL
;
10181 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10183 mask
= alloca(mask_size
);
10184 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
10187 memcpy(mask
, p
, arg2
);
10188 unlock_user_struct(p
, arg2
, 0);
10190 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
10193 case TARGET_NR_sched_setparam
:
10195 struct sched_param
*target_schp
;
10196 struct sched_param schp
;
10199 return -TARGET_EINVAL
;
10201 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
10203 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10204 unlock_user_struct(target_schp
, arg2
, 0);
10205 ret
= get_errno(sched_setparam(arg1
, &schp
));
10208 case TARGET_NR_sched_getparam
:
10210 struct sched_param
*target_schp
;
10211 struct sched_param schp
;
10214 return -TARGET_EINVAL
;
10216 ret
= get_errno(sched_getparam(arg1
, &schp
));
10217 if (!is_error(ret
)) {
10218 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
10220 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
10221 unlock_user_struct(target_schp
, arg2
, 1);
10225 case TARGET_NR_sched_setscheduler
:
10227 struct sched_param
*target_schp
;
10228 struct sched_param schp
;
10230 return -TARGET_EINVAL
;
10232 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
10234 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10235 unlock_user_struct(target_schp
, arg3
, 0);
10236 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
10239 case TARGET_NR_sched_getscheduler
:
10240 ret
= get_errno(sched_getscheduler(arg1
));
10242 case TARGET_NR_sched_yield
:
10243 ret
= get_errno(sched_yield());
10245 case TARGET_NR_sched_get_priority_max
:
10246 ret
= get_errno(sched_get_priority_max(arg1
));
10248 case TARGET_NR_sched_get_priority_min
:
10249 ret
= get_errno(sched_get_priority_min(arg1
));
10251 case TARGET_NR_sched_rr_get_interval
:
10253 struct timespec ts
;
10254 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10255 if (!is_error(ret
)) {
10256 ret
= host_to_target_timespec(arg2
, &ts
);
10260 case TARGET_NR_nanosleep
:
10262 struct timespec req
, rem
;
10263 target_to_host_timespec(&req
, arg1
);
10264 ret
= get_errno(safe_nanosleep(&req
, &rem
));
10265 if (is_error(ret
) && arg2
) {
10266 host_to_target_timespec(arg2
, &rem
);
10270 #ifdef TARGET_NR_query_module
10271 case TARGET_NR_query_module
:
10272 goto unimplemented
;
10274 #ifdef TARGET_NR_nfsservctl
10275 case TARGET_NR_nfsservctl
:
10276 goto unimplemented
;
10278 case TARGET_NR_prctl
:
10280 case PR_GET_PDEATHSIG
:
10283 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
10284 if (!is_error(ret
) && arg2
10285 && put_user_ual(deathsig
, arg2
)) {
10293 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
10297 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10298 arg3
, arg4
, arg5
));
10299 unlock_user(name
, arg2
, 16);
10304 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
10308 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10309 arg3
, arg4
, arg5
));
10310 unlock_user(name
, arg2
, 0);
10315 /* Most prctl options have no pointer arguments */
10316 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
10320 #ifdef TARGET_NR_arch_prctl
10321 case TARGET_NR_arch_prctl
:
10322 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10323 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
10326 goto unimplemented
;
10329 #ifdef TARGET_NR_pread64
10330 case TARGET_NR_pread64
:
10331 if (regpairs_aligned(cpu_env
)) {
10335 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
10337 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10338 unlock_user(p
, arg2
, ret
);
10340 case TARGET_NR_pwrite64
:
10341 if (regpairs_aligned(cpu_env
)) {
10345 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
10347 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10348 unlock_user(p
, arg2
, 0);
10351 case TARGET_NR_getcwd
:
10352 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
10354 ret
= get_errno(sys_getcwd1(p
, arg2
));
10355 unlock_user(p
, arg1
, ret
);
10357 case TARGET_NR_capget
:
10358 case TARGET_NR_capset
:
10360 struct target_user_cap_header
*target_header
;
10361 struct target_user_cap_data
*target_data
= NULL
;
10362 struct __user_cap_header_struct header
;
10363 struct __user_cap_data_struct data
[2];
10364 struct __user_cap_data_struct
*dataptr
= NULL
;
10365 int i
, target_datalen
;
10366 int data_items
= 1;
10368 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
10371 header
.version
= tswap32(target_header
->version
);
10372 header
.pid
= tswap32(target_header
->pid
);
10374 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
10375 /* Version 2 and up takes pointer to two user_data structs */
10379 target_datalen
= sizeof(*target_data
) * data_items
;
10382 if (num
== TARGET_NR_capget
) {
10383 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
10385 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
10387 if (!target_data
) {
10388 unlock_user_struct(target_header
, arg1
, 0);
10392 if (num
== TARGET_NR_capset
) {
10393 for (i
= 0; i
< data_items
; i
++) {
10394 data
[i
].effective
= tswap32(target_data
[i
].effective
);
10395 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
10396 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
10403 if (num
== TARGET_NR_capget
) {
10404 ret
= get_errno(capget(&header
, dataptr
));
10406 ret
= get_errno(capset(&header
, dataptr
));
10409 /* The kernel always updates version for both capget and capset */
10410 target_header
->version
= tswap32(header
.version
);
10411 unlock_user_struct(target_header
, arg1
, 1);
10414 if (num
== TARGET_NR_capget
) {
10415 for (i
= 0; i
< data_items
; i
++) {
10416 target_data
[i
].effective
= tswap32(data
[i
].effective
);
10417 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
10418 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
10420 unlock_user(target_data
, arg2
, target_datalen
);
10422 unlock_user(target_data
, arg2
, 0);
10427 case TARGET_NR_sigaltstack
:
10428 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
10431 #ifdef CONFIG_SENDFILE
10432 case TARGET_NR_sendfile
:
10434 off_t
*offp
= NULL
;
10437 ret
= get_user_sal(off
, arg3
);
10438 if (is_error(ret
)) {
10443 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10444 if (!is_error(ret
) && arg3
) {
10445 abi_long ret2
= put_user_sal(off
, arg3
);
10446 if (is_error(ret2
)) {
10452 #ifdef TARGET_NR_sendfile64
10453 case TARGET_NR_sendfile64
:
10455 off_t
*offp
= NULL
;
10458 ret
= get_user_s64(off
, arg3
);
10459 if (is_error(ret
)) {
10464 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10465 if (!is_error(ret
) && arg3
) {
10466 abi_long ret2
= put_user_s64(off
, arg3
);
10467 if (is_error(ret2
)) {
10475 case TARGET_NR_sendfile
:
10476 #ifdef TARGET_NR_sendfile64
10477 case TARGET_NR_sendfile64
:
10479 goto unimplemented
;
10482 #ifdef TARGET_NR_getpmsg
10483 case TARGET_NR_getpmsg
:
10484 goto unimplemented
;
10486 #ifdef TARGET_NR_putpmsg
10487 case TARGET_NR_putpmsg
:
10488 goto unimplemented
;
10490 #ifdef TARGET_NR_vfork
10491 case TARGET_NR_vfork
:
10492 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
10496 #ifdef TARGET_NR_ugetrlimit
10497 case TARGET_NR_ugetrlimit
:
10499 struct rlimit rlim
;
10500 int resource
= target_to_host_resource(arg1
);
10501 ret
= get_errno(getrlimit(resource
, &rlim
));
10502 if (!is_error(ret
)) {
10503 struct target_rlimit
*target_rlim
;
10504 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
10506 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
10507 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
10508 unlock_user_struct(target_rlim
, arg2
, 1);
10513 #ifdef TARGET_NR_truncate64
10514 case TARGET_NR_truncate64
:
10515 if (!(p
= lock_user_string(arg1
)))
10517 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
10518 unlock_user(p
, arg1
, 0);
10521 #ifdef TARGET_NR_ftruncate64
10522 case TARGET_NR_ftruncate64
:
10523 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
10526 #ifdef TARGET_NR_stat64
10527 case TARGET_NR_stat64
:
10528 if (!(p
= lock_user_string(arg1
)))
10530 ret
= get_errno(stat(path(p
), &st
));
10531 unlock_user(p
, arg1
, 0);
10532 if (!is_error(ret
))
10533 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10536 #ifdef TARGET_NR_lstat64
10537 case TARGET_NR_lstat64
:
10538 if (!(p
= lock_user_string(arg1
)))
10540 ret
= get_errno(lstat(path(p
), &st
));
10541 unlock_user(p
, arg1
, 0);
10542 if (!is_error(ret
))
10543 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10546 #ifdef TARGET_NR_fstat64
10547 case TARGET_NR_fstat64
:
10548 ret
= get_errno(fstat(arg1
, &st
));
10549 if (!is_error(ret
))
10550 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10553 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10554 #ifdef TARGET_NR_fstatat64
10555 case TARGET_NR_fstatat64
:
10557 #ifdef TARGET_NR_newfstatat
10558 case TARGET_NR_newfstatat
:
10560 if (!(p
= lock_user_string(arg2
)))
10562 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
10563 if (!is_error(ret
))
10564 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
10567 #ifdef TARGET_NR_lchown
10568 case TARGET_NR_lchown
:
10569 if (!(p
= lock_user_string(arg1
)))
10571 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10572 unlock_user(p
, arg1
, 0);
10575 #ifdef TARGET_NR_getuid
10576 case TARGET_NR_getuid
:
10577 ret
= get_errno(high2lowuid(getuid()));
10580 #ifdef TARGET_NR_getgid
10581 case TARGET_NR_getgid
:
10582 ret
= get_errno(high2lowgid(getgid()));
10585 #ifdef TARGET_NR_geteuid
10586 case TARGET_NR_geteuid
:
10587 ret
= get_errno(high2lowuid(geteuid()));
10590 #ifdef TARGET_NR_getegid
10591 case TARGET_NR_getegid
:
10592 ret
= get_errno(high2lowgid(getegid()));
10595 case TARGET_NR_setreuid
:
10596 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
10598 case TARGET_NR_setregid
:
10599 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
10601 case TARGET_NR_getgroups
:
10603 int gidsetsize
= arg1
;
10604 target_id
*target_grouplist
;
10608 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10609 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10610 if (gidsetsize
== 0)
10612 if (!is_error(ret
)) {
10613 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
10614 if (!target_grouplist
)
10616 for(i
= 0;i
< ret
; i
++)
10617 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
10618 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
10622 case TARGET_NR_setgroups
:
10624 int gidsetsize
= arg1
;
10625 target_id
*target_grouplist
;
10626 gid_t
*grouplist
= NULL
;
10629 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10630 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
10631 if (!target_grouplist
) {
10632 ret
= -TARGET_EFAULT
;
10635 for (i
= 0; i
< gidsetsize
; i
++) {
10636 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
10638 unlock_user(target_grouplist
, arg2
, 0);
10640 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
10643 case TARGET_NR_fchown
:
10644 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
10646 #if defined(TARGET_NR_fchownat)
10647 case TARGET_NR_fchownat
:
10648 if (!(p
= lock_user_string(arg2
)))
10650 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
10651 low2highgid(arg4
), arg5
));
10652 unlock_user(p
, arg2
, 0);
10655 #ifdef TARGET_NR_setresuid
10656 case TARGET_NR_setresuid
:
10657 ret
= get_errno(sys_setresuid(low2highuid(arg1
),
10659 low2highuid(arg3
)));
10662 #ifdef TARGET_NR_getresuid
10663 case TARGET_NR_getresuid
:
10665 uid_t ruid
, euid
, suid
;
10666 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10667 if (!is_error(ret
)) {
10668 if (put_user_id(high2lowuid(ruid
), arg1
)
10669 || put_user_id(high2lowuid(euid
), arg2
)
10670 || put_user_id(high2lowuid(suid
), arg3
))
10676 #ifdef TARGET_NR_getresgid
10677 case TARGET_NR_setresgid
:
10678 ret
= get_errno(sys_setresgid(low2highgid(arg1
),
10680 low2highgid(arg3
)));
10683 #ifdef TARGET_NR_getresgid
10684 case TARGET_NR_getresgid
:
10686 gid_t rgid
, egid
, sgid
;
10687 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10688 if (!is_error(ret
)) {
10689 if (put_user_id(high2lowgid(rgid
), arg1
)
10690 || put_user_id(high2lowgid(egid
), arg2
)
10691 || put_user_id(high2lowgid(sgid
), arg3
))
10697 #ifdef TARGET_NR_chown
10698 case TARGET_NR_chown
:
10699 if (!(p
= lock_user_string(arg1
)))
10701 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10702 unlock_user(p
, arg1
, 0);
10705 case TARGET_NR_setuid
:
10706 ret
= get_errno(sys_setuid(low2highuid(arg1
)));
10708 case TARGET_NR_setgid
:
10709 ret
= get_errno(sys_setgid(low2highgid(arg1
)));
10711 case TARGET_NR_setfsuid
:
10712 ret
= get_errno(setfsuid(arg1
));
10714 case TARGET_NR_setfsgid
:
10715 ret
= get_errno(setfsgid(arg1
));
10718 #ifdef TARGET_NR_lchown32
10719 case TARGET_NR_lchown32
:
10720 if (!(p
= lock_user_string(arg1
)))
10722 ret
= get_errno(lchown(p
, arg2
, arg3
));
10723 unlock_user(p
, arg1
, 0);
10726 #ifdef TARGET_NR_getuid32
10727 case TARGET_NR_getuid32
:
10728 ret
= get_errno(getuid());
10732 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10733 /* Alpha specific */
10734 case TARGET_NR_getxuid
:
10738 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
10740 ret
= get_errno(getuid());
10743 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10744 /* Alpha specific */
10745 case TARGET_NR_getxgid
:
10749 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
10751 ret
= get_errno(getgid());
10754 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10755 /* Alpha specific */
10756 case TARGET_NR_osf_getsysinfo
:
10757 ret
= -TARGET_EOPNOTSUPP
;
10759 case TARGET_GSI_IEEE_FP_CONTROL
:
10761 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
10763 /* Copied from linux ieee_fpcr_to_swcr. */
10764 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
10765 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
10766 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
10767 | SWCR_TRAP_ENABLE_DZE
10768 | SWCR_TRAP_ENABLE_OVF
);
10769 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
10770 | SWCR_TRAP_ENABLE_INE
);
10771 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
10772 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
10774 if (put_user_u64 (swcr
, arg2
))
10780 /* case GSI_IEEE_STATE_AT_SIGNAL:
10781 -- Not implemented in linux kernel.
10783 -- Retrieves current unaligned access state; not much used.
10784 case GSI_PROC_TYPE:
10785 -- Retrieves implver information; surely not used.
10786 case GSI_GET_HWRPB:
10787 -- Grabs a copy of the HWRPB; surely not used.
10792 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10793 /* Alpha specific */
10794 case TARGET_NR_osf_setsysinfo
:
10795 ret
= -TARGET_EOPNOTSUPP
;
10797 case TARGET_SSI_IEEE_FP_CONTROL
:
10799 uint64_t swcr
, fpcr
, orig_fpcr
;
10801 if (get_user_u64 (swcr
, arg2
)) {
10804 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10805 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
10807 /* Copied from linux ieee_swcr_to_fpcr. */
10808 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
10809 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
10810 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
10811 | SWCR_TRAP_ENABLE_DZE
10812 | SWCR_TRAP_ENABLE_OVF
)) << 48;
10813 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
10814 | SWCR_TRAP_ENABLE_INE
)) << 57;
10815 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
10816 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
10818 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10823 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
10825 uint64_t exc
, fpcr
, orig_fpcr
;
10828 if (get_user_u64(exc
, arg2
)) {
10832 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10834 /* We only add to the exception status here. */
10835 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
10837 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10840 /* Old exceptions are not signaled. */
10841 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
10843 /* If any exceptions set by this call,
10844 and are unmasked, send a signal. */
10846 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
10847 si_code
= TARGET_FPE_FLTRES
;
10849 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
10850 si_code
= TARGET_FPE_FLTUND
;
10852 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
10853 si_code
= TARGET_FPE_FLTOVF
;
10855 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
10856 si_code
= TARGET_FPE_FLTDIV
;
10858 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
10859 si_code
= TARGET_FPE_FLTINV
;
10861 if (si_code
!= 0) {
10862 target_siginfo_t info
;
10863 info
.si_signo
= SIGFPE
;
10865 info
.si_code
= si_code
;
10866 info
._sifields
._sigfault
._addr
10867 = ((CPUArchState
*)cpu_env
)->pc
;
10868 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
10869 QEMU_SI_FAULT
, &info
);
10874 /* case SSI_NVPAIRS:
10875 -- Used with SSIN_UACPROC to enable unaligned accesses.
10876 case SSI_IEEE_STATE_AT_SIGNAL:
10877 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10878 -- Not implemented in linux kernel
10883 #ifdef TARGET_NR_osf_sigprocmask
10884 /* Alpha specific. */
10885 case TARGET_NR_osf_sigprocmask
:
10889 sigset_t set
, oldset
;
10892 case TARGET_SIG_BLOCK
:
10895 case TARGET_SIG_UNBLOCK
:
10898 case TARGET_SIG_SETMASK
:
10902 ret
= -TARGET_EINVAL
;
10906 target_to_host_old_sigset(&set
, &mask
);
10907 ret
= do_sigprocmask(how
, &set
, &oldset
);
10909 host_to_target_old_sigset(&mask
, &oldset
);
10916 #ifdef TARGET_NR_getgid32
10917 case TARGET_NR_getgid32
:
10918 ret
= get_errno(getgid());
10921 #ifdef TARGET_NR_geteuid32
10922 case TARGET_NR_geteuid32
:
10923 ret
= get_errno(geteuid());
10926 #ifdef TARGET_NR_getegid32
10927 case TARGET_NR_getegid32
:
10928 ret
= get_errno(getegid());
10931 #ifdef TARGET_NR_setreuid32
10932 case TARGET_NR_setreuid32
:
10933 ret
= get_errno(setreuid(arg1
, arg2
));
10936 #ifdef TARGET_NR_setregid32
10937 case TARGET_NR_setregid32
:
10938 ret
= get_errno(setregid(arg1
, arg2
));
10941 #ifdef TARGET_NR_getgroups32
10942 case TARGET_NR_getgroups32
:
10944 int gidsetsize
= arg1
;
10945 uint32_t *target_grouplist
;
10949 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10950 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10951 if (gidsetsize
== 0)
10953 if (!is_error(ret
)) {
10954 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
10955 if (!target_grouplist
) {
10956 ret
= -TARGET_EFAULT
;
10959 for(i
= 0;i
< ret
; i
++)
10960 target_grouplist
[i
] = tswap32(grouplist
[i
]);
10961 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
10966 #ifdef TARGET_NR_setgroups32
10967 case TARGET_NR_setgroups32
:
10969 int gidsetsize
= arg1
;
10970 uint32_t *target_grouplist
;
10974 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10975 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
10976 if (!target_grouplist
) {
10977 ret
= -TARGET_EFAULT
;
10980 for(i
= 0;i
< gidsetsize
; i
++)
10981 grouplist
[i
] = tswap32(target_grouplist
[i
]);
10982 unlock_user(target_grouplist
, arg2
, 0);
10983 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
10987 #ifdef TARGET_NR_fchown32
10988 case TARGET_NR_fchown32
:
10989 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
10992 #ifdef TARGET_NR_setresuid32
10993 case TARGET_NR_setresuid32
:
10994 ret
= get_errno(sys_setresuid(arg1
, arg2
, arg3
));
10997 #ifdef TARGET_NR_getresuid32
10998 case TARGET_NR_getresuid32
:
11000 uid_t ruid
, euid
, suid
;
11001 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11002 if (!is_error(ret
)) {
11003 if (put_user_u32(ruid
, arg1
)
11004 || put_user_u32(euid
, arg2
)
11005 || put_user_u32(suid
, arg3
))
11011 #ifdef TARGET_NR_setresgid32
11012 case TARGET_NR_setresgid32
:
11013 ret
= get_errno(sys_setresgid(arg1
, arg2
, arg3
));
11016 #ifdef TARGET_NR_getresgid32
11017 case TARGET_NR_getresgid32
:
11019 gid_t rgid
, egid
, sgid
;
11020 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11021 if (!is_error(ret
)) {
11022 if (put_user_u32(rgid
, arg1
)
11023 || put_user_u32(egid
, arg2
)
11024 || put_user_u32(sgid
, arg3
))
11030 #ifdef TARGET_NR_chown32
11031 case TARGET_NR_chown32
:
11032 if (!(p
= lock_user_string(arg1
)))
11034 ret
= get_errno(chown(p
, arg2
, arg3
));
11035 unlock_user(p
, arg1
, 0);
11038 #ifdef TARGET_NR_setuid32
11039 case TARGET_NR_setuid32
:
11040 ret
= get_errno(sys_setuid(arg1
));
11043 #ifdef TARGET_NR_setgid32
11044 case TARGET_NR_setgid32
:
11045 ret
= get_errno(sys_setgid(arg1
));
11048 #ifdef TARGET_NR_setfsuid32
11049 case TARGET_NR_setfsuid32
:
11050 ret
= get_errno(setfsuid(arg1
));
11053 #ifdef TARGET_NR_setfsgid32
11054 case TARGET_NR_setfsgid32
:
11055 ret
= get_errno(setfsgid(arg1
));
11059 case TARGET_NR_pivot_root
:
11060 goto unimplemented
;
11061 #ifdef TARGET_NR_mincore
11062 case TARGET_NR_mincore
:
11065 ret
= -TARGET_EFAULT
;
11066 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
11068 if (!(p
= lock_user_string(arg3
)))
11070 ret
= get_errno(mincore(a
, arg2
, p
));
11071 unlock_user(p
, arg3
, ret
);
11073 unlock_user(a
, arg1
, 0);
11077 #ifdef TARGET_NR_arm_fadvise64_64
11078 case TARGET_NR_arm_fadvise64_64
:
11079 /* arm_fadvise64_64 looks like fadvise64_64 but
11080 * with different argument order: fd, advice, offset, len
11081 * rather than the usual fd, offset, len, advice.
11082 * Note that offset and len are both 64-bit so appear as
11083 * pairs of 32-bit registers.
11085 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
11086 target_offset64(arg5
, arg6
), arg2
);
11087 ret
= -host_to_target_errno(ret
);
11091 #if TARGET_ABI_BITS == 32
11093 #ifdef TARGET_NR_fadvise64_64
11094 case TARGET_NR_fadvise64_64
:
11095 /* 6 args: fd, offset (high, low), len (high, low), advice */
11096 if (regpairs_aligned(cpu_env
)) {
11097 /* offset is in (3,4), len in (5,6) and advice in 7 */
11104 ret
= -host_to_target_errno(posix_fadvise(arg1
,
11105 target_offset64(arg2
, arg3
),
11106 target_offset64(arg4
, arg5
),
11111 #ifdef TARGET_NR_fadvise64
11112 case TARGET_NR_fadvise64
:
11113 /* 5 args: fd, offset (high, low), len, advice */
11114 if (regpairs_aligned(cpu_env
)) {
11115 /* offset is in (3,4), len in 5 and advice in 6 */
11121 ret
= -host_to_target_errno(posix_fadvise(arg1
,
11122 target_offset64(arg2
, arg3
),
11127 #else /* not a 32-bit ABI */
11128 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11129 #ifdef TARGET_NR_fadvise64_64
11130 case TARGET_NR_fadvise64_64
:
11132 #ifdef TARGET_NR_fadvise64
11133 case TARGET_NR_fadvise64
:
11135 #ifdef TARGET_S390X
11137 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
11138 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
11139 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
11140 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
11144 ret
= -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
11147 #endif /* end of 64-bit ABI fadvise handling */
11149 #ifdef TARGET_NR_madvise
11150 case TARGET_NR_madvise
:
11151 /* A straight passthrough may not be safe because qemu sometimes
11152 turns private file-backed mappings into anonymous mappings.
11153 This will break MADV_DONTNEED.
11154 This is a hint, so ignoring and returning success is ok. */
11155 ret
= get_errno(0);
11158 #if TARGET_ABI_BITS == 32
11159 case TARGET_NR_fcntl64
:
11163 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
11164 to_flock64_fn
*copyto
= copy_to_user_flock64
;
11167 if (((CPUARMState
*)cpu_env
)->eabi
) {
11168 copyfrom
= copy_from_user_eabi_flock64
;
11169 copyto
= copy_to_user_eabi_flock64
;
11173 cmd
= target_to_host_fcntl_cmd(arg2
);
11174 if (cmd
== -TARGET_EINVAL
) {
11180 case TARGET_F_GETLK64
:
11181 ret
= copyfrom(&fl
, arg3
);
11185 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
11187 ret
= copyto(arg3
, &fl
);
11191 case TARGET_F_SETLK64
:
11192 case TARGET_F_SETLKW64
:
11193 ret
= copyfrom(&fl
, arg3
);
11197 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11200 ret
= do_fcntl(arg1
, arg2
, arg3
);
11206 #ifdef TARGET_NR_cacheflush
11207 case TARGET_NR_cacheflush
:
11208 /* self-modifying code is handled automatically, so nothing needed */
11212 #ifdef TARGET_NR_security
11213 case TARGET_NR_security
:
11214 goto unimplemented
;
11216 #ifdef TARGET_NR_getpagesize
11217 case TARGET_NR_getpagesize
:
11218 ret
= TARGET_PAGE_SIZE
;
11221 case TARGET_NR_gettid
:
11222 ret
= get_errno(gettid());
11224 #ifdef TARGET_NR_readahead
11225 case TARGET_NR_readahead
:
11226 #if TARGET_ABI_BITS == 32
11227 if (regpairs_aligned(cpu_env
)) {
11232 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
11234 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
11239 #ifdef TARGET_NR_setxattr
11240 case TARGET_NR_listxattr
:
11241 case TARGET_NR_llistxattr
:
11245 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11247 ret
= -TARGET_EFAULT
;
11251 p
= lock_user_string(arg1
);
11253 if (num
== TARGET_NR_listxattr
) {
11254 ret
= get_errno(listxattr(p
, b
, arg3
));
11256 ret
= get_errno(llistxattr(p
, b
, arg3
));
11259 ret
= -TARGET_EFAULT
;
11261 unlock_user(p
, arg1
, 0);
11262 unlock_user(b
, arg2
, arg3
);
11265 case TARGET_NR_flistxattr
:
11269 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11271 ret
= -TARGET_EFAULT
;
11275 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
11276 unlock_user(b
, arg2
, arg3
);
11279 case TARGET_NR_setxattr
:
11280 case TARGET_NR_lsetxattr
:
11282 void *p
, *n
, *v
= 0;
11284 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11286 ret
= -TARGET_EFAULT
;
11290 p
= lock_user_string(arg1
);
11291 n
= lock_user_string(arg2
);
11293 if (num
== TARGET_NR_setxattr
) {
11294 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
11296 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
11299 ret
= -TARGET_EFAULT
;
11301 unlock_user(p
, arg1
, 0);
11302 unlock_user(n
, arg2
, 0);
11303 unlock_user(v
, arg3
, 0);
11306 case TARGET_NR_fsetxattr
:
11310 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11312 ret
= -TARGET_EFAULT
;
11316 n
= lock_user_string(arg2
);
11318 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
11320 ret
= -TARGET_EFAULT
;
11322 unlock_user(n
, arg2
, 0);
11323 unlock_user(v
, arg3
, 0);
11326 case TARGET_NR_getxattr
:
11327 case TARGET_NR_lgetxattr
:
11329 void *p
, *n
, *v
= 0;
11331 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11333 ret
= -TARGET_EFAULT
;
11337 p
= lock_user_string(arg1
);
11338 n
= lock_user_string(arg2
);
11340 if (num
== TARGET_NR_getxattr
) {
11341 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
11343 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
11346 ret
= -TARGET_EFAULT
;
11348 unlock_user(p
, arg1
, 0);
11349 unlock_user(n
, arg2
, 0);
11350 unlock_user(v
, arg3
, arg4
);
11353 case TARGET_NR_fgetxattr
:
11357 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11359 ret
= -TARGET_EFAULT
;
11363 n
= lock_user_string(arg2
);
11365 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
11367 ret
= -TARGET_EFAULT
;
11369 unlock_user(n
, arg2
, 0);
11370 unlock_user(v
, arg3
, arg4
);
11373 case TARGET_NR_removexattr
:
11374 case TARGET_NR_lremovexattr
:
11377 p
= lock_user_string(arg1
);
11378 n
= lock_user_string(arg2
);
11380 if (num
== TARGET_NR_removexattr
) {
11381 ret
= get_errno(removexattr(p
, n
));
11383 ret
= get_errno(lremovexattr(p
, n
));
11386 ret
= -TARGET_EFAULT
;
11388 unlock_user(p
, arg1
, 0);
11389 unlock_user(n
, arg2
, 0);
11392 case TARGET_NR_fremovexattr
:
11395 n
= lock_user_string(arg2
);
11397 ret
= get_errno(fremovexattr(arg1
, n
));
11399 ret
= -TARGET_EFAULT
;
11401 unlock_user(n
, arg2
, 0);
11405 #endif /* CONFIG_ATTR */
11406 #ifdef TARGET_NR_set_thread_area
11407 case TARGET_NR_set_thread_area
:
11408 #if defined(TARGET_MIPS)
11409 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
11412 #elif defined(TARGET_CRIS)
11414 ret
= -TARGET_EINVAL
;
11416 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
11420 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11421 ret
= do_set_thread_area(cpu_env
, arg1
);
11423 #elif defined(TARGET_M68K)
11425 TaskState
*ts
= cpu
->opaque
;
11426 ts
->tp_value
= arg1
;
11431 goto unimplemented_nowarn
;
11434 #ifdef TARGET_NR_get_thread_area
11435 case TARGET_NR_get_thread_area
:
11436 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11437 ret
= do_get_thread_area(cpu_env
, arg1
);
11439 #elif defined(TARGET_M68K)
11441 TaskState
*ts
= cpu
->opaque
;
11442 ret
= ts
->tp_value
;
11446 goto unimplemented_nowarn
;
11449 #ifdef TARGET_NR_getdomainname
11450 case TARGET_NR_getdomainname
:
11451 goto unimplemented_nowarn
;
11454 #ifdef TARGET_NR_clock_gettime
11455 case TARGET_NR_clock_gettime
:
11457 struct timespec ts
;
11458 ret
= get_errno(clock_gettime(arg1
, &ts
));
11459 if (!is_error(ret
)) {
11460 host_to_target_timespec(arg2
, &ts
);
11465 #ifdef TARGET_NR_clock_getres
11466 case TARGET_NR_clock_getres
:
11468 struct timespec ts
;
11469 ret
= get_errno(clock_getres(arg1
, &ts
));
11470 if (!is_error(ret
)) {
11471 host_to_target_timespec(arg2
, &ts
);
11476 #ifdef TARGET_NR_clock_nanosleep
11477 case TARGET_NR_clock_nanosleep
:
11479 struct timespec ts
;
11480 target_to_host_timespec(&ts
, arg3
);
11481 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
11482 &ts
, arg4
? &ts
: NULL
));
11484 host_to_target_timespec(arg4
, &ts
);
11486 #if defined(TARGET_PPC)
11487 /* clock_nanosleep is odd in that it returns positive errno values.
11488 * On PPC, CR0 bit 3 should be set in such a situation. */
11489 if (ret
&& ret
!= -TARGET_ERESTARTSYS
) {
11490 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
11497 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11498 case TARGET_NR_set_tid_address
:
11499 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
11503 case TARGET_NR_tkill
:
11504 ret
= get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
11507 case TARGET_NR_tgkill
:
11508 ret
= get_errno(safe_tgkill((int)arg1
, (int)arg2
,
11509 target_to_host_signal(arg3
)));
11512 #ifdef TARGET_NR_set_robust_list
11513 case TARGET_NR_set_robust_list
:
11514 case TARGET_NR_get_robust_list
:
11515 /* The ABI for supporting robust futexes has userspace pass
11516 * the kernel a pointer to a linked list which is updated by
11517 * userspace after the syscall; the list is walked by the kernel
11518 * when the thread exits. Since the linked list in QEMU guest
11519 * memory isn't a valid linked list for the host and we have
11520 * no way to reliably intercept the thread-death event, we can't
11521 * support these. Silently return ENOSYS so that guest userspace
11522 * falls back to a non-robust futex implementation (which should
11523 * be OK except in the corner case of the guest crashing while
11524 * holding a mutex that is shared with another process via
11527 goto unimplemented_nowarn
;
11530 #if defined(TARGET_NR_utimensat)
11531 case TARGET_NR_utimensat
:
11533 struct timespec
*tsp
, ts
[2];
11537 target_to_host_timespec(ts
, arg3
);
11538 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
11542 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
11544 if (!(p
= lock_user_string(arg2
))) {
11545 ret
= -TARGET_EFAULT
;
11548 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
11549 unlock_user(p
, arg2
, 0);
11554 case TARGET_NR_futex
:
11555 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11557 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11558 case TARGET_NR_inotify_init
:
11559 ret
= get_errno(sys_inotify_init());
11562 #ifdef CONFIG_INOTIFY1
11563 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11564 case TARGET_NR_inotify_init1
:
11565 ret
= get_errno(sys_inotify_init1(arg1
));
11569 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11570 case TARGET_NR_inotify_add_watch
:
11571 p
= lock_user_string(arg2
);
11572 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
11573 unlock_user(p
, arg2
, 0);
11576 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11577 case TARGET_NR_inotify_rm_watch
:
11578 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
11582 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11583 case TARGET_NR_mq_open
:
11585 struct mq_attr posix_mq_attr
;
11588 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
11589 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
11592 p
= lock_user_string(arg1
- 1);
11596 ret
= get_errno(mq_open(p
, host_flags
, arg3
, &posix_mq_attr
));
11597 unlock_user (p
, arg1
, 0);
11601 case TARGET_NR_mq_unlink
:
11602 p
= lock_user_string(arg1
- 1);
11604 ret
= -TARGET_EFAULT
;
11607 ret
= get_errno(mq_unlink(p
));
11608 unlock_user (p
, arg1
, 0);
11611 case TARGET_NR_mq_timedsend
:
11613 struct timespec ts
;
11615 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11617 target_to_host_timespec(&ts
, arg5
);
11618 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
11619 host_to_target_timespec(arg5
, &ts
);
11621 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
11623 unlock_user (p
, arg2
, arg3
);
11627 case TARGET_NR_mq_timedreceive
:
11629 struct timespec ts
;
11632 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11634 target_to_host_timespec(&ts
, arg5
);
11635 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11637 host_to_target_timespec(arg5
, &ts
);
11639 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11642 unlock_user (p
, arg2
, arg3
);
11644 put_user_u32(prio
, arg4
);
11648 /* Not implemented for now... */
11649 /* case TARGET_NR_mq_notify: */
11652 case TARGET_NR_mq_getsetattr
:
11654 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
11657 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
11658 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
11661 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
11662 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
11669 #ifdef CONFIG_SPLICE
11670 #ifdef TARGET_NR_tee
11671 case TARGET_NR_tee
:
11673 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
11677 #ifdef TARGET_NR_splice
11678 case TARGET_NR_splice
:
11680 loff_t loff_in
, loff_out
;
11681 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
11683 if (get_user_u64(loff_in
, arg2
)) {
11686 ploff_in
= &loff_in
;
11689 if (get_user_u64(loff_out
, arg4
)) {
11692 ploff_out
= &loff_out
;
11694 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
11696 if (put_user_u64(loff_in
, arg2
)) {
11701 if (put_user_u64(loff_out
, arg4
)) {
11708 #ifdef TARGET_NR_vmsplice
11709 case TARGET_NR_vmsplice
:
11711 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
11713 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
11714 unlock_iovec(vec
, arg2
, arg3
, 0);
11716 ret
= -host_to_target_errno(errno
);
11721 #endif /* CONFIG_SPLICE */
11722 #ifdef CONFIG_EVENTFD
11723 #if defined(TARGET_NR_eventfd)
11724 case TARGET_NR_eventfd
:
11725 ret
= get_errno(eventfd(arg1
, 0));
11726 fd_trans_unregister(ret
);
11729 #if defined(TARGET_NR_eventfd2)
11730 case TARGET_NR_eventfd2
:
11732 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
11733 if (arg2
& TARGET_O_NONBLOCK
) {
11734 host_flags
|= O_NONBLOCK
;
11736 if (arg2
& TARGET_O_CLOEXEC
) {
11737 host_flags
|= O_CLOEXEC
;
11739 ret
= get_errno(eventfd(arg1
, host_flags
));
11740 fd_trans_unregister(ret
);
11744 #endif /* CONFIG_EVENTFD */
11745 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11746 case TARGET_NR_fallocate
:
11747 #if TARGET_ABI_BITS == 32
11748 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
11749 target_offset64(arg5
, arg6
)));
11751 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
11755 #if defined(CONFIG_SYNC_FILE_RANGE)
11756 #if defined(TARGET_NR_sync_file_range)
11757 case TARGET_NR_sync_file_range
:
11758 #if TARGET_ABI_BITS == 32
11759 #if defined(TARGET_MIPS)
11760 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11761 target_offset64(arg5
, arg6
), arg7
));
11763 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
11764 target_offset64(arg4
, arg5
), arg6
));
11765 #endif /* !TARGET_MIPS */
11767 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
11771 #if defined(TARGET_NR_sync_file_range2)
11772 case TARGET_NR_sync_file_range2
:
11773 /* This is like sync_file_range but the arguments are reordered */
11774 #if TARGET_ABI_BITS == 32
11775 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11776 target_offset64(arg5
, arg6
), arg2
));
11778 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
11783 #if defined(TARGET_NR_signalfd4)
11784 case TARGET_NR_signalfd4
:
11785 ret
= do_signalfd4(arg1
, arg2
, arg4
);
11788 #if defined(TARGET_NR_signalfd)
11789 case TARGET_NR_signalfd
:
11790 ret
= do_signalfd4(arg1
, arg2
, 0);
11793 #if defined(CONFIG_EPOLL)
11794 #if defined(TARGET_NR_epoll_create)
11795 case TARGET_NR_epoll_create
:
11796 ret
= get_errno(epoll_create(arg1
));
11799 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11800 case TARGET_NR_epoll_create1
:
11801 ret
= get_errno(epoll_create1(arg1
));
11804 #if defined(TARGET_NR_epoll_ctl)
11805 case TARGET_NR_epoll_ctl
:
11807 struct epoll_event ep
;
11808 struct epoll_event
*epp
= 0;
11810 struct target_epoll_event
*target_ep
;
11811 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
11814 ep
.events
= tswap32(target_ep
->events
);
11815 /* The epoll_data_t union is just opaque data to the kernel,
11816 * so we transfer all 64 bits across and need not worry what
11817 * actual data type it is.
11819 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
11820 unlock_user_struct(target_ep
, arg4
, 0);
11823 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
11828 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11829 #if defined(TARGET_NR_epoll_wait)
11830 case TARGET_NR_epoll_wait
:
11832 #if defined(TARGET_NR_epoll_pwait)
11833 case TARGET_NR_epoll_pwait
:
11836 struct target_epoll_event
*target_ep
;
11837 struct epoll_event
*ep
;
11839 int maxevents
= arg3
;
11840 int timeout
= arg4
;
11842 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
11843 ret
= -TARGET_EINVAL
;
11847 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
11848 maxevents
* sizeof(struct target_epoll_event
), 1);
11853 ep
= g_try_new(struct epoll_event
, maxevents
);
11855 unlock_user(target_ep
, arg2
, 0);
11856 ret
= -TARGET_ENOMEM
;
11861 #if defined(TARGET_NR_epoll_pwait)
11862 case TARGET_NR_epoll_pwait
:
11864 target_sigset_t
*target_set
;
11865 sigset_t _set
, *set
= &_set
;
11868 if (arg6
!= sizeof(target_sigset_t
)) {
11869 ret
= -TARGET_EINVAL
;
11873 target_set
= lock_user(VERIFY_READ
, arg5
,
11874 sizeof(target_sigset_t
), 1);
11876 ret
= -TARGET_EFAULT
;
11879 target_to_host_sigset(set
, target_set
);
11880 unlock_user(target_set
, arg5
, 0);
11885 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
11886 set
, SIGSET_T_SIZE
));
11890 #if defined(TARGET_NR_epoll_wait)
11891 case TARGET_NR_epoll_wait
:
11892 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
11897 ret
= -TARGET_ENOSYS
;
11899 if (!is_error(ret
)) {
11901 for (i
= 0; i
< ret
; i
++) {
11902 target_ep
[i
].events
= tswap32(ep
[i
].events
);
11903 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
11905 unlock_user(target_ep
, arg2
,
11906 ret
* sizeof(struct target_epoll_event
));
11908 unlock_user(target_ep
, arg2
, 0);
11915 #ifdef TARGET_NR_prlimit64
11916 case TARGET_NR_prlimit64
:
11918 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11919 struct target_rlimit64
*target_rnew
, *target_rold
;
11920 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
11921 int resource
= target_to_host_resource(arg2
);
11923 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
11926 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
11927 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
11928 unlock_user_struct(target_rnew
, arg3
, 0);
11932 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
11933 if (!is_error(ret
) && arg4
) {
11934 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
11937 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
11938 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
11939 unlock_user_struct(target_rold
, arg4
, 1);
11944 #ifdef TARGET_NR_gethostname
11945 case TARGET_NR_gethostname
:
11947 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
11949 ret
= get_errno(gethostname(name
, arg2
));
11950 unlock_user(name
, arg1
, arg2
);
11952 ret
= -TARGET_EFAULT
;
11957 #ifdef TARGET_NR_atomic_cmpxchg_32
11958 case TARGET_NR_atomic_cmpxchg_32
:
11960 /* should use start_exclusive from main.c */
11961 abi_ulong mem_value
;
11962 if (get_user_u32(mem_value
, arg6
)) {
11963 target_siginfo_t info
;
11964 info
.si_signo
= SIGSEGV
;
11966 info
.si_code
= TARGET_SEGV_MAPERR
;
11967 info
._sifields
._sigfault
._addr
= arg6
;
11968 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
11969 QEMU_SI_FAULT
, &info
);
11973 if (mem_value
== arg2
)
11974 put_user_u32(arg1
, arg6
);
11979 #ifdef TARGET_NR_atomic_barrier
11980 case TARGET_NR_atomic_barrier
:
11982 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
11988 #ifdef TARGET_NR_timer_create
11989 case TARGET_NR_timer_create
:
11991 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11993 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
11996 int timer_index
= next_free_host_timer();
11998 if (timer_index
< 0) {
11999 ret
= -TARGET_EAGAIN
;
12001 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
12004 phost_sevp
= &host_sevp
;
12005 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
12011 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
12015 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
12024 #ifdef TARGET_NR_timer_settime
12025 case TARGET_NR_timer_settime
:
12027 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12028 * struct itimerspec * old_value */
12029 target_timer_t timerid
= get_timer_id(arg1
);
12033 } else if (arg3
== 0) {
12034 ret
= -TARGET_EINVAL
;
12036 timer_t htimer
= g_posix_timers
[timerid
];
12037 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12039 target_to_host_itimerspec(&hspec_new
, arg3
);
12041 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12042 host_to_target_itimerspec(arg2
, &hspec_old
);
12048 #ifdef TARGET_NR_timer_gettime
12049 case TARGET_NR_timer_gettime
:
12051 /* args: timer_t timerid, struct itimerspec *curr_value */
12052 target_timer_t timerid
= get_timer_id(arg1
);
12056 } else if (!arg2
) {
12057 ret
= -TARGET_EFAULT
;
12059 timer_t htimer
= g_posix_timers
[timerid
];
12060 struct itimerspec hspec
;
12061 ret
= get_errno(timer_gettime(htimer
, &hspec
));
12063 if (host_to_target_itimerspec(arg2
, &hspec
)) {
12064 ret
= -TARGET_EFAULT
;
12071 #ifdef TARGET_NR_timer_getoverrun
12072 case TARGET_NR_timer_getoverrun
:
12074 /* args: timer_t timerid */
12075 target_timer_t timerid
= get_timer_id(arg1
);
12080 timer_t htimer
= g_posix_timers
[timerid
];
12081 ret
= get_errno(timer_getoverrun(htimer
));
12083 fd_trans_unregister(ret
);
12088 #ifdef TARGET_NR_timer_delete
12089 case TARGET_NR_timer_delete
:
12091 /* args: timer_t timerid */
12092 target_timer_t timerid
= get_timer_id(arg1
);
12097 timer_t htimer
= g_posix_timers
[timerid
];
12098 ret
= get_errno(timer_delete(htimer
));
12099 g_posix_timers
[timerid
] = 0;
12105 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12106 case TARGET_NR_timerfd_create
:
12107 ret
= get_errno(timerfd_create(arg1
,
12108 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
12112 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12113 case TARGET_NR_timerfd_gettime
:
12115 struct itimerspec its_curr
;
12117 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
12119 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
12126 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12127 case TARGET_NR_timerfd_settime
:
12129 struct itimerspec its_new
, its_old
, *p_new
;
12132 if (target_to_host_itimerspec(&its_new
, arg3
)) {
12140 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
12142 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
12149 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12150 case TARGET_NR_ioprio_get
:
12151 ret
= get_errno(ioprio_get(arg1
, arg2
));
12155 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12156 case TARGET_NR_ioprio_set
:
12157 ret
= get_errno(ioprio_set(arg1
, arg2
, arg3
));
12161 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12162 case TARGET_NR_setns
:
12163 ret
= get_errno(setns(arg1
, arg2
));
12166 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12167 case TARGET_NR_unshare
:
12168 ret
= get_errno(unshare(arg1
));
12171 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12172 case TARGET_NR_kcmp
:
12173 ret
= get_errno(kcmp(arg1
, arg2
, arg3
, arg4
, arg5
));
12179 gemu_log("qemu: Unsupported syscall: %d\n", num
);
12180 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
12181 unimplemented_nowarn
:
12183 ret
= -TARGET_ENOSYS
;
12188 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
12191 print_syscall_ret(num
, ret
);
12192 trace_guest_user_syscall_ret(cpu
, num
, ret
);
12195 ret
= -TARGET_EFAULT
;