4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
36 #include <linux/capability.h>
38 #include <sys/timex.h>
40 int __clone2(int (*fn
)(void *), void *child_stack_base
,
41 size_t stack_size
, int flags
, void *arg
, ...);
43 #include <sys/socket.h>
47 #include <sys/times.h>
50 #include <sys/statfs.h>
52 #include <sys/sysinfo.h>
53 #include <sys/signalfd.h>
54 //#include <sys/user.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <linux/wireless.h>
58 #include <linux/icmp.h>
59 #include "qemu-common.h"
61 #include <sys/timerfd.h>
67 #include <sys/eventfd.h>
70 #include <sys/epoll.h>
73 #include "qemu/xattr.h"
75 #ifdef CONFIG_SENDFILE
76 #include <sys/sendfile.h>
79 #define termios host_termios
80 #define winsize host_winsize
81 #define termio host_termio
82 #define sgttyb host_sgttyb /* same as target */
83 #define tchars host_tchars /* same as target */
84 #define ltchars host_ltchars /* same as target */
86 #include <linux/termios.h>
87 #include <linux/unistd.h>
88 #include <linux/cdrom.h>
89 #include <linux/hdreg.h>
90 #include <linux/soundcard.h>
92 #include <linux/mtio.h>
94 #if defined(CONFIG_FIEMAP)
95 #include <linux/fiemap.h>
99 #include <linux/dm-ioctl.h>
100 #include <linux/reboot.h>
101 #include <linux/route.h>
102 #include <linux/filter.h>
103 #include <linux/blkpg.h>
104 #include <netpacket/packet.h>
105 #include <linux/netlink.h>
106 #ifdef CONFIG_RTNETLINK
107 #include <linux/rtnetlink.h>
108 #include <linux/if_bridge.h>
110 #include <linux/audit.h>
111 #include "linux_loop.h"
117 #define CLONE_IO 0x80000000 /* Clone io context */
120 /* We can't directly call the host clone syscall, because this will
121 * badly confuse libc (breaking mutexes, for example). So we must
122 * divide clone flags into:
123 * * flag combinations that look like pthread_create()
124 * * flag combinations that look like fork()
125 * * flags we can implement within QEMU itself
126 * * flags we can't support and will return an error for
128 /* For thread creation, all these flags must be present; for
129 * fork, none must be present.
131 #define CLONE_THREAD_FLAGS \
132 (CLONE_VM | CLONE_FS | CLONE_FILES | \
133 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
135 /* These flags are ignored:
136 * CLONE_DETACHED is now ignored by the kernel;
137 * CLONE_IO is just an optimisation hint to the I/O scheduler
139 #define CLONE_IGNORED_FLAGS \
140 (CLONE_DETACHED | CLONE_IO)
142 /* Flags for fork which we can implement within QEMU itself */
143 #define CLONE_OPTIONAL_FORK_FLAGS \
144 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
145 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
147 /* Flags for thread creation which we can implement within QEMU itself */
148 #define CLONE_OPTIONAL_THREAD_FLAGS \
149 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
150 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
152 #define CLONE_INVALID_FORK_FLAGS \
153 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
155 #define CLONE_INVALID_THREAD_FLAGS \
156 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
157 CLONE_IGNORED_FLAGS))
159 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
160 * have almost all been allocated. We cannot support any of
161 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
162 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
163 * The checks against the invalid thread masks above will catch these.
164 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
168 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
169 * once. This exercises the codepaths for restart.
171 //#define DEBUG_ERESTARTSYS
173 //#include <linux/msdos_fs.h>
174 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
175 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
185 #define _syscall0(type,name) \
186 static type name (void) \
188 return syscall(__NR_##name); \
191 #define _syscall1(type,name,type1,arg1) \
192 static type name (type1 arg1) \
194 return syscall(__NR_##name, arg1); \
197 #define _syscall2(type,name,type1,arg1,type2,arg2) \
198 static type name (type1 arg1,type2 arg2) \
200 return syscall(__NR_##name, arg1, arg2); \
203 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
204 static type name (type1 arg1,type2 arg2,type3 arg3) \
206 return syscall(__NR_##name, arg1, arg2, arg3); \
209 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
210 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
212 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
215 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
217 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
219 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
223 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
224 type5,arg5,type6,arg6) \
225 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
228 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
232 #define __NR_sys_uname __NR_uname
233 #define __NR_sys_getcwd1 __NR_getcwd
234 #define __NR_sys_getdents __NR_getdents
235 #define __NR_sys_getdents64 __NR_getdents64
236 #define __NR_sys_getpriority __NR_getpriority
237 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
238 #define __NR_sys_syslog __NR_syslog
239 #define __NR_sys_futex __NR_futex
240 #define __NR_sys_inotify_init __NR_inotify_init
241 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
242 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
244 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
246 #define __NR__llseek __NR_lseek
249 /* Newer kernel ports have llseek() instead of _llseek() */
250 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
251 #define TARGET_NR__llseek TARGET_NR_llseek
255 _syscall0(int, gettid
)
257 /* This is a replacement for the host gettid() and must return a host
259 static int gettid(void) {
263 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
264 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
266 #if !defined(__NR_getdents) || \
267 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
268 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
270 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
271 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
272 loff_t
*, res
, uint
, wh
);
274 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
275 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
276 #ifdef __NR_exit_group
277 _syscall1(int,exit_group
,int,error_code
)
279 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
280 _syscall1(int,set_tid_address
,int *,tidptr
)
282 #if defined(TARGET_NR_futex) && defined(__NR_futex)
283 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
284 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
286 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
287 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
288 unsigned long *, user_mask_ptr
);
289 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
290 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
291 unsigned long *, user_mask_ptr
);
292 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
294 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
295 struct __user_cap_data_struct
*, data
);
296 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
297 struct __user_cap_data_struct
*, data
);
298 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
299 _syscall2(int, ioprio_get
, int, which
, int, who
)
301 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
302 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
304 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
305 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
308 static bitmask_transtbl fcntl_flags_tbl
[] = {
309 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
310 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
311 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
312 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
313 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
314 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
315 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
316 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
317 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
318 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
319 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
320 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
321 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
322 #if defined(O_DIRECT)
323 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
325 #if defined(O_NOATIME)
326 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
328 #if defined(O_CLOEXEC)
329 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
332 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
334 /* Don't terminate the list prematurely on 64-bit host+guest. */
335 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
336 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
343 QEMU_IFLA_BR_FORWARD_DELAY
,
344 QEMU_IFLA_BR_HELLO_TIME
,
345 QEMU_IFLA_BR_MAX_AGE
,
346 QEMU_IFLA_BR_AGEING_TIME
,
347 QEMU_IFLA_BR_STP_STATE
,
348 QEMU_IFLA_BR_PRIORITY
,
349 QEMU_IFLA_BR_VLAN_FILTERING
,
350 QEMU_IFLA_BR_VLAN_PROTOCOL
,
351 QEMU_IFLA_BR_GROUP_FWD_MASK
,
352 QEMU_IFLA_BR_ROOT_ID
,
353 QEMU_IFLA_BR_BRIDGE_ID
,
354 QEMU_IFLA_BR_ROOT_PORT
,
355 QEMU_IFLA_BR_ROOT_PATH_COST
,
356 QEMU_IFLA_BR_TOPOLOGY_CHANGE
,
357 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED
,
358 QEMU_IFLA_BR_HELLO_TIMER
,
359 QEMU_IFLA_BR_TCN_TIMER
,
360 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER
,
361 QEMU_IFLA_BR_GC_TIMER
,
362 QEMU_IFLA_BR_GROUP_ADDR
,
363 QEMU_IFLA_BR_FDB_FLUSH
,
364 QEMU_IFLA_BR_MCAST_ROUTER
,
365 QEMU_IFLA_BR_MCAST_SNOOPING
,
366 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR
,
367 QEMU_IFLA_BR_MCAST_QUERIER
,
368 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY
,
369 QEMU_IFLA_BR_MCAST_HASH_MAX
,
370 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT
,
371 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT
,
372 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL
,
373 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL
,
374 QEMU_IFLA_BR_MCAST_QUERIER_INTVL
,
375 QEMU_IFLA_BR_MCAST_QUERY_INTVL
,
376 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
,
377 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL
,
378 QEMU_IFLA_BR_NF_CALL_IPTABLES
,
379 QEMU_IFLA_BR_NF_CALL_IP6TABLES
,
380 QEMU_IFLA_BR_NF_CALL_ARPTABLES
,
381 QEMU_IFLA_BR_VLAN_DEFAULT_PVID
,
383 QEMU_IFLA_BR_VLAN_STATS_ENABLED
,
384 QEMU_IFLA_BR_MCAST_STATS_ENABLED
,
408 QEMU_IFLA_NET_NS_PID
,
411 QEMU_IFLA_VFINFO_LIST
,
419 QEMU_IFLA_PROMISCUITY
,
420 QEMU_IFLA_NUM_TX_QUEUES
,
421 QEMU_IFLA_NUM_RX_QUEUES
,
423 QEMU_IFLA_PHYS_PORT_ID
,
424 QEMU_IFLA_CARRIER_CHANGES
,
425 QEMU_IFLA_PHYS_SWITCH_ID
,
426 QEMU_IFLA_LINK_NETNSID
,
427 QEMU_IFLA_PHYS_PORT_NAME
,
428 QEMU_IFLA_PROTO_DOWN
,
429 QEMU_IFLA_GSO_MAX_SEGS
,
430 QEMU_IFLA_GSO_MAX_SIZE
,
437 QEMU_IFLA_BRPORT_UNSPEC
,
438 QEMU_IFLA_BRPORT_STATE
,
439 QEMU_IFLA_BRPORT_PRIORITY
,
440 QEMU_IFLA_BRPORT_COST
,
441 QEMU_IFLA_BRPORT_MODE
,
442 QEMU_IFLA_BRPORT_GUARD
,
443 QEMU_IFLA_BRPORT_PROTECT
,
444 QEMU_IFLA_BRPORT_FAST_LEAVE
,
445 QEMU_IFLA_BRPORT_LEARNING
,
446 QEMU_IFLA_BRPORT_UNICAST_FLOOD
,
447 QEMU_IFLA_BRPORT_PROXYARP
,
448 QEMU_IFLA_BRPORT_LEARNING_SYNC
,
449 QEMU_IFLA_BRPORT_PROXYARP_WIFI
,
450 QEMU_IFLA_BRPORT_ROOT_ID
,
451 QEMU_IFLA_BRPORT_BRIDGE_ID
,
452 QEMU_IFLA_BRPORT_DESIGNATED_PORT
,
453 QEMU_IFLA_BRPORT_DESIGNATED_COST
,
456 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
,
457 QEMU_IFLA_BRPORT_CONFIG_PENDING
,
458 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER
,
459 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER
,
460 QEMU_IFLA_BRPORT_HOLD_TIMER
,
461 QEMU_IFLA_BRPORT_FLUSH
,
462 QEMU_IFLA_BRPORT_MULTICAST_ROUTER
,
463 QEMU_IFLA_BRPORT_PAD
,
464 QEMU___IFLA_BRPORT_MAX
468 QEMU_IFLA_INFO_UNSPEC
,
471 QEMU_IFLA_INFO_XSTATS
,
472 QEMU_IFLA_INFO_SLAVE_KIND
,
473 QEMU_IFLA_INFO_SLAVE_DATA
,
474 QEMU___IFLA_INFO_MAX
,
478 QEMU_IFLA_INET_UNSPEC
,
480 QEMU___IFLA_INET_MAX
,
484 QEMU_IFLA_INET6_UNSPEC
,
485 QEMU_IFLA_INET6_FLAGS
,
486 QEMU_IFLA_INET6_CONF
,
487 QEMU_IFLA_INET6_STATS
,
488 QEMU_IFLA_INET6_MCAST
,
489 QEMU_IFLA_INET6_CACHEINFO
,
490 QEMU_IFLA_INET6_ICMP6STATS
,
491 QEMU_IFLA_INET6_TOKEN
,
492 QEMU_IFLA_INET6_ADDR_GEN_MODE
,
493 QEMU___IFLA_INET6_MAX
496 typedef abi_long (*TargetFdDataFunc
)(void *, size_t);
497 typedef abi_long (*TargetFdAddrFunc
)(void *, abi_ulong
, socklen_t
);
498 typedef struct TargetFdTrans
{
499 TargetFdDataFunc host_to_target_data
;
500 TargetFdDataFunc target_to_host_data
;
501 TargetFdAddrFunc target_to_host_addr
;
504 static TargetFdTrans
**target_fd_trans
;
506 static unsigned int target_fd_max
;
508 static TargetFdDataFunc
fd_trans_target_to_host_data(int fd
)
510 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
511 return target_fd_trans
[fd
]->target_to_host_data
;
516 static TargetFdDataFunc
fd_trans_host_to_target_data(int fd
)
518 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
519 return target_fd_trans
[fd
]->host_to_target_data
;
524 static TargetFdAddrFunc
fd_trans_target_to_host_addr(int fd
)
526 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
527 return target_fd_trans
[fd
]->target_to_host_addr
;
532 static void fd_trans_register(int fd
, TargetFdTrans
*trans
)
536 if (fd
>= target_fd_max
) {
537 oldmax
= target_fd_max
;
538 target_fd_max
= ((fd
>> 6) + 1) << 6; /* by slice of 64 entries */
539 target_fd_trans
= g_renew(TargetFdTrans
*,
540 target_fd_trans
, target_fd_max
);
541 memset((void *)(target_fd_trans
+ oldmax
), 0,
542 (target_fd_max
- oldmax
) * sizeof(TargetFdTrans
*));
544 target_fd_trans
[fd
] = trans
;
547 static void fd_trans_unregister(int fd
)
549 if (fd
>= 0 && fd
< target_fd_max
) {
550 target_fd_trans
[fd
] = NULL
;
554 static void fd_trans_dup(int oldfd
, int newfd
)
556 fd_trans_unregister(newfd
);
557 if (oldfd
< target_fd_max
&& target_fd_trans
[oldfd
]) {
558 fd_trans_register(newfd
, target_fd_trans
[oldfd
]);
562 static int sys_getcwd1(char *buf
, size_t size
)
564 if (getcwd(buf
, size
) == NULL
) {
565 /* getcwd() sets errno */
568 return strlen(buf
)+1;
571 #ifdef TARGET_NR_utimensat
572 #if defined(__NR_utimensat)
573 #define __NR_sys_utimensat __NR_utimensat
574 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
575 const struct timespec
*,tsp
,int,flags
)
577 static int sys_utimensat(int dirfd
, const char *pathname
,
578 const struct timespec times
[2], int flags
)
584 #endif /* TARGET_NR_utimensat */
586 #ifdef CONFIG_INOTIFY
587 #include <sys/inotify.h>
589 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
590 static int sys_inotify_init(void)
592 return (inotify_init());
595 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
596 static int sys_inotify_add_watch(int fd
,const char *pathname
, int32_t mask
)
598 return (inotify_add_watch(fd
, pathname
, mask
));
601 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
602 static int sys_inotify_rm_watch(int fd
, int32_t wd
)
604 return (inotify_rm_watch(fd
, wd
));
607 #ifdef CONFIG_INOTIFY1
608 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
609 static int sys_inotify_init1(int flags
)
611 return (inotify_init1(flags
));
616 /* Userspace can usually survive runtime without inotify */
617 #undef TARGET_NR_inotify_init
618 #undef TARGET_NR_inotify_init1
619 #undef TARGET_NR_inotify_add_watch
620 #undef TARGET_NR_inotify_rm_watch
621 #endif /* CONFIG_INOTIFY */
623 #if defined(TARGET_NR_prlimit64)
624 #ifndef __NR_prlimit64
625 # define __NR_prlimit64 -1
627 #define __NR_sys_prlimit64 __NR_prlimit64
628 /* The glibc rlimit structure may not be that used by the underlying syscall */
629 struct host_rlimit64
{
633 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
634 const struct host_rlimit64
*, new_limit
,
635 struct host_rlimit64
*, old_limit
)
639 #if defined(TARGET_NR_timer_create)
640 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
641 static timer_t g_posix_timers
[32] = { 0, } ;
643 static inline int next_free_host_timer(void)
646 /* FIXME: Does finding the next free slot require a lock? */
647 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
648 if (g_posix_timers
[k
] == 0) {
649 g_posix_timers
[k
] = (timer_t
) 1;
657 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
659 static inline int regpairs_aligned(void *cpu_env
) {
660 return ((((CPUARMState
*)cpu_env
)->eabi
) == 1) ;
662 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
663 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
664 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
665 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
666 * of registers which translates to the same as ARM/MIPS, because we start with
668 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
670 static inline int regpairs_aligned(void *cpu_env
) { return 0; }
673 #define ERRNO_TABLE_SIZE 1200
675 /* target_to_host_errno_table[] is initialized from
676 * host_to_target_errno_table[] in syscall_init(). */
677 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
681 * This list is the union of errno values overridden in asm-<arch>/errno.h
682 * minus the errnos that are not actually generic to all archs.
684 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
685 [EAGAIN
] = TARGET_EAGAIN
,
686 [EIDRM
] = TARGET_EIDRM
,
687 [ECHRNG
] = TARGET_ECHRNG
,
688 [EL2NSYNC
] = TARGET_EL2NSYNC
,
689 [EL3HLT
] = TARGET_EL3HLT
,
690 [EL3RST
] = TARGET_EL3RST
,
691 [ELNRNG
] = TARGET_ELNRNG
,
692 [EUNATCH
] = TARGET_EUNATCH
,
693 [ENOCSI
] = TARGET_ENOCSI
,
694 [EL2HLT
] = TARGET_EL2HLT
,
695 [EDEADLK
] = TARGET_EDEADLK
,
696 [ENOLCK
] = TARGET_ENOLCK
,
697 [EBADE
] = TARGET_EBADE
,
698 [EBADR
] = TARGET_EBADR
,
699 [EXFULL
] = TARGET_EXFULL
,
700 [ENOANO
] = TARGET_ENOANO
,
701 [EBADRQC
] = TARGET_EBADRQC
,
702 [EBADSLT
] = TARGET_EBADSLT
,
703 [EBFONT
] = TARGET_EBFONT
,
704 [ENOSTR
] = TARGET_ENOSTR
,
705 [ENODATA
] = TARGET_ENODATA
,
706 [ETIME
] = TARGET_ETIME
,
707 [ENOSR
] = TARGET_ENOSR
,
708 [ENONET
] = TARGET_ENONET
,
709 [ENOPKG
] = TARGET_ENOPKG
,
710 [EREMOTE
] = TARGET_EREMOTE
,
711 [ENOLINK
] = TARGET_ENOLINK
,
712 [EADV
] = TARGET_EADV
,
713 [ESRMNT
] = TARGET_ESRMNT
,
714 [ECOMM
] = TARGET_ECOMM
,
715 [EPROTO
] = TARGET_EPROTO
,
716 [EDOTDOT
] = TARGET_EDOTDOT
,
717 [EMULTIHOP
] = TARGET_EMULTIHOP
,
718 [EBADMSG
] = TARGET_EBADMSG
,
719 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
720 [EOVERFLOW
] = TARGET_EOVERFLOW
,
721 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
722 [EBADFD
] = TARGET_EBADFD
,
723 [EREMCHG
] = TARGET_EREMCHG
,
724 [ELIBACC
] = TARGET_ELIBACC
,
725 [ELIBBAD
] = TARGET_ELIBBAD
,
726 [ELIBSCN
] = TARGET_ELIBSCN
,
727 [ELIBMAX
] = TARGET_ELIBMAX
,
728 [ELIBEXEC
] = TARGET_ELIBEXEC
,
729 [EILSEQ
] = TARGET_EILSEQ
,
730 [ENOSYS
] = TARGET_ENOSYS
,
731 [ELOOP
] = TARGET_ELOOP
,
732 [ERESTART
] = TARGET_ERESTART
,
733 [ESTRPIPE
] = TARGET_ESTRPIPE
,
734 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
735 [EUSERS
] = TARGET_EUSERS
,
736 [ENOTSOCK
] = TARGET_ENOTSOCK
,
737 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
738 [EMSGSIZE
] = TARGET_EMSGSIZE
,
739 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
740 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
741 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
742 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
743 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
744 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
745 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
746 [EADDRINUSE
] = TARGET_EADDRINUSE
,
747 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
748 [ENETDOWN
] = TARGET_ENETDOWN
,
749 [ENETUNREACH
] = TARGET_ENETUNREACH
,
750 [ENETRESET
] = TARGET_ENETRESET
,
751 [ECONNABORTED
] = TARGET_ECONNABORTED
,
752 [ECONNRESET
] = TARGET_ECONNRESET
,
753 [ENOBUFS
] = TARGET_ENOBUFS
,
754 [EISCONN
] = TARGET_EISCONN
,
755 [ENOTCONN
] = TARGET_ENOTCONN
,
756 [EUCLEAN
] = TARGET_EUCLEAN
,
757 [ENOTNAM
] = TARGET_ENOTNAM
,
758 [ENAVAIL
] = TARGET_ENAVAIL
,
759 [EISNAM
] = TARGET_EISNAM
,
760 [EREMOTEIO
] = TARGET_EREMOTEIO
,
761 [EDQUOT
] = TARGET_EDQUOT
,
762 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
763 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
764 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
765 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
766 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
767 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
768 [EALREADY
] = TARGET_EALREADY
,
769 [EINPROGRESS
] = TARGET_EINPROGRESS
,
770 [ESTALE
] = TARGET_ESTALE
,
771 [ECANCELED
] = TARGET_ECANCELED
,
772 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
773 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
775 [ENOKEY
] = TARGET_ENOKEY
,
778 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
781 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
784 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
787 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
789 #ifdef ENOTRECOVERABLE
790 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
793 [ENOMSG
] = TARGET_ENOMSG
,
797 static inline int host_to_target_errno(int err
)
799 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
800 host_to_target_errno_table
[err
]) {
801 return host_to_target_errno_table
[err
];
806 static inline int target_to_host_errno(int err
)
808 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
809 target_to_host_errno_table
[err
]) {
810 return target_to_host_errno_table
[err
];
815 static inline abi_long
get_errno(abi_long ret
)
818 return -host_to_target_errno(errno
);
823 static inline int is_error(abi_long ret
)
825 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
828 const char *target_strerror(int err
)
830 if (err
== TARGET_ERESTARTSYS
) {
831 return "To be restarted";
833 if (err
== TARGET_QEMU_ESIGRETURN
) {
834 return "Successful exit from sigreturn";
837 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
840 return strerror(target_to_host_errno(err
));
843 #define safe_syscall0(type, name) \
844 static type safe_##name(void) \
846 return safe_syscall(__NR_##name); \
849 #define safe_syscall1(type, name, type1, arg1) \
850 static type safe_##name(type1 arg1) \
852 return safe_syscall(__NR_##name, arg1); \
855 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
856 static type safe_##name(type1 arg1, type2 arg2) \
858 return safe_syscall(__NR_##name, arg1, arg2); \
861 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
862 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
864 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
867 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
869 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
871 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
874 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
875 type4, arg4, type5, arg5) \
876 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
879 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
882 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
883 type4, arg4, type5, arg5, type6, arg6) \
884 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
885 type5 arg5, type6 arg6) \
887 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
890 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
891 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
892 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
893 int, flags
, mode_t
, mode
)
894 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
895 struct rusage
*, rusage
)
896 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
897 int, options
, struct rusage
*, rusage
)
898 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
899 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
900 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
901 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
902 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
904 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
905 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
907 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
908 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
909 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
910 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
911 safe_syscall2(int, tkill
, int, tid
, int, sig
)
912 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
913 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
914 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
915 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
917 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
918 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
919 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
920 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
921 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
922 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
923 safe_syscall2(int, flock
, int, fd
, int, operation
)
924 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
925 const struct timespec
*, uts
, size_t, sigsetsize
)
926 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
928 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
929 struct timespec
*, rem
)
930 #ifdef TARGET_NR_clock_nanosleep
931 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
932 const struct timespec
*, req
, struct timespec
*, rem
)
935 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
937 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
938 long, msgtype
, int, flags
)
939 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
940 unsigned, nsops
, const struct timespec
*, timeout
)
942 /* This host kernel architecture uses a single ipc syscall; fake up
943 * wrappers for the sub-operations to hide this implementation detail.
944 * Annoyingly we can't include linux/ipc.h to get the constant definitions
945 * for the call parameter because some structs in there conflict with the
946 * sys/ipc.h ones. So we just define them here, and rely on them being
947 * the same for all host architectures.
949 #define Q_SEMTIMEDOP 4
952 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
954 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
955 void *, ptr
, long, fifth
)
956 static int safe_msgsnd(int msgid
, const void *msgp
, size_t sz
, int flags
)
958 return safe_ipc(Q_IPCCALL(0, Q_MSGSND
), msgid
, sz
, flags
, (void *)msgp
, 0);
960 static int safe_msgrcv(int msgid
, void *msgp
, size_t sz
, long type
, int flags
)
962 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV
), msgid
, sz
, flags
, msgp
, type
);
964 static int safe_semtimedop(int semid
, struct sembuf
*tsops
, unsigned nsops
,
965 const struct timespec
*timeout
)
967 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP
), semid
, nsops
, 0, tsops
,
971 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
972 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
973 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
974 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
975 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
977 /* We do ioctl like this rather than via safe_syscall3 to preserve the
978 * "third argument might be integer or pointer or not present" behaviour of
981 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
982 /* Similarly for fcntl. Note that callers must always:
983 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
984 * use the flock64 struct rather than unsuffixed flock
985 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
988 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
990 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
993 static inline int host_to_target_sock_type(int host_type
)
997 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
999 target_type
= TARGET_SOCK_DGRAM
;
1002 target_type
= TARGET_SOCK_STREAM
;
1005 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
1009 #if defined(SOCK_CLOEXEC)
1010 if (host_type
& SOCK_CLOEXEC
) {
1011 target_type
|= TARGET_SOCK_CLOEXEC
;
1015 #if defined(SOCK_NONBLOCK)
1016 if (host_type
& SOCK_NONBLOCK
) {
1017 target_type
|= TARGET_SOCK_NONBLOCK
;
1024 static abi_ulong target_brk
;
1025 static abi_ulong target_original_brk
;
1026 static abi_ulong brk_page
;
1028 void target_set_brk(abi_ulong new_brk
)
1030 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
1031 brk_page
= HOST_PAGE_ALIGN(target_brk
);
1034 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
1035 #define DEBUGF_BRK(message, args...)
/* do_brk() must return target values and target errnos.
 *
 * Emulates the guest brk() syscall: moves the guest program break to
 * 'new_brk', growing the heap mapping with target_mmap() when the
 * request exceeds the pages already reserved.  On most targets failure
 * is reported by returning the unchanged break (Linux brk semantics);
 * Alpha/OSF/1 instead wants a real errno.
 */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    /* brk(0) is the conventional "query the current break" request.  */
    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    /* The break may never shrink below its initial value.  */
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contains garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    } else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
/* Convert a guest fd_set at 'target_fds_addr' (an array of abi_ulong
 * bit words, 'n' descriptors wide) into the host fd_set 'fds'.
 * Returns 0 on success, -TARGET_EFAULT if the guest memory cannot be
 * read.
 */
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    /* number of abi_ulong words needed to hold n bits */
    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}
/* Like copy_from_user_fdset(), but treats a NULL guest pointer as an
 * absent set: on success *fds_ptr points at 'fds' (populated from the
 * guest) or is NULL when target_fds_addr is 0.  Returns 0 or
 * -TARGET_EFAULT.
 */
static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}
/* Write the host fd_set 'fds' ('n' descriptors wide) back to the
 * guest fd_set at 'target_fds_addr'.  Inverse of
 * copy_from_user_fdset().  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* pack one host bit per target bit position */
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
/* Host clock tick rate (Hz) as seen through times()/clock_t values.
 * Alpha hosts use 1024; everything else uses the traditional 100.
 */
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

/* Rescale a host clock_t tick count to the target's HZ.  When host
 * and target tick rates match this is the identity.
 */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    /* widen before multiplying to avoid overflow on 32-bit long */
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
/* Copy a host struct rusage into the guest struct target_rusage at
 * 'target_addr', byte-swapping each field as needed.  Returns 0 or
 * -TARGET_EFAULT if the guest memory cannot be written.
 */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
/* Convert a guest rlimit value to the host rlim_t.  The guest's
 * "infinity" sentinel maps to RLIM_INFINITY, as does any value that
 * does not survive a round-trip through rlim_t (i.e. would be
 * truncated on a narrower host type).
 */
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    /* if the value doesn't fit in rlim_t, treat it as unlimited */
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}
/* Convert a host rlim_t to the guest representation.  Host infinity,
 * and any value too large to represent as a non-negative abi_long on
 * the guest, becomes TARGET_RLIM_INFINITY.
 */
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
/* Map a guest RLIMIT_* resource code to the host's constant.  Unknown
 * codes are passed through unchanged and left to the host syscall to
 * reject.
 */
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
/* Read a struct target_timeval from guest memory into the host
 * struct timeval 'tv'.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
/* Write the host struct timeval 'tv' into the guest's
 * struct target_timeval at 'target_tv_addr'.  Returns 0 or
 * -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
/* Read a struct target_timezone from guest memory into the host
 * struct timezone 'tz'.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
1353 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
/* Read a struct target_mq_attr (POSIX message queue attributes) from
 * guest memory into the host struct mq_attr.  Returns 0 or
 * -TARGET_EFAULT.
 */
static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}
/* Write the host struct mq_attr back to the guest's
 * struct target_mq_attr.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
1395 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1396 /* do_select() must return target values and target errnos. */
/* Emulate the guest select() syscall: converts the three guest fd
 * sets and optional timeout, calls the host via safe_pselect6(), and
 * copies the (possibly modified) sets and remaining time back to the
 * guest.  Must return target values and target errnos.
 */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        /* pselect6 takes a timespec, so convert usec -> nsec */
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        /* Linux select() updates the timeout with the time left */
        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
1453 #if defined(TARGET_WANT_OLD_SYS_SELECT)
/* Emulate the old_select() syscall flavour where all five select()
 * arguments are packed into a single guest struct pointed to by arg1.
 * Unpacks the struct and forwards to do_select().
 */
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
/* Create a pipe with flags via the host pipe2() when available;
 * otherwise report the syscall as unimplemented.
 */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}
/* Emulate pipe()/pipe2() for the guest.  'pipedes' is the guest
 * address of the two-int result array; 'is_pipe2' distinguishes the
 * two entry points because several targets return the original pipe()
 * fds in CPU registers rather than through memory.
 */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        /* second fd goes in a4; first fd is the return value */
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
/* Convert a guest struct target_ip_mreqn (multicast membership
 * request) to the host struct ip_mreqn.  The addresses are already in
 * network byte order and are copied verbatim; only the optional
 * interface index is byte-swapped, and only when the caller passed
 * the full ip_mreqn-sized structure.
 */
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
/* Convert a guest sockaddr at 'target_addr' into the host sockaddr
 * 'addr'.  Handles the AF_UNIX sun_path termination quirk, byte-swaps
 * the family, and fixes up AF_NETLINK / AF_PACKET fields.  A
 * per-fd translation hook (fd_trans_target_to_host_addr) takes
 * precedence when registered.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send an incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* last byte non-zero but a NUL follows: include it */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
/* Copy a host sockaddr back into guest memory, byte-swapping the
 * family (only when enough bytes were requested to include it) and
 * the AF_NETLINK / AF_PACKET specific fields.  'len' may legitimately
 * truncate the structure, matching kernel behaviour.  Returns 0 or
 * -TARGET_EFAULT.
 */
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    /* NOTE(review): guard for len == 0 reconstructed -- confirm against
     * upstream; a zero-length copy is a no-op either way. */
    if (len == 0) {
        return 0;
    }

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    /* only swap the family if the caller asked for enough bytes */
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
        struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
1627 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1628 struct target_msghdr
*target_msgh
)
1630 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1631 abi_long msg_controllen
;
1632 abi_ulong target_cmsg_addr
;
1633 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1634 socklen_t space
= 0;
1636 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1637 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1639 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1640 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1641 target_cmsg_start
= target_cmsg
;
1643 return -TARGET_EFAULT
;
1645 while (cmsg
&& target_cmsg
) {
1646 void *data
= CMSG_DATA(cmsg
);
1647 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1649 int len
= tswapal(target_cmsg
->cmsg_len
)
1650 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1652 space
+= CMSG_SPACE(len
);
1653 if (space
> msgh
->msg_controllen
) {
1654 space
-= CMSG_SPACE(len
);
1655 /* This is a QEMU bug, since we allocated the payload
1656 * area ourselves (unlike overflow in host-to-target
1657 * conversion, which is just the guest giving us a buffer
1658 * that's too small). It can't happen for the payload types
1659 * we currently support; if it becomes an issue in future
1660 * we would need to improve our allocation strategy to
1661 * something more intelligent than "twice the size of the
1662 * target buffer we're reading from".
1664 gemu_log("Host cmsg overflow\n");
1668 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1669 cmsg
->cmsg_level
= SOL_SOCKET
;
1671 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1673 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1674 cmsg
->cmsg_len
= CMSG_LEN(len
);
1676 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1677 int *fd
= (int *)data
;
1678 int *target_fd
= (int *)target_data
;
1679 int i
, numfds
= len
/ sizeof(int);
1681 for (i
= 0; i
< numfds
; i
++) {
1682 __get_user(fd
[i
], target_fd
+ i
);
1684 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1685 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1686 struct ucred
*cred
= (struct ucred
*)data
;
1687 struct target_ucred
*target_cred
=
1688 (struct target_ucred
*)target_data
;
1690 __get_user(cred
->pid
, &target_cred
->pid
);
1691 __get_user(cred
->uid
, &target_cred
->uid
);
1692 __get_user(cred
->gid
, &target_cred
->gid
);
1694 gemu_log("Unsupported ancillary data: %d/%d\n",
1695 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1696 memcpy(data
, target_data
, len
);
1699 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1700 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1703 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1705 msgh
->msg_controllen
= space
;
1709 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1710 struct msghdr
*msgh
)
1712 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1713 abi_long msg_controllen
;
1714 abi_ulong target_cmsg_addr
;
1715 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1716 socklen_t space
= 0;
1718 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1719 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1721 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1722 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1723 target_cmsg_start
= target_cmsg
;
1725 return -TARGET_EFAULT
;
1727 while (cmsg
&& target_cmsg
) {
1728 void *data
= CMSG_DATA(cmsg
);
1729 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1731 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1732 int tgt_len
, tgt_space
;
1734 /* We never copy a half-header but may copy half-data;
1735 * this is Linux's behaviour in put_cmsg(). Note that
1736 * truncation here is a guest problem (which we report
1737 * to the guest via the CTRUNC bit), unlike truncation
1738 * in target_to_host_cmsg, which is a QEMU bug.
1740 if (msg_controllen
< sizeof(struct cmsghdr
)) {
1741 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1745 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1746 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1748 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1750 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1752 tgt_len
= TARGET_CMSG_LEN(len
);
1754 /* Payload types which need a different size of payload on
1755 * the target must adjust tgt_len here.
1757 switch (cmsg
->cmsg_level
) {
1759 switch (cmsg
->cmsg_type
) {
1761 tgt_len
= sizeof(struct target_timeval
);
1770 if (msg_controllen
< tgt_len
) {
1771 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1772 tgt_len
= msg_controllen
;
1775 /* We must now copy-and-convert len bytes of payload
1776 * into tgt_len bytes of destination space. Bear in mind
1777 * that in both source and destination we may be dealing
1778 * with a truncated value!
1780 switch (cmsg
->cmsg_level
) {
1782 switch (cmsg
->cmsg_type
) {
1785 int *fd
= (int *)data
;
1786 int *target_fd
= (int *)target_data
;
1787 int i
, numfds
= tgt_len
/ sizeof(int);
1789 for (i
= 0; i
< numfds
; i
++) {
1790 __put_user(fd
[i
], target_fd
+ i
);
1796 struct timeval
*tv
= (struct timeval
*)data
;
1797 struct target_timeval
*target_tv
=
1798 (struct target_timeval
*)target_data
;
1800 if (len
!= sizeof(struct timeval
) ||
1801 tgt_len
!= sizeof(struct target_timeval
)) {
1805 /* copy struct timeval to target */
1806 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1807 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1810 case SCM_CREDENTIALS
:
1812 struct ucred
*cred
= (struct ucred
*)data
;
1813 struct target_ucred
*target_cred
=
1814 (struct target_ucred
*)target_data
;
1816 __put_user(cred
->pid
, &target_cred
->pid
);
1817 __put_user(cred
->uid
, &target_cred
->uid
);
1818 __put_user(cred
->gid
, &target_cred
->gid
);
1828 gemu_log("Unsupported ancillary data: %d/%d\n",
1829 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1830 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1831 if (tgt_len
> len
) {
1832 memset(target_data
+ len
, 0, tgt_len
- len
);
1836 target_cmsg
->cmsg_len
= tswapal(tgt_len
);
1837 tgt_space
= TARGET_CMSG_SPACE(len
);
1838 if (msg_controllen
< tgt_space
) {
1839 tgt_space
= msg_controllen
;
1841 msg_controllen
-= tgt_space
;
1843 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1844 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1847 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1849 target_msgh
->msg_controllen
= tswapal(space
);
/* Byte-swap every field of a netlink message header in place.  The
 * conversion is its own inverse, so the same helper serves both the
 * host-to-target and target-to-host directions.
 */
static void tswap_nlmsghdr(struct nlmsghdr *hdr)
{
    /* 32-bit fields */
    hdr->nlmsg_len = tswap32(hdr->nlmsg_len);
    hdr->nlmsg_seq = tswap32(hdr->nlmsg_seq);
    hdr->nlmsg_pid = tswap32(hdr->nlmsg_pid);
    /* 16-bit fields */
    hdr->nlmsg_type = tswap16(hdr->nlmsg_type);
    hdr->nlmsg_flags = tswap16(hdr->nlmsg_flags);
}
/* Walk a buffer of host-byte-order netlink messages, invoking the
 * payload converter on each ordinary message and byte-swapping every
 * header for the guest.  Stops early on NLMSG_DONE / NLMSG_ERROR or
 * when a header fails validation.  Returns 0 or the converter's
 * negative error.
 */
static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*host_to_target_nlmsg)
                                              (struct nlmsghdr *))
{
    uint32_t nlmsg_len;
    abi_long ret;

    while (len > sizeof(struct nlmsghdr)) {

        /* remember the host-order length before the header is swapped */
        nlmsg_len = nlh->nlmsg_len;
        if (nlmsg_len < sizeof(struct nlmsghdr) ||
            nlmsg_len > len) {
            break;
        }

        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            tswap_nlmsghdr(nlh);
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            tswap_nlmsghdr(&e->msg);
            tswap_nlmsghdr(nlh);
            return 0;
        }
        default:
            ret = host_to_target_nlmsg(nlh);
            if (ret < 0) {
                tswap_nlmsghdr(nlh);
                return ret;
            }
            break;
        }
        tswap_nlmsghdr(nlh);
        len -= NLMSG_ALIGN(nlmsg_len);
        nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
    }
    return 0;
}
/* Walk a buffer of guest-byte-order netlink messages, byte-swapping
 * each header to host order and invoking the payload converter on
 * ordinary messages.  Length fields are validated in guest order
 * (via tswap32) before the header is swapped.  Returns 0 or the
 * converter's negative error.
 */
static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*target_to_host_nlmsg)
                                              (struct nlmsghdr *))
{
    abi_long ret;

    while (len > sizeof(struct nlmsghdr)) {
        if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
            tswap32(nlh->nlmsg_len) > len) {
            break;
        }
        tswap_nlmsghdr(nlh);
        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            tswap_nlmsghdr(&e->msg);
            return 0;
        }
        default:
            ret = target_to_host_nlmsg(nlh);
            if (ret < 0) {
                return ret;
            }
            break;
        }
        /* header is already host order here */
        len -= NLMSG_ALIGN(nlh->nlmsg_len);
        nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
    }
    return 0;
}
1944 #ifdef CONFIG_RTNETLINK
/* Iterate over a run of host-byte-order netlink attributes, calling
 * the per-attribute converter (with 'context') and then byte-swapping
 * each attribute header for the guest.  Stops on a malformed length.
 * Returns 0 or the converter's negative error.
 */
static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
                                               size_t len, void *context,
                                               abi_long (*host_to_target_nlattr)
                                               (struct nlattr *,
                                                void *context))
{
    unsigned short nla_len;
    abi_long ret;

    while (len > sizeof(struct nlattr)) {
        /* capture the host-order length before swapping the header */
        nla_len = nlattr->nla_len;
        if (nla_len < sizeof(struct nlattr) ||
            nla_len > len) {
            break;
        }
        ret = host_to_target_nlattr(nlattr, context);
        nlattr->nla_len = tswap16(nlattr->nla_len);
        nlattr->nla_type = tswap16(nlattr->nla_type);
        if (ret < 0) {
            return ret;
        }
        len -= NLA_ALIGN(nla_len);
        nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
    }
    return 0;
}
/* Iterate over a run of host-byte-order rtnetlink attributes, calling
 * the per-attribute converter and then byte-swapping each attribute
 * header for the guest.  Stops on a malformed length.  Returns 0 or
 * the converter's negative error.
 */
static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
                                               uint32_t len,
                                               abi_long (*host_to_target_rtattr)
                                               (struct rtattr *))
{
    unsigned short rta_len;
    abi_long ret;

    while (len > sizeof(struct rtattr)) {
        /* capture the host-order length before swapping the header */
        rta_len = rtattr->rta_len;
        if (rta_len < sizeof(struct rtattr) ||
            rta_len > len) {
            break;
        }
        ret = host_to_target_rtattr(rtattr);
        rtattr->rta_len = tswap16(rtattr->rta_len);
        rtattr->rta_type = tswap16(rtattr->rta_type);
        if (ret < 0) {
            return ret;
        }
        len -= RTA_ALIGN(rta_len);
        rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
    }
    return 0;
}
/* Return a pointer to the payload that follows a netlink attribute
 * header (the userspace analogue of the kernel's nla_data()).  The
 * offset is applied to the char * inside the parentheses: the
 * original form added NLA_HDRLEN to a void *, which is arithmetic on
 * an incomplete type and only works as a GNU extension.
 */
#define NLA_DATA(nla) ((void *)((char *)(nla) + NLA_HDRLEN))
2000 static abi_long
host_to_target_data_bridge_nlattr(struct nlattr
*nlattr
,
2007 switch (nlattr
->nla_type
) {
2009 case QEMU_IFLA_BR_FDB_FLUSH
:
2012 case QEMU_IFLA_BR_GROUP_ADDR
:
2015 case QEMU_IFLA_BR_VLAN_FILTERING
:
2016 case QEMU_IFLA_BR_TOPOLOGY_CHANGE
:
2017 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED
:
2018 case QEMU_IFLA_BR_MCAST_ROUTER
:
2019 case QEMU_IFLA_BR_MCAST_SNOOPING
:
2020 case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR
:
2021 case QEMU_IFLA_BR_MCAST_QUERIER
:
2022 case QEMU_IFLA_BR_NF_CALL_IPTABLES
:
2023 case QEMU_IFLA_BR_NF_CALL_IP6TABLES
:
2024 case QEMU_IFLA_BR_NF_CALL_ARPTABLES
:
2027 case QEMU_IFLA_BR_PRIORITY
:
2028 case QEMU_IFLA_BR_VLAN_PROTOCOL
:
2029 case QEMU_IFLA_BR_GROUP_FWD_MASK
:
2030 case QEMU_IFLA_BR_ROOT_PORT
:
2031 case QEMU_IFLA_BR_VLAN_DEFAULT_PVID
:
2032 u16
= NLA_DATA(nlattr
);
2033 *u16
= tswap16(*u16
);
2036 case QEMU_IFLA_BR_FORWARD_DELAY
:
2037 case QEMU_IFLA_BR_HELLO_TIME
:
2038 case QEMU_IFLA_BR_MAX_AGE
:
2039 case QEMU_IFLA_BR_AGEING_TIME
:
2040 case QEMU_IFLA_BR_STP_STATE
:
2041 case QEMU_IFLA_BR_ROOT_PATH_COST
:
2042 case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY
:
2043 case QEMU_IFLA_BR_MCAST_HASH_MAX
:
2044 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT
:
2045 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT
:
2046 u32
= NLA_DATA(nlattr
);
2047 *u32
= tswap32(*u32
);
2050 case QEMU_IFLA_BR_HELLO_TIMER
:
2051 case QEMU_IFLA_BR_TCN_TIMER
:
2052 case QEMU_IFLA_BR_GC_TIMER
:
2053 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER
:
2054 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL
:
2055 case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL
:
2056 case QEMU_IFLA_BR_MCAST_QUERIER_INTVL
:
2057 case QEMU_IFLA_BR_MCAST_QUERY_INTVL
:
2058 case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
:
2059 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL
:
2060 u64
= NLA_DATA(nlattr
);
2061 *u64
= tswap64(*u64
);
2063 /* ifla_bridge_id: uint8_t[] */
2064 case QEMU_IFLA_BR_ROOT_ID
:
2065 case QEMU_IFLA_BR_BRIDGE_ID
:
2068 gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr
->nla_type
);
2074 static abi_long
host_to_target_slave_data_bridge_nlattr(struct nlattr
*nlattr
,
2081 switch (nlattr
->nla_type
) {
2083 case QEMU_IFLA_BRPORT_STATE
:
2084 case QEMU_IFLA_BRPORT_MODE
:
2085 case QEMU_IFLA_BRPORT_GUARD
:
2086 case QEMU_IFLA_BRPORT_PROTECT
:
2087 case QEMU_IFLA_BRPORT_FAST_LEAVE
:
2088 case QEMU_IFLA_BRPORT_LEARNING
:
2089 case QEMU_IFLA_BRPORT_UNICAST_FLOOD
:
2090 case QEMU_IFLA_BRPORT_PROXYARP
:
2091 case QEMU_IFLA_BRPORT_LEARNING_SYNC
:
2092 case QEMU_IFLA_BRPORT_PROXYARP_WIFI
:
2093 case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
:
2094 case QEMU_IFLA_BRPORT_CONFIG_PENDING
:
2095 case QEMU_IFLA_BRPORT_MULTICAST_ROUTER
:
2098 case QEMU_IFLA_BRPORT_PRIORITY
:
2099 case QEMU_IFLA_BRPORT_DESIGNATED_PORT
:
2100 case QEMU_IFLA_BRPORT_DESIGNATED_COST
:
2101 case QEMU_IFLA_BRPORT_ID
:
2102 case QEMU_IFLA_BRPORT_NO
:
2103 u16
= NLA_DATA(nlattr
);
2104 *u16
= tswap16(*u16
);
2107 case QEMU_IFLA_BRPORT_COST
:
2108 u32
= NLA_DATA(nlattr
);
2109 *u32
= tswap32(*u32
);
2112 case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER
:
2113 case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER
:
2114 case QEMU_IFLA_BRPORT_HOLD_TIMER
:
2115 u64
= NLA_DATA(nlattr
);
2116 *u64
= tswap64(*u64
);
2118 /* ifla_bridge_id: uint8_t[] */
2119 case QEMU_IFLA_BRPORT_ROOT_ID
:
2120 case QEMU_IFLA_BRPORT_BRIDGE_ID
:
2123 gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr
->nla_type
);
2129 struct linkinfo_context
{
2136 static abi_long
host_to_target_data_linkinfo_nlattr(struct nlattr
*nlattr
,
2139 struct linkinfo_context
*li_context
= context
;
2141 switch (nlattr
->nla_type
) {
2143 case QEMU_IFLA_INFO_KIND
:
2144 li_context
->name
= NLA_DATA(nlattr
);
2145 li_context
->len
= nlattr
->nla_len
- NLA_HDRLEN
;
2147 case QEMU_IFLA_INFO_SLAVE_KIND
:
2148 li_context
->slave_name
= NLA_DATA(nlattr
);
2149 li_context
->slave_len
= nlattr
->nla_len
- NLA_HDRLEN
;
2152 case QEMU_IFLA_INFO_XSTATS
:
2153 /* FIXME: only used by CAN */
2156 case QEMU_IFLA_INFO_DATA
:
2157 if (strncmp(li_context
->name
, "bridge",
2158 li_context
->len
) == 0) {
2159 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
2162 host_to_target_data_bridge_nlattr
);
2164 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context
->name
);
2167 case QEMU_IFLA_INFO_SLAVE_DATA
:
2168 if (strncmp(li_context
->slave_name
, "bridge",
2169 li_context
->slave_len
) == 0) {
2170 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
2173 host_to_target_slave_data_bridge_nlattr
);
2175 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2176 li_context
->slave_name
);
2180 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr
->nla_type
);
2187 static abi_long
host_to_target_data_inet_nlattr(struct nlattr
*nlattr
,
2193 switch (nlattr
->nla_type
) {
2194 case QEMU_IFLA_INET_CONF
:
2195 u32
= NLA_DATA(nlattr
);
2196 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u32
);
2198 u32
[i
] = tswap32(u32
[i
]);
2202 gemu_log("Unknown host AF_INET type: %d\n", nlattr
->nla_type
);
2207 static abi_long
host_to_target_data_inet6_nlattr(struct nlattr
*nlattr
,
2212 struct ifla_cacheinfo
*ci
;
2215 switch (nlattr
->nla_type
) {
2217 case QEMU_IFLA_INET6_TOKEN
:
2220 case QEMU_IFLA_INET6_ADDR_GEN_MODE
:
2223 case QEMU_IFLA_INET6_FLAGS
:
2224 u32
= NLA_DATA(nlattr
);
2225 *u32
= tswap32(*u32
);
2228 case QEMU_IFLA_INET6_CONF
:
2229 u32
= NLA_DATA(nlattr
);
2230 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u32
);
2232 u32
[i
] = tswap32(u32
[i
]);
2235 /* ifla_cacheinfo */
2236 case QEMU_IFLA_INET6_CACHEINFO
:
2237 ci
= NLA_DATA(nlattr
);
2238 ci
->max_reasm_len
= tswap32(ci
->max_reasm_len
);
2239 ci
->tstamp
= tswap32(ci
->tstamp
);
2240 ci
->reachable_time
= tswap32(ci
->reachable_time
);
2241 ci
->retrans_time
= tswap32(ci
->retrans_time
);
2244 case QEMU_IFLA_INET6_STATS
:
2245 case QEMU_IFLA_INET6_ICMP6STATS
:
2246 u64
= NLA_DATA(nlattr
);
2247 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u64
);
2249 u64
[i
] = tswap64(u64
[i
]);
2253 gemu_log("Unknown host AF_INET6 type: %d\n", nlattr
->nla_type
);
2258 static abi_long
host_to_target_data_spec_nlattr(struct nlattr
*nlattr
,
2261 switch (nlattr
->nla_type
) {
2263 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2265 host_to_target_data_inet_nlattr
);
2267 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2269 host_to_target_data_inet6_nlattr
);
2271 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr
->nla_type
);
2277 static abi_long
host_to_target_data_link_rtattr(struct rtattr
*rtattr
)
2280 struct rtnl_link_stats
*st
;
2281 struct rtnl_link_stats64
*st64
;
2282 struct rtnl_link_ifmap
*map
;
2283 struct linkinfo_context li_context
;
2285 switch (rtattr
->rta_type
) {
2287 case QEMU_IFLA_ADDRESS
:
2288 case QEMU_IFLA_BROADCAST
:
2290 case QEMU_IFLA_IFNAME
:
2291 case QEMU_IFLA_QDISC
:
2294 case QEMU_IFLA_OPERSTATE
:
2295 case QEMU_IFLA_LINKMODE
:
2296 case QEMU_IFLA_CARRIER
:
2297 case QEMU_IFLA_PROTO_DOWN
:
2301 case QEMU_IFLA_LINK
:
2302 case QEMU_IFLA_WEIGHT
:
2303 case QEMU_IFLA_TXQLEN
:
2304 case QEMU_IFLA_CARRIER_CHANGES
:
2305 case QEMU_IFLA_NUM_RX_QUEUES
:
2306 case QEMU_IFLA_NUM_TX_QUEUES
:
2307 case QEMU_IFLA_PROMISCUITY
:
2308 case QEMU_IFLA_EXT_MASK
:
2309 case QEMU_IFLA_LINK_NETNSID
:
2310 case QEMU_IFLA_GROUP
:
2311 case QEMU_IFLA_MASTER
:
2312 case QEMU_IFLA_NUM_VF
:
2313 u32
= RTA_DATA(rtattr
);
2314 *u32
= tswap32(*u32
);
2316 /* struct rtnl_link_stats */
2317 case QEMU_IFLA_STATS
:
2318 st
= RTA_DATA(rtattr
);
2319 st
->rx_packets
= tswap32(st
->rx_packets
);
2320 st
->tx_packets
= tswap32(st
->tx_packets
);
2321 st
->rx_bytes
= tswap32(st
->rx_bytes
);
2322 st
->tx_bytes
= tswap32(st
->tx_bytes
);
2323 st
->rx_errors
= tswap32(st
->rx_errors
);
2324 st
->tx_errors
= tswap32(st
->tx_errors
);
2325 st
->rx_dropped
= tswap32(st
->rx_dropped
);
2326 st
->tx_dropped
= tswap32(st
->tx_dropped
);
2327 st
->multicast
= tswap32(st
->multicast
);
2328 st
->collisions
= tswap32(st
->collisions
);
2330 /* detailed rx_errors: */
2331 st
->rx_length_errors
= tswap32(st
->rx_length_errors
);
2332 st
->rx_over_errors
= tswap32(st
->rx_over_errors
);
2333 st
->rx_crc_errors
= tswap32(st
->rx_crc_errors
);
2334 st
->rx_frame_errors
= tswap32(st
->rx_frame_errors
);
2335 st
->rx_fifo_errors
= tswap32(st
->rx_fifo_errors
);
2336 st
->rx_missed_errors
= tswap32(st
->rx_missed_errors
);
2338 /* detailed tx_errors */
2339 st
->tx_aborted_errors
= tswap32(st
->tx_aborted_errors
);
2340 st
->tx_carrier_errors
= tswap32(st
->tx_carrier_errors
);
2341 st
->tx_fifo_errors
= tswap32(st
->tx_fifo_errors
);
2342 st
->tx_heartbeat_errors
= tswap32(st
->tx_heartbeat_errors
);
2343 st
->tx_window_errors
= tswap32(st
->tx_window_errors
);
2346 st
->rx_compressed
= tswap32(st
->rx_compressed
);
2347 st
->tx_compressed
= tswap32(st
->tx_compressed
);
2349 /* struct rtnl_link_stats64 */
2350 case QEMU_IFLA_STATS64
:
2351 st64
= RTA_DATA(rtattr
);
2352 st64
->rx_packets
= tswap64(st64
->rx_packets
);
2353 st64
->tx_packets
= tswap64(st64
->tx_packets
);
2354 st64
->rx_bytes
= tswap64(st64
->rx_bytes
);
2355 st64
->tx_bytes
= tswap64(st64
->tx_bytes
);
2356 st64
->rx_errors
= tswap64(st64
->rx_errors
);
2357 st64
->tx_errors
= tswap64(st64
->tx_errors
);
2358 st64
->rx_dropped
= tswap64(st64
->rx_dropped
);
2359 st64
->tx_dropped
= tswap64(st64
->tx_dropped
);
2360 st64
->multicast
= tswap64(st64
->multicast
);
2361 st64
->collisions
= tswap64(st64
->collisions
);
2363 /* detailed rx_errors: */
2364 st64
->rx_length_errors
= tswap64(st64
->rx_length_errors
);
2365 st64
->rx_over_errors
= tswap64(st64
->rx_over_errors
);
2366 st64
->rx_crc_errors
= tswap64(st64
->rx_crc_errors
);
2367 st64
->rx_frame_errors
= tswap64(st64
->rx_frame_errors
);
2368 st64
->rx_fifo_errors
= tswap64(st64
->rx_fifo_errors
);
2369 st64
->rx_missed_errors
= tswap64(st64
->rx_missed_errors
);
2371 /* detailed tx_errors */
2372 st64
->tx_aborted_errors
= tswap64(st64
->tx_aborted_errors
);
2373 st64
->tx_carrier_errors
= tswap64(st64
->tx_carrier_errors
);
2374 st64
->tx_fifo_errors
= tswap64(st64
->tx_fifo_errors
);
2375 st64
->tx_heartbeat_errors
= tswap64(st64
->tx_heartbeat_errors
);
2376 st64
->tx_window_errors
= tswap64(st64
->tx_window_errors
);
2379 st64
->rx_compressed
= tswap64(st64
->rx_compressed
);
2380 st64
->tx_compressed
= tswap64(st64
->tx_compressed
);
2382 /* struct rtnl_link_ifmap */
2384 map
= RTA_DATA(rtattr
);
2385 map
->mem_start
= tswap64(map
->mem_start
);
2386 map
->mem_end
= tswap64(map
->mem_end
);
2387 map
->base_addr
= tswap64(map
->base_addr
);
2388 map
->irq
= tswap16(map
->irq
);
2391 case QEMU_IFLA_LINKINFO
:
2392 memset(&li_context
, 0, sizeof(li_context
));
2393 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2395 host_to_target_data_linkinfo_nlattr
);
2396 case QEMU_IFLA_AF_SPEC
:
2397 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2399 host_to_target_data_spec_nlattr
);
2401 gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr
->rta_type
);
2407 static abi_long
host_to_target_data_addr_rtattr(struct rtattr
*rtattr
)
2410 struct ifa_cacheinfo
*ci
;
2412 switch (rtattr
->rta_type
) {
2413 /* binary: depends on family type */
2423 u32
= RTA_DATA(rtattr
);
2424 *u32
= tswap32(*u32
);
2426 /* struct ifa_cacheinfo */
2428 ci
= RTA_DATA(rtattr
);
2429 ci
->ifa_prefered
= tswap32(ci
->ifa_prefered
);
2430 ci
->ifa_valid
= tswap32(ci
->ifa_valid
);
2431 ci
->cstamp
= tswap32(ci
->cstamp
);
2432 ci
->tstamp
= tswap32(ci
->tstamp
);
2435 gemu_log("Unknown host IFA type: %d\n", rtattr
->rta_type
);
2441 static abi_long
host_to_target_data_route_rtattr(struct rtattr
*rtattr
)
2444 switch (rtattr
->rta_type
) {
2445 /* binary: depends on family type */
2454 u32
= RTA_DATA(rtattr
);
2455 *u32
= tswap32(*u32
);
2458 gemu_log("Unknown host RTA type: %d\n", rtattr
->rta_type
);
2464 static abi_long
host_to_target_link_rtattr(struct rtattr
*rtattr
,
2465 uint32_t rtattr_len
)
2467 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2468 host_to_target_data_link_rtattr
);
2471 static abi_long
host_to_target_addr_rtattr(struct rtattr
*rtattr
,
2472 uint32_t rtattr_len
)
2474 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2475 host_to_target_data_addr_rtattr
);
2478 static abi_long
host_to_target_route_rtattr(struct rtattr
*rtattr
,
2479 uint32_t rtattr_len
)
2481 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2482 host_to_target_data_route_rtattr
);
2485 static abi_long
host_to_target_data_route(struct nlmsghdr
*nlh
)
2488 struct ifinfomsg
*ifi
;
2489 struct ifaddrmsg
*ifa
;
2492 nlmsg_len
= nlh
->nlmsg_len
;
2493 switch (nlh
->nlmsg_type
) {
2497 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2498 ifi
= NLMSG_DATA(nlh
);
2499 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2500 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2501 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2502 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2503 host_to_target_link_rtattr(IFLA_RTA(ifi
),
2504 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifi
)));
2510 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2511 ifa
= NLMSG_DATA(nlh
);
2512 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2513 host_to_target_addr_rtattr(IFA_RTA(ifa
),
2514 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifa
)));
2520 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2521 rtm
= NLMSG_DATA(nlh
);
2522 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2523 host_to_target_route_rtattr(RTM_RTA(rtm
),
2524 nlmsg_len
- NLMSG_LENGTH(sizeof(*rtm
)));
2528 return -TARGET_EINVAL
;
2533 static inline abi_long
host_to_target_nlmsg_route(struct nlmsghdr
*nlh
,
2536 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_route
);
2539 static abi_long
target_to_host_for_each_rtattr(struct rtattr
*rtattr
,
2541 abi_long (*target_to_host_rtattr
)
2546 while (len
>= sizeof(struct rtattr
)) {
2547 if (tswap16(rtattr
->rta_len
) < sizeof(struct rtattr
) ||
2548 tswap16(rtattr
->rta_len
) > len
) {
2551 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
2552 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
2553 ret
= target_to_host_rtattr(rtattr
);
2557 len
-= RTA_ALIGN(rtattr
->rta_len
);
2558 rtattr
= (struct rtattr
*)(((char *)rtattr
) +
2559 RTA_ALIGN(rtattr
->rta_len
));
2564 static abi_long
target_to_host_data_link_rtattr(struct rtattr
*rtattr
)
2566 switch (rtattr
->rta_type
) {
2568 gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr
->rta_type
);
2574 static abi_long
target_to_host_data_addr_rtattr(struct rtattr
*rtattr
)
2576 switch (rtattr
->rta_type
) {
2577 /* binary: depends on family type */
2582 gemu_log("Unknown target IFA type: %d\n", rtattr
->rta_type
);
2588 static abi_long
target_to_host_data_route_rtattr(struct rtattr
*rtattr
)
2591 switch (rtattr
->rta_type
) {
2592 /* binary: depends on family type */
2599 u32
= RTA_DATA(rtattr
);
2600 *u32
= tswap32(*u32
);
2603 gemu_log("Unknown target RTA type: %d\n", rtattr
->rta_type
);
2609 static void target_to_host_link_rtattr(struct rtattr
*rtattr
,
2610 uint32_t rtattr_len
)
2612 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2613 target_to_host_data_link_rtattr
);
2616 static void target_to_host_addr_rtattr(struct rtattr
*rtattr
,
2617 uint32_t rtattr_len
)
2619 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2620 target_to_host_data_addr_rtattr
);
2623 static void target_to_host_route_rtattr(struct rtattr
*rtattr
,
2624 uint32_t rtattr_len
)
2626 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2627 target_to_host_data_route_rtattr
);
2630 static abi_long
target_to_host_data_route(struct nlmsghdr
*nlh
)
2632 struct ifinfomsg
*ifi
;
2633 struct ifaddrmsg
*ifa
;
2636 switch (nlh
->nlmsg_type
) {
2641 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2642 ifi
= NLMSG_DATA(nlh
);
2643 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2644 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2645 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2646 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2647 target_to_host_link_rtattr(IFLA_RTA(ifi
), nlh
->nlmsg_len
-
2648 NLMSG_LENGTH(sizeof(*ifi
)));
2654 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2655 ifa
= NLMSG_DATA(nlh
);
2656 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2657 target_to_host_addr_rtattr(IFA_RTA(ifa
), nlh
->nlmsg_len
-
2658 NLMSG_LENGTH(sizeof(*ifa
)));
2665 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2666 rtm
= NLMSG_DATA(nlh
);
2667 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2668 target_to_host_route_rtattr(RTM_RTA(rtm
), nlh
->nlmsg_len
-
2669 NLMSG_LENGTH(sizeof(*rtm
)));
2673 return -TARGET_EOPNOTSUPP
;
2678 static abi_long
target_to_host_nlmsg_route(struct nlmsghdr
*nlh
, size_t len
)
2680 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_route
);
2682 #endif /* CONFIG_RTNETLINK */
2684 static abi_long
host_to_target_data_audit(struct nlmsghdr
*nlh
)
2686 switch (nlh
->nlmsg_type
) {
2688 gemu_log("Unknown host audit message type %d\n",
2690 return -TARGET_EINVAL
;
2695 static inline abi_long
host_to_target_nlmsg_audit(struct nlmsghdr
*nlh
,
2698 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_audit
);
2701 static abi_long
target_to_host_data_audit(struct nlmsghdr
*nlh
)
2703 switch (nlh
->nlmsg_type
) {
2705 case AUDIT_FIRST_USER_MSG
... AUDIT_LAST_USER_MSG
:
2706 case AUDIT_FIRST_USER_MSG2
... AUDIT_LAST_USER_MSG2
:
2709 gemu_log("Unknown target audit message type %d\n",
2711 return -TARGET_EINVAL
;
2717 static abi_long
target_to_host_nlmsg_audit(struct nlmsghdr
*nlh
, size_t len
)
2719 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_audit
);
2722 /* do_setsockopt() Must return target values and target errnos. */
2723 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
2724 abi_ulong optval_addr
, socklen_t optlen
)
2728 struct ip_mreqn
*ip_mreq
;
2729 struct ip_mreq_source
*ip_mreq_source
;
2733 /* TCP options all take an 'int' value. */
2734 if (optlen
< sizeof(uint32_t))
2735 return -TARGET_EINVAL
;
2737 if (get_user_u32(val
, optval_addr
))
2738 return -TARGET_EFAULT
;
2739 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2746 case IP_ROUTER_ALERT
:
2750 case IP_MTU_DISCOVER
:
2756 case IP_MULTICAST_TTL
:
2757 case IP_MULTICAST_LOOP
:
2759 if (optlen
>= sizeof(uint32_t)) {
2760 if (get_user_u32(val
, optval_addr
))
2761 return -TARGET_EFAULT
;
2762 } else if (optlen
>= 1) {
2763 if (get_user_u8(val
, optval_addr
))
2764 return -TARGET_EFAULT
;
2766 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2768 case IP_ADD_MEMBERSHIP
:
2769 case IP_DROP_MEMBERSHIP
:
2770 if (optlen
< sizeof (struct target_ip_mreq
) ||
2771 optlen
> sizeof (struct target_ip_mreqn
))
2772 return -TARGET_EINVAL
;
2774 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
2775 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
2776 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
2779 case IP_BLOCK_SOURCE
:
2780 case IP_UNBLOCK_SOURCE
:
2781 case IP_ADD_SOURCE_MEMBERSHIP
:
2782 case IP_DROP_SOURCE_MEMBERSHIP
:
2783 if (optlen
!= sizeof (struct target_ip_mreq_source
))
2784 return -TARGET_EINVAL
;
2786 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2787 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2788 unlock_user (ip_mreq_source
, optval_addr
, 0);
2797 case IPV6_MTU_DISCOVER
:
2800 case IPV6_RECVPKTINFO
:
2802 if (optlen
< sizeof(uint32_t)) {
2803 return -TARGET_EINVAL
;
2805 if (get_user_u32(val
, optval_addr
)) {
2806 return -TARGET_EFAULT
;
2808 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2809 &val
, sizeof(val
)));
2818 /* struct icmp_filter takes an u32 value */
2819 if (optlen
< sizeof(uint32_t)) {
2820 return -TARGET_EINVAL
;
2823 if (get_user_u32(val
, optval_addr
)) {
2824 return -TARGET_EFAULT
;
2826 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2827 &val
, sizeof(val
)));
2834 case TARGET_SOL_SOCKET
:
2836 case TARGET_SO_RCVTIMEO
:
2840 optname
= SO_RCVTIMEO
;
2843 if (optlen
!= sizeof(struct target_timeval
)) {
2844 return -TARGET_EINVAL
;
2847 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2848 return -TARGET_EFAULT
;
2851 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2855 case TARGET_SO_SNDTIMEO
:
2856 optname
= SO_SNDTIMEO
;
2858 case TARGET_SO_ATTACH_FILTER
:
2860 struct target_sock_fprog
*tfprog
;
2861 struct target_sock_filter
*tfilter
;
2862 struct sock_fprog fprog
;
2863 struct sock_filter
*filter
;
2866 if (optlen
!= sizeof(*tfprog
)) {
2867 return -TARGET_EINVAL
;
2869 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2870 return -TARGET_EFAULT
;
2872 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2873 tswapal(tfprog
->filter
), 0)) {
2874 unlock_user_struct(tfprog
, optval_addr
, 1);
2875 return -TARGET_EFAULT
;
2878 fprog
.len
= tswap16(tfprog
->len
);
2879 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2880 if (filter
== NULL
) {
2881 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2882 unlock_user_struct(tfprog
, optval_addr
, 1);
2883 return -TARGET_ENOMEM
;
2885 for (i
= 0; i
< fprog
.len
; i
++) {
2886 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2887 filter
[i
].jt
= tfilter
[i
].jt
;
2888 filter
[i
].jf
= tfilter
[i
].jf
;
2889 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2891 fprog
.filter
= filter
;
2893 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2894 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2897 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2898 unlock_user_struct(tfprog
, optval_addr
, 1);
2901 case TARGET_SO_BINDTODEVICE
:
2903 char *dev_ifname
, *addr_ifname
;
2905 if (optlen
> IFNAMSIZ
- 1) {
2906 optlen
= IFNAMSIZ
- 1;
2908 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2910 return -TARGET_EFAULT
;
2912 optname
= SO_BINDTODEVICE
;
2913 addr_ifname
= alloca(IFNAMSIZ
);
2914 memcpy(addr_ifname
, dev_ifname
, optlen
);
2915 addr_ifname
[optlen
] = 0;
2916 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2917 addr_ifname
, optlen
));
2918 unlock_user (dev_ifname
, optval_addr
, 0);
2921 /* Options with 'int' argument. */
2922 case TARGET_SO_DEBUG
:
2925 case TARGET_SO_REUSEADDR
:
2926 optname
= SO_REUSEADDR
;
2928 case TARGET_SO_TYPE
:
2931 case TARGET_SO_ERROR
:
2934 case TARGET_SO_DONTROUTE
:
2935 optname
= SO_DONTROUTE
;
2937 case TARGET_SO_BROADCAST
:
2938 optname
= SO_BROADCAST
;
2940 case TARGET_SO_SNDBUF
:
2941 optname
= SO_SNDBUF
;
2943 case TARGET_SO_SNDBUFFORCE
:
2944 optname
= SO_SNDBUFFORCE
;
2946 case TARGET_SO_RCVBUF
:
2947 optname
= SO_RCVBUF
;
2949 case TARGET_SO_RCVBUFFORCE
:
2950 optname
= SO_RCVBUFFORCE
;
2952 case TARGET_SO_KEEPALIVE
:
2953 optname
= SO_KEEPALIVE
;
2955 case TARGET_SO_OOBINLINE
:
2956 optname
= SO_OOBINLINE
;
2958 case TARGET_SO_NO_CHECK
:
2959 optname
= SO_NO_CHECK
;
2961 case TARGET_SO_PRIORITY
:
2962 optname
= SO_PRIORITY
;
2965 case TARGET_SO_BSDCOMPAT
:
2966 optname
= SO_BSDCOMPAT
;
2969 case TARGET_SO_PASSCRED
:
2970 optname
= SO_PASSCRED
;
2972 case TARGET_SO_PASSSEC
:
2973 optname
= SO_PASSSEC
;
2975 case TARGET_SO_TIMESTAMP
:
2976 optname
= SO_TIMESTAMP
;
2978 case TARGET_SO_RCVLOWAT
:
2979 optname
= SO_RCVLOWAT
;
2985 if (optlen
< sizeof(uint32_t))
2986 return -TARGET_EINVAL
;
2988 if (get_user_u32(val
, optval_addr
))
2989 return -TARGET_EFAULT
;
2990 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2994 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
2995 ret
= -TARGET_ENOPROTOOPT
;
3000 /* do_getsockopt() Must return target values and target errnos. */
3001 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
3002 abi_ulong optval_addr
, abi_ulong optlen
)
3009 case TARGET_SOL_SOCKET
:
3012 /* These don't just return a single integer */
3013 case TARGET_SO_LINGER
:
3014 case TARGET_SO_RCVTIMEO
:
3015 case TARGET_SO_SNDTIMEO
:
3016 case TARGET_SO_PEERNAME
:
3018 case TARGET_SO_PEERCRED
: {
3021 struct target_ucred
*tcr
;
3023 if (get_user_u32(len
, optlen
)) {
3024 return -TARGET_EFAULT
;
3027 return -TARGET_EINVAL
;
3031 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
3039 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
3040 return -TARGET_EFAULT
;
3042 __put_user(cr
.pid
, &tcr
->pid
);
3043 __put_user(cr
.uid
, &tcr
->uid
);
3044 __put_user(cr
.gid
, &tcr
->gid
);
3045 unlock_user_struct(tcr
, optval_addr
, 1);
3046 if (put_user_u32(len
, optlen
)) {
3047 return -TARGET_EFAULT
;
3051 /* Options with 'int' argument. */
3052 case TARGET_SO_DEBUG
:
3055 case TARGET_SO_REUSEADDR
:
3056 optname
= SO_REUSEADDR
;
3058 case TARGET_SO_TYPE
:
3061 case TARGET_SO_ERROR
:
3064 case TARGET_SO_DONTROUTE
:
3065 optname
= SO_DONTROUTE
;
3067 case TARGET_SO_BROADCAST
:
3068 optname
= SO_BROADCAST
;
3070 case TARGET_SO_SNDBUF
:
3071 optname
= SO_SNDBUF
;
3073 case TARGET_SO_RCVBUF
:
3074 optname
= SO_RCVBUF
;
3076 case TARGET_SO_KEEPALIVE
:
3077 optname
= SO_KEEPALIVE
;
3079 case TARGET_SO_OOBINLINE
:
3080 optname
= SO_OOBINLINE
;
3082 case TARGET_SO_NO_CHECK
:
3083 optname
= SO_NO_CHECK
;
3085 case TARGET_SO_PRIORITY
:
3086 optname
= SO_PRIORITY
;
3089 case TARGET_SO_BSDCOMPAT
:
3090 optname
= SO_BSDCOMPAT
;
3093 case TARGET_SO_PASSCRED
:
3094 optname
= SO_PASSCRED
;
3096 case TARGET_SO_TIMESTAMP
:
3097 optname
= SO_TIMESTAMP
;
3099 case TARGET_SO_RCVLOWAT
:
3100 optname
= SO_RCVLOWAT
;
3102 case TARGET_SO_ACCEPTCONN
:
3103 optname
= SO_ACCEPTCONN
;
3110 /* TCP options all take an 'int' value. */
3112 if (get_user_u32(len
, optlen
))
3113 return -TARGET_EFAULT
;
3115 return -TARGET_EINVAL
;
3117 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3120 if (optname
== SO_TYPE
) {
3121 val
= host_to_target_sock_type(val
);
3126 if (put_user_u32(val
, optval_addr
))
3127 return -TARGET_EFAULT
;
3129 if (put_user_u8(val
, optval_addr
))
3130 return -TARGET_EFAULT
;
3132 if (put_user_u32(len
, optlen
))
3133 return -TARGET_EFAULT
;
3140 case IP_ROUTER_ALERT
:
3144 case IP_MTU_DISCOVER
:
3150 case IP_MULTICAST_TTL
:
3151 case IP_MULTICAST_LOOP
:
3152 if (get_user_u32(len
, optlen
))
3153 return -TARGET_EFAULT
;
3155 return -TARGET_EINVAL
;
3157 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3160 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
3162 if (put_user_u32(len
, optlen
)
3163 || put_user_u8(val
, optval_addr
))
3164 return -TARGET_EFAULT
;
3166 if (len
> sizeof(int))
3168 if (put_user_u32(len
, optlen
)
3169 || put_user_u32(val
, optval_addr
))
3170 return -TARGET_EFAULT
;
3174 ret
= -TARGET_ENOPROTOOPT
;
3180 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
3182 ret
= -TARGET_EOPNOTSUPP
;
3188 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
3189 abi_ulong count
, int copy
)
3191 struct target_iovec
*target_vec
;
3193 abi_ulong total_len
, max_len
;
3196 bool bad_address
= false;
3202 if (count
> IOV_MAX
) {
3207 vec
= g_try_new0(struct iovec
, count
);
3213 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3214 count
* sizeof(struct target_iovec
), 1);
3215 if (target_vec
== NULL
) {
3220 /* ??? If host page size > target page size, this will result in a
3221 value larger than what we can actually support. */
3222 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
3225 for (i
= 0; i
< count
; i
++) {
3226 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3227 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3232 } else if (len
== 0) {
3233 /* Zero length pointer is ignored. */
3234 vec
[i
].iov_base
= 0;
3236 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
3237 /* If the first buffer pointer is bad, this is a fault. But
3238 * subsequent bad buffers will result in a partial write; this
3239 * is realized by filling the vector with null pointers and
3241 if (!vec
[i
].iov_base
) {
3252 if (len
> max_len
- total_len
) {
3253 len
= max_len
- total_len
;
3256 vec
[i
].iov_len
= len
;
3260 unlock_user(target_vec
, target_addr
, 0);
3265 if (tswapal(target_vec
[i
].iov_len
) > 0) {
3266 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
3269 unlock_user(target_vec
, target_addr
, 0);
3276 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
3277 abi_ulong count
, int copy
)
3279 struct target_iovec
*target_vec
;
3282 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3283 count
* sizeof(struct target_iovec
), 1);
3285 for (i
= 0; i
< count
; i
++) {
3286 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3287 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3291 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
3293 unlock_user(target_vec
, target_addr
, 0);
3299 static inline int target_to_host_sock_type(int *type
)
3302 int target_type
= *type
;
3304 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
3305 case TARGET_SOCK_DGRAM
:
3306 host_type
= SOCK_DGRAM
;
3308 case TARGET_SOCK_STREAM
:
3309 host_type
= SOCK_STREAM
;
3312 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
3315 if (target_type
& TARGET_SOCK_CLOEXEC
) {
3316 #if defined(SOCK_CLOEXEC)
3317 host_type
|= SOCK_CLOEXEC
;
3319 return -TARGET_EINVAL
;
3322 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3323 #if defined(SOCK_NONBLOCK)
3324 host_type
|= SOCK_NONBLOCK
;
3325 #elif !defined(O_NONBLOCK)
3326 return -TARGET_EINVAL
;
3333 /* Try to emulate socket type flags after socket creation. */
3334 static int sock_flags_fixup(int fd
, int target_type
)
3336 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3337 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3338 int flags
= fcntl(fd
, F_GETFL
);
3339 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
3341 return -TARGET_EINVAL
;
3348 static abi_long
packet_target_to_host_sockaddr(void *host_addr
,
3349 abi_ulong target_addr
,
3352 struct sockaddr
*addr
= host_addr
;
3353 struct target_sockaddr
*target_saddr
;
3355 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
3356 if (!target_saddr
) {
3357 return -TARGET_EFAULT
;
3360 memcpy(addr
, target_saddr
, len
);
3361 addr
->sa_family
= tswap16(target_saddr
->sa_family
);
3362 /* spkt_protocol is big-endian */
3364 unlock_user(target_saddr
, target_addr
, 0);
3368 static TargetFdTrans target_packet_trans
= {
3369 .target_to_host_addr
= packet_target_to_host_sockaddr
,
3372 #ifdef CONFIG_RTNETLINK
3373 static abi_long
netlink_route_target_to_host(void *buf
, size_t len
)
3377 ret
= target_to_host_nlmsg_route(buf
, len
);
3385 static abi_long
netlink_route_host_to_target(void *buf
, size_t len
)
3389 ret
= host_to_target_nlmsg_route(buf
, len
);
3397 static TargetFdTrans target_netlink_route_trans
= {
3398 .target_to_host_data
= netlink_route_target_to_host
,
3399 .host_to_target_data
= netlink_route_host_to_target
,
3401 #endif /* CONFIG_RTNETLINK */
3403 static abi_long
netlink_audit_target_to_host(void *buf
, size_t len
)
3407 ret
= target_to_host_nlmsg_audit(buf
, len
);
3415 static abi_long
netlink_audit_host_to_target(void *buf
, size_t len
)
3419 ret
= host_to_target_nlmsg_audit(buf
, len
);
3427 static TargetFdTrans target_netlink_audit_trans
= {
3428 .target_to_host_data
= netlink_audit_target_to_host
,
3429 .host_to_target_data
= netlink_audit_host_to_target
,
3432 /* do_socket() Must return target values and target errnos. */
3433 static abi_long
do_socket(int domain
, int type
, int protocol
)
3435 int target_type
= type
;
3438 ret
= target_to_host_sock_type(&type
);
3443 if (domain
== PF_NETLINK
&& !(
3444 #ifdef CONFIG_RTNETLINK
3445 protocol
== NETLINK_ROUTE
||
3447 protocol
== NETLINK_KOBJECT_UEVENT
||
3448 protocol
== NETLINK_AUDIT
)) {
3449 return -EPFNOSUPPORT
;
3452 if (domain
== AF_PACKET
||
3453 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
3454 protocol
= tswap16(protocol
);
3457 ret
= get_errno(socket(domain
, type
, protocol
));
3459 ret
= sock_flags_fixup(ret
, target_type
);
3460 if (type
== SOCK_PACKET
) {
3461 /* Manage an obsolete case :
3462 * if socket type is SOCK_PACKET, bind by name
3464 fd_trans_register(ret
, &target_packet_trans
);
3465 } else if (domain
== PF_NETLINK
) {
3467 #ifdef CONFIG_RTNETLINK
3469 fd_trans_register(ret
, &target_netlink_route_trans
);
3472 case NETLINK_KOBJECT_UEVENT
:
3473 /* nothing to do: messages are strings */
3476 fd_trans_register(ret
, &target_netlink_audit_trans
);
3479 g_assert_not_reached();
3486 /* do_bind() Must return target values and target errnos. */
3487 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
3493 if ((int)addrlen
< 0) {
3494 return -TARGET_EINVAL
;
3497 addr
= alloca(addrlen
+1);
3499 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3503 return get_errno(bind(sockfd
, addr
, addrlen
));
3506 /* do_connect() Must return target values and target errnos. */
3507 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
3513 if ((int)addrlen
< 0) {
3514 return -TARGET_EINVAL
;
3517 addr
= alloca(addrlen
+1);
3519 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3523 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
3526 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3527 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
3528 int flags
, int send
)
3534 abi_ulong target_vec
;
3536 if (msgp
->msg_name
) {
3537 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
3538 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
3539 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
3540 tswapal(msgp
->msg_name
),
3542 if (ret
== -TARGET_EFAULT
) {
3543 /* For connected sockets msg_name and msg_namelen must
3544 * be ignored, so returning EFAULT immediately is wrong.
3545 * Instead, pass a bad msg_name to the host kernel, and
3546 * let it decide whether to return EFAULT or not.
3548 msg
.msg_name
= (void *)-1;
3553 msg
.msg_name
= NULL
;
3554 msg
.msg_namelen
= 0;
3556 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3557 msg
.msg_control
= alloca(msg
.msg_controllen
);
3558 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3560 count
= tswapal(msgp
->msg_iovlen
);
3561 target_vec
= tswapal(msgp
->msg_iov
);
3563 if (count
> IOV_MAX
) {
3564 /* sendrcvmsg returns a different errno for this condition than
3565 * readv/writev, so we must catch it here before lock_iovec() does.
3567 ret
= -TARGET_EMSGSIZE
;
3571 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3572 target_vec
, count
, send
);
3574 ret
= -host_to_target_errno(errno
);
3577 msg
.msg_iovlen
= count
;
3581 if (fd_trans_target_to_host_data(fd
)) {
3584 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3585 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3586 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3587 msg
.msg_iov
->iov_len
);
3589 msg
.msg_iov
->iov_base
= host_msg
;
3590 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3594 ret
= target_to_host_cmsg(&msg
, msgp
);
3596 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3600 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3601 if (!is_error(ret
)) {
3603 if (fd_trans_host_to_target_data(fd
)) {
3604 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3607 ret
= host_to_target_cmsg(msgp
, &msg
);
3609 if (!is_error(ret
)) {
3610 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3611 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
3612 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3613 msg
.msg_name
, msg
.msg_namelen
);
3625 unlock_iovec(vec
, target_vec
, count
, !send
);
3630 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3631 int flags
, int send
)
3634 struct target_msghdr
*msgp
;
3636 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3640 return -TARGET_EFAULT
;
3642 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3643 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3647 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3648 * so it might not have this *mmsg-specific flag either.
3650 #ifndef MSG_WAITFORONE
3651 #define MSG_WAITFORONE 0x10000
3654 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3655 unsigned int vlen
, unsigned int flags
,
3658 struct target_mmsghdr
*mmsgp
;
3662 if (vlen
> UIO_MAXIOV
) {
3666 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3668 return -TARGET_EFAULT
;
3671 for (i
= 0; i
< vlen
; i
++) {
3672 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3673 if (is_error(ret
)) {
3676 mmsgp
[i
].msg_len
= tswap32(ret
);
3677 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3678 if (flags
& MSG_WAITFORONE
) {
3679 flags
|= MSG_DONTWAIT
;
3683 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3685 /* Return number of datagrams sent if we sent any at all;
3686 * otherwise return the error.
3694 /* do_accept4() Must return target values and target errnos. */
3695 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3696 abi_ulong target_addrlen_addr
, int flags
)
3703 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3705 if (target_addr
== 0) {
3706 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3709 /* linux returns EINVAL if addrlen pointer is invalid */
3710 if (get_user_u32(addrlen
, target_addrlen_addr
))
3711 return -TARGET_EINVAL
;
3713 if ((int)addrlen
< 0) {
3714 return -TARGET_EINVAL
;
3717 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3718 return -TARGET_EINVAL
;
3720 addr
= alloca(addrlen
);
3722 ret
= get_errno(safe_accept4(fd
, addr
, &addrlen
, host_flags
));
3723 if (!is_error(ret
)) {
3724 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3725 if (put_user_u32(addrlen
, target_addrlen_addr
))
3726 ret
= -TARGET_EFAULT
;
3731 /* do_getpeername() Must return target values and target errnos. */
3732 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3733 abi_ulong target_addrlen_addr
)
3739 if (get_user_u32(addrlen
, target_addrlen_addr
))
3740 return -TARGET_EFAULT
;
3742 if ((int)addrlen
< 0) {
3743 return -TARGET_EINVAL
;
3746 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3747 return -TARGET_EFAULT
;
3749 addr
= alloca(addrlen
);
3751 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
3752 if (!is_error(ret
)) {
3753 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3754 if (put_user_u32(addrlen
, target_addrlen_addr
))
3755 ret
= -TARGET_EFAULT
;
3760 /* do_getsockname() Must return target values and target errnos. */
3761 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3762 abi_ulong target_addrlen_addr
)
3768 if (get_user_u32(addrlen
, target_addrlen_addr
))
3769 return -TARGET_EFAULT
;
3771 if ((int)addrlen
< 0) {
3772 return -TARGET_EINVAL
;
3775 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3776 return -TARGET_EFAULT
;
3778 addr
= alloca(addrlen
);
3780 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
3781 if (!is_error(ret
)) {
3782 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3783 if (put_user_u32(addrlen
, target_addrlen_addr
))
3784 ret
= -TARGET_EFAULT
;
3789 /* do_socketpair() Must return target values and target errnos. */
3790 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3791 abi_ulong target_tab_addr
)
3796 target_to_host_sock_type(&type
);
3798 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3799 if (!is_error(ret
)) {
3800 if (put_user_s32(tab
[0], target_tab_addr
)
3801 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3802 ret
= -TARGET_EFAULT
;
3807 /* do_sendto() Must return target values and target errnos. */
3808 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3809 abi_ulong target_addr
, socklen_t addrlen
)
3813 void *copy_msg
= NULL
;
3816 if ((int)addrlen
< 0) {
3817 return -TARGET_EINVAL
;
3820 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3822 return -TARGET_EFAULT
;
3823 if (fd_trans_target_to_host_data(fd
)) {
3824 copy_msg
= host_msg
;
3825 host_msg
= g_malloc(len
);
3826 memcpy(host_msg
, copy_msg
, len
);
3827 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3833 addr
= alloca(addrlen
+1);
3834 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3838 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3840 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3845 host_msg
= copy_msg
;
3847 unlock_user(host_msg
, msg
, 0);
3851 /* do_recvfrom() Must return target values and target errnos. */
3852 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3853 abi_ulong target_addr
,
3854 abi_ulong target_addrlen
)
3861 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3863 return -TARGET_EFAULT
;
3865 if (get_user_u32(addrlen
, target_addrlen
)) {
3866 ret
= -TARGET_EFAULT
;
3869 if ((int)addrlen
< 0) {
3870 ret
= -TARGET_EINVAL
;
3873 addr
= alloca(addrlen
);
3874 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
3877 addr
= NULL
; /* To keep compiler quiet. */
3878 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
3880 if (!is_error(ret
)) {
3881 if (fd_trans_host_to_target_data(fd
)) {
3882 ret
= fd_trans_host_to_target_data(fd
)(host_msg
, ret
);
3885 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3886 if (put_user_u32(addrlen
, target_addrlen
)) {
3887 ret
= -TARGET_EFAULT
;
3891 unlock_user(host_msg
, msg
, len
);
3894 unlock_user(host_msg
, msg
, 0);
3899 #ifdef TARGET_NR_socketcall
3900 /* do_socketcall() Must return target values and target errnos. */
3901 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
3903 static const unsigned ac
[] = { /* number of arguments per call */
3904 [SOCKOP_socket
] = 3, /* domain, type, protocol */
3905 [SOCKOP_bind
] = 3, /* sockfd, addr, addrlen */
3906 [SOCKOP_connect
] = 3, /* sockfd, addr, addrlen */
3907 [SOCKOP_listen
] = 2, /* sockfd, backlog */
3908 [SOCKOP_accept
] = 3, /* sockfd, addr, addrlen */
3909 [SOCKOP_accept4
] = 4, /* sockfd, addr, addrlen, flags */
3910 [SOCKOP_getsockname
] = 3, /* sockfd, addr, addrlen */
3911 [SOCKOP_getpeername
] = 3, /* sockfd, addr, addrlen */
3912 [SOCKOP_socketpair
] = 4, /* domain, type, protocol, tab */
3913 [SOCKOP_send
] = 4, /* sockfd, msg, len, flags */
3914 [SOCKOP_recv
] = 4, /* sockfd, msg, len, flags */
3915 [SOCKOP_sendto
] = 6, /* sockfd, msg, len, flags, addr, addrlen */
3916 [SOCKOP_recvfrom
] = 6, /* sockfd, msg, len, flags, addr, addrlen */
3917 [SOCKOP_shutdown
] = 2, /* sockfd, how */
3918 [SOCKOP_sendmsg
] = 3, /* sockfd, msg, flags */
3919 [SOCKOP_recvmsg
] = 3, /* sockfd, msg, flags */
3920 [SOCKOP_sendmmsg
] = 4, /* sockfd, msgvec, vlen, flags */
3921 [SOCKOP_recvmmsg
] = 4, /* sockfd, msgvec, vlen, flags */
3922 [SOCKOP_setsockopt
] = 5, /* sockfd, level, optname, optval, optlen */
3923 [SOCKOP_getsockopt
] = 5, /* sockfd, level, optname, optval, optlen */
3925 abi_long a
[6]; /* max 6 args */
3927 /* first, collect the arguments in a[] according to ac[] */
3928 if (num
>= 0 && num
< ARRAY_SIZE(ac
)) {
3930 assert(ARRAY_SIZE(a
) >= ac
[num
]); /* ensure we have space for args */
3931 for (i
= 0; i
< ac
[num
]; ++i
) {
3932 if (get_user_ual(a
[i
], vptr
+ i
* sizeof(abi_long
)) != 0) {
3933 return -TARGET_EFAULT
;
3938 /* now when we have the args, actually handle the call */
3940 case SOCKOP_socket
: /* domain, type, protocol */
3941 return do_socket(a
[0], a
[1], a
[2]);
3942 case SOCKOP_bind
: /* sockfd, addr, addrlen */
3943 return do_bind(a
[0], a
[1], a
[2]);
3944 case SOCKOP_connect
: /* sockfd, addr, addrlen */
3945 return do_connect(a
[0], a
[1], a
[2]);
3946 case SOCKOP_listen
: /* sockfd, backlog */
3947 return get_errno(listen(a
[0], a
[1]));
3948 case SOCKOP_accept
: /* sockfd, addr, addrlen */
3949 return do_accept4(a
[0], a
[1], a
[2], 0);
3950 case SOCKOP_accept4
: /* sockfd, addr, addrlen, flags */
3951 return do_accept4(a
[0], a
[1], a
[2], a
[3]);
3952 case SOCKOP_getsockname
: /* sockfd, addr, addrlen */
3953 return do_getsockname(a
[0], a
[1], a
[2]);
3954 case SOCKOP_getpeername
: /* sockfd, addr, addrlen */
3955 return do_getpeername(a
[0], a
[1], a
[2]);
3956 case SOCKOP_socketpair
: /* domain, type, protocol, tab */
3957 return do_socketpair(a
[0], a
[1], a
[2], a
[3]);
3958 case SOCKOP_send
: /* sockfd, msg, len, flags */
3959 return do_sendto(a
[0], a
[1], a
[2], a
[3], 0, 0);
3960 case SOCKOP_recv
: /* sockfd, msg, len, flags */
3961 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], 0, 0);
3962 case SOCKOP_sendto
: /* sockfd, msg, len, flags, addr, addrlen */
3963 return do_sendto(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3964 case SOCKOP_recvfrom
: /* sockfd, msg, len, flags, addr, addrlen */
3965 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3966 case SOCKOP_shutdown
: /* sockfd, how */
3967 return get_errno(shutdown(a
[0], a
[1]));
3968 case SOCKOP_sendmsg
: /* sockfd, msg, flags */
3969 return do_sendrecvmsg(a
[0], a
[1], a
[2], 1);
3970 case SOCKOP_recvmsg
: /* sockfd, msg, flags */
3971 return do_sendrecvmsg(a
[0], a
[1], a
[2], 0);
3972 case SOCKOP_sendmmsg
: /* sockfd, msgvec, vlen, flags */
3973 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 1);
3974 case SOCKOP_recvmmsg
: /* sockfd, msgvec, vlen, flags */
3975 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 0);
3976 case SOCKOP_setsockopt
: /* sockfd, level, optname, optval, optlen */
3977 return do_setsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3978 case SOCKOP_getsockopt
: /* sockfd, level, optname, optval, optlen */
3979 return do_getsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3981 gemu_log("Unsupported socketcall: %d\n", num
);
3982 return -TARGET_ENOSYS
;
3987 #define N_SHM_REGIONS 32
3989 static struct shm_region
{
3993 } shm_regions
[N_SHM_REGIONS
];
3995 #ifndef TARGET_SEMID64_DS
3996 /* asm-generic version of this struct */
3997 struct target_semid64_ds
3999 struct target_ipc_perm sem_perm
;
4000 abi_ulong sem_otime
;
4001 #if TARGET_ABI_BITS == 32
4002 abi_ulong __unused1
;
4004 abi_ulong sem_ctime
;
4005 #if TARGET_ABI_BITS == 32
4006 abi_ulong __unused2
;
4008 abi_ulong sem_nsems
;
4009 abi_ulong __unused3
;
4010 abi_ulong __unused4
;
4014 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
4015 abi_ulong target_addr
)
4017 struct target_ipc_perm
*target_ip
;
4018 struct target_semid64_ds
*target_sd
;
4020 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4021 return -TARGET_EFAULT
;
4022 target_ip
= &(target_sd
->sem_perm
);
4023 host_ip
->__key
= tswap32(target_ip
->__key
);
4024 host_ip
->uid
= tswap32(target_ip
->uid
);
4025 host_ip
->gid
= tswap32(target_ip
->gid
);
4026 host_ip
->cuid
= tswap32(target_ip
->cuid
);
4027 host_ip
->cgid
= tswap32(target_ip
->cgid
);
4028 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4029 host_ip
->mode
= tswap32(target_ip
->mode
);
4031 host_ip
->mode
= tswap16(target_ip
->mode
);
4033 #if defined(TARGET_PPC)
4034 host_ip
->__seq
= tswap32(target_ip
->__seq
);
4036 host_ip
->__seq
= tswap16(target_ip
->__seq
);
4038 unlock_user_struct(target_sd
, target_addr
, 0);
4042 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
4043 struct ipc_perm
*host_ip
)
4045 struct target_ipc_perm
*target_ip
;
4046 struct target_semid64_ds
*target_sd
;
4048 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4049 return -TARGET_EFAULT
;
4050 target_ip
= &(target_sd
->sem_perm
);
4051 target_ip
->__key
= tswap32(host_ip
->__key
);
4052 target_ip
->uid
= tswap32(host_ip
->uid
);
4053 target_ip
->gid
= tswap32(host_ip
->gid
);
4054 target_ip
->cuid
= tswap32(host_ip
->cuid
);
4055 target_ip
->cgid
= tswap32(host_ip
->cgid
);
4056 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4057 target_ip
->mode
= tswap32(host_ip
->mode
);
4059 target_ip
->mode
= tswap16(host_ip
->mode
);
4061 #if defined(TARGET_PPC)
4062 target_ip
->__seq
= tswap32(host_ip
->__seq
);
4064 target_ip
->__seq
= tswap16(host_ip
->__seq
);
4066 unlock_user_struct(target_sd
, target_addr
, 1);
4070 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
4071 abi_ulong target_addr
)
4073 struct target_semid64_ds
*target_sd
;
4075 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4076 return -TARGET_EFAULT
;
4077 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
4078 return -TARGET_EFAULT
;
4079 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
4080 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
4081 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
4082 unlock_user_struct(target_sd
, target_addr
, 0);
4086 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
4087 struct semid_ds
*host_sd
)
4089 struct target_semid64_ds
*target_sd
;
4091 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4092 return -TARGET_EFAULT
;
4093 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
4094 return -TARGET_EFAULT
;
4095 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
4096 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
4097 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
4098 unlock_user_struct(target_sd
, target_addr
, 1);
4102 struct target_seminfo
{
4115 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
4116 struct seminfo
*host_seminfo
)
4118 struct target_seminfo
*target_seminfo
;
4119 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
4120 return -TARGET_EFAULT
;
4121 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
4122 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
4123 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
4124 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
4125 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
4126 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
4127 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
4128 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
4129 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
4130 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
4131 unlock_user_struct(target_seminfo
, target_addr
, 1);
4137 struct semid_ds
*buf
;
4138 unsigned short *array
;
4139 struct seminfo
*__buf
;
4142 union target_semun
{
4149 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
4150 abi_ulong target_addr
)
4153 unsigned short *array
;
4155 struct semid_ds semid_ds
;
4158 semun
.buf
= &semid_ds
;
4160 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4162 return get_errno(ret
);
4164 nsems
= semid_ds
.sem_nsems
;
4166 *host_array
= g_try_new(unsigned short, nsems
);
4168 return -TARGET_ENOMEM
;
4170 array
= lock_user(VERIFY_READ
, target_addr
,
4171 nsems
*sizeof(unsigned short), 1);
4173 g_free(*host_array
);
4174 return -TARGET_EFAULT
;
4177 for(i
=0; i
<nsems
; i
++) {
4178 __get_user((*host_array
)[i
], &array
[i
]);
4180 unlock_user(array
, target_addr
, 0);
4185 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
4186 unsigned short **host_array
)
4189 unsigned short *array
;
4191 struct semid_ds semid_ds
;
4194 semun
.buf
= &semid_ds
;
4196 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4198 return get_errno(ret
);
4200 nsems
= semid_ds
.sem_nsems
;
4202 array
= lock_user(VERIFY_WRITE
, target_addr
,
4203 nsems
*sizeof(unsigned short), 0);
4205 return -TARGET_EFAULT
;
4207 for(i
=0; i
<nsems
; i
++) {
4208 __put_user((*host_array
)[i
], &array
[i
]);
4210 g_free(*host_array
);
4211 unlock_user(array
, target_addr
, 1);
4216 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
4217 abi_ulong target_arg
)
4219 union target_semun target_su
= { .buf
= target_arg
};
4221 struct semid_ds dsarg
;
4222 unsigned short *array
= NULL
;
4223 struct seminfo seminfo
;
4224 abi_long ret
= -TARGET_EINVAL
;
4231 /* In 64 bit cross-endian situations, we will erroneously pick up
4232 * the wrong half of the union for the "val" element. To rectify
4233 * this, the entire 8-byte structure is byteswapped, followed by
4234 * a swap of the 4 byte val field. In other cases, the data is
4235 * already in proper host byte order. */
4236 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
4237 target_su
.buf
= tswapal(target_su
.buf
);
4238 arg
.val
= tswap32(target_su
.val
);
4240 arg
.val
= target_su
.val
;
4242 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4246 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
4250 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4251 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
4258 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
4262 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4263 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
4269 arg
.__buf
= &seminfo
;
4270 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4271 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
4279 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
4286 struct target_sembuf
{
4287 unsigned short sem_num
;
4292 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
4293 abi_ulong target_addr
,
4296 struct target_sembuf
*target_sembuf
;
4299 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
4300 nsops
*sizeof(struct target_sembuf
), 1);
4302 return -TARGET_EFAULT
;
4304 for(i
=0; i
<nsops
; i
++) {
4305 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
4306 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
4307 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
4310 unlock_user(target_sembuf
, target_addr
, 0);
4315 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
4317 struct sembuf sops
[nsops
];
4319 if (target_to_host_sembuf(sops
, ptr
, nsops
))
4320 return -TARGET_EFAULT
;
4322 return get_errno(safe_semtimedop(semid
, sops
, nsops
, NULL
));
4325 struct target_msqid_ds
4327 struct target_ipc_perm msg_perm
;
4328 abi_ulong msg_stime
;
4329 #if TARGET_ABI_BITS == 32
4330 abi_ulong __unused1
;
4332 abi_ulong msg_rtime
;
4333 #if TARGET_ABI_BITS == 32
4334 abi_ulong __unused2
;
4336 abi_ulong msg_ctime
;
4337 #if TARGET_ABI_BITS == 32
4338 abi_ulong __unused3
;
4340 abi_ulong __msg_cbytes
;
4342 abi_ulong msg_qbytes
;
4343 abi_ulong msg_lspid
;
4344 abi_ulong msg_lrpid
;
4345 abi_ulong __unused4
;
4346 abi_ulong __unused5
;
4349 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
4350 abi_ulong target_addr
)
4352 struct target_msqid_ds
*target_md
;
4354 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
4355 return -TARGET_EFAULT
;
4356 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
4357 return -TARGET_EFAULT
;
4358 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
4359 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
4360 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
4361 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
4362 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
4363 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
4364 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
4365 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
4366 unlock_user_struct(target_md
, target_addr
, 0);
4370 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
4371 struct msqid_ds
*host_md
)
4373 struct target_msqid_ds
*target_md
;
4375 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
4376 return -TARGET_EFAULT
;
4377 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
4378 return -TARGET_EFAULT
;
4379 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
4380 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
4381 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
4382 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
4383 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
4384 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
4385 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
4386 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
4387 unlock_user_struct(target_md
, target_addr
, 1);
4391 struct target_msginfo
{
4399 unsigned short int msgseg
;
4402 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
4403 struct msginfo
*host_msginfo
)
4405 struct target_msginfo
*target_msginfo
;
4406 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
4407 return -TARGET_EFAULT
;
4408 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
4409 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
4410 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
4411 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
4412 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
4413 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
4414 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
4415 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
4416 unlock_user_struct(target_msginfo
, target_addr
, 1);
4420 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
4422 struct msqid_ds dsarg
;
4423 struct msginfo msginfo
;
4424 abi_long ret
= -TARGET_EINVAL
;
4432 if (target_to_host_msqid_ds(&dsarg
,ptr
))
4433 return -TARGET_EFAULT
;
4434 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
4435 if (host_to_target_msqid_ds(ptr
,&dsarg
))
4436 return -TARGET_EFAULT
;
4439 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
4443 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
4444 if (host_to_target_msginfo(ptr
, &msginfo
))
4445 return -TARGET_EFAULT
;
4452 struct target_msgbuf
{
4457 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
4458 ssize_t msgsz
, int msgflg
)
4460 struct target_msgbuf
*target_mb
;
4461 struct msgbuf
*host_mb
;
4465 return -TARGET_EINVAL
;
4468 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
4469 return -TARGET_EFAULT
;
4470 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4472 unlock_user_struct(target_mb
, msgp
, 0);
4473 return -TARGET_ENOMEM
;
4475 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
4476 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
4477 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
4479 unlock_user_struct(target_mb
, msgp
, 0);
4484 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
4485 ssize_t msgsz
, abi_long msgtyp
,
4488 struct target_msgbuf
*target_mb
;
4490 struct msgbuf
*host_mb
;
4494 return -TARGET_EINVAL
;
4497 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
4498 return -TARGET_EFAULT
;
4500 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4502 ret
= -TARGET_ENOMEM
;
4505 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
4508 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
4509 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
4510 if (!target_mtext
) {
4511 ret
= -TARGET_EFAULT
;
4514 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
4515 unlock_user(target_mtext
, target_mtext_addr
, ret
);
4518 target_mb
->mtype
= tswapal(host_mb
->mtype
);
4522 unlock_user_struct(target_mb
, msgp
, 1);
4527 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
4528 abi_ulong target_addr
)
4530 struct target_shmid_ds
*target_sd
;
4532 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4533 return -TARGET_EFAULT
;
4534 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
4535 return -TARGET_EFAULT
;
4536 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4537 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4538 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4539 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4540 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4541 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4542 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4543 unlock_user_struct(target_sd
, target_addr
, 0);
4547 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
4548 struct shmid_ds
*host_sd
)
4550 struct target_shmid_ds
*target_sd
;
4552 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4553 return -TARGET_EFAULT
;
4554 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
4555 return -TARGET_EFAULT
;
4556 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4557 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4558 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4559 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4560 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4561 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4562 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4563 unlock_user_struct(target_sd
, target_addr
, 1);
4567 struct target_shminfo
{
4575 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
4576 struct shminfo
*host_shminfo
)
4578 struct target_shminfo
*target_shminfo
;
4579 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
4580 return -TARGET_EFAULT
;
4581 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
4582 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
4583 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
4584 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
4585 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
4586 unlock_user_struct(target_shminfo
, target_addr
, 1);
4590 struct target_shm_info
{
4595 abi_ulong swap_attempts
;
4596 abi_ulong swap_successes
;
4599 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
4600 struct shm_info
*host_shm_info
)
4602 struct target_shm_info
*target_shm_info
;
4603 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
4604 return -TARGET_EFAULT
;
4605 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
4606 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
4607 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
4608 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
4609 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
4610 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
4611 unlock_user_struct(target_shm_info
, target_addr
, 1);
4615 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
4617 struct shmid_ds dsarg
;
4618 struct shminfo shminfo
;
4619 struct shm_info shm_info
;
4620 abi_long ret
= -TARGET_EINVAL
;
4628 if (target_to_host_shmid_ds(&dsarg
, buf
))
4629 return -TARGET_EFAULT
;
4630 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
4631 if (host_to_target_shmid_ds(buf
, &dsarg
))
4632 return -TARGET_EFAULT
;
4635 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
4636 if (host_to_target_shminfo(buf
, &shminfo
))
4637 return -TARGET_EFAULT
;
4640 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
4641 if (host_to_target_shm_info(buf
, &shm_info
))
4642 return -TARGET_EFAULT
;
4647 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
4654 #ifndef TARGET_FORCE_SHMLBA
4655 /* For most architectures, SHMLBA is the same as the page size;
4656 * some architectures have larger values, in which case they should
4657 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4658 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4659 * and defining its own value for SHMLBA.
4661 * The kernel also permits SHMLBA to be set by the architecture to a
4662 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4663 * this means that addresses are rounded to the large size if
4664 * SHM_RND is set but addresses not aligned to that size are not rejected
4665 * as long as they are at least page-aligned. Since the only architecture
4666 * which uses this is ia64 this code doesn't provide for that oddity.
4668 static inline abi_ulong
target_shmlba(CPUArchState
*cpu_env
)
4670 return TARGET_PAGE_SIZE
;
4674 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
4675 int shmid
, abi_ulong shmaddr
, int shmflg
)
4679 struct shmid_ds shm_info
;
4683 /* find out the length of the shared memory segment */
4684 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4685 if (is_error(ret
)) {
4686 /* can't get length, bail out */
4690 shmlba
= target_shmlba(cpu_env
);
4692 if (shmaddr
& (shmlba
- 1)) {
4693 if (shmflg
& SHM_RND
) {
4694 shmaddr
&= ~(shmlba
- 1);
4696 return -TARGET_EINVAL
;
4703 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
4705 abi_ulong mmap_start
;
4707 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
4709 if (mmap_start
== -1) {
4711 host_raddr
= (void *)-1;
4713 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
4716 if (host_raddr
== (void *)-1) {
4718 return get_errno((long)host_raddr
);
4720 raddr
=h2g((unsigned long)host_raddr
);
4722 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
4723 PAGE_VALID
| PAGE_READ
|
4724 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
4726 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4727 if (!shm_regions
[i
].in_use
) {
4728 shm_regions
[i
].in_use
= true;
4729 shm_regions
[i
].start
= raddr
;
4730 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4740 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4744 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4745 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4746 shm_regions
[i
].in_use
= false;
4747 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
4752 return get_errno(shmdt(g2h(shmaddr
)));
4755 #ifdef TARGET_NR_ipc
4756 /* ??? This only works with linear mappings. */
4757 /* do_ipc() must return target values and target errnos. */
4758 static abi_long
do_ipc(CPUArchState
*cpu_env
,
4759 unsigned int call
, abi_long first
,
4760 abi_long second
, abi_long third
,
4761 abi_long ptr
, abi_long fifth
)
4766 version
= call
>> 16;
4771 ret
= do_semop(first
, ptr
, second
);
4775 ret
= get_errno(semget(first
, second
, third
));
4778 case IPCOP_semctl
: {
4779 /* The semun argument to semctl is passed by value, so dereference the
4782 get_user_ual(atptr
, ptr
);
4783 ret
= do_semctl(first
, second
, third
, atptr
);
4788 ret
= get_errno(msgget(first
, second
));
4792 ret
= do_msgsnd(first
, ptr
, second
, third
);
4796 ret
= do_msgctl(first
, second
, ptr
);
4803 struct target_ipc_kludge
{
4808 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
4809 ret
= -TARGET_EFAULT
;
4813 ret
= do_msgrcv(first
, tswapal(tmp
->msgp
), second
, tswapal(tmp
->msgtyp
), third
);
4815 unlock_user_struct(tmp
, ptr
, 0);
4819 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
4828 raddr
= do_shmat(cpu_env
, first
, ptr
, second
);
4829 if (is_error(raddr
))
4830 return get_errno(raddr
);
4831 if (put_user_ual(raddr
, third
))
4832 return -TARGET_EFAULT
;
4836 ret
= -TARGET_EINVAL
;
4841 ret
= do_shmdt(ptr
);
4845 /* IPC_* flag values are the same on all linux platforms */
4846 ret
= get_errno(shmget(first
, second
, third
));
4849 /* IPC_* and SHM_* command values are the same on all linux platforms */
4851 ret
= do_shmctl(first
, second
, ptr
);
4854 gemu_log("Unsupported ipc call: %d (version %d)\n", call
, version
);
4855 ret
= -TARGET_ENOSYS
;
4862 /* kernel structure types definitions */
4864 #define STRUCT(name, ...) STRUCT_ ## name,
4865 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4867 #include "syscall_types.h"
4871 #undef STRUCT_SPECIAL
4873 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4874 #define STRUCT_SPECIAL(name)
4875 #include "syscall_types.h"
4877 #undef STRUCT_SPECIAL
4879 typedef struct IOCTLEntry IOCTLEntry
;
4881 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4882 int fd
, int cmd
, abi_long arg
);
4886 unsigned int host_cmd
;
4889 do_ioctl_fn
*do_ioctl
;
4890 const argtype arg_type
[5];
4893 #define IOC_R 0x0001
4894 #define IOC_W 0x0002
4895 #define IOC_RW (IOC_R | IOC_W)
4897 #define MAX_STRUCT_SIZE 4096
4899 #ifdef CONFIG_FIEMAP
4900 /* So fiemap access checks don't overflow on 32 bit systems.
4901 * This is very slightly smaller than the limit imposed by
4902 * the underlying kernel.
4904 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4905 / sizeof(struct fiemap_extent))
4907 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4908 int fd
, int cmd
, abi_long arg
)
4910 /* The parameter for this ioctl is a struct fiemap followed
4911 * by an array of struct fiemap_extent whose size is set
4912 * in fiemap->fm_extent_count. The array is filled in by the
4915 int target_size_in
, target_size_out
;
4917 const argtype
*arg_type
= ie
->arg_type
;
4918 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
4921 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
4925 assert(arg_type
[0] == TYPE_PTR
);
4926 assert(ie
->access
== IOC_RW
);
4928 target_size_in
= thunk_type_size(arg_type
, 0);
4929 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
4931 return -TARGET_EFAULT
;
4933 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4934 unlock_user(argptr
, arg
, 0);
4935 fm
= (struct fiemap
*)buf_temp
;
4936 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
4937 return -TARGET_EINVAL
;
4940 outbufsz
= sizeof (*fm
) +
4941 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
4943 if (outbufsz
> MAX_STRUCT_SIZE
) {
4944 /* We can't fit all the extents into the fixed size buffer.
4945 * Allocate one that is large enough and use it instead.
4947 fm
= g_try_malloc(outbufsz
);
4949 return -TARGET_ENOMEM
;
4951 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
4954 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, fm
));
4955 if (!is_error(ret
)) {
4956 target_size_out
= target_size_in
;
4957 /* An extent_count of 0 means we were only counting the extents
4958 * so there are no structs to copy
4960 if (fm
->fm_extent_count
!= 0) {
4961 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
4963 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
4965 ret
= -TARGET_EFAULT
;
4967 /* Convert the struct fiemap */
4968 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
4969 if (fm
->fm_extent_count
!= 0) {
4970 p
= argptr
+ target_size_in
;
4971 /* ...and then all the struct fiemap_extents */
4972 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
4973 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
4978 unlock_user(argptr
, arg
, target_size_out
);
4988 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4989 int fd
, int cmd
, abi_long arg
)
4991 const argtype
*arg_type
= ie
->arg_type
;
4995 struct ifconf
*host_ifconf
;
4997 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4998 int target_ifreq_size
;
5003 abi_long target_ifc_buf
;
5007 assert(arg_type
[0] == TYPE_PTR
);
5008 assert(ie
->access
== IOC_RW
);
5011 target_size
= thunk_type_size(arg_type
, 0);
5013 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5015 return -TARGET_EFAULT
;
5016 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5017 unlock_user(argptr
, arg
, 0);
5019 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
5020 target_ifc_len
= host_ifconf
->ifc_len
;
5021 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
5023 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
5024 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
5025 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
5027 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
5028 if (outbufsz
> MAX_STRUCT_SIZE
) {
5029 /* We can't fit all the extents into the fixed size buffer.
5030 * Allocate one that is large enough and use it instead.
5032 host_ifconf
= malloc(outbufsz
);
5034 return -TARGET_ENOMEM
;
5036 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
5039 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
5041 host_ifconf
->ifc_len
= host_ifc_len
;
5042 host_ifconf
->ifc_buf
= host_ifc_buf
;
5044 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
5045 if (!is_error(ret
)) {
5046 /* convert host ifc_len to target ifc_len */
5048 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
5049 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
5050 host_ifconf
->ifc_len
= target_ifc_len
;
5052 /* restore target ifc_buf */
5054 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
5056 /* copy struct ifconf to target user */
5058 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5060 return -TARGET_EFAULT
;
5061 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
5062 unlock_user(argptr
, arg
, target_size
);
5064 /* copy ifreq[] to target user */
5066 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
5067 for (i
= 0; i
< nb_ifreq
; i
++) {
5068 thunk_convert(argptr
+ i
* target_ifreq_size
,
5069 host_ifc_buf
+ i
* sizeof(struct ifreq
),
5070 ifreq_arg_type
, THUNK_TARGET
);
5072 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
5082 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5083 int cmd
, abi_long arg
)
5086 struct dm_ioctl
*host_dm
;
5087 abi_long guest_data
;
5088 uint32_t guest_data_size
;
5090 const argtype
*arg_type
= ie
->arg_type
;
5092 void *big_buf
= NULL
;
5096 target_size
= thunk_type_size(arg_type
, 0);
5097 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5099 ret
= -TARGET_EFAULT
;
5102 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5103 unlock_user(argptr
, arg
, 0);
5105 /* buf_temp is too small, so fetch things into a bigger buffer */
5106 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
5107 memcpy(big_buf
, buf_temp
, target_size
);
5111 guest_data
= arg
+ host_dm
->data_start
;
5112 if ((guest_data
- arg
) < 0) {
5113 ret
= -TARGET_EINVAL
;
5116 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5117 host_data
= (char*)host_dm
+ host_dm
->data_start
;
5119 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
5121 ret
= -TARGET_EFAULT
;
5125 switch (ie
->host_cmd
) {
5127 case DM_LIST_DEVICES
:
5130 case DM_DEV_SUSPEND
:
5133 case DM_TABLE_STATUS
:
5134 case DM_TABLE_CLEAR
:
5136 case DM_LIST_VERSIONS
:
5140 case DM_DEV_SET_GEOMETRY
:
5141 /* data contains only strings */
5142 memcpy(host_data
, argptr
, guest_data_size
);
5145 memcpy(host_data
, argptr
, guest_data_size
);
5146 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
5150 void *gspec
= argptr
;
5151 void *cur_data
= host_data
;
5152 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5153 int spec_size
= thunk_type_size(arg_type
, 0);
5156 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5157 struct dm_target_spec
*spec
= cur_data
;
5161 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
5162 slen
= strlen((char*)gspec
+ spec_size
) + 1;
5164 spec
->next
= sizeof(*spec
) + slen
;
5165 strcpy((char*)&spec
[1], gspec
+ spec_size
);
5167 cur_data
+= spec
->next
;
5172 ret
= -TARGET_EINVAL
;
5173 unlock_user(argptr
, guest_data
, 0);
5176 unlock_user(argptr
, guest_data
, 0);
5178 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5179 if (!is_error(ret
)) {
5180 guest_data
= arg
+ host_dm
->data_start
;
5181 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5182 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
5183 switch (ie
->host_cmd
) {
5188 case DM_DEV_SUSPEND
:
5191 case DM_TABLE_CLEAR
:
5193 case DM_DEV_SET_GEOMETRY
:
5194 /* no return data */
5196 case DM_LIST_DEVICES
:
5198 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
5199 uint32_t remaining_data
= guest_data_size
;
5200 void *cur_data
= argptr
;
5201 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
5202 int nl_size
= 12; /* can't use thunk_size due to alignment */
5205 uint32_t next
= nl
->next
;
5207 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
5209 if (remaining_data
< nl
->next
) {
5210 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5213 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
5214 strcpy(cur_data
+ nl_size
, nl
->name
);
5215 cur_data
+= nl
->next
;
5216 remaining_data
-= nl
->next
;
5220 nl
= (void*)nl
+ next
;
5225 case DM_TABLE_STATUS
:
5227 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
5228 void *cur_data
= argptr
;
5229 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5230 int spec_size
= thunk_type_size(arg_type
, 0);
5233 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5234 uint32_t next
= spec
->next
;
5235 int slen
= strlen((char*)&spec
[1]) + 1;
5236 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
5237 if (guest_data_size
< spec
->next
) {
5238 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5241 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
5242 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
5243 cur_data
= argptr
+ spec
->next
;
5244 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
5250 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
5251 int count
= *(uint32_t*)hdata
;
5252 uint64_t *hdev
= hdata
+ 8;
5253 uint64_t *gdev
= argptr
+ 8;
5256 *(uint32_t*)argptr
= tswap32(count
);
5257 for (i
= 0; i
< count
; i
++) {
5258 *gdev
= tswap64(*hdev
);
5264 case DM_LIST_VERSIONS
:
5266 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
5267 uint32_t remaining_data
= guest_data_size
;
5268 void *cur_data
= argptr
;
5269 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
5270 int vers_size
= thunk_type_size(arg_type
, 0);
5273 uint32_t next
= vers
->next
;
5275 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
5277 if (remaining_data
< vers
->next
) {
5278 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5281 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
5282 strcpy(cur_data
+ vers_size
, vers
->name
);
5283 cur_data
+= vers
->next
;
5284 remaining_data
-= vers
->next
;
5288 vers
= (void*)vers
+ next
;
5293 unlock_user(argptr
, guest_data
, 0);
5294 ret
= -TARGET_EINVAL
;
5297 unlock_user(argptr
, guest_data
, guest_data_size
);
5299 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5301 ret
= -TARGET_EFAULT
;
5304 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5305 unlock_user(argptr
, arg
, target_size
);
5312 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5313 int cmd
, abi_long arg
)
5317 const argtype
*arg_type
= ie
->arg_type
;
5318 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
5321 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
5322 struct blkpg_partition host_part
;
5324 /* Read and convert blkpg */
5326 target_size
= thunk_type_size(arg_type
, 0);
5327 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5329 ret
= -TARGET_EFAULT
;
5332 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5333 unlock_user(argptr
, arg
, 0);
5335 switch (host_blkpg
->op
) {
5336 case BLKPG_ADD_PARTITION
:
5337 case BLKPG_DEL_PARTITION
:
5338 /* payload is struct blkpg_partition */
5341 /* Unknown opcode */
5342 ret
= -TARGET_EINVAL
;
5346 /* Read and convert blkpg->data */
5347 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
5348 target_size
= thunk_type_size(part_arg_type
, 0);
5349 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5351 ret
= -TARGET_EFAULT
;
5354 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
5355 unlock_user(argptr
, arg
, 0);
5357 /* Swizzle the data pointer to our local copy and call! */
5358 host_blkpg
->data
= &host_part
;
5359 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
5365 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5366 int fd
, int cmd
, abi_long arg
)
5368 const argtype
*arg_type
= ie
->arg_type
;
5369 const StructEntry
*se
;
5370 const argtype
*field_types
;
5371 const int *dst_offsets
, *src_offsets
;
5374 abi_ulong
*target_rt_dev_ptr
;
5375 unsigned long *host_rt_dev_ptr
;
5379 assert(ie
->access
== IOC_W
);
5380 assert(*arg_type
== TYPE_PTR
);
5382 assert(*arg_type
== TYPE_STRUCT
);
5383 target_size
= thunk_type_size(arg_type
, 0);
5384 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5386 return -TARGET_EFAULT
;
5389 assert(*arg_type
== (int)STRUCT_rtentry
);
5390 se
= struct_entries
+ *arg_type
++;
5391 assert(se
->convert
[0] == NULL
);
5392 /* convert struct here to be able to catch rt_dev string */
5393 field_types
= se
->field_types
;
5394 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
5395 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
5396 for (i
= 0; i
< se
->nb_fields
; i
++) {
5397 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
5398 assert(*field_types
== TYPE_PTRVOID
);
5399 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
5400 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
5401 if (*target_rt_dev_ptr
!= 0) {
5402 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
5403 tswapal(*target_rt_dev_ptr
));
5404 if (!*host_rt_dev_ptr
) {
5405 unlock_user(argptr
, arg
, 0);
5406 return -TARGET_EFAULT
;
5409 *host_rt_dev_ptr
= 0;
5414 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
5415 argptr
+ src_offsets
[i
],
5416 field_types
, THUNK_HOST
);
5418 unlock_user(argptr
, arg
, 0);
5420 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5421 if (*host_rt_dev_ptr
!= 0) {
5422 unlock_user((void *)*host_rt_dev_ptr
,
5423 *target_rt_dev_ptr
, 0);
5428 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5429 int fd
, int cmd
, abi_long arg
)
5431 int sig
= target_to_host_signal(arg
);
5432 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
5435 static IOCTLEntry ioctl_entries
[] = {
5436 #define IOCTL(cmd, access, ...) \
5437 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5438 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5439 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5444 /* ??? Implement proper locking for ioctls. */
5445 /* do_ioctl() Must return target values and target errnos. */
5446 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
5448 const IOCTLEntry
*ie
;
5449 const argtype
*arg_type
;
5451 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
5457 if (ie
->target_cmd
== 0) {
5458 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
5459 return -TARGET_ENOSYS
;
5461 if (ie
->target_cmd
== cmd
)
5465 arg_type
= ie
->arg_type
;
5467 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
5470 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
5473 switch(arg_type
[0]) {
5476 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
5480 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
5484 target_size
= thunk_type_size(arg_type
, 0);
5485 switch(ie
->access
) {
5487 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5488 if (!is_error(ret
)) {
5489 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5491 return -TARGET_EFAULT
;
5492 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5493 unlock_user(argptr
, arg
, target_size
);
5497 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5499 return -TARGET_EFAULT
;
5500 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5501 unlock_user(argptr
, arg
, 0);
5502 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5506 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5508 return -TARGET_EFAULT
;
5509 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5510 unlock_user(argptr
, arg
, 0);
5511 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5512 if (!is_error(ret
)) {
5513 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5515 return -TARGET_EFAULT
;
5516 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5517 unlock_user(argptr
, arg
, target_size
);
5523 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5524 (long)cmd
, arg_type
[0]);
5525 ret
= -TARGET_ENOSYS
;
5531 static const bitmask_transtbl iflag_tbl
[] = {
5532 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
5533 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
5534 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
5535 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
5536 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
5537 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
5538 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
5539 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
5540 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
5541 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
5542 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
5543 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
5544 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
5545 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
5549 static const bitmask_transtbl oflag_tbl
[] = {
5550 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
5551 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
5552 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
5553 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
5554 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
5555 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
5556 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
5557 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
5558 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
5559 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
5560 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
5561 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
5562 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
5563 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
5564 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
5565 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
5566 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
5567 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
5568 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
5569 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
5570 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
5571 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
5572 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
5573 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
5577 static const bitmask_transtbl cflag_tbl
[] = {
5578 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
5579 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
5580 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
5581 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
5582 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
5583 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
5584 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
5585 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
5586 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
5587 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
5588 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
5589 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
5590 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
5591 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
5592 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
5593 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
5594 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
5595 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
5596 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
5597 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
5598 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
5599 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
5600 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
5601 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
5602 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
5603 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
5604 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
5605 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
5606 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
5607 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
5608 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
5612 static const bitmask_transtbl lflag_tbl
[] = {
5613 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
5614 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
5615 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
5616 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
5617 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
5618 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
5619 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
5620 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
5621 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
5622 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
5623 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
5624 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
5625 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
5626 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
5627 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5631 static void target_to_host_termios (void *dst
, const void *src
)
5633 struct host_termios
*host
= dst
;
5634 const struct target_termios
*target
= src
;
5637 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
5639 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
5641 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
5643 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
5644 host
->c_line
= target
->c_line
;
5646 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
5647 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
5648 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
5649 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
5650 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
5651 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
5652 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
5653 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
5654 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
5655 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
5656 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
5657 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
5658 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
5659 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
5660 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
5661 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
5662 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
5663 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
5666 static void host_to_target_termios (void *dst
, const void *src
)
5668 struct target_termios
*target
= dst
;
5669 const struct host_termios
*host
= src
;
5672 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
5674 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
5676 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
5678 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
5679 target
->c_line
= host
->c_line
;
5681 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
5682 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
5683 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
5684 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
5685 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
5686 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
5687 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
5688 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
5689 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
5690 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
5691 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
5692 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
5693 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
5694 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
5695 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
5696 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
5697 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
5698 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
5701 static const StructEntry struct_termios_def
= {
5702 .convert
= { host_to_target_termios
, target_to_host_termios
},
5703 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
5704 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
5707 static bitmask_transtbl mmap_flags_tbl
[] = {
5708 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
5709 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
5710 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
5711 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
5712 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
5713 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
5714 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
5715 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
5716 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
, MAP_NORESERVE
,
5721 #if defined(TARGET_I386)
5723 /* NOTE: there is really one LDT for all the threads */
5724 static uint8_t *ldt_table
;
5726 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
5733 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
5734 if (size
> bytecount
)
5736 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
5738 return -TARGET_EFAULT
;
5739 /* ??? Should this by byteswapped? */
5740 memcpy(p
, ldt_table
, size
);
5741 unlock_user(p
, ptr
, size
);
5745 /* XXX: add locking support */
5746 static abi_long
write_ldt(CPUX86State
*env
,
5747 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
5749 struct target_modify_ldt_ldt_s ldt_info
;
5750 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5751 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5752 int seg_not_present
, useable
, lm
;
5753 uint32_t *lp
, entry_1
, entry_2
;
5755 if (bytecount
!= sizeof(ldt_info
))
5756 return -TARGET_EINVAL
;
5757 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
5758 return -TARGET_EFAULT
;
5759 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5760 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5761 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5762 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5763 unlock_user_struct(target_ldt_info
, ptr
, 0);
5765 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
5766 return -TARGET_EINVAL
;
5767 seg_32bit
= ldt_info
.flags
& 1;
5768 contents
= (ldt_info
.flags
>> 1) & 3;
5769 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5770 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5771 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5772 useable
= (ldt_info
.flags
>> 6) & 1;
5776 lm
= (ldt_info
.flags
>> 7) & 1;
5778 if (contents
== 3) {
5780 return -TARGET_EINVAL
;
5781 if (seg_not_present
== 0)
5782 return -TARGET_EINVAL
;
5784 /* allocate the LDT */
5786 env
->ldt
.base
= target_mmap(0,
5787 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
5788 PROT_READ
|PROT_WRITE
,
5789 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
5790 if (env
->ldt
.base
== -1)
5791 return -TARGET_ENOMEM
;
5792 memset(g2h(env
->ldt
.base
), 0,
5793 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
5794 env
->ldt
.limit
= 0xffff;
5795 ldt_table
= g2h(env
->ldt
.base
);
5798 /* NOTE: same code as Linux kernel */
5799 /* Allow LDTs to be cleared by the user. */
5800 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5803 read_exec_only
== 1 &&
5805 limit_in_pages
== 0 &&
5806 seg_not_present
== 1 &&
5814 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5815 (ldt_info
.limit
& 0x0ffff);
5816 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5817 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5818 (ldt_info
.limit
& 0xf0000) |
5819 ((read_exec_only
^ 1) << 9) |
5821 ((seg_not_present
^ 1) << 15) |
5823 (limit_in_pages
<< 23) |
5827 entry_2
|= (useable
<< 20);
5829 /* Install the new entry ... */
5831 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
5832 lp
[0] = tswap32(entry_1
);
5833 lp
[1] = tswap32(entry_2
);
5837 /* specific and weird i386 syscalls */
5838 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
5839 unsigned long bytecount
)
5845 ret
= read_ldt(ptr
, bytecount
);
5848 ret
= write_ldt(env
, ptr
, bytecount
, 1);
5851 ret
= write_ldt(env
, ptr
, bytecount
, 0);
5854 ret
= -TARGET_ENOSYS
;
5860 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5861 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5863 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5864 struct target_modify_ldt_ldt_s ldt_info
;
5865 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5866 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5867 int seg_not_present
, useable
, lm
;
5868 uint32_t *lp
, entry_1
, entry_2
;
5871 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5872 if (!target_ldt_info
)
5873 return -TARGET_EFAULT
;
5874 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5875 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5876 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5877 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5878 if (ldt_info
.entry_number
== -1) {
5879 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
5880 if (gdt_table
[i
] == 0) {
5881 ldt_info
.entry_number
= i
;
5882 target_ldt_info
->entry_number
= tswap32(i
);
5887 unlock_user_struct(target_ldt_info
, ptr
, 1);
5889 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
5890 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
5891 return -TARGET_EINVAL
;
5892 seg_32bit
= ldt_info
.flags
& 1;
5893 contents
= (ldt_info
.flags
>> 1) & 3;
5894 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5895 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5896 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5897 useable
= (ldt_info
.flags
>> 6) & 1;
5901 lm
= (ldt_info
.flags
>> 7) & 1;
5904 if (contents
== 3) {
5905 if (seg_not_present
== 0)
5906 return -TARGET_EINVAL
;
5909 /* NOTE: same code as Linux kernel */
5910 /* Allow LDTs to be cleared by the user. */
5911 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5912 if ((contents
== 0 &&
5913 read_exec_only
== 1 &&
5915 limit_in_pages
== 0 &&
5916 seg_not_present
== 1 &&
5924 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5925 (ldt_info
.limit
& 0x0ffff);
5926 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5927 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5928 (ldt_info
.limit
& 0xf0000) |
5929 ((read_exec_only
^ 1) << 9) |
5931 ((seg_not_present
^ 1) << 15) |
5933 (limit_in_pages
<< 23) |
5938 /* Install the new entry ... */
5940 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
5941 lp
[0] = tswap32(entry_1
);
5942 lp
[1] = tswap32(entry_2
);
5946 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5948 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5949 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5950 uint32_t base_addr
, limit
, flags
;
5951 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
5952 int seg_not_present
, useable
, lm
;
5953 uint32_t *lp
, entry_1
, entry_2
;
5955 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5956 if (!target_ldt_info
)
5957 return -TARGET_EFAULT
;
5958 idx
= tswap32(target_ldt_info
->entry_number
);
5959 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
5960 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
5961 unlock_user_struct(target_ldt_info
, ptr
, 1);
5962 return -TARGET_EINVAL
;
5964 lp
= (uint32_t *)(gdt_table
+ idx
);
5965 entry_1
= tswap32(lp
[0]);
5966 entry_2
= tswap32(lp
[1]);
5968 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
5969 contents
= (entry_2
>> 10) & 3;
5970 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
5971 seg_32bit
= (entry_2
>> 22) & 1;
5972 limit_in_pages
= (entry_2
>> 23) & 1;
5973 useable
= (entry_2
>> 20) & 1;
5977 lm
= (entry_2
>> 21) & 1;
5979 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
5980 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
5981 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
5982 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
5983 base_addr
= (entry_1
>> 16) |
5984 (entry_2
& 0xff000000) |
5985 ((entry_2
& 0xff) << 16);
5986 target_ldt_info
->base_addr
= tswapal(base_addr
);
5987 target_ldt_info
->limit
= tswap32(limit
);
5988 target_ldt_info
->flags
= tswap32(flags
);
5989 unlock_user_struct(target_ldt_info
, ptr
, 1);
5992 #endif /* TARGET_I386 && TARGET_ABI32 */
5994 #ifndef TARGET_ABI32
5995 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
6002 case TARGET_ARCH_SET_GS
:
6003 case TARGET_ARCH_SET_FS
:
6004 if (code
== TARGET_ARCH_SET_GS
)
6008 cpu_x86_load_seg(env
, idx
, 0);
6009 env
->segs
[idx
].base
= addr
;
6011 case TARGET_ARCH_GET_GS
:
6012 case TARGET_ARCH_GET_FS
:
6013 if (code
== TARGET_ARCH_GET_GS
)
6017 val
= env
->segs
[idx
].base
;
6018 if (put_user(val
, addr
, abi_ulong
))
6019 ret
= -TARGET_EFAULT
;
6022 ret
= -TARGET_EINVAL
;
6029 #endif /* defined(TARGET_I386) */
6031 #define NEW_STACK_SIZE 0x40000
6034 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
6037 pthread_mutex_t mutex
;
6038 pthread_cond_t cond
;
6041 abi_ulong child_tidptr
;
6042 abi_ulong parent_tidptr
;
6046 static void *clone_func(void *arg
)
6048 new_thread_info
*info
= arg
;
6053 rcu_register_thread();
6055 cpu
= ENV_GET_CPU(env
);
6057 ts
= (TaskState
*)cpu
->opaque
;
6058 info
->tid
= gettid();
6059 cpu
->host_tid
= info
->tid
;
6061 if (info
->child_tidptr
)
6062 put_user_u32(info
->tid
, info
->child_tidptr
);
6063 if (info
->parent_tidptr
)
6064 put_user_u32(info
->tid
, info
->parent_tidptr
);
6065 /* Enable signals. */
6066 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
6067 /* Signal to the parent that we're ready. */
6068 pthread_mutex_lock(&info
->mutex
);
6069 pthread_cond_broadcast(&info
->cond
);
6070 pthread_mutex_unlock(&info
->mutex
);
6071 /* Wait until the parent has finshed initializing the tls state. */
6072 pthread_mutex_lock(&clone_lock
);
6073 pthread_mutex_unlock(&clone_lock
);
6079 /* do_fork() Must return host values and target errnos (unlike most
6080 do_*() functions). */
6081 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
6082 abi_ulong parent_tidptr
, target_ulong newtls
,
6083 abi_ulong child_tidptr
)
6085 CPUState
*cpu
= ENV_GET_CPU(env
);
6089 CPUArchState
*new_env
;
6092 flags
&= ~CLONE_IGNORED_FLAGS
;
6094 /* Emulate vfork() with fork() */
6095 if (flags
& CLONE_VFORK
)
6096 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
6098 if (flags
& CLONE_VM
) {
6099 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
6100 new_thread_info info
;
6101 pthread_attr_t attr
;
6103 if (((flags
& CLONE_THREAD_FLAGS
) != CLONE_THREAD_FLAGS
) ||
6104 (flags
& CLONE_INVALID_THREAD_FLAGS
)) {
6105 return -TARGET_EINVAL
;
6108 ts
= g_new0(TaskState
, 1);
6109 init_task_state(ts
);
6110 /* we create a new CPU instance. */
6111 new_env
= cpu_copy(env
);
6112 /* Init regs that differ from the parent. */
6113 cpu_clone_regs(new_env
, newsp
);
6114 new_cpu
= ENV_GET_CPU(new_env
);
6115 new_cpu
->opaque
= ts
;
6116 ts
->bprm
= parent_ts
->bprm
;
6117 ts
->info
= parent_ts
->info
;
6118 ts
->signal_mask
= parent_ts
->signal_mask
;
6120 if (flags
& CLONE_CHILD_CLEARTID
) {
6121 ts
->child_tidptr
= child_tidptr
;
6124 if (flags
& CLONE_SETTLS
) {
6125 cpu_set_tls (new_env
, newtls
);
6128 /* Grab a mutex so that thread setup appears atomic. */
6129 pthread_mutex_lock(&clone_lock
);
6131 memset(&info
, 0, sizeof(info
));
6132 pthread_mutex_init(&info
.mutex
, NULL
);
6133 pthread_mutex_lock(&info
.mutex
);
6134 pthread_cond_init(&info
.cond
, NULL
);
6136 if (flags
& CLONE_CHILD_SETTID
) {
6137 info
.child_tidptr
= child_tidptr
;
6139 if (flags
& CLONE_PARENT_SETTID
) {
6140 info
.parent_tidptr
= parent_tidptr
;
6143 ret
= pthread_attr_init(&attr
);
6144 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
6145 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
6146 /* It is not safe to deliver signals until the child has finished
6147 initializing, so temporarily block all signals. */
6148 sigfillset(&sigmask
);
6149 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
6151 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
6152 /* TODO: Free new CPU state if thread creation failed. */
6154 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
6155 pthread_attr_destroy(&attr
);
6157 /* Wait for the child to initialize. */
6158 pthread_cond_wait(&info
.cond
, &info
.mutex
);
6163 pthread_mutex_unlock(&info
.mutex
);
6164 pthread_cond_destroy(&info
.cond
);
6165 pthread_mutex_destroy(&info
.mutex
);
6166 pthread_mutex_unlock(&clone_lock
);
6168 /* if no CLONE_VM, we consider it is a fork */
6169 if (flags
& CLONE_INVALID_FORK_FLAGS
) {
6170 return -TARGET_EINVAL
;
6173 /* We can't support custom termination signals */
6174 if ((flags
& CSIGNAL
) != TARGET_SIGCHLD
) {
6175 return -TARGET_EINVAL
;
6178 if (block_signals()) {
6179 return -TARGET_ERESTARTSYS
;
6185 /* Child Process. */
6187 cpu_clone_regs(env
, newsp
);
6189 /* There is a race condition here. The parent process could
6190 theoretically read the TID in the child process before the child
6191 tid is set. This would require using either ptrace
6192 (not implemented) or having *_tidptr to point at a shared memory
6193 mapping. We can't repeat the spinlock hack used above because
6194 the child process gets its own copy of the lock. */
6195 if (flags
& CLONE_CHILD_SETTID
)
6196 put_user_u32(gettid(), child_tidptr
);
6197 if (flags
& CLONE_PARENT_SETTID
)
6198 put_user_u32(gettid(), parent_tidptr
);
6199 ts
= (TaskState
*)cpu
->opaque
;
6200 if (flags
& CLONE_SETTLS
)
6201 cpu_set_tls (env
, newtls
);
6202 if (flags
& CLONE_CHILD_CLEARTID
)
6203 ts
->child_tidptr
= child_tidptr
;
6211 /* warning : doesn't handle linux specific flags... */
6212 static int target_to_host_fcntl_cmd(int cmd
)
6215 case TARGET_F_DUPFD
:
6216 case TARGET_F_GETFD
:
6217 case TARGET_F_SETFD
:
6218 case TARGET_F_GETFL
:
6219 case TARGET_F_SETFL
:
6221 case TARGET_F_GETLK
:
6223 case TARGET_F_SETLK
:
6225 case TARGET_F_SETLKW
:
6227 case TARGET_F_GETOWN
:
6229 case TARGET_F_SETOWN
:
6231 case TARGET_F_GETSIG
:
6233 case TARGET_F_SETSIG
:
6235 #if TARGET_ABI_BITS == 32
6236 case TARGET_F_GETLK64
:
6238 case TARGET_F_SETLK64
:
6240 case TARGET_F_SETLKW64
:
6243 case TARGET_F_SETLEASE
:
6245 case TARGET_F_GETLEASE
:
6247 #ifdef F_DUPFD_CLOEXEC
6248 case TARGET_F_DUPFD_CLOEXEC
:
6249 return F_DUPFD_CLOEXEC
;
6251 case TARGET_F_NOTIFY
:
6254 case TARGET_F_GETOWN_EX
:
6258 case TARGET_F_SETOWN_EX
:
6262 case TARGET_F_SETPIPE_SZ
:
6263 return F_SETPIPE_SZ
;
6264 case TARGET_F_GETPIPE_SZ
:
6265 return F_GETPIPE_SZ
;
6268 return -TARGET_EINVAL
;
6270 return -TARGET_EINVAL
;
6273 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
6274 static const bitmask_transtbl flock_tbl
[] = {
6275 TRANSTBL_CONVERT(F_RDLCK
),
6276 TRANSTBL_CONVERT(F_WRLCK
),
6277 TRANSTBL_CONVERT(F_UNLCK
),
6278 TRANSTBL_CONVERT(F_EXLCK
),
6279 TRANSTBL_CONVERT(F_SHLCK
),
6283 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
6284 abi_ulong target_flock_addr
)
6286 struct target_flock
*target_fl
;
6289 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6290 return -TARGET_EFAULT
;
6293 __get_user(l_type
, &target_fl
->l_type
);
6294 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6295 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6296 __get_user(fl
->l_start
, &target_fl
->l_start
);
6297 __get_user(fl
->l_len
, &target_fl
->l_len
);
6298 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6299 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6303 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
6304 const struct flock64
*fl
)
6306 struct target_flock
*target_fl
;
6309 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6310 return -TARGET_EFAULT
;
6313 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6314 __put_user(l_type
, &target_fl
->l_type
);
6315 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6316 __put_user(fl
->l_start
, &target_fl
->l_start
);
6317 __put_user(fl
->l_len
, &target_fl
->l_len
);
6318 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6319 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6323 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
6324 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
6326 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6327 static inline abi_long
copy_from_user_eabi_flock64(struct flock64
*fl
,
6328 abi_ulong target_flock_addr
)
6330 struct target_eabi_flock64
*target_fl
;
6333 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6334 return -TARGET_EFAULT
;
6337 __get_user(l_type
, &target_fl
->l_type
);
6338 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6339 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6340 __get_user(fl
->l_start
, &target_fl
->l_start
);
6341 __get_user(fl
->l_len
, &target_fl
->l_len
);
6342 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6343 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6347 static inline abi_long
copy_to_user_eabi_flock64(abi_ulong target_flock_addr
,
6348 const struct flock64
*fl
)
6350 struct target_eabi_flock64
*target_fl
;
6353 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6354 return -TARGET_EFAULT
;
6357 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6358 __put_user(l_type
, &target_fl
->l_type
);
6359 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6360 __put_user(fl
->l_start
, &target_fl
->l_start
);
6361 __put_user(fl
->l_len
, &target_fl
->l_len
);
6362 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6363 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6368 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
6369 abi_ulong target_flock_addr
)
6371 struct target_flock64
*target_fl
;
6374 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6375 return -TARGET_EFAULT
;
6378 __get_user(l_type
, &target_fl
->l_type
);
6379 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6380 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6381 __get_user(fl
->l_start
, &target_fl
->l_start
);
6382 __get_user(fl
->l_len
, &target_fl
->l_len
);
6383 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6384 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6388 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
6389 const struct flock64
*fl
)
6391 struct target_flock64
*target_fl
;
6394 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6395 return -TARGET_EFAULT
;
6398 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6399 __put_user(l_type
, &target_fl
->l_type
);
6400 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6401 __put_user(fl
->l_start
, &target_fl
->l_start
);
6402 __put_user(fl
->l_len
, &target_fl
->l_len
);
6403 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6404 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6408 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
6410 struct flock64 fl64
;
6412 struct f_owner_ex fox
;
6413 struct target_f_owner_ex
*target_fox
;
6416 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
6418 if (host_cmd
== -TARGET_EINVAL
)
6422 case TARGET_F_GETLK
:
6423 ret
= copy_from_user_flock(&fl64
, arg
);
6427 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6429 ret
= copy_to_user_flock(arg
, &fl64
);
6433 case TARGET_F_SETLK
:
6434 case TARGET_F_SETLKW
:
6435 ret
= copy_from_user_flock(&fl64
, arg
);
6439 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6442 case TARGET_F_GETLK64
:
6443 ret
= copy_from_user_flock64(&fl64
, arg
);
6447 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6449 ret
= copy_to_user_flock64(arg
, &fl64
);
6452 case TARGET_F_SETLK64
:
6453 case TARGET_F_SETLKW64
:
6454 ret
= copy_from_user_flock64(&fl64
, arg
);
6458 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6461 case TARGET_F_GETFL
:
6462 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6464 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
6468 case TARGET_F_SETFL
:
6469 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
6470 target_to_host_bitmask(arg
,
6475 case TARGET_F_GETOWN_EX
:
6476 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6478 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
6479 return -TARGET_EFAULT
;
6480 target_fox
->type
= tswap32(fox
.type
);
6481 target_fox
->pid
= tswap32(fox
.pid
);
6482 unlock_user_struct(target_fox
, arg
, 1);
6488 case TARGET_F_SETOWN_EX
:
6489 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
6490 return -TARGET_EFAULT
;
6491 fox
.type
= tswap32(target_fox
->type
);
6492 fox
.pid
= tswap32(target_fox
->pid
);
6493 unlock_user_struct(target_fox
, arg
, 0);
6494 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6498 case TARGET_F_SETOWN
:
6499 case TARGET_F_GETOWN
:
6500 case TARGET_F_SETSIG
:
6501 case TARGET_F_GETSIG
:
6502 case TARGET_F_SETLEASE
:
6503 case TARGET_F_GETLEASE
:
6504 case TARGET_F_SETPIPE_SZ
:
6505 case TARGET_F_GETPIPE_SZ
:
6506 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6510 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
#ifdef USE_UID16
/* Clamp a 32-bit uid to the 16-bit "high uid" overflow value. */
static inline int high2lowuid(int uid)
{
    if (uid > 65535) {
        return 65534;
    } else {
        return uid;
    }
}

static inline int high2lowgid(int gid)
{
    if (gid > 65535) {
        return 65534;
    } else {
        return gid;
    }
}

/* Widen a 16-bit uid, preserving the -1 "no change" sentinel. */
static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1) {
        return -1;
    } else {
        return uid;
    }
}

static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1) {
        return -1;
    } else {
        return gid;
    }
}

static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */
/* 32-bit uid ABIs: all of these are identity conversions. */
static inline int high2lowuid(int uid)
{
    return uid;
}

static inline int high2lowgid(int gid)
{
    return gid;
}

static inline int low2highuid(int uid)
{
    return uid;
}

static inline int low2highgid(int gid)
{
    return gid;
}

static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
6582 /* We must do direct syscalls for setting UID/GID, because we want to
6583 * implement the Linux system call semantics of "change only for this thread",
6584 * not the libc/POSIX semantics of "change for all threads in process".
6585 * (See http://ewontfix.com/17/ for more details.)
6586 * We use the 32-bit version of the syscalls if present; if it is not
6587 * then either the host architecture supports 32-bit UIDs natively with
6588 * the standard syscall, or the 16-bit UID is the best we can do.
6590 #ifdef __NR_setuid32
6591 #define __NR_sys_setuid __NR_setuid32
6593 #define __NR_sys_setuid __NR_setuid
6595 #ifdef __NR_setgid32
6596 #define __NR_sys_setgid __NR_setgid32
6598 #define __NR_sys_setgid __NR_setgid
6600 #ifdef __NR_setresuid32
6601 #define __NR_sys_setresuid __NR_setresuid32
6603 #define __NR_sys_setresuid __NR_setresuid
6605 #ifdef __NR_setresgid32
6606 #define __NR_sys_setresgid __NR_setresgid32
6608 #define __NR_sys_setresgid __NR_setresgid
6611 _syscall1(int, sys_setuid
, uid_t
, uid
)
6612 _syscall1(int, sys_setgid
, gid_t
, gid
)
6613 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
6614 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
6616 void syscall_init(void)
6619 const argtype
*arg_type
;
6623 thunk_init(STRUCT_MAX
);
6625 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6626 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6627 #include "syscall_types.h"
6629 #undef STRUCT_SPECIAL
6631 /* Build target_to_host_errno_table[] table from
6632 * host_to_target_errno_table[]. */
6633 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
6634 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
6637 /* we patch the ioctl size if necessary. We rely on the fact that
6638 no ioctl has all the bits at '1' in the size field */
6640 while (ie
->target_cmd
!= 0) {
6641 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
6642 TARGET_IOC_SIZEMASK
) {
6643 arg_type
= ie
->arg_type
;
6644 if (arg_type
[0] != TYPE_PTR
) {
6645 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
6650 size
= thunk_type_size(arg_type
, 0);
6651 ie
->target_cmd
= (ie
->target_cmd
&
6652 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
6653 (size
<< TARGET_IOC_SIZESHIFT
);
6656 /* automatic consistency check if same arch */
6657 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6658 (defined(__x86_64__) && defined(TARGET_X86_64))
6659 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
6660 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6661 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#if TARGET_ABI_BITS == 32
/* Assemble a 64-bit offset from the two 32-bit register halves a
 * 32-bit guest passes; which half is "high" depends on guest endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit guests pass the whole offset in a single register. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/*
 * truncate64 for 32-bit guests: the 64-bit length arrives as two
 * register halves (arg2/arg3), shifted up one slot on ABIs that require
 * even/odd register pairing.
 */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/*
 * ftruncate64 for 32-bit guests; same register-pair handling as
 * target_truncate64 above.
 */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
6712 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
6713 abi_ulong target_addr
)
6715 struct target_timespec
*target_ts
;
6717 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
6718 return -TARGET_EFAULT
;
6719 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6720 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6721 unlock_user_struct(target_ts
, target_addr
, 0);
6725 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
6726 struct timespec
*host_ts
)
6728 struct target_timespec
*target_ts
;
6730 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
6731 return -TARGET_EFAULT
;
6732 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6733 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6734 unlock_user_struct(target_ts
, target_addr
, 1);
6738 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
6739 abi_ulong target_addr
)
6741 struct target_itimerspec
*target_itspec
;
6743 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
6744 return -TARGET_EFAULT
;
6747 host_itspec
->it_interval
.tv_sec
=
6748 tswapal(target_itspec
->it_interval
.tv_sec
);
6749 host_itspec
->it_interval
.tv_nsec
=
6750 tswapal(target_itspec
->it_interval
.tv_nsec
);
6751 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
6752 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
6754 unlock_user_struct(target_itspec
, target_addr
, 1);
6758 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
6759 struct itimerspec
*host_its
)
6761 struct target_itimerspec
*target_itspec
;
6763 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
6764 return -TARGET_EFAULT
;
6767 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
6768 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
6770 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
6771 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
6773 unlock_user_struct(target_itspec
, target_addr
, 0);
6777 static inline abi_long
target_to_host_timex(struct timex
*host_tx
,
6778 abi_long target_addr
)
6780 struct target_timex
*target_tx
;
6782 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
6783 return -TARGET_EFAULT
;
6786 __get_user(host_tx
->modes
, &target_tx
->modes
);
6787 __get_user(host_tx
->offset
, &target_tx
->offset
);
6788 __get_user(host_tx
->freq
, &target_tx
->freq
);
6789 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6790 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
6791 __get_user(host_tx
->status
, &target_tx
->status
);
6792 __get_user(host_tx
->constant
, &target_tx
->constant
);
6793 __get_user(host_tx
->precision
, &target_tx
->precision
);
6794 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6795 __get_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6796 __get_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6797 __get_user(host_tx
->tick
, &target_tx
->tick
);
6798 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6799 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
6800 __get_user(host_tx
->shift
, &target_tx
->shift
);
6801 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
6802 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
6803 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
6804 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6805 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6806 __get_user(host_tx
->tai
, &target_tx
->tai
);
6808 unlock_user_struct(target_tx
, target_addr
, 0);
6812 static inline abi_long
host_to_target_timex(abi_long target_addr
,
6813 struct timex
*host_tx
)
6815 struct target_timex
*target_tx
;
6817 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
6818 return -TARGET_EFAULT
;
6821 __put_user(host_tx
->modes
, &target_tx
->modes
);
6822 __put_user(host_tx
->offset
, &target_tx
->offset
);
6823 __put_user(host_tx
->freq
, &target_tx
->freq
);
6824 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6825 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
6826 __put_user(host_tx
->status
, &target_tx
->status
);
6827 __put_user(host_tx
->constant
, &target_tx
->constant
);
6828 __put_user(host_tx
->precision
, &target_tx
->precision
);
6829 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6830 __put_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6831 __put_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6832 __put_user(host_tx
->tick
, &target_tx
->tick
);
6833 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6834 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
6835 __put_user(host_tx
->shift
, &target_tx
->shift
);
6836 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
6837 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
6838 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
6839 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6840 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6841 __put_user(host_tx
->tai
, &target_tx
->tai
);
6843 unlock_user_struct(target_tx
, target_addr
, 1);
6848 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
6849 abi_ulong target_addr
)
6851 struct target_sigevent
*target_sevp
;
6853 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
6854 return -TARGET_EFAULT
;
6857 /* This union is awkward on 64 bit systems because it has a 32 bit
6858 * integer and a pointer in it; we follow the conversion approach
6859 * used for handling sigval types in signal.c so the guest should get
6860 * the correct value back even if we did a 64 bit byteswap and it's
6861 * using the 32 bit integer.
6863 host_sevp
->sigev_value
.sival_ptr
=
6864 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
6865 host_sevp
->sigev_signo
=
6866 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
6867 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
6868 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
6870 unlock_user_struct(target_sevp
, target_addr
, 1);
#if defined(TARGET_NR_mlockall)
/* Translate target mlockall() flag bits into host MCL_* bits. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }
    return result;
}
#endif
6889 static inline abi_long
host_to_target_stat64(void *cpu_env
,
6890 abi_ulong target_addr
,
6891 struct stat
*host_st
)
6893 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6894 if (((CPUARMState
*)cpu_env
)->eabi
) {
6895 struct target_eabi_stat64
*target_st
;
6897 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
6898 return -TARGET_EFAULT
;
6899 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
6900 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
6901 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
6902 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6903 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
6905 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
6906 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
6907 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
6908 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
6909 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
6910 __put_user(host_st
->st_size
, &target_st
->st_size
);
6911 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
6912 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
6913 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
6914 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
6915 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
6916 unlock_user_struct(target_st
, target_addr
, 1);
6920 #if defined(TARGET_HAS_STRUCT_STAT64)
6921 struct target_stat64
*target_st
;
6923 struct target_stat
*target_st
;
6926 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
6927 return -TARGET_EFAULT
;
6928 memset(target_st
, 0, sizeof(*target_st
));
6929 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
6930 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
6931 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6932 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
6934 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
6935 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
6936 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
6937 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
6938 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
6939 /* XXX: better use of kernel struct */
6940 __put_user(host_st
->st_size
, &target_st
->st_size
);
6941 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
6942 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
6943 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
6944 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
6945 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
6946 unlock_user_struct(target_st
, target_addr
, 1);
6952 /* ??? Using host futex calls even when target atomic operations
6953 are not really atomic probably breaks things. However implementing
6954 futexes locally would make futexes shared between multiple processes
6955 tricky. However they're probably useless because guest atomic
6956 operations won't work either. */
6957 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
6958 target_ulong uaddr2
, int val3
)
6960 struct timespec ts
, *pts
;
6963 /* ??? We assume FUTEX_* constants are the same on both host
6965 #ifdef FUTEX_CMD_MASK
6966 base_op
= op
& FUTEX_CMD_MASK
;
6972 case FUTEX_WAIT_BITSET
:
6975 target_to_host_timespec(pts
, timeout
);
6979 return get_errno(safe_futex(g2h(uaddr
), op
, tswap32(val
),
6982 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6984 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6986 case FUTEX_CMP_REQUEUE
:
6988 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6989 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6990 But the prototype takes a `struct timespec *'; insert casts
6991 to satisfy the compiler. We do not need to tswap TIMEOUT
6992 since it's not compared to guest memory. */
6993 pts
= (struct timespec
*)(uintptr_t) timeout
;
6994 return get_errno(safe_futex(g2h(uaddr
), op
, val
, pts
,
6996 (base_op
== FUTEX_CMP_REQUEUE
7000 return -TARGET_ENOSYS
;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate name_to_handle_at(2): read the guest's requested handle size,
 * call the host syscall into a scratch buffer, then copy the (opaque)
 * handle back with header fields byteswapped.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate open_by_handle_at(2): copy the guest's file handle to a host
 * buffer (byteswapping the header) and open it with translated flags.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                                      target_to_host_bitmask(flags,
                                                             fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
7091 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7093 /* signalfd siginfo conversion */
7096 host_to_target_signalfd_siginfo(struct signalfd_siginfo
*tinfo
,
7097 const struct signalfd_siginfo
*info
)
7099 int sig
= host_to_target_signal(info
->ssi_signo
);
7101 /* linux/signalfd.h defines a ssi_addr_lsb
7102 * not defined in sys/signalfd.h but used by some kernels
7105 #ifdef BUS_MCEERR_AO
7106 if (tinfo
->ssi_signo
== SIGBUS
&&
7107 (tinfo
->ssi_code
== BUS_MCEERR_AR
||
7108 tinfo
->ssi_code
== BUS_MCEERR_AO
)) {
7109 uint16_t *ssi_addr_lsb
= (uint16_t *)(&info
->ssi_addr
+ 1);
7110 uint16_t *tssi_addr_lsb
= (uint16_t *)(&tinfo
->ssi_addr
+ 1);
7111 *tssi_addr_lsb
= tswap16(*ssi_addr_lsb
);
7115 tinfo
->ssi_signo
= tswap32(sig
);
7116 tinfo
->ssi_errno
= tswap32(tinfo
->ssi_errno
);
7117 tinfo
->ssi_code
= tswap32(info
->ssi_code
);
7118 tinfo
->ssi_pid
= tswap32(info
->ssi_pid
);
7119 tinfo
->ssi_uid
= tswap32(info
->ssi_uid
);
7120 tinfo
->ssi_fd
= tswap32(info
->ssi_fd
);
7121 tinfo
->ssi_tid
= tswap32(info
->ssi_tid
);
7122 tinfo
->ssi_band
= tswap32(info
->ssi_band
);
7123 tinfo
->ssi_overrun
= tswap32(info
->ssi_overrun
);
7124 tinfo
->ssi_trapno
= tswap32(info
->ssi_trapno
);
7125 tinfo
->ssi_status
= tswap32(info
->ssi_status
);
7126 tinfo
->ssi_int
= tswap32(info
->ssi_int
);
7127 tinfo
->ssi_ptr
= tswap64(info
->ssi_ptr
);
7128 tinfo
->ssi_utime
= tswap64(info
->ssi_utime
);
7129 tinfo
->ssi_stime
= tswap64(info
->ssi_stime
);
7130 tinfo
->ssi_addr
= tswap64(info
->ssi_addr
);
7133 static abi_long
host_to_target_data_signalfd(void *buf
, size_t len
)
7137 for (i
= 0; i
< len
; i
+= sizeof(struct signalfd_siginfo
)) {
7138 host_to_target_signalfd_siginfo(buf
+ i
, buf
+ i
);
7144 static TargetFdTrans target_signalfd_trans
= {
7145 .host_to_target_data
= host_to_target_data_signalfd
,
7148 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
7151 target_sigset_t
*target_mask
;
7155 if (flags
& ~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
)) {
7156 return -TARGET_EINVAL
;
7158 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
7159 return -TARGET_EFAULT
;
7162 target_to_host_sigset(&host_mask
, target_mask
);
7164 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
7166 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
7168 fd_trans_register(ret
, &target_signalfd_trans
);
7171 unlock_user_struct(target_mask
, mask
, 0);
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Low 7 bits hold the terminating signal; remap just those. */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Stop signal lives in bits 8-15. */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
/*
 * Emulate /proc/self/cmdline: stream the host file into fd, but drop
 * the first NUL-terminated string (the qemu interpreter path) so the
 * guest sees its own argv[0] first.  Returns 0 on success, -1 on error.
 */
static int open_self_cmdline(void *cpu_env, int fd)
{
    int fd_orig = -1;
    bool word_skipped = false;

    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;
    }

    while (true) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            int e = errno;
            fd_orig = close(fd_orig);
            errno = e;
            return -1;
        } else if (nb_read == 0) {
            break;
        }

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command. */
            cp_buf = memchr(buf, 0, nb_read);
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;
            }
        }

        if (word_skipped) {
            if (write(fd, cp_buf, nb_read) != nb_read) {
                int e = errno;
                close(fd_orig);
                errno = e;
                return -1;
            }
        }
    }

    return close(fd_orig);
}
7241 static int open_self_maps(void *cpu_env
, int fd
)
7243 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7244 TaskState
*ts
= cpu
->opaque
;
7250 fp
= fopen("/proc/self/maps", "r");
7255 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7256 int fields
, dev_maj
, dev_min
, inode
;
7257 uint64_t min
, max
, offset
;
7258 char flag_r
, flag_w
, flag_x
, flag_p
;
7259 char path
[512] = "";
7260 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
7261 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
7262 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
7264 if ((fields
< 10) || (fields
> 11)) {
7267 if (h2g_valid(min
)) {
7268 int flags
= page_get_flags(h2g(min
));
7269 max
= h2g_valid(max
- 1) ? max
: (uintptr_t)g2h(GUEST_ADDR_MAX
);
7270 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
7273 if (h2g(min
) == ts
->info
->stack_limit
) {
7274 pstrcpy(path
, sizeof(path
), " [stack]");
7276 dprintf(fd
, TARGET_ABI_FMT_lx
"-" TARGET_ABI_FMT_lx
7277 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
7278 h2g(min
), h2g(max
- 1) + 1, flag_r
, flag_w
,
7279 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
7280 path
[0] ? " " : "", path
);
7290 static int open_self_stat(void *cpu_env
, int fd
)
7292 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7293 TaskState
*ts
= cpu
->opaque
;
7294 abi_ulong start_stack
= ts
->info
->start_stack
;
7297 for (i
= 0; i
< 44; i
++) {
7305 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
7306 } else if (i
== 1) {
7308 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
7309 } else if (i
== 27) {
7312 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
7314 /* for the rest, there is MasterCard */
7315 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
7319 if (write(fd
, buf
, len
) != len
) {
7327 static int open_self_auxv(void *cpu_env
, int fd
)
7329 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7330 TaskState
*ts
= cpu
->opaque
;
7331 abi_ulong auxv
= ts
->info
->saved_auxv
;
7332 abi_ulong len
= ts
->info
->auxv_len
;
7336 * Auxiliary vector is stored in target process stack.
7337 * read in whole auxv vector and copy it to file
7339 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
7343 r
= write(fd
, ptr
, len
);
7350 lseek(fd
, 0, SEEK_SET
);
7351 unlock_user(ptr
, auxv
, len
);
/*
 * Return 1 if filename refers to this process's own /proc entry named
 * `entry` — i.e. "/proc/self/<entry>" or "/proc/<our pid>/<entry>" —
 * otherwise 0.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}
7381 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/*
 * Exact-match test used for faked /proc entries that live at an
 * absolute path (e.g. "/proc/net/route") rather than under
 * /proc/self.  Returns 1 when filename equals entry, else 0.
 */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
7387 static int open_net_route(void *cpu_env
, int fd
)
7394 fp
= fopen("/proc/net/route", "r");
7401 read
= getline(&line
, &len
, fp
);
7402 dprintf(fd
, "%s", line
);
7406 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7408 uint32_t dest
, gw
, mask
;
7409 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
7410 sscanf(line
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7411 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
7412 &mask
, &mtu
, &window
, &irtt
);
7413 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7414 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
7415 metric
, tswap32(mask
), mtu
, window
, irtt
);
7425 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
7428 const char *filename
;
7429 int (*fill
)(void *cpu_env
, int fd
);
7430 int (*cmp
)(const char *s1
, const char *s2
);
7432 const struct fake_open
*fake_open
;
7433 static const struct fake_open fakes
[] = {
7434 { "maps", open_self_maps
, is_proc_myself
},
7435 { "stat", open_self_stat
, is_proc_myself
},
7436 { "auxv", open_self_auxv
, is_proc_myself
},
7437 { "cmdline", open_self_cmdline
, is_proc_myself
},
7438 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7439 { "/proc/net/route", open_net_route
, is_proc
},
7441 { NULL
, NULL
, NULL
}
7444 if (is_proc_myself(pathname
, "exe")) {
7445 int execfd
= qemu_getauxval(AT_EXECFD
);
7446 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
7449 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
7450 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
7455 if (fake_open
->filename
) {
7457 char filename
[PATH_MAX
];
7460 /* create temporary file to map stat to */
7461 tmpdir
= getenv("TMPDIR");
7464 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
7465 fd
= mkstemp(filename
);
7471 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
7477 lseek(fd
, 0, SEEK_SET
);
7482 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
7485 #define TIMER_MAGIC 0x0caf0000
7486 #define TIMER_MAGIC_MASK 0xffff0000
7488 /* Convert QEMU provided timer ID back to internal 16bit index format */
7489 static target_timer_t
get_timer_id(abi_long arg
)
7491 target_timer_t timerid
= arg
;
7493 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
7494 return -TARGET_EINVAL
;
7499 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
7500 return -TARGET_EINVAL
;
7506 /* do_syscall() should always have a single exit point at the end so
7507 that actions, such as logging of syscall results, can be performed.
7508 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7509 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
7510 abi_long arg2
, abi_long arg3
, abi_long arg4
,
7511 abi_long arg5
, abi_long arg6
, abi_long arg7
,
7514 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
7520 #if defined(DEBUG_ERESTARTSYS)
7521 /* Debug-only code for exercising the syscall-restart code paths
7522 * in the per-architecture cpu main loops: restart every syscall
7523 * the guest makes once before letting it through.
7530 return -TARGET_ERESTARTSYS
;
7536 gemu_log("syscall %d", num
);
7538 trace_guest_user_syscall(cpu
, num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
7540 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7543 case TARGET_NR_exit
:
7544 /* In old applications this may be used to implement _exit(2).
7545 However in threaded applictions it is used for thread termination,
7546 and _exit_group is used for application termination.
7547 Do thread termination if we have more then one thread. */
7549 if (block_signals()) {
7550 ret
= -TARGET_ERESTARTSYS
;
7556 if (CPU_NEXT(first_cpu
)) {
7559 /* Remove the CPU from the list. */
7560 QTAILQ_REMOVE(&cpus
, cpu
, node
);
7565 if (ts
->child_tidptr
) {
7566 put_user_u32(0, ts
->child_tidptr
);
7567 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
7571 object_unref(OBJECT(cpu
));
7573 rcu_unregister_thread();
7581 gdb_exit(cpu_env
, arg1
);
7583 ret
= 0; /* avoid warning */
7585 case TARGET_NR_read
:
7589 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7591 ret
= get_errno(safe_read(arg1
, p
, arg3
));
7593 fd_trans_host_to_target_data(arg1
)) {
7594 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
7596 unlock_user(p
, arg2
, ret
);
7599 case TARGET_NR_write
:
7600 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
7602 ret
= get_errno(safe_write(arg1
, p
, arg3
));
7603 unlock_user(p
, arg2
, 0);
7605 #ifdef TARGET_NR_open
7606 case TARGET_NR_open
:
7607 if (!(p
= lock_user_string(arg1
)))
7609 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
7610 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
7612 fd_trans_unregister(ret
);
7613 unlock_user(p
, arg1
, 0);
7616 case TARGET_NR_openat
:
7617 if (!(p
= lock_user_string(arg2
)))
7619 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
7620 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
7622 fd_trans_unregister(ret
);
7623 unlock_user(p
, arg2
, 0);
7625 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7626 case TARGET_NR_name_to_handle_at
:
7627 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
7630 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7631 case TARGET_NR_open_by_handle_at
:
7632 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
7633 fd_trans_unregister(ret
);
7636 case TARGET_NR_close
:
7637 fd_trans_unregister(arg1
);
7638 ret
= get_errno(close(arg1
));
7643 #ifdef TARGET_NR_fork
7644 case TARGET_NR_fork
:
7645 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
7648 #ifdef TARGET_NR_waitpid
7649 case TARGET_NR_waitpid
:
7652 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
7653 if (!is_error(ret
) && arg2
&& ret
7654 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
7659 #ifdef TARGET_NR_waitid
7660 case TARGET_NR_waitid
:
7664 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
7665 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
7666 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
7668 host_to_target_siginfo(p
, &info
);
7669 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
7674 #ifdef TARGET_NR_creat /* not on alpha */
7675 case TARGET_NR_creat
:
7676 if (!(p
= lock_user_string(arg1
)))
7678 ret
= get_errno(creat(p
, arg2
));
7679 fd_trans_unregister(ret
);
7680 unlock_user(p
, arg1
, 0);
7683 #ifdef TARGET_NR_link
7684 case TARGET_NR_link
:
7687 p
= lock_user_string(arg1
);
7688 p2
= lock_user_string(arg2
);
7690 ret
= -TARGET_EFAULT
;
7692 ret
= get_errno(link(p
, p2
));
7693 unlock_user(p2
, arg2
, 0);
7694 unlock_user(p
, arg1
, 0);
7698 #if defined(TARGET_NR_linkat)
7699 case TARGET_NR_linkat
:
7704 p
= lock_user_string(arg2
);
7705 p2
= lock_user_string(arg4
);
7707 ret
= -TARGET_EFAULT
;
7709 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
7710 unlock_user(p
, arg2
, 0);
7711 unlock_user(p2
, arg4
, 0);
7715 #ifdef TARGET_NR_unlink
7716 case TARGET_NR_unlink
:
7717 if (!(p
= lock_user_string(arg1
)))
7719 ret
= get_errno(unlink(p
));
7720 unlock_user(p
, arg1
, 0);
7723 #if defined(TARGET_NR_unlinkat)
7724 case TARGET_NR_unlinkat
:
7725 if (!(p
= lock_user_string(arg2
)))
7727 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
7728 unlock_user(p
, arg2
, 0);
7731 case TARGET_NR_execve
:
7733 char **argp
, **envp
;
7736 abi_ulong guest_argp
;
7737 abi_ulong guest_envp
;
7744 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
7745 if (get_user_ual(addr
, gp
))
7753 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
7754 if (get_user_ual(addr
, gp
))
7761 argp
= alloca((argc
+ 1) * sizeof(void *));
7762 envp
= alloca((envc
+ 1) * sizeof(void *));
7764 for (gp
= guest_argp
, q
= argp
; gp
;
7765 gp
+= sizeof(abi_ulong
), q
++) {
7766 if (get_user_ual(addr
, gp
))
7770 if (!(*q
= lock_user_string(addr
)))
7772 total_size
+= strlen(*q
) + 1;
7776 for (gp
= guest_envp
, q
= envp
; gp
;
7777 gp
+= sizeof(abi_ulong
), q
++) {
7778 if (get_user_ual(addr
, gp
))
7782 if (!(*q
= lock_user_string(addr
)))
7784 total_size
+= strlen(*q
) + 1;
7788 if (!(p
= lock_user_string(arg1
)))
7790 /* Although execve() is not an interruptible syscall it is
7791 * a special case where we must use the safe_syscall wrapper:
7792 * if we allow a signal to happen before we make the host
7793 * syscall then we will 'lose' it, because at the point of
7794 * execve the process leaves QEMU's control. So we use the
7795 * safe syscall wrapper to ensure that we either take the
7796 * signal as a guest signal, or else it does not happen
7797 * before the execve completes and makes it the other
7798 * program's problem.
7800 ret
= get_errno(safe_execve(p
, argp
, envp
));
7801 unlock_user(p
, arg1
, 0);
7806 ret
= -TARGET_EFAULT
;
7809 for (gp
= guest_argp
, q
= argp
; *q
;
7810 gp
+= sizeof(abi_ulong
), q
++) {
7811 if (get_user_ual(addr
, gp
)
7814 unlock_user(*q
, addr
, 0);
7816 for (gp
= guest_envp
, q
= envp
; *q
;
7817 gp
+= sizeof(abi_ulong
), q
++) {
7818 if (get_user_ual(addr
, gp
)
7821 unlock_user(*q
, addr
, 0);
7825 case TARGET_NR_chdir
:
7826 if (!(p
= lock_user_string(arg1
)))
7828 ret
= get_errno(chdir(p
));
7829 unlock_user(p
, arg1
, 0);
7831 #ifdef TARGET_NR_time
7832 case TARGET_NR_time
:
7835 ret
= get_errno(time(&host_time
));
7838 && put_user_sal(host_time
, arg1
))
7843 #ifdef TARGET_NR_mknod
7844 case TARGET_NR_mknod
:
7845 if (!(p
= lock_user_string(arg1
)))
7847 ret
= get_errno(mknod(p
, arg2
, arg3
));
7848 unlock_user(p
, arg1
, 0);
7851 #if defined(TARGET_NR_mknodat)
7852 case TARGET_NR_mknodat
:
7853 if (!(p
= lock_user_string(arg2
)))
7855 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
7856 unlock_user(p
, arg2
, 0);
7859 #ifdef TARGET_NR_chmod
7860 case TARGET_NR_chmod
:
7861 if (!(p
= lock_user_string(arg1
)))
7863 ret
= get_errno(chmod(p
, arg2
));
7864 unlock_user(p
, arg1
, 0);
7867 #ifdef TARGET_NR_break
7868 case TARGET_NR_break
:
7871 #ifdef TARGET_NR_oldstat
7872 case TARGET_NR_oldstat
:
7875 case TARGET_NR_lseek
:
7876 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
7878 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7879 /* Alpha specific */
7880 case TARGET_NR_getxpid
:
7881 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
7882 ret
= get_errno(getpid());
7885 #ifdef TARGET_NR_getpid
7886 case TARGET_NR_getpid
:
7887 ret
= get_errno(getpid());
7890 case TARGET_NR_mount
:
7892 /* need to look at the data field */
7896 p
= lock_user_string(arg1
);
7904 p2
= lock_user_string(arg2
);
7907 unlock_user(p
, arg1
, 0);
7913 p3
= lock_user_string(arg3
);
7916 unlock_user(p
, arg1
, 0);
7918 unlock_user(p2
, arg2
, 0);
7925 /* FIXME - arg5 should be locked, but it isn't clear how to
7926 * do that since it's not guaranteed to be a NULL-terminated
7930 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
7932 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
7934 ret
= get_errno(ret
);
7937 unlock_user(p
, arg1
, 0);
7939 unlock_user(p2
, arg2
, 0);
7941 unlock_user(p3
, arg3
, 0);
7945 #ifdef TARGET_NR_umount
7946 case TARGET_NR_umount
:
7947 if (!(p
= lock_user_string(arg1
)))
7949 ret
= get_errno(umount(p
));
7950 unlock_user(p
, arg1
, 0);
7953 #ifdef TARGET_NR_stime /* not on alpha */
7954 case TARGET_NR_stime
:
7957 if (get_user_sal(host_time
, arg1
))
7959 ret
= get_errno(stime(&host_time
));
7963 case TARGET_NR_ptrace
:
7965 #ifdef TARGET_NR_alarm /* not on alpha */
7966 case TARGET_NR_alarm
:
7970 #ifdef TARGET_NR_oldfstat
7971 case TARGET_NR_oldfstat
:
7974 #ifdef TARGET_NR_pause /* not on alpha */
7975 case TARGET_NR_pause
:
7976 if (!block_signals()) {
7977 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
7979 ret
= -TARGET_EINTR
;
7982 #ifdef TARGET_NR_utime
7983 case TARGET_NR_utime
:
7985 struct utimbuf tbuf
, *host_tbuf
;
7986 struct target_utimbuf
*target_tbuf
;
7988 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
7990 tbuf
.actime
= tswapal(target_tbuf
->actime
);
7991 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
7992 unlock_user_struct(target_tbuf
, arg2
, 0);
7997 if (!(p
= lock_user_string(arg1
)))
7999 ret
= get_errno(utime(p
, host_tbuf
));
8000 unlock_user(p
, arg1
, 0);
8004 #ifdef TARGET_NR_utimes
8005 case TARGET_NR_utimes
:
8007 struct timeval
*tvp
, tv
[2];
8009 if (copy_from_user_timeval(&tv
[0], arg2
)
8010 || copy_from_user_timeval(&tv
[1],
8011 arg2
+ sizeof(struct target_timeval
)))
8017 if (!(p
= lock_user_string(arg1
)))
8019 ret
= get_errno(utimes(p
, tvp
));
8020 unlock_user(p
, arg1
, 0);
8024 #if defined(TARGET_NR_futimesat)
8025 case TARGET_NR_futimesat
:
8027 struct timeval
*tvp
, tv
[2];
8029 if (copy_from_user_timeval(&tv
[0], arg3
)
8030 || copy_from_user_timeval(&tv
[1],
8031 arg3
+ sizeof(struct target_timeval
)))
8037 if (!(p
= lock_user_string(arg2
)))
8039 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
8040 unlock_user(p
, arg2
, 0);
8044 #ifdef TARGET_NR_stty
8045 case TARGET_NR_stty
:
8048 #ifdef TARGET_NR_gtty
8049 case TARGET_NR_gtty
:
8052 #ifdef TARGET_NR_access
8053 case TARGET_NR_access
:
8054 if (!(p
= lock_user_string(arg1
)))
8056 ret
= get_errno(access(path(p
), arg2
));
8057 unlock_user(p
, arg1
, 0);
8060 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8061 case TARGET_NR_faccessat
:
8062 if (!(p
= lock_user_string(arg2
)))
8064 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
8065 unlock_user(p
, arg2
, 0);
8068 #ifdef TARGET_NR_nice /* not on alpha */
8069 case TARGET_NR_nice
:
8070 ret
= get_errno(nice(arg1
));
8073 #ifdef TARGET_NR_ftime
8074 case TARGET_NR_ftime
:
8077 case TARGET_NR_sync
:
8081 case TARGET_NR_kill
:
8082 ret
= get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
8084 #ifdef TARGET_NR_rename
8085 case TARGET_NR_rename
:
8088 p
= lock_user_string(arg1
);
8089 p2
= lock_user_string(arg2
);
8091 ret
= -TARGET_EFAULT
;
8093 ret
= get_errno(rename(p
, p2
));
8094 unlock_user(p2
, arg2
, 0);
8095 unlock_user(p
, arg1
, 0);
8099 #if defined(TARGET_NR_renameat)
8100 case TARGET_NR_renameat
:
8103 p
= lock_user_string(arg2
);
8104 p2
= lock_user_string(arg4
);
8106 ret
= -TARGET_EFAULT
;
8108 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
8109 unlock_user(p2
, arg4
, 0);
8110 unlock_user(p
, arg2
, 0);
8114 #ifdef TARGET_NR_mkdir
8115 case TARGET_NR_mkdir
:
8116 if (!(p
= lock_user_string(arg1
)))
8118 ret
= get_errno(mkdir(p
, arg2
));
8119 unlock_user(p
, arg1
, 0);
8122 #if defined(TARGET_NR_mkdirat)
8123 case TARGET_NR_mkdirat
:
8124 if (!(p
= lock_user_string(arg2
)))
8126 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
8127 unlock_user(p
, arg2
, 0);
8130 #ifdef TARGET_NR_rmdir
8131 case TARGET_NR_rmdir
:
8132 if (!(p
= lock_user_string(arg1
)))
8134 ret
= get_errno(rmdir(p
));
8135 unlock_user(p
, arg1
, 0);
8139 ret
= get_errno(dup(arg1
));
8141 fd_trans_dup(arg1
, ret
);
8144 #ifdef TARGET_NR_pipe
8145 case TARGET_NR_pipe
:
8146 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
8149 #ifdef TARGET_NR_pipe2
8150 case TARGET_NR_pipe2
:
8151 ret
= do_pipe(cpu_env
, arg1
,
8152 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
8155 case TARGET_NR_times
:
8157 struct target_tms
*tmsp
;
8159 ret
= get_errno(times(&tms
));
8161 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
8164 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
8165 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
8166 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
8167 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
8170 ret
= host_to_target_clock_t(ret
);
8173 #ifdef TARGET_NR_prof
8174 case TARGET_NR_prof
:
8177 #ifdef TARGET_NR_signal
8178 case TARGET_NR_signal
:
8181 case TARGET_NR_acct
:
8183 ret
= get_errno(acct(NULL
));
8185 if (!(p
= lock_user_string(arg1
)))
8187 ret
= get_errno(acct(path(p
)));
8188 unlock_user(p
, arg1
, 0);
8191 #ifdef TARGET_NR_umount2
8192 case TARGET_NR_umount2
:
8193 if (!(p
= lock_user_string(arg1
)))
8195 ret
= get_errno(umount2(p
, arg2
));
8196 unlock_user(p
, arg1
, 0);
8199 #ifdef TARGET_NR_lock
8200 case TARGET_NR_lock
:
8203 case TARGET_NR_ioctl
:
8204 ret
= do_ioctl(arg1
, arg2
, arg3
);
8206 case TARGET_NR_fcntl
:
8207 ret
= do_fcntl(arg1
, arg2
, arg3
);
8209 #ifdef TARGET_NR_mpx
8213 case TARGET_NR_setpgid
:
8214 ret
= get_errno(setpgid(arg1
, arg2
));
8216 #ifdef TARGET_NR_ulimit
8217 case TARGET_NR_ulimit
:
8220 #ifdef TARGET_NR_oldolduname
8221 case TARGET_NR_oldolduname
:
8224 case TARGET_NR_umask
:
8225 ret
= get_errno(umask(arg1
));
8227 case TARGET_NR_chroot
:
8228 if (!(p
= lock_user_string(arg1
)))
8230 ret
= get_errno(chroot(p
));
8231 unlock_user(p
, arg1
, 0);
8233 #ifdef TARGET_NR_ustat
8234 case TARGET_NR_ustat
:
8237 #ifdef TARGET_NR_dup2
8238 case TARGET_NR_dup2
:
8239 ret
= get_errno(dup2(arg1
, arg2
));
8241 fd_trans_dup(arg1
, arg2
);
8245 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8246 case TARGET_NR_dup3
:
8247 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
8249 fd_trans_dup(arg1
, arg2
);
8253 #ifdef TARGET_NR_getppid /* not on alpha */
8254 case TARGET_NR_getppid
:
8255 ret
= get_errno(getppid());
8258 #ifdef TARGET_NR_getpgrp
8259 case TARGET_NR_getpgrp
:
8260 ret
= get_errno(getpgrp());
8263 case TARGET_NR_setsid
:
8264 ret
= get_errno(setsid());
8266 #ifdef TARGET_NR_sigaction
8267 case TARGET_NR_sigaction
:
8269 #if defined(TARGET_ALPHA)
8270 struct target_sigaction act
, oact
, *pact
= 0;
8271 struct target_old_sigaction
*old_act
;
8273 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8275 act
._sa_handler
= old_act
->_sa_handler
;
8276 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8277 act
.sa_flags
= old_act
->sa_flags
;
8278 act
.sa_restorer
= 0;
8279 unlock_user_struct(old_act
, arg2
, 0);
8282 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8283 if (!is_error(ret
) && arg3
) {
8284 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8286 old_act
->_sa_handler
= oact
._sa_handler
;
8287 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8288 old_act
->sa_flags
= oact
.sa_flags
;
8289 unlock_user_struct(old_act
, arg3
, 1);
8291 #elif defined(TARGET_MIPS)
8292 struct target_sigaction act
, oact
, *pact
, *old_act
;
8295 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8297 act
._sa_handler
= old_act
->_sa_handler
;
8298 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
8299 act
.sa_flags
= old_act
->sa_flags
;
8300 unlock_user_struct(old_act
, arg2
, 0);
8306 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8308 if (!is_error(ret
) && arg3
) {
8309 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8311 old_act
->_sa_handler
= oact
._sa_handler
;
8312 old_act
->sa_flags
= oact
.sa_flags
;
8313 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
8314 old_act
->sa_mask
.sig
[1] = 0;
8315 old_act
->sa_mask
.sig
[2] = 0;
8316 old_act
->sa_mask
.sig
[3] = 0;
8317 unlock_user_struct(old_act
, arg3
, 1);
8320 struct target_old_sigaction
*old_act
;
8321 struct target_sigaction act
, oact
, *pact
;
8323 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8325 act
._sa_handler
= old_act
->_sa_handler
;
8326 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8327 act
.sa_flags
= old_act
->sa_flags
;
8328 act
.sa_restorer
= old_act
->sa_restorer
;
8329 unlock_user_struct(old_act
, arg2
, 0);
8334 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8335 if (!is_error(ret
) && arg3
) {
8336 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8338 old_act
->_sa_handler
= oact
._sa_handler
;
8339 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8340 old_act
->sa_flags
= oact
.sa_flags
;
8341 old_act
->sa_restorer
= oact
.sa_restorer
;
8342 unlock_user_struct(old_act
, arg3
, 1);
8348 case TARGET_NR_rt_sigaction
:
8350 #if defined(TARGET_ALPHA)
8351 struct target_sigaction act
, oact
, *pact
= 0;
8352 struct target_rt_sigaction
*rt_act
;
8354 if (arg4
!= sizeof(target_sigset_t
)) {
8355 ret
= -TARGET_EINVAL
;
8359 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
8361 act
._sa_handler
= rt_act
->_sa_handler
;
8362 act
.sa_mask
= rt_act
->sa_mask
;
8363 act
.sa_flags
= rt_act
->sa_flags
;
8364 act
.sa_restorer
= arg5
;
8365 unlock_user_struct(rt_act
, arg2
, 0);
8368 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8369 if (!is_error(ret
) && arg3
) {
8370 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
8372 rt_act
->_sa_handler
= oact
._sa_handler
;
8373 rt_act
->sa_mask
= oact
.sa_mask
;
8374 rt_act
->sa_flags
= oact
.sa_flags
;
8375 unlock_user_struct(rt_act
, arg3
, 1);
8378 struct target_sigaction
*act
;
8379 struct target_sigaction
*oact
;
8381 if (arg4
!= sizeof(target_sigset_t
)) {
8382 ret
= -TARGET_EINVAL
;
8386 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
8391 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
8392 ret
= -TARGET_EFAULT
;
8393 goto rt_sigaction_fail
;
8397 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
8400 unlock_user_struct(act
, arg2
, 0);
8402 unlock_user_struct(oact
, arg3
, 1);
8406 #ifdef TARGET_NR_sgetmask /* not on alpha */
8407 case TARGET_NR_sgetmask
:
8410 abi_ulong target_set
;
8411 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8413 host_to_target_old_sigset(&target_set
, &cur_set
);
8419 #ifdef TARGET_NR_ssetmask /* not on alpha */
8420 case TARGET_NR_ssetmask
:
8422 sigset_t set
, oset
, cur_set
;
8423 abi_ulong target_set
= arg1
;
8424 /* We only have one word of the new mask so we must read
8425 * the rest of it with do_sigprocmask() and OR in this word.
8426 * We are guaranteed that a do_sigprocmask() that only queries
8427 * the signal mask will not fail.
8429 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8431 target_to_host_old_sigset(&set
, &target_set
);
8432 sigorset(&set
, &set
, &cur_set
);
8433 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
8435 host_to_target_old_sigset(&target_set
, &oset
);
8441 #ifdef TARGET_NR_sigprocmask
8442 case TARGET_NR_sigprocmask
:
8444 #if defined(TARGET_ALPHA)
8445 sigset_t set
, oldset
;
8450 case TARGET_SIG_BLOCK
:
8453 case TARGET_SIG_UNBLOCK
:
8456 case TARGET_SIG_SETMASK
:
8460 ret
= -TARGET_EINVAL
;
8464 target_to_host_old_sigset(&set
, &mask
);
8466 ret
= do_sigprocmask(how
, &set
, &oldset
);
8467 if (!is_error(ret
)) {
8468 host_to_target_old_sigset(&mask
, &oldset
);
8470 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
8473 sigset_t set
, oldset
, *set_ptr
;
8478 case TARGET_SIG_BLOCK
:
8481 case TARGET_SIG_UNBLOCK
:
8484 case TARGET_SIG_SETMASK
:
8488 ret
= -TARGET_EINVAL
;
8491 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8493 target_to_host_old_sigset(&set
, p
);
8494 unlock_user(p
, arg2
, 0);
8500 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8501 if (!is_error(ret
) && arg3
) {
8502 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8504 host_to_target_old_sigset(p
, &oldset
);
8505 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8511 case TARGET_NR_rt_sigprocmask
:
8514 sigset_t set
, oldset
, *set_ptr
;
8516 if (arg4
!= sizeof(target_sigset_t
)) {
8517 ret
= -TARGET_EINVAL
;
8523 case TARGET_SIG_BLOCK
:
8526 case TARGET_SIG_UNBLOCK
:
8529 case TARGET_SIG_SETMASK
:
8533 ret
= -TARGET_EINVAL
;
8536 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8538 target_to_host_sigset(&set
, p
);
8539 unlock_user(p
, arg2
, 0);
8545 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8546 if (!is_error(ret
) && arg3
) {
8547 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8549 host_to_target_sigset(p
, &oldset
);
8550 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8554 #ifdef TARGET_NR_sigpending
8555 case TARGET_NR_sigpending
:
8558 ret
= get_errno(sigpending(&set
));
8559 if (!is_error(ret
)) {
8560 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8562 host_to_target_old_sigset(p
, &set
);
8563 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8568 case TARGET_NR_rt_sigpending
:
8572 /* Yes, this check is >, not != like most. We follow the kernel's
8573 * logic and it does it like this because it implements
8574 * NR_sigpending through the same code path, and in that case
8575 * the old_sigset_t is smaller in size.
8577 if (arg2
> sizeof(target_sigset_t
)) {
8578 ret
= -TARGET_EINVAL
;
8582 ret
= get_errno(sigpending(&set
));
8583 if (!is_error(ret
)) {
8584 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8586 host_to_target_sigset(p
, &set
);
8587 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8591 #ifdef TARGET_NR_sigsuspend
8592 case TARGET_NR_sigsuspend
:
8594 TaskState
*ts
= cpu
->opaque
;
8595 #if defined(TARGET_ALPHA)
8596 abi_ulong mask
= arg1
;
8597 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
8599 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8601 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
8602 unlock_user(p
, arg1
, 0);
8604 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8606 if (ret
!= -TARGET_ERESTARTSYS
) {
8607 ts
->in_sigsuspend
= 1;
8612 case TARGET_NR_rt_sigsuspend
:
8614 TaskState
*ts
= cpu
->opaque
;
8616 if (arg2
!= sizeof(target_sigset_t
)) {
8617 ret
= -TARGET_EINVAL
;
8620 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8622 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
8623 unlock_user(p
, arg1
, 0);
8624 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8626 if (ret
!= -TARGET_ERESTARTSYS
) {
8627 ts
->in_sigsuspend
= 1;
8631 case TARGET_NR_rt_sigtimedwait
:
8634 struct timespec uts
, *puts
;
8637 if (arg4
!= sizeof(target_sigset_t
)) {
8638 ret
= -TARGET_EINVAL
;
8642 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8644 target_to_host_sigset(&set
, p
);
8645 unlock_user(p
, arg1
, 0);
8648 target_to_host_timespec(puts
, arg3
);
8652 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
8654 if (!is_error(ret
)) {
8656 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
8661 host_to_target_siginfo(p
, &uinfo
);
8662 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
8664 ret
= host_to_target_signal(ret
);
8668 case TARGET_NR_rt_sigqueueinfo
:
8672 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
8676 target_to_host_siginfo(&uinfo
, p
);
8677 unlock_user(p
, arg1
, 0);
8678 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
8681 #ifdef TARGET_NR_sigreturn
8682 case TARGET_NR_sigreturn
:
8683 if (block_signals()) {
8684 ret
= -TARGET_ERESTARTSYS
;
8686 ret
= do_sigreturn(cpu_env
);
8690 case TARGET_NR_rt_sigreturn
:
8691 if (block_signals()) {
8692 ret
= -TARGET_ERESTARTSYS
;
8694 ret
= do_rt_sigreturn(cpu_env
);
8697 case TARGET_NR_sethostname
:
8698 if (!(p
= lock_user_string(arg1
)))
8700 ret
= get_errno(sethostname(p
, arg2
));
8701 unlock_user(p
, arg1
, 0);
8703 case TARGET_NR_setrlimit
:
8705 int resource
= target_to_host_resource(arg1
);
8706 struct target_rlimit
*target_rlim
;
8708 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
8710 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
8711 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
8712 unlock_user_struct(target_rlim
, arg2
, 0);
8713 ret
= get_errno(setrlimit(resource
, &rlim
));
8716 case TARGET_NR_getrlimit
:
8718 int resource
= target_to_host_resource(arg1
);
8719 struct target_rlimit
*target_rlim
;
8722 ret
= get_errno(getrlimit(resource
, &rlim
));
8723 if (!is_error(ret
)) {
8724 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
8726 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
8727 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
8728 unlock_user_struct(target_rlim
, arg2
, 1);
8732 case TARGET_NR_getrusage
:
8734 struct rusage rusage
;
8735 ret
= get_errno(getrusage(arg1
, &rusage
));
8736 if (!is_error(ret
)) {
8737 ret
= host_to_target_rusage(arg2
, &rusage
);
8741 case TARGET_NR_gettimeofday
:
8744 ret
= get_errno(gettimeofday(&tv
, NULL
));
8745 if (!is_error(ret
)) {
8746 if (copy_to_user_timeval(arg1
, &tv
))
8751 case TARGET_NR_settimeofday
:
8753 struct timeval tv
, *ptv
= NULL
;
8754 struct timezone tz
, *ptz
= NULL
;
8757 if (copy_from_user_timeval(&tv
, arg1
)) {
8764 if (copy_from_user_timezone(&tz
, arg2
)) {
8770 ret
= get_errno(settimeofday(ptv
, ptz
));
8773 #if defined(TARGET_NR_select)
8774 case TARGET_NR_select
:
8775 #if defined(TARGET_WANT_NI_OLD_SELECT)
8776 /* some architectures used to have old_select here
8777 * but now ENOSYS it.
8779 ret
= -TARGET_ENOSYS
;
8780 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8781 ret
= do_old_select(arg1
);
8783 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
8787 #ifdef TARGET_NR_pselect6
8788 case TARGET_NR_pselect6
:
8790 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
8791 fd_set rfds
, wfds
, efds
;
8792 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
8793 struct timespec ts
, *ts_ptr
;
8796 * The 6th arg is actually two args smashed together,
8797 * so we cannot use the C library.
8805 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
8806 target_sigset_t
*target_sigset
;
8814 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
8818 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
8822 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
8828 * This takes a timespec, and not a timeval, so we cannot
8829 * use the do_select() helper ...
8832 if (target_to_host_timespec(&ts
, ts_addr
)) {
8840 /* Extract the two packed args for the sigset */
8843 sig
.size
= SIGSET_T_SIZE
;
8845 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
8849 arg_sigset
= tswapal(arg7
[0]);
8850 arg_sigsize
= tswapal(arg7
[1]);
8851 unlock_user(arg7
, arg6
, 0);
8855 if (arg_sigsize
!= sizeof(*target_sigset
)) {
8856 /* Like the kernel, we enforce correct size sigsets */
8857 ret
= -TARGET_EINVAL
;
8860 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
8861 sizeof(*target_sigset
), 1);
8862 if (!target_sigset
) {
8865 target_to_host_sigset(&set
, target_sigset
);
8866 unlock_user(target_sigset
, arg_sigset
, 0);
8874 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
8877 if (!is_error(ret
)) {
8878 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
8880 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
8882 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
8885 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
8891 #ifdef TARGET_NR_symlink
8892 case TARGET_NR_symlink
:
8895 p
= lock_user_string(arg1
);
8896 p2
= lock_user_string(arg2
);
8898 ret
= -TARGET_EFAULT
;
8900 ret
= get_errno(symlink(p
, p2
));
8901 unlock_user(p2
, arg2
, 0);
8902 unlock_user(p
, arg1
, 0);
8906 #if defined(TARGET_NR_symlinkat)
8907 case TARGET_NR_symlinkat
:
8910 p
= lock_user_string(arg1
);
8911 p2
= lock_user_string(arg3
);
8913 ret
= -TARGET_EFAULT
;
8915 ret
= get_errno(symlinkat(p
, arg2
, p2
));
8916 unlock_user(p2
, arg3
, 0);
8917 unlock_user(p
, arg1
, 0);
8921 #ifdef TARGET_NR_oldlstat
8922 case TARGET_NR_oldlstat
:
8925 #ifdef TARGET_NR_readlink
8926 case TARGET_NR_readlink
:
8929 p
= lock_user_string(arg1
);
8930 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8932 ret
= -TARGET_EFAULT
;
8934 /* Short circuit this for the magic exe check. */
8935 ret
= -TARGET_EINVAL
;
8936 } else if (is_proc_myself((const char *)p
, "exe")) {
8937 char real
[PATH_MAX
], *temp
;
8938 temp
= realpath(exec_path
, real
);
8939 /* Return value is # of bytes that we wrote to the buffer. */
8941 ret
= get_errno(-1);
8943 /* Don't worry about sign mismatch as earlier mapping
8944 * logic would have thrown a bad address error. */
8945 ret
= MIN(strlen(real
), arg3
);
8946 /* We cannot NUL terminate the string. */
8947 memcpy(p2
, real
, ret
);
8950 ret
= get_errno(readlink(path(p
), p2
, arg3
));
8952 unlock_user(p2
, arg2
, ret
);
8953 unlock_user(p
, arg1
, 0);
8957 #if defined(TARGET_NR_readlinkat)
8958 case TARGET_NR_readlinkat
:
8961 p
= lock_user_string(arg2
);
8962 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8964 ret
= -TARGET_EFAULT
;
8965 } else if (is_proc_myself((const char *)p
, "exe")) {
8966 char real
[PATH_MAX
], *temp
;
8967 temp
= realpath(exec_path
, real
);
8968 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
8969 snprintf((char *)p2
, arg4
, "%s", real
);
8971 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
8973 unlock_user(p2
, arg3
, ret
);
8974 unlock_user(p
, arg2
, 0);
8978 #ifdef TARGET_NR_uselib
8979 case TARGET_NR_uselib
:
8982 #ifdef TARGET_NR_swapon
8983 case TARGET_NR_swapon
:
8984 if (!(p
= lock_user_string(arg1
)))
8986 ret
= get_errno(swapon(p
, arg2
));
8987 unlock_user(p
, arg1
, 0);
8990 case TARGET_NR_reboot
:
8991 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
8992 /* arg4 must be ignored in all other cases */
8993 p
= lock_user_string(arg4
);
8997 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
8998 unlock_user(p
, arg4
, 0);
9000 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
9003 #ifdef TARGET_NR_readdir
9004 case TARGET_NR_readdir
:
9007 #ifdef TARGET_NR_mmap
9008 case TARGET_NR_mmap
:
9009 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9010 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9011 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9012 || defined(TARGET_S390X)
9015 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
9016 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
9024 unlock_user(v
, arg1
, 0);
9025 ret
= get_errno(target_mmap(v1
, v2
, v3
,
9026 target_to_host_bitmask(v4
, mmap_flags_tbl
),
9030 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9031 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9037 #ifdef TARGET_NR_mmap2
9038 case TARGET_NR_mmap2
:
9040 #define MMAP_SHIFT 12
9042 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9043 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9045 arg6
<< MMAP_SHIFT
));
9048 case TARGET_NR_munmap
:
9049 ret
= get_errno(target_munmap(arg1
, arg2
));
9051 case TARGET_NR_mprotect
:
9053 TaskState
*ts
= cpu
->opaque
;
9054 /* Special hack to detect libc making the stack executable. */
9055 if ((arg3
& PROT_GROWSDOWN
)
9056 && arg1
>= ts
->info
->stack_limit
9057 && arg1
<= ts
->info
->start_stack
) {
9058 arg3
&= ~PROT_GROWSDOWN
;
9059 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
9060 arg1
= ts
->info
->stack_limit
;
9063 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
9065 #ifdef TARGET_NR_mremap
9066 case TARGET_NR_mremap
:
9067 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
9070 /* ??? msync/mlock/munlock are broken for softmmu. */
9071 #ifdef TARGET_NR_msync
9072 case TARGET_NR_msync
:
9073 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
9076 #ifdef TARGET_NR_mlock
9077 case TARGET_NR_mlock
:
9078 ret
= get_errno(mlock(g2h(arg1
), arg2
));
9081 #ifdef TARGET_NR_munlock
9082 case TARGET_NR_munlock
:
9083 ret
= get_errno(munlock(g2h(arg1
), arg2
));
9086 #ifdef TARGET_NR_mlockall
9087 case TARGET_NR_mlockall
:
9088 ret
= get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
9091 #ifdef TARGET_NR_munlockall
9092 case TARGET_NR_munlockall
:
9093 ret
= get_errno(munlockall());
9096 case TARGET_NR_truncate
:
9097 if (!(p
= lock_user_string(arg1
)))
9099 ret
= get_errno(truncate(p
, arg2
));
9100 unlock_user(p
, arg1
, 0);
9102 case TARGET_NR_ftruncate
:
9103 ret
= get_errno(ftruncate(arg1
, arg2
));
9105 case TARGET_NR_fchmod
:
9106 ret
= get_errno(fchmod(arg1
, arg2
));
9108 #if defined(TARGET_NR_fchmodat)
9109 case TARGET_NR_fchmodat
:
9110 if (!(p
= lock_user_string(arg2
)))
9112 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
9113 unlock_user(p
, arg2
, 0);
9116 case TARGET_NR_getpriority
:
9117 /* Note that negative values are valid for getpriority, so we must
9118 differentiate based on errno settings. */
9120 ret
= getpriority(arg1
, arg2
);
9121 if (ret
== -1 && errno
!= 0) {
9122 ret
= -host_to_target_errno(errno
);
9126 /* Return value is the unbiased priority. Signal no error. */
9127 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
9129 /* Return value is a biased priority to avoid negative numbers. */
9133 case TARGET_NR_setpriority
:
9134 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
9136 #ifdef TARGET_NR_profil
9137 case TARGET_NR_profil
:
9140 case TARGET_NR_statfs
:
9141 if (!(p
= lock_user_string(arg1
)))
9143 ret
= get_errno(statfs(path(p
), &stfs
));
9144 unlock_user(p
, arg1
, 0);
9146 if (!is_error(ret
)) {
9147 struct target_statfs
*target_stfs
;
9149 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
9151 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9152 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9153 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9154 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9155 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9156 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9157 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9158 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9159 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9160 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9161 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9162 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9163 unlock_user_struct(target_stfs
, arg2
, 1);
9166 case TARGET_NR_fstatfs
:
9167 ret
= get_errno(fstatfs(arg1
, &stfs
));
9168 goto convert_statfs
;
9169 #ifdef TARGET_NR_statfs64
9170 case TARGET_NR_statfs64
:
9171 if (!(p
= lock_user_string(arg1
)))
9173 ret
= get_errno(statfs(path(p
), &stfs
));
9174 unlock_user(p
, arg1
, 0);
9176 if (!is_error(ret
)) {
9177 struct target_statfs64
*target_stfs
;
9179 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
9181 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9182 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9183 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9184 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9185 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9186 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9187 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9188 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9189 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9190 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9191 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9192 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9193 unlock_user_struct(target_stfs
, arg3
, 1);
9196 case TARGET_NR_fstatfs64
:
9197 ret
= get_errno(fstatfs(arg1
, &stfs
));
9198 goto convert_statfs64
;
9200 #ifdef TARGET_NR_ioperm
9201 case TARGET_NR_ioperm
:
9204 #ifdef TARGET_NR_socketcall
9205 case TARGET_NR_socketcall
:
9206 ret
= do_socketcall(arg1
, arg2
);
9209 #ifdef TARGET_NR_accept
9210 case TARGET_NR_accept
:
9211 ret
= do_accept4(arg1
, arg2
, arg3
, 0);
9214 #ifdef TARGET_NR_accept4
9215 case TARGET_NR_accept4
:
9216 ret
= do_accept4(arg1
, arg2
, arg3
, arg4
);
9219 #ifdef TARGET_NR_bind
9220 case TARGET_NR_bind
:
9221 ret
= do_bind(arg1
, arg2
, arg3
);
9224 #ifdef TARGET_NR_connect
9225 case TARGET_NR_connect
:
9226 ret
= do_connect(arg1
, arg2
, arg3
);
9229 #ifdef TARGET_NR_getpeername
9230 case TARGET_NR_getpeername
:
9231 ret
= do_getpeername(arg1
, arg2
, arg3
);
9234 #ifdef TARGET_NR_getsockname
9235 case TARGET_NR_getsockname
:
9236 ret
= do_getsockname(arg1
, arg2
, arg3
);
9239 #ifdef TARGET_NR_getsockopt
9240 case TARGET_NR_getsockopt
:
9241 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
9244 #ifdef TARGET_NR_listen
9245 case TARGET_NR_listen
:
9246 ret
= get_errno(listen(arg1
, arg2
));
9249 #ifdef TARGET_NR_recv
9250 case TARGET_NR_recv
:
9251 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
9254 #ifdef TARGET_NR_recvfrom
9255 case TARGET_NR_recvfrom
:
9256 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9259 #ifdef TARGET_NR_recvmsg
9260 case TARGET_NR_recvmsg
:
9261 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
9264 #ifdef TARGET_NR_send
9265 case TARGET_NR_send
:
9266 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
9269 #ifdef TARGET_NR_sendmsg
9270 case TARGET_NR_sendmsg
:
9271 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
9274 #ifdef TARGET_NR_sendmmsg
9275 case TARGET_NR_sendmmsg
:
9276 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
9278 case TARGET_NR_recvmmsg
:
9279 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
9282 #ifdef TARGET_NR_sendto
9283 case TARGET_NR_sendto
:
9284 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9287 #ifdef TARGET_NR_shutdown
9288 case TARGET_NR_shutdown
:
9289 ret
= get_errno(shutdown(arg1
, arg2
));
9292 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9293 case TARGET_NR_getrandom
:
9294 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
9298 ret
= get_errno(getrandom(p
, arg2
, arg3
));
9299 unlock_user(p
, arg1
, ret
);
9302 #ifdef TARGET_NR_socket
9303 case TARGET_NR_socket
:
9304 ret
= do_socket(arg1
, arg2
, arg3
);
9305 fd_trans_unregister(ret
);
9308 #ifdef TARGET_NR_socketpair
9309 case TARGET_NR_socketpair
:
9310 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
9313 #ifdef TARGET_NR_setsockopt
9314 case TARGET_NR_setsockopt
:
9315 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
9319 case TARGET_NR_syslog
:
9320 if (!(p
= lock_user_string(arg2
)))
9322 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
9323 unlock_user(p
, arg2
, 0);
9326 case TARGET_NR_setitimer
:
9328 struct itimerval value
, ovalue
, *pvalue
;
9332 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
9333 || copy_from_user_timeval(&pvalue
->it_value
,
9334 arg2
+ sizeof(struct target_timeval
)))
9339 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
9340 if (!is_error(ret
) && arg3
) {
9341 if (copy_to_user_timeval(arg3
,
9342 &ovalue
.it_interval
)
9343 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
9349 case TARGET_NR_getitimer
:
9351 struct itimerval value
;
9353 ret
= get_errno(getitimer(arg1
, &value
));
9354 if (!is_error(ret
) && arg2
) {
9355 if (copy_to_user_timeval(arg2
,
9357 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
9363 #ifdef TARGET_NR_stat
9364 case TARGET_NR_stat
:
9365 if (!(p
= lock_user_string(arg1
)))
9367 ret
= get_errno(stat(path(p
), &st
));
9368 unlock_user(p
, arg1
, 0);
9371 #ifdef TARGET_NR_lstat
9372 case TARGET_NR_lstat
:
9373 if (!(p
= lock_user_string(arg1
)))
9375 ret
= get_errno(lstat(path(p
), &st
));
9376 unlock_user(p
, arg1
, 0);
9379 case TARGET_NR_fstat
:
9381 ret
= get_errno(fstat(arg1
, &st
));
9382 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9385 if (!is_error(ret
)) {
9386 struct target_stat
*target_st
;
9388 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
9390 memset(target_st
, 0, sizeof(*target_st
));
9391 __put_user(st
.st_dev
, &target_st
->st_dev
);
9392 __put_user(st
.st_ino
, &target_st
->st_ino
);
9393 __put_user(st
.st_mode
, &target_st
->st_mode
);
9394 __put_user(st
.st_uid
, &target_st
->st_uid
);
9395 __put_user(st
.st_gid
, &target_st
->st_gid
);
9396 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
9397 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
9398 __put_user(st
.st_size
, &target_st
->st_size
);
9399 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
9400 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
9401 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
9402 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
9403 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
9404 unlock_user_struct(target_st
, arg2
, 1);
9408 #ifdef TARGET_NR_olduname
9409 case TARGET_NR_olduname
:
9412 #ifdef TARGET_NR_iopl
9413 case TARGET_NR_iopl
:
9416 case TARGET_NR_vhangup
:
9417 ret
= get_errno(vhangup());
9419 #ifdef TARGET_NR_idle
9420 case TARGET_NR_idle
:
9423 #ifdef TARGET_NR_syscall
9424 case TARGET_NR_syscall
:
9425 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
9426 arg6
, arg7
, arg8
, 0);
9429 case TARGET_NR_wait4
:
9432 abi_long status_ptr
= arg2
;
9433 struct rusage rusage
, *rusage_ptr
;
9434 abi_ulong target_rusage
= arg4
;
9435 abi_long rusage_err
;
9437 rusage_ptr
= &rusage
;
9440 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
9441 if (!is_error(ret
)) {
9442 if (status_ptr
&& ret
) {
9443 status
= host_to_target_waitstatus(status
);
9444 if (put_user_s32(status
, status_ptr
))
9447 if (target_rusage
) {
9448 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
9456 #ifdef TARGET_NR_swapoff
9457 case TARGET_NR_swapoff
:
9458 if (!(p
= lock_user_string(arg1
)))
9460 ret
= get_errno(swapoff(p
));
9461 unlock_user(p
, arg1
, 0);
9464 case TARGET_NR_sysinfo
:
9466 struct target_sysinfo
*target_value
;
9467 struct sysinfo value
;
9468 ret
= get_errno(sysinfo(&value
));
9469 if (!is_error(ret
) && arg1
)
9471 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
9473 __put_user(value
.uptime
, &target_value
->uptime
);
9474 __put_user(value
.loads
[0], &target_value
->loads
[0]);
9475 __put_user(value
.loads
[1], &target_value
->loads
[1]);
9476 __put_user(value
.loads
[2], &target_value
->loads
[2]);
9477 __put_user(value
.totalram
, &target_value
->totalram
);
9478 __put_user(value
.freeram
, &target_value
->freeram
);
9479 __put_user(value
.sharedram
, &target_value
->sharedram
);
9480 __put_user(value
.bufferram
, &target_value
->bufferram
);
9481 __put_user(value
.totalswap
, &target_value
->totalswap
);
9482 __put_user(value
.freeswap
, &target_value
->freeswap
);
9483 __put_user(value
.procs
, &target_value
->procs
);
9484 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
9485 __put_user(value
.freehigh
, &target_value
->freehigh
);
9486 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
9487 unlock_user_struct(target_value
, arg1
, 1);
9491 #ifdef TARGET_NR_ipc
9493 ret
= do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9496 #ifdef TARGET_NR_semget
9497 case TARGET_NR_semget
:
9498 ret
= get_errno(semget(arg1
, arg2
, arg3
));
9501 #ifdef TARGET_NR_semop
9502 case TARGET_NR_semop
:
9503 ret
= do_semop(arg1
, arg2
, arg3
);
9506 #ifdef TARGET_NR_semctl
9507 case TARGET_NR_semctl
:
9508 ret
= do_semctl(arg1
, arg2
, arg3
, arg4
);
9511 #ifdef TARGET_NR_msgctl
9512 case TARGET_NR_msgctl
:
9513 ret
= do_msgctl(arg1
, arg2
, arg3
);
9516 #ifdef TARGET_NR_msgget
9517 case TARGET_NR_msgget
:
9518 ret
= get_errno(msgget(arg1
, arg2
));
9521 #ifdef TARGET_NR_msgrcv
9522 case TARGET_NR_msgrcv
:
9523 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
9526 #ifdef TARGET_NR_msgsnd
9527 case TARGET_NR_msgsnd
:
9528 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
9531 #ifdef TARGET_NR_shmget
9532 case TARGET_NR_shmget
:
9533 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
9536 #ifdef TARGET_NR_shmctl
9537 case TARGET_NR_shmctl
:
9538 ret
= do_shmctl(arg1
, arg2
, arg3
);
9541 #ifdef TARGET_NR_shmat
9542 case TARGET_NR_shmat
:
9543 ret
= do_shmat(cpu_env
, arg1
, arg2
, arg3
);
9546 #ifdef TARGET_NR_shmdt
9547 case TARGET_NR_shmdt
:
9548 ret
= do_shmdt(arg1
);
9551 case TARGET_NR_fsync
:
9552 ret
= get_errno(fsync(arg1
));
9554 case TARGET_NR_clone
:
9555 /* Linux manages to have three different orderings for its
9556 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9557 * match the kernel's CONFIG_CLONE_* settings.
9558 * Microblaze is further special in that it uses a sixth
9559 * implicit argument to clone for the TLS pointer.
9561 #if defined(TARGET_MICROBLAZE)
9562 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
9563 #elif defined(TARGET_CLONE_BACKWARDS)
9564 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
9565 #elif defined(TARGET_CLONE_BACKWARDS2)
9566 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
9568 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
9571 #ifdef __NR_exit_group
9572 /* new thread calls */
9573 case TARGET_NR_exit_group
:
9577 gdb_exit(cpu_env
, arg1
);
9578 ret
= get_errno(exit_group(arg1
));
9581 case TARGET_NR_setdomainname
:
9582 if (!(p
= lock_user_string(arg1
)))
9584 ret
= get_errno(setdomainname(p
, arg2
));
9585 unlock_user(p
, arg1
, 0);
9587 case TARGET_NR_uname
:
9588 /* no need to transcode because we use the linux syscall */
9590 struct new_utsname
* buf
;
9592 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
9594 ret
= get_errno(sys_uname(buf
));
9595 if (!is_error(ret
)) {
9596 /* Overwrite the native machine name with whatever is being
9598 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
9599 /* Allow the user to override the reported release. */
9600 if (qemu_uname_release
&& *qemu_uname_release
) {
9601 g_strlcpy(buf
->release
, qemu_uname_release
,
9602 sizeof(buf
->release
));
9605 unlock_user_struct(buf
, arg1
, 1);
9609 case TARGET_NR_modify_ldt
:
9610 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
9612 #if !defined(TARGET_X86_64)
9613 case TARGET_NR_vm86old
:
9615 case TARGET_NR_vm86
:
9616 ret
= do_vm86(cpu_env
, arg1
, arg2
);
9620 case TARGET_NR_adjtimex
:
9622 struct timex host_buf
;
9624 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
9627 ret
= get_errno(adjtimex(&host_buf
));
9628 if (!is_error(ret
)) {
9629 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
9635 #ifdef TARGET_NR_create_module
9636 case TARGET_NR_create_module
:
9638 case TARGET_NR_init_module
:
9639 case TARGET_NR_delete_module
:
9640 #ifdef TARGET_NR_get_kernel_syms
9641 case TARGET_NR_get_kernel_syms
:
9644 case TARGET_NR_quotactl
:
9646 case TARGET_NR_getpgid
:
9647 ret
= get_errno(getpgid(arg1
));
9649 case TARGET_NR_fchdir
:
9650 ret
= get_errno(fchdir(arg1
));
9652 #ifdef TARGET_NR_bdflush /* not on x86_64 */
9653 case TARGET_NR_bdflush
:
9656 #ifdef TARGET_NR_sysfs
9657 case TARGET_NR_sysfs
:
9660 case TARGET_NR_personality
:
9661 ret
= get_errno(personality(arg1
));
9663 #ifdef TARGET_NR_afs_syscall
9664 case TARGET_NR_afs_syscall
:
9667 #ifdef TARGET_NR__llseek /* Not on alpha */
9668 case TARGET_NR__llseek
:
9671 #if !defined(__NR_llseek)
9672 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
9674 ret
= get_errno(res
);
9679 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
9681 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
9687 #ifdef TARGET_NR_getdents
9688 case TARGET_NR_getdents
:
9689 #ifdef __NR_getdents
9690 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9692 struct target_dirent
*target_dirp
;
9693 struct linux_dirent
*dirp
;
9694 abi_long count
= arg3
;
9696 dirp
= g_try_malloc(count
);
9698 ret
= -TARGET_ENOMEM
;
9702 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9703 if (!is_error(ret
)) {
9704 struct linux_dirent
*de
;
9705 struct target_dirent
*tde
;
9707 int reclen
, treclen
;
9708 int count1
, tnamelen
;
9712 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9716 reclen
= de
->d_reclen
;
9717 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
9718 assert(tnamelen
>= 0);
9719 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
9720 assert(count1
+ treclen
<= count
);
9721 tde
->d_reclen
= tswap16(treclen
);
9722 tde
->d_ino
= tswapal(de
->d_ino
);
9723 tde
->d_off
= tswapal(de
->d_off
);
9724 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
9725 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9727 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9731 unlock_user(target_dirp
, arg2
, ret
);
9737 struct linux_dirent
*dirp
;
9738 abi_long count
= arg3
;
9740 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9742 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9743 if (!is_error(ret
)) {
9744 struct linux_dirent
*de
;
9749 reclen
= de
->d_reclen
;
9752 de
->d_reclen
= tswap16(reclen
);
9753 tswapls(&de
->d_ino
);
9754 tswapls(&de
->d_off
);
9755 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9759 unlock_user(dirp
, arg2
, ret
);
9763 /* Implement getdents in terms of getdents64 */
9765 struct linux_dirent64
*dirp
;
9766 abi_long count
= arg3
;
9768 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
9772 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9773 if (!is_error(ret
)) {
9774 /* Convert the dirent64 structs to target dirent. We do this
9775 * in-place, since we can guarantee that a target_dirent is no
9776 * larger than a dirent64; however this means we have to be
9777 * careful to read everything before writing in the new format.
9779 struct linux_dirent64
*de
;
9780 struct target_dirent
*tde
;
9785 tde
= (struct target_dirent
*)dirp
;
9787 int namelen
, treclen
;
9788 int reclen
= de
->d_reclen
;
9789 uint64_t ino
= de
->d_ino
;
9790 int64_t off
= de
->d_off
;
9791 uint8_t type
= de
->d_type
;
9793 namelen
= strlen(de
->d_name
);
9794 treclen
= offsetof(struct target_dirent
, d_name
)
9796 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
9798 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
9799 tde
->d_ino
= tswapal(ino
);
9800 tde
->d_off
= tswapal(off
);
9801 tde
->d_reclen
= tswap16(treclen
);
9802 /* The target_dirent type is in what was formerly a padding
9803 * byte at the end of the structure:
9805 *(((char *)tde
) + treclen
- 1) = type
;
9807 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9808 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9814 unlock_user(dirp
, arg2
, ret
);
9818 #endif /* TARGET_NR_getdents */
9819 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9820 case TARGET_NR_getdents64
:
9822 struct linux_dirent64
*dirp
;
9823 abi_long count
= arg3
;
9824 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9826 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9827 if (!is_error(ret
)) {
9828 struct linux_dirent64
*de
;
9833 reclen
= de
->d_reclen
;
9836 de
->d_reclen
= tswap16(reclen
);
9837 tswap64s((uint64_t *)&de
->d_ino
);
9838 tswap64s((uint64_t *)&de
->d_off
);
9839 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9843 unlock_user(dirp
, arg2
, ret
);
9846 #endif /* TARGET_NR_getdents64 */
9847 #if defined(TARGET_NR__newselect)
9848 case TARGET_NR__newselect
:
9849 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9852 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9853 # ifdef TARGET_NR_poll
9854 case TARGET_NR_poll
:
9856 # ifdef TARGET_NR_ppoll
9857 case TARGET_NR_ppoll
:
9860 struct target_pollfd
*target_pfd
;
9861 unsigned int nfds
= arg2
;
9868 if (nfds
> (INT_MAX
/ sizeof(struct target_pollfd
))) {
9869 ret
= -TARGET_EINVAL
;
9873 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
9874 sizeof(struct target_pollfd
) * nfds
, 1);
9879 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
9880 for (i
= 0; i
< nfds
; i
++) {
9881 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
9882 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
9887 # ifdef TARGET_NR_ppoll
9888 case TARGET_NR_ppoll
:
9890 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
9891 target_sigset_t
*target_set
;
9892 sigset_t _set
, *set
= &_set
;
9895 if (target_to_host_timespec(timeout_ts
, arg3
)) {
9896 unlock_user(target_pfd
, arg1
, 0);
9904 if (arg5
!= sizeof(target_sigset_t
)) {
9905 unlock_user(target_pfd
, arg1
, 0);
9906 ret
= -TARGET_EINVAL
;
9910 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
9912 unlock_user(target_pfd
, arg1
, 0);
9915 target_to_host_sigset(set
, target_set
);
9920 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
9921 set
, SIGSET_T_SIZE
));
9923 if (!is_error(ret
) && arg3
) {
9924 host_to_target_timespec(arg3
, timeout_ts
);
9927 unlock_user(target_set
, arg4
, 0);
9932 # ifdef TARGET_NR_poll
9933 case TARGET_NR_poll
:
9935 struct timespec ts
, *pts
;
9938 /* Convert ms to secs, ns */
9939 ts
.tv_sec
= arg3
/ 1000;
9940 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
9943 /* -ve poll() timeout means "infinite" */
9946 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
9951 g_assert_not_reached();
9954 if (!is_error(ret
)) {
9955 for(i
= 0; i
< nfds
; i
++) {
9956 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
9959 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
9963 case TARGET_NR_flock
:
9964 /* NOTE: the flock constant seems to be the same for every
9966 ret
= get_errno(safe_flock(arg1
, arg2
));
9968 case TARGET_NR_readv
:
9970 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
9972 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
9973 unlock_iovec(vec
, arg2
, arg3
, 1);
9975 ret
= -host_to_target_errno(errno
);
9979 case TARGET_NR_writev
:
9981 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
9983 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
9984 unlock_iovec(vec
, arg2
, arg3
, 0);
9986 ret
= -host_to_target_errno(errno
);
9990 case TARGET_NR_getsid
:
9991 ret
= get_errno(getsid(arg1
));
9993 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9994 case TARGET_NR_fdatasync
:
9995 ret
= get_errno(fdatasync(arg1
));
9998 #ifdef TARGET_NR__sysctl
9999 case TARGET_NR__sysctl
:
10000 /* We don't implement this, but ENOTDIR is always a safe
10002 ret
= -TARGET_ENOTDIR
;
10005 case TARGET_NR_sched_getaffinity
:
10007 unsigned int mask_size
;
10008 unsigned long *mask
;
10011 * sched_getaffinity needs multiples of ulong, so need to take
10012 * care of mismatches between target ulong and host ulong sizes.
10014 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10015 ret
= -TARGET_EINVAL
;
10018 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10020 mask
= alloca(mask_size
);
10021 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
10023 if (!is_error(ret
)) {
10025 /* More data returned than the caller's buffer will fit.
10026 * This only happens if sizeof(abi_long) < sizeof(long)
10027 * and the caller passed us a buffer holding an odd number
10028 * of abi_longs. If the host kernel is actually using the
10029 * extra 4 bytes then fail EINVAL; otherwise we can just
10030 * ignore them and only copy the interesting part.
10032 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
10033 if (numcpus
> arg2
* 8) {
10034 ret
= -TARGET_EINVAL
;
10040 if (copy_to_user(arg3
, mask
, ret
)) {
10046 case TARGET_NR_sched_setaffinity
:
10048 unsigned int mask_size
;
10049 unsigned long *mask
;
10052 * sched_setaffinity needs multiples of ulong, so need to take
10053 * care of mismatches between target ulong and host ulong sizes.
10055 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10056 ret
= -TARGET_EINVAL
;
10059 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10061 mask
= alloca(mask_size
);
10062 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
10065 memcpy(mask
, p
, arg2
);
10066 unlock_user_struct(p
, arg2
, 0);
10068 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
10071 case TARGET_NR_sched_setparam
:
10073 struct sched_param
*target_schp
;
10074 struct sched_param schp
;
10077 return -TARGET_EINVAL
;
10079 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
10081 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10082 unlock_user_struct(target_schp
, arg2
, 0);
10083 ret
= get_errno(sched_setparam(arg1
, &schp
));
10086 case TARGET_NR_sched_getparam
:
10088 struct sched_param
*target_schp
;
10089 struct sched_param schp
;
10092 return -TARGET_EINVAL
;
10094 ret
= get_errno(sched_getparam(arg1
, &schp
));
10095 if (!is_error(ret
)) {
10096 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
10098 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
10099 unlock_user_struct(target_schp
, arg2
, 1);
10103 case TARGET_NR_sched_setscheduler
:
10105 struct sched_param
*target_schp
;
10106 struct sched_param schp
;
10108 return -TARGET_EINVAL
;
10110 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
10112 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10113 unlock_user_struct(target_schp
, arg3
, 0);
10114 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
10117 case TARGET_NR_sched_getscheduler
:
10118 ret
= get_errno(sched_getscheduler(arg1
));
10120 case TARGET_NR_sched_yield
:
10121 ret
= get_errno(sched_yield());
10123 case TARGET_NR_sched_get_priority_max
:
10124 ret
= get_errno(sched_get_priority_max(arg1
));
10126 case TARGET_NR_sched_get_priority_min
:
10127 ret
= get_errno(sched_get_priority_min(arg1
));
10129 case TARGET_NR_sched_rr_get_interval
:
10131 struct timespec ts
;
10132 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10133 if (!is_error(ret
)) {
10134 ret
= host_to_target_timespec(arg2
, &ts
);
10138 case TARGET_NR_nanosleep
:
10140 struct timespec req
, rem
;
10141 target_to_host_timespec(&req
, arg1
);
10142 ret
= get_errno(safe_nanosleep(&req
, &rem
));
10143 if (is_error(ret
) && arg2
) {
10144 host_to_target_timespec(arg2
, &rem
);
10148 #ifdef TARGET_NR_query_module
10149 case TARGET_NR_query_module
:
10150 goto unimplemented
;
10152 #ifdef TARGET_NR_nfsservctl
10153 case TARGET_NR_nfsservctl
:
10154 goto unimplemented
;
10156 case TARGET_NR_prctl
:
10158 case PR_GET_PDEATHSIG
:
10161 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
10162 if (!is_error(ret
) && arg2
10163 && put_user_ual(deathsig
, arg2
)) {
10171 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
10175 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10176 arg3
, arg4
, arg5
));
10177 unlock_user(name
, arg2
, 16);
10182 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
10186 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10187 arg3
, arg4
, arg5
));
10188 unlock_user(name
, arg2
, 0);
10193 /* Most prctl options have no pointer arguments */
10194 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
10198 #ifdef TARGET_NR_arch_prctl
10199 case TARGET_NR_arch_prctl
:
10200 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10201 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
10204 goto unimplemented
;
10207 #ifdef TARGET_NR_pread64
10208 case TARGET_NR_pread64
:
10209 if (regpairs_aligned(cpu_env
)) {
10213 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
10215 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10216 unlock_user(p
, arg2
, ret
);
10218 case TARGET_NR_pwrite64
:
10219 if (regpairs_aligned(cpu_env
)) {
10223 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
10225 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10226 unlock_user(p
, arg2
, 0);
10229 case TARGET_NR_getcwd
:
10230 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
10232 ret
= get_errno(sys_getcwd1(p
, arg2
));
10233 unlock_user(p
, arg1
, ret
);
10235 case TARGET_NR_capget
:
10236 case TARGET_NR_capset
:
10238 struct target_user_cap_header
*target_header
;
10239 struct target_user_cap_data
*target_data
= NULL
;
10240 struct __user_cap_header_struct header
;
10241 struct __user_cap_data_struct data
[2];
10242 struct __user_cap_data_struct
*dataptr
= NULL
;
10243 int i
, target_datalen
;
10244 int data_items
= 1;
10246 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
10249 header
.version
= tswap32(target_header
->version
);
10250 header
.pid
= tswap32(target_header
->pid
);
10252 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
10253 /* Version 2 and up takes pointer to two user_data structs */
10257 target_datalen
= sizeof(*target_data
) * data_items
;
10260 if (num
== TARGET_NR_capget
) {
10261 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
10263 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
10265 if (!target_data
) {
10266 unlock_user_struct(target_header
, arg1
, 0);
10270 if (num
== TARGET_NR_capset
) {
10271 for (i
= 0; i
< data_items
; i
++) {
10272 data
[i
].effective
= tswap32(target_data
[i
].effective
);
10273 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
10274 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
10281 if (num
== TARGET_NR_capget
) {
10282 ret
= get_errno(capget(&header
, dataptr
));
10284 ret
= get_errno(capset(&header
, dataptr
));
10287 /* The kernel always updates version for both capget and capset */
10288 target_header
->version
= tswap32(header
.version
);
10289 unlock_user_struct(target_header
, arg1
, 1);
10292 if (num
== TARGET_NR_capget
) {
10293 for (i
= 0; i
< data_items
; i
++) {
10294 target_data
[i
].effective
= tswap32(data
[i
].effective
);
10295 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
10296 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
10298 unlock_user(target_data
, arg2
, target_datalen
);
10300 unlock_user(target_data
, arg2
, 0);
10305 case TARGET_NR_sigaltstack
:
10306 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
10309 #ifdef CONFIG_SENDFILE
10310 case TARGET_NR_sendfile
:
10312 off_t
*offp
= NULL
;
10315 ret
= get_user_sal(off
, arg3
);
10316 if (is_error(ret
)) {
10321 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10322 if (!is_error(ret
) && arg3
) {
10323 abi_long ret2
= put_user_sal(off
, arg3
);
10324 if (is_error(ret2
)) {
10330 #ifdef TARGET_NR_sendfile64
10331 case TARGET_NR_sendfile64
:
10333 off_t
*offp
= NULL
;
10336 ret
= get_user_s64(off
, arg3
);
10337 if (is_error(ret
)) {
10342 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10343 if (!is_error(ret
) && arg3
) {
10344 abi_long ret2
= put_user_s64(off
, arg3
);
10345 if (is_error(ret2
)) {
10353 case TARGET_NR_sendfile
:
10354 #ifdef TARGET_NR_sendfile64
10355 case TARGET_NR_sendfile64
:
10357 goto unimplemented
;
10360 #ifdef TARGET_NR_getpmsg
10361 case TARGET_NR_getpmsg
:
10362 goto unimplemented
;
10364 #ifdef TARGET_NR_putpmsg
10365 case TARGET_NR_putpmsg
:
10366 goto unimplemented
;
10368 #ifdef TARGET_NR_vfork
10369 case TARGET_NR_vfork
:
10370 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
10374 #ifdef TARGET_NR_ugetrlimit
10375 case TARGET_NR_ugetrlimit
:
10377 struct rlimit rlim
;
10378 int resource
= target_to_host_resource(arg1
);
10379 ret
= get_errno(getrlimit(resource
, &rlim
));
10380 if (!is_error(ret
)) {
10381 struct target_rlimit
*target_rlim
;
10382 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
10384 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
10385 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
10386 unlock_user_struct(target_rlim
, arg2
, 1);
10391 #ifdef TARGET_NR_truncate64
10392 case TARGET_NR_truncate64
:
10393 if (!(p
= lock_user_string(arg1
)))
10395 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
10396 unlock_user(p
, arg1
, 0);
10399 #ifdef TARGET_NR_ftruncate64
10400 case TARGET_NR_ftruncate64
:
10401 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
10404 #ifdef TARGET_NR_stat64
10405 case TARGET_NR_stat64
:
10406 if (!(p
= lock_user_string(arg1
)))
10408 ret
= get_errno(stat(path(p
), &st
));
10409 unlock_user(p
, arg1
, 0);
10410 if (!is_error(ret
))
10411 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10414 #ifdef TARGET_NR_lstat64
10415 case TARGET_NR_lstat64
:
10416 if (!(p
= lock_user_string(arg1
)))
10418 ret
= get_errno(lstat(path(p
), &st
));
10419 unlock_user(p
, arg1
, 0);
10420 if (!is_error(ret
))
10421 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10424 #ifdef TARGET_NR_fstat64
10425 case TARGET_NR_fstat64
:
10426 ret
= get_errno(fstat(arg1
, &st
));
10427 if (!is_error(ret
))
10428 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10431 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10432 #ifdef TARGET_NR_fstatat64
10433 case TARGET_NR_fstatat64
:
10435 #ifdef TARGET_NR_newfstatat
10436 case TARGET_NR_newfstatat
:
10438 if (!(p
= lock_user_string(arg2
)))
10440 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
10441 if (!is_error(ret
))
10442 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
10445 #ifdef TARGET_NR_lchown
10446 case TARGET_NR_lchown
:
10447 if (!(p
= lock_user_string(arg1
)))
10449 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10450 unlock_user(p
, arg1
, 0);
10453 #ifdef TARGET_NR_getuid
10454 case TARGET_NR_getuid
:
10455 ret
= get_errno(high2lowuid(getuid()));
10458 #ifdef TARGET_NR_getgid
10459 case TARGET_NR_getgid
:
10460 ret
= get_errno(high2lowgid(getgid()));
10463 #ifdef TARGET_NR_geteuid
10464 case TARGET_NR_geteuid
:
10465 ret
= get_errno(high2lowuid(geteuid()));
10468 #ifdef TARGET_NR_getegid
10469 case TARGET_NR_getegid
:
10470 ret
= get_errno(high2lowgid(getegid()));
10473 case TARGET_NR_setreuid
:
10474 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
10476 case TARGET_NR_setregid
:
10477 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
10479 case TARGET_NR_getgroups
:
10481 int gidsetsize
= arg1
;
10482 target_id
*target_grouplist
;
10486 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10487 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10488 if (gidsetsize
== 0)
10490 if (!is_error(ret
)) {
10491 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
10492 if (!target_grouplist
)
10494 for(i
= 0;i
< ret
; i
++)
10495 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
10496 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
10500 case TARGET_NR_setgroups
:
10502 int gidsetsize
= arg1
;
10503 target_id
*target_grouplist
;
10504 gid_t
*grouplist
= NULL
;
10507 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10508 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
10509 if (!target_grouplist
) {
10510 ret
= -TARGET_EFAULT
;
10513 for (i
= 0; i
< gidsetsize
; i
++) {
10514 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
10516 unlock_user(target_grouplist
, arg2
, 0);
10518 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
10521 case TARGET_NR_fchown
:
10522 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
10524 #if defined(TARGET_NR_fchownat)
10525 case TARGET_NR_fchownat
:
10526 if (!(p
= lock_user_string(arg2
)))
10528 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
10529 low2highgid(arg4
), arg5
));
10530 unlock_user(p
, arg2
, 0);
10533 #ifdef TARGET_NR_setresuid
10534 case TARGET_NR_setresuid
:
10535 ret
= get_errno(sys_setresuid(low2highuid(arg1
),
10537 low2highuid(arg3
)));
10540 #ifdef TARGET_NR_getresuid
10541 case TARGET_NR_getresuid
:
10543 uid_t ruid
, euid
, suid
;
10544 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10545 if (!is_error(ret
)) {
10546 if (put_user_id(high2lowuid(ruid
), arg1
)
10547 || put_user_id(high2lowuid(euid
), arg2
)
10548 || put_user_id(high2lowuid(suid
), arg3
))
10554 #ifdef TARGET_NR_getresgid
10555 case TARGET_NR_setresgid
:
10556 ret
= get_errno(sys_setresgid(low2highgid(arg1
),
10558 low2highgid(arg3
)));
10561 #ifdef TARGET_NR_getresgid
10562 case TARGET_NR_getresgid
:
10564 gid_t rgid
, egid
, sgid
;
10565 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10566 if (!is_error(ret
)) {
10567 if (put_user_id(high2lowgid(rgid
), arg1
)
10568 || put_user_id(high2lowgid(egid
), arg2
)
10569 || put_user_id(high2lowgid(sgid
), arg3
))
10575 #ifdef TARGET_NR_chown
10576 case TARGET_NR_chown
:
10577 if (!(p
= lock_user_string(arg1
)))
10579 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10580 unlock_user(p
, arg1
, 0);
10583 case TARGET_NR_setuid
:
10584 ret
= get_errno(sys_setuid(low2highuid(arg1
)));
10586 case TARGET_NR_setgid
:
10587 ret
= get_errno(sys_setgid(low2highgid(arg1
)));
10589 case TARGET_NR_setfsuid
:
10590 ret
= get_errno(setfsuid(arg1
));
10592 case TARGET_NR_setfsgid
:
10593 ret
= get_errno(setfsgid(arg1
));
10596 #ifdef TARGET_NR_lchown32
10597 case TARGET_NR_lchown32
:
10598 if (!(p
= lock_user_string(arg1
)))
10600 ret
= get_errno(lchown(p
, arg2
, arg3
));
10601 unlock_user(p
, arg1
, 0);
10604 #ifdef TARGET_NR_getuid32
10605 case TARGET_NR_getuid32
:
10606 ret
= get_errno(getuid());
10610 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10611 /* Alpha specific */
10612 case TARGET_NR_getxuid
:
10616 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
10618 ret
= get_errno(getuid());
10621 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10622 /* Alpha specific */
10623 case TARGET_NR_getxgid
:
10627 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
10629 ret
= get_errno(getgid());
10632 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10633 /* Alpha specific */
10634 case TARGET_NR_osf_getsysinfo
:
10635 ret
= -TARGET_EOPNOTSUPP
;
10637 case TARGET_GSI_IEEE_FP_CONTROL
:
10639 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
10641 /* Copied from linux ieee_fpcr_to_swcr. */
10642 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
10643 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
10644 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
10645 | SWCR_TRAP_ENABLE_DZE
10646 | SWCR_TRAP_ENABLE_OVF
);
10647 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
10648 | SWCR_TRAP_ENABLE_INE
);
10649 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
10650 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
10652 if (put_user_u64 (swcr
, arg2
))
10658 /* case GSI_IEEE_STATE_AT_SIGNAL:
10659 -- Not implemented in linux kernel.
10661 -- Retrieves current unaligned access state; not much used.
10662 case GSI_PROC_TYPE:
10663 -- Retrieves implver information; surely not used.
10664 case GSI_GET_HWRPB:
10665 -- Grabs a copy of the HWRPB; surely not used.
10670 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10671 /* Alpha specific */
10672 case TARGET_NR_osf_setsysinfo
:
10673 ret
= -TARGET_EOPNOTSUPP
;
10675 case TARGET_SSI_IEEE_FP_CONTROL
:
10677 uint64_t swcr
, fpcr
, orig_fpcr
;
10679 if (get_user_u64 (swcr
, arg2
)) {
10682 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10683 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
10685 /* Copied from linux ieee_swcr_to_fpcr. */
10686 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
10687 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
10688 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
10689 | SWCR_TRAP_ENABLE_DZE
10690 | SWCR_TRAP_ENABLE_OVF
)) << 48;
10691 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
10692 | SWCR_TRAP_ENABLE_INE
)) << 57;
10693 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
10694 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
10696 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10701 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
10703 uint64_t exc
, fpcr
, orig_fpcr
;
10706 if (get_user_u64(exc
, arg2
)) {
10710 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10712 /* We only add to the exception status here. */
10713 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
10715 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10718 /* Old exceptions are not signaled. */
10719 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
10721 /* If any exceptions set by this call,
10722 and are unmasked, send a signal. */
10724 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
10725 si_code
= TARGET_FPE_FLTRES
;
10727 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
10728 si_code
= TARGET_FPE_FLTUND
;
10730 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
10731 si_code
= TARGET_FPE_FLTOVF
;
10733 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
10734 si_code
= TARGET_FPE_FLTDIV
;
10736 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
10737 si_code
= TARGET_FPE_FLTINV
;
10739 if (si_code
!= 0) {
10740 target_siginfo_t info
;
10741 info
.si_signo
= SIGFPE
;
10743 info
.si_code
= si_code
;
10744 info
._sifields
._sigfault
._addr
10745 = ((CPUArchState
*)cpu_env
)->pc
;
10746 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
10747 QEMU_SI_FAULT
, &info
);
10752 /* case SSI_NVPAIRS:
10753 -- Used with SSIN_UACPROC to enable unaligned accesses.
10754 case SSI_IEEE_STATE_AT_SIGNAL:
10755 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10756 -- Not implemented in linux kernel
10761 #ifdef TARGET_NR_osf_sigprocmask
10762 /* Alpha specific. */
10763 case TARGET_NR_osf_sigprocmask
:
10767 sigset_t set
, oldset
;
10770 case TARGET_SIG_BLOCK
:
10773 case TARGET_SIG_UNBLOCK
:
10776 case TARGET_SIG_SETMASK
:
10780 ret
= -TARGET_EINVAL
;
10784 target_to_host_old_sigset(&set
, &mask
);
10785 ret
= do_sigprocmask(how
, &set
, &oldset
);
10787 host_to_target_old_sigset(&mask
, &oldset
);
10794 #ifdef TARGET_NR_getgid32
10795 case TARGET_NR_getgid32
:
10796 ret
= get_errno(getgid());
10799 #ifdef TARGET_NR_geteuid32
10800 case TARGET_NR_geteuid32
:
10801 ret
= get_errno(geteuid());
10804 #ifdef TARGET_NR_getegid32
10805 case TARGET_NR_getegid32
:
10806 ret
= get_errno(getegid());
10809 #ifdef TARGET_NR_setreuid32
10810 case TARGET_NR_setreuid32
:
10811 ret
= get_errno(setreuid(arg1
, arg2
));
10814 #ifdef TARGET_NR_setregid32
10815 case TARGET_NR_setregid32
:
10816 ret
= get_errno(setregid(arg1
, arg2
));
10819 #ifdef TARGET_NR_getgroups32
10820 case TARGET_NR_getgroups32
:
10822 int gidsetsize
= arg1
;
10823 uint32_t *target_grouplist
;
10827 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10828 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10829 if (gidsetsize
== 0)
10831 if (!is_error(ret
)) {
10832 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
10833 if (!target_grouplist
) {
10834 ret
= -TARGET_EFAULT
;
10837 for(i
= 0;i
< ret
; i
++)
10838 target_grouplist
[i
] = tswap32(grouplist
[i
]);
10839 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
10844 #ifdef TARGET_NR_setgroups32
10845 case TARGET_NR_setgroups32
:
10847 int gidsetsize
= arg1
;
10848 uint32_t *target_grouplist
;
10852 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10853 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
10854 if (!target_grouplist
) {
10855 ret
= -TARGET_EFAULT
;
10858 for(i
= 0;i
< gidsetsize
; i
++)
10859 grouplist
[i
] = tswap32(target_grouplist
[i
]);
10860 unlock_user(target_grouplist
, arg2
, 0);
10861 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
10865 #ifdef TARGET_NR_fchown32
10866 case TARGET_NR_fchown32
:
10867 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
10870 #ifdef TARGET_NR_setresuid32
10871 case TARGET_NR_setresuid32
:
10872 ret
= get_errno(sys_setresuid(arg1
, arg2
, arg3
));
10875 #ifdef TARGET_NR_getresuid32
10876 case TARGET_NR_getresuid32
:
10878 uid_t ruid
, euid
, suid
;
10879 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10880 if (!is_error(ret
)) {
10881 if (put_user_u32(ruid
, arg1
)
10882 || put_user_u32(euid
, arg2
)
10883 || put_user_u32(suid
, arg3
))
10889 #ifdef TARGET_NR_setresgid32
10890 case TARGET_NR_setresgid32
:
10891 ret
= get_errno(sys_setresgid(arg1
, arg2
, arg3
));
10894 #ifdef TARGET_NR_getresgid32
10895 case TARGET_NR_getresgid32
:
10897 gid_t rgid
, egid
, sgid
;
10898 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10899 if (!is_error(ret
)) {
10900 if (put_user_u32(rgid
, arg1
)
10901 || put_user_u32(egid
, arg2
)
10902 || put_user_u32(sgid
, arg3
))
10908 #ifdef TARGET_NR_chown32
10909 case TARGET_NR_chown32
:
10910 if (!(p
= lock_user_string(arg1
)))
10912 ret
= get_errno(chown(p
, arg2
, arg3
));
10913 unlock_user(p
, arg1
, 0);
10916 #ifdef TARGET_NR_setuid32
10917 case TARGET_NR_setuid32
:
10918 ret
= get_errno(sys_setuid(arg1
));
10921 #ifdef TARGET_NR_setgid32
10922 case TARGET_NR_setgid32
:
10923 ret
= get_errno(sys_setgid(arg1
));
10926 #ifdef TARGET_NR_setfsuid32
10927 case TARGET_NR_setfsuid32
:
10928 ret
= get_errno(setfsuid(arg1
));
10931 #ifdef TARGET_NR_setfsgid32
10932 case TARGET_NR_setfsgid32
:
10933 ret
= get_errno(setfsgid(arg1
));
10937 case TARGET_NR_pivot_root
:
10938 goto unimplemented
;
10939 #ifdef TARGET_NR_mincore
10940 case TARGET_NR_mincore
:
10943 ret
= -TARGET_EFAULT
;
10944 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
10946 if (!(p
= lock_user_string(arg3
)))
10948 ret
= get_errno(mincore(a
, arg2
, p
));
10949 unlock_user(p
, arg3
, ret
);
10951 unlock_user(a
, arg1
, 0);
10955 #ifdef TARGET_NR_arm_fadvise64_64
10956 case TARGET_NR_arm_fadvise64_64
:
10957 /* arm_fadvise64_64 looks like fadvise64_64 but
10958 * with different argument order: fd, advice, offset, len
10959 * rather than the usual fd, offset, len, advice.
10960 * Note that offset and len are both 64-bit so appear as
10961 * pairs of 32-bit registers.
10963 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
10964 target_offset64(arg5
, arg6
), arg2
);
10965 ret
= -host_to_target_errno(ret
);
10969 #if TARGET_ABI_BITS == 32
10971 #ifdef TARGET_NR_fadvise64_64
10972 case TARGET_NR_fadvise64_64
:
10973 /* 6 args: fd, offset (high, low), len (high, low), advice */
10974 if (regpairs_aligned(cpu_env
)) {
10975 /* offset is in (3,4), len in (5,6) and advice in 7 */
10982 ret
= -host_to_target_errno(posix_fadvise(arg1
,
10983 target_offset64(arg2
, arg3
),
10984 target_offset64(arg4
, arg5
),
10989 #ifdef TARGET_NR_fadvise64
10990 case TARGET_NR_fadvise64
:
10991 /* 5 args: fd, offset (high, low), len, advice */
10992 if (regpairs_aligned(cpu_env
)) {
10993 /* offset is in (3,4), len in 5 and advice in 6 */
10999 ret
= -host_to_target_errno(posix_fadvise(arg1
,
11000 target_offset64(arg2
, arg3
),
11005 #else /* not a 32-bit ABI */
11006 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11007 #ifdef TARGET_NR_fadvise64_64
11008 case TARGET_NR_fadvise64_64
:
11010 #ifdef TARGET_NR_fadvise64
11011 case TARGET_NR_fadvise64
:
11013 #ifdef TARGET_S390X
11015 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
11016 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
11017 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
11018 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
11022 ret
= -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
11025 #endif /* end of 64-bit ABI fadvise handling */
11027 #ifdef TARGET_NR_madvise
11028 case TARGET_NR_madvise
:
11029 /* A straight passthrough may not be safe because qemu sometimes
11030 turns private file-backed mappings into anonymous mappings.
11031 This will break MADV_DONTNEED.
11032 This is a hint, so ignoring and returning success is ok. */
11033 ret
= get_errno(0);
11036 #if TARGET_ABI_BITS == 32
11037 case TARGET_NR_fcntl64
:
11041 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
11042 to_flock64_fn
*copyto
= copy_to_user_flock64
;
11045 if (((CPUARMState
*)cpu_env
)->eabi
) {
11046 copyfrom
= copy_from_user_eabi_flock64
;
11047 copyto
= copy_to_user_eabi_flock64
;
11051 cmd
= target_to_host_fcntl_cmd(arg2
);
11052 if (cmd
== -TARGET_EINVAL
) {
11058 case TARGET_F_GETLK64
:
11059 ret
= copyfrom(&fl
, arg3
);
11063 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
11065 ret
= copyto(arg3
, &fl
);
11069 case TARGET_F_SETLK64
:
11070 case TARGET_F_SETLKW64
:
11071 ret
= copyfrom(&fl
, arg3
);
11075 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11078 ret
= do_fcntl(arg1
, arg2
, arg3
);
11084 #ifdef TARGET_NR_cacheflush
11085 case TARGET_NR_cacheflush
:
11086 /* self-modifying code is handled automatically, so nothing needed */
11090 #ifdef TARGET_NR_security
11091 case TARGET_NR_security
:
11092 goto unimplemented
;
11094 #ifdef TARGET_NR_getpagesize
11095 case TARGET_NR_getpagesize
:
11096 ret
= TARGET_PAGE_SIZE
;
11099 case TARGET_NR_gettid
:
11100 ret
= get_errno(gettid());
11102 #ifdef TARGET_NR_readahead
11103 case TARGET_NR_readahead
:
11104 #if TARGET_ABI_BITS == 32
11105 if (regpairs_aligned(cpu_env
)) {
11110 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
11112 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
11117 #ifdef TARGET_NR_setxattr
11118 case TARGET_NR_listxattr
:
11119 case TARGET_NR_llistxattr
:
11123 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11125 ret
= -TARGET_EFAULT
;
11129 p
= lock_user_string(arg1
);
11131 if (num
== TARGET_NR_listxattr
) {
11132 ret
= get_errno(listxattr(p
, b
, arg3
));
11134 ret
= get_errno(llistxattr(p
, b
, arg3
));
11137 ret
= -TARGET_EFAULT
;
11139 unlock_user(p
, arg1
, 0);
11140 unlock_user(b
, arg2
, arg3
);
11143 case TARGET_NR_flistxattr
:
11147 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11149 ret
= -TARGET_EFAULT
;
11153 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
11154 unlock_user(b
, arg2
, arg3
);
11157 case TARGET_NR_setxattr
:
11158 case TARGET_NR_lsetxattr
:
11160 void *p
, *n
, *v
= 0;
11162 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11164 ret
= -TARGET_EFAULT
;
11168 p
= lock_user_string(arg1
);
11169 n
= lock_user_string(arg2
);
11171 if (num
== TARGET_NR_setxattr
) {
11172 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
11174 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
11177 ret
= -TARGET_EFAULT
;
11179 unlock_user(p
, arg1
, 0);
11180 unlock_user(n
, arg2
, 0);
11181 unlock_user(v
, arg3
, 0);
11184 case TARGET_NR_fsetxattr
:
11188 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11190 ret
= -TARGET_EFAULT
;
11194 n
= lock_user_string(arg2
);
11196 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
11198 ret
= -TARGET_EFAULT
;
11200 unlock_user(n
, arg2
, 0);
11201 unlock_user(v
, arg3
, 0);
11204 case TARGET_NR_getxattr
:
11205 case TARGET_NR_lgetxattr
:
11207 void *p
, *n
, *v
= 0;
11209 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11211 ret
= -TARGET_EFAULT
;
11215 p
= lock_user_string(arg1
);
11216 n
= lock_user_string(arg2
);
11218 if (num
== TARGET_NR_getxattr
) {
11219 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
11221 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
11224 ret
= -TARGET_EFAULT
;
11226 unlock_user(p
, arg1
, 0);
11227 unlock_user(n
, arg2
, 0);
11228 unlock_user(v
, arg3
, arg4
);
11231 case TARGET_NR_fgetxattr
:
11235 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11237 ret
= -TARGET_EFAULT
;
11241 n
= lock_user_string(arg2
);
11243 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
11245 ret
= -TARGET_EFAULT
;
11247 unlock_user(n
, arg2
, 0);
11248 unlock_user(v
, arg3
, arg4
);
11251 case TARGET_NR_removexattr
:
11252 case TARGET_NR_lremovexattr
:
11255 p
= lock_user_string(arg1
);
11256 n
= lock_user_string(arg2
);
11258 if (num
== TARGET_NR_removexattr
) {
11259 ret
= get_errno(removexattr(p
, n
));
11261 ret
= get_errno(lremovexattr(p
, n
));
11264 ret
= -TARGET_EFAULT
;
11266 unlock_user(p
, arg1
, 0);
11267 unlock_user(n
, arg2
, 0);
11270 case TARGET_NR_fremovexattr
:
11273 n
= lock_user_string(arg2
);
11275 ret
= get_errno(fremovexattr(arg1
, n
));
11277 ret
= -TARGET_EFAULT
;
11279 unlock_user(n
, arg2
, 0);
11283 #endif /* CONFIG_ATTR */
11284 #ifdef TARGET_NR_set_thread_area
11285 case TARGET_NR_set_thread_area
:
11286 #if defined(TARGET_MIPS)
11287 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
11290 #elif defined(TARGET_CRIS)
11292 ret
= -TARGET_EINVAL
;
11294 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
11298 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11299 ret
= do_set_thread_area(cpu_env
, arg1
);
11301 #elif defined(TARGET_M68K)
11303 TaskState
*ts
= cpu
->opaque
;
11304 ts
->tp_value
= arg1
;
11309 goto unimplemented_nowarn
;
11312 #ifdef TARGET_NR_get_thread_area
11313 case TARGET_NR_get_thread_area
:
11314 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11315 ret
= do_get_thread_area(cpu_env
, arg1
);
11317 #elif defined(TARGET_M68K)
11319 TaskState
*ts
= cpu
->opaque
;
11320 ret
= ts
->tp_value
;
11324 goto unimplemented_nowarn
;
11327 #ifdef TARGET_NR_getdomainname
11328 case TARGET_NR_getdomainname
:
11329 goto unimplemented_nowarn
;
11332 #ifdef TARGET_NR_clock_gettime
11333 case TARGET_NR_clock_gettime
:
11335 struct timespec ts
;
11336 ret
= get_errno(clock_gettime(arg1
, &ts
));
11337 if (!is_error(ret
)) {
11338 host_to_target_timespec(arg2
, &ts
);
11343 #ifdef TARGET_NR_clock_getres
11344 case TARGET_NR_clock_getres
:
11346 struct timespec ts
;
11347 ret
= get_errno(clock_getres(arg1
, &ts
));
11348 if (!is_error(ret
)) {
11349 host_to_target_timespec(arg2
, &ts
);
11354 #ifdef TARGET_NR_clock_nanosleep
11355 case TARGET_NR_clock_nanosleep
:
11357 struct timespec ts
;
11358 target_to_host_timespec(&ts
, arg3
);
11359 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
11360 &ts
, arg4
? &ts
: NULL
));
11362 host_to_target_timespec(arg4
, &ts
);
11364 #if defined(TARGET_PPC)
11365 /* clock_nanosleep is odd in that it returns positive errno values.
11366 * On PPC, CR0 bit 3 should be set in such a situation. */
11367 if (ret
&& ret
!= -TARGET_ERESTARTSYS
) {
11368 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
11375 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11376 case TARGET_NR_set_tid_address
:
11377 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
11381 case TARGET_NR_tkill
:
11382 ret
= get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
11385 case TARGET_NR_tgkill
:
11386 ret
= get_errno(safe_tgkill((int)arg1
, (int)arg2
,
11387 target_to_host_signal(arg3
)));
11390 #ifdef TARGET_NR_set_robust_list
11391 case TARGET_NR_set_robust_list
:
11392 case TARGET_NR_get_robust_list
:
11393 /* The ABI for supporting robust futexes has userspace pass
11394 * the kernel a pointer to a linked list which is updated by
11395 * userspace after the syscall; the list is walked by the kernel
11396 * when the thread exits. Since the linked list in QEMU guest
11397 * memory isn't a valid linked list for the host and we have
11398 * no way to reliably intercept the thread-death event, we can't
11399 * support these. Silently return ENOSYS so that guest userspace
11400 * falls back to a non-robust futex implementation (which should
11401 * be OK except in the corner case of the guest crashing while
11402 * holding a mutex that is shared with another process via
11405 goto unimplemented_nowarn
;
11408 #if defined(TARGET_NR_utimensat)
11409 case TARGET_NR_utimensat
:
11411 struct timespec
*tsp
, ts
[2];
11415 target_to_host_timespec(ts
, arg3
);
11416 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
11420 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
11422 if (!(p
= lock_user_string(arg2
))) {
11423 ret
= -TARGET_EFAULT
;
11426 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
11427 unlock_user(p
, arg2
, 0);
11432 case TARGET_NR_futex
:
11433 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11435 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11436 case TARGET_NR_inotify_init
:
11437 ret
= get_errno(sys_inotify_init());
11440 #ifdef CONFIG_INOTIFY1
11441 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11442 case TARGET_NR_inotify_init1
:
11443 ret
= get_errno(sys_inotify_init1(arg1
));
11447 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11448 case TARGET_NR_inotify_add_watch
:
11449 p
= lock_user_string(arg2
);
11450 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
11451 unlock_user(p
, arg2
, 0);
11454 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11455 case TARGET_NR_inotify_rm_watch
:
11456 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
11460 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11461 case TARGET_NR_mq_open
:
11463 struct mq_attr posix_mq_attr
;
11466 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
11467 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
11470 p
= lock_user_string(arg1
- 1);
11474 ret
= get_errno(mq_open(p
, host_flags
, arg3
, &posix_mq_attr
));
11475 unlock_user (p
, arg1
, 0);
11479 case TARGET_NR_mq_unlink
:
11480 p
= lock_user_string(arg1
- 1);
11482 ret
= -TARGET_EFAULT
;
11485 ret
= get_errno(mq_unlink(p
));
11486 unlock_user (p
, arg1
, 0);
11489 case TARGET_NR_mq_timedsend
:
11491 struct timespec ts
;
11493 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11495 target_to_host_timespec(&ts
, arg5
);
11496 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
11497 host_to_target_timespec(arg5
, &ts
);
11499 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
11501 unlock_user (p
, arg2
, arg3
);
11505 case TARGET_NR_mq_timedreceive
:
11507 struct timespec ts
;
11510 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11512 target_to_host_timespec(&ts
, arg5
);
11513 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11515 host_to_target_timespec(arg5
, &ts
);
11517 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11520 unlock_user (p
, arg2
, arg3
);
11522 put_user_u32(prio
, arg4
);
11526 /* Not implemented for now... */
11527 /* case TARGET_NR_mq_notify: */
11530 case TARGET_NR_mq_getsetattr
:
11532 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
11535 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
11536 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
11539 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
11540 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
11547 #ifdef CONFIG_SPLICE
11548 #ifdef TARGET_NR_tee
11549 case TARGET_NR_tee
:
11551 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
11555 #ifdef TARGET_NR_splice
11556 case TARGET_NR_splice
:
11558 loff_t loff_in
, loff_out
;
11559 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
11561 if (get_user_u64(loff_in
, arg2
)) {
11564 ploff_in
= &loff_in
;
11567 if (get_user_u64(loff_out
, arg4
)) {
11570 ploff_out
= &loff_out
;
11572 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
11574 if (put_user_u64(loff_in
, arg2
)) {
11579 if (put_user_u64(loff_out
, arg4
)) {
11586 #ifdef TARGET_NR_vmsplice
11587 case TARGET_NR_vmsplice
:
11589 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
11591 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
11592 unlock_iovec(vec
, arg2
, arg3
, 0);
11594 ret
= -host_to_target_errno(errno
);
11599 #endif /* CONFIG_SPLICE */
11600 #ifdef CONFIG_EVENTFD
11601 #if defined(TARGET_NR_eventfd)
11602 case TARGET_NR_eventfd
:
11603 ret
= get_errno(eventfd(arg1
, 0));
11604 fd_trans_unregister(ret
);
11607 #if defined(TARGET_NR_eventfd2)
11608 case TARGET_NR_eventfd2
:
11610 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
11611 if (arg2
& TARGET_O_NONBLOCK
) {
11612 host_flags
|= O_NONBLOCK
;
11614 if (arg2
& TARGET_O_CLOEXEC
) {
11615 host_flags
|= O_CLOEXEC
;
11617 ret
= get_errno(eventfd(arg1
, host_flags
));
11618 fd_trans_unregister(ret
);
11622 #endif /* CONFIG_EVENTFD */
11623 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11624 case TARGET_NR_fallocate
:
11625 #if TARGET_ABI_BITS == 32
11626 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
11627 target_offset64(arg5
, arg6
)));
11629 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
11633 #if defined(CONFIG_SYNC_FILE_RANGE)
11634 #if defined(TARGET_NR_sync_file_range)
11635 case TARGET_NR_sync_file_range
:
11636 #if TARGET_ABI_BITS == 32
11637 #if defined(TARGET_MIPS)
11638 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11639 target_offset64(arg5
, arg6
), arg7
));
11641 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
11642 target_offset64(arg4
, arg5
), arg6
));
11643 #endif /* !TARGET_MIPS */
11645 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
11649 #if defined(TARGET_NR_sync_file_range2)
11650 case TARGET_NR_sync_file_range2
:
11651 /* This is like sync_file_range but the arguments are reordered */
11652 #if TARGET_ABI_BITS == 32
11653 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11654 target_offset64(arg5
, arg6
), arg2
));
11656 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
11661 #if defined(TARGET_NR_signalfd4)
11662 case TARGET_NR_signalfd4
:
11663 ret
= do_signalfd4(arg1
, arg2
, arg4
);
11666 #if defined(TARGET_NR_signalfd)
11667 case TARGET_NR_signalfd
:
11668 ret
= do_signalfd4(arg1
, arg2
, 0);
11671 #if defined(CONFIG_EPOLL)
11672 #if defined(TARGET_NR_epoll_create)
11673 case TARGET_NR_epoll_create
:
11674 ret
= get_errno(epoll_create(arg1
));
11677 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11678 case TARGET_NR_epoll_create1
:
11679 ret
= get_errno(epoll_create1(arg1
));
11682 #if defined(TARGET_NR_epoll_ctl)
11683 case TARGET_NR_epoll_ctl
:
11685 struct epoll_event ep
;
11686 struct epoll_event
*epp
= 0;
11688 struct target_epoll_event
*target_ep
;
11689 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
11692 ep
.events
= tswap32(target_ep
->events
);
11693 /* The epoll_data_t union is just opaque data to the kernel,
11694 * so we transfer all 64 bits across and need not worry what
11695 * actual data type it is.
11697 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
11698 unlock_user_struct(target_ep
, arg4
, 0);
11701 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
11706 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11707 #if defined(TARGET_NR_epoll_wait)
11708 case TARGET_NR_epoll_wait
:
11710 #if defined(TARGET_NR_epoll_pwait)
11711 case TARGET_NR_epoll_pwait
:
11714 struct target_epoll_event
*target_ep
;
11715 struct epoll_event
*ep
;
11717 int maxevents
= arg3
;
11718 int timeout
= arg4
;
11720 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
11721 ret
= -TARGET_EINVAL
;
11725 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
11726 maxevents
* sizeof(struct target_epoll_event
), 1);
11731 ep
= alloca(maxevents
* sizeof(struct epoll_event
));
11734 #if defined(TARGET_NR_epoll_pwait)
11735 case TARGET_NR_epoll_pwait
:
11737 target_sigset_t
*target_set
;
11738 sigset_t _set
, *set
= &_set
;
11741 if (arg6
!= sizeof(target_sigset_t
)) {
11742 ret
= -TARGET_EINVAL
;
11746 target_set
= lock_user(VERIFY_READ
, arg5
,
11747 sizeof(target_sigset_t
), 1);
11749 unlock_user(target_ep
, arg2
, 0);
11752 target_to_host_sigset(set
, target_set
);
11753 unlock_user(target_set
, arg5
, 0);
11758 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
11759 set
, SIGSET_T_SIZE
));
11763 #if defined(TARGET_NR_epoll_wait)
11764 case TARGET_NR_epoll_wait
:
11765 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
11770 ret
= -TARGET_ENOSYS
;
11772 if (!is_error(ret
)) {
11774 for (i
= 0; i
< ret
; i
++) {
11775 target_ep
[i
].events
= tswap32(ep
[i
].events
);
11776 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
11779 unlock_user(target_ep
, arg2
, ret
* sizeof(struct target_epoll_event
));
11784 #ifdef TARGET_NR_prlimit64
11785 case TARGET_NR_prlimit64
:
11787 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11788 struct target_rlimit64
*target_rnew
, *target_rold
;
11789 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
11790 int resource
= target_to_host_resource(arg2
);
11792 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
11795 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
11796 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
11797 unlock_user_struct(target_rnew
, arg3
, 0);
11801 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
11802 if (!is_error(ret
) && arg4
) {
11803 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
11806 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
11807 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
11808 unlock_user_struct(target_rold
, arg4
, 1);
11813 #ifdef TARGET_NR_gethostname
11814 case TARGET_NR_gethostname
:
11816 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
11818 ret
= get_errno(gethostname(name
, arg2
));
11819 unlock_user(name
, arg1
, arg2
);
11821 ret
= -TARGET_EFAULT
;
11826 #ifdef TARGET_NR_atomic_cmpxchg_32
11827 case TARGET_NR_atomic_cmpxchg_32
:
11829 /* should use start_exclusive from main.c */
11830 abi_ulong mem_value
;
11831 if (get_user_u32(mem_value
, arg6
)) {
11832 target_siginfo_t info
;
11833 info
.si_signo
= SIGSEGV
;
11835 info
.si_code
= TARGET_SEGV_MAPERR
;
11836 info
._sifields
._sigfault
._addr
= arg6
;
11837 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
11838 QEMU_SI_FAULT
, &info
);
11842 if (mem_value
== arg2
)
11843 put_user_u32(arg1
, arg6
);
11848 #ifdef TARGET_NR_atomic_barrier
11849 case TARGET_NR_atomic_barrier
:
11851 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
11857 #ifdef TARGET_NR_timer_create
11858 case TARGET_NR_timer_create
:
11860 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11862 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
11865 int timer_index
= next_free_host_timer();
11867 if (timer_index
< 0) {
11868 ret
= -TARGET_EAGAIN
;
11870 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
11873 phost_sevp
= &host_sevp
;
11874 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
11880 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
11884 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
11893 #ifdef TARGET_NR_timer_settime
11894 case TARGET_NR_timer_settime
:
11896 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11897 * struct itimerspec * old_value */
11898 target_timer_t timerid
= get_timer_id(arg1
);
11902 } else if (arg3
== 0) {
11903 ret
= -TARGET_EINVAL
;
11905 timer_t htimer
= g_posix_timers
[timerid
];
11906 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
11908 target_to_host_itimerspec(&hspec_new
, arg3
);
11910 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
11911 host_to_target_itimerspec(arg2
, &hspec_old
);
11917 #ifdef TARGET_NR_timer_gettime
11918 case TARGET_NR_timer_gettime
:
11920 /* args: timer_t timerid, struct itimerspec *curr_value */
11921 target_timer_t timerid
= get_timer_id(arg1
);
11925 } else if (!arg2
) {
11926 ret
= -TARGET_EFAULT
;
11928 timer_t htimer
= g_posix_timers
[timerid
];
11929 struct itimerspec hspec
;
11930 ret
= get_errno(timer_gettime(htimer
, &hspec
));
11932 if (host_to_target_itimerspec(arg2
, &hspec
)) {
11933 ret
= -TARGET_EFAULT
;
11940 #ifdef TARGET_NR_timer_getoverrun
11941 case TARGET_NR_timer_getoverrun
:
11943 /* args: timer_t timerid */
11944 target_timer_t timerid
= get_timer_id(arg1
);
11949 timer_t htimer
= g_posix_timers
[timerid
];
11950 ret
= get_errno(timer_getoverrun(htimer
));
11952 fd_trans_unregister(ret
);
11957 #ifdef TARGET_NR_timer_delete
11958 case TARGET_NR_timer_delete
:
11960 /* args: timer_t timerid */
11961 target_timer_t timerid
= get_timer_id(arg1
);
11966 timer_t htimer
= g_posix_timers
[timerid
];
11967 ret
= get_errno(timer_delete(htimer
));
11968 g_posix_timers
[timerid
] = 0;
11974 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11975 case TARGET_NR_timerfd_create
:
11976 ret
= get_errno(timerfd_create(arg1
,
11977 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
11981 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11982 case TARGET_NR_timerfd_gettime
:
11984 struct itimerspec its_curr
;
11986 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
11988 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
11995 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11996 case TARGET_NR_timerfd_settime
:
11998 struct itimerspec its_new
, its_old
, *p_new
;
12001 if (target_to_host_itimerspec(&its_new
, arg3
)) {
12009 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
12011 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
12018 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12019 case TARGET_NR_ioprio_get
:
12020 ret
= get_errno(ioprio_get(arg1
, arg2
));
12024 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12025 case TARGET_NR_ioprio_set
:
12026 ret
= get_errno(ioprio_set(arg1
, arg2
, arg3
));
12030 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12031 case TARGET_NR_setns
:
12032 ret
= get_errno(setns(arg1
, arg2
));
12035 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12036 case TARGET_NR_unshare
:
12037 ret
= get_errno(unshare(arg1
));
12043 gemu_log("qemu: Unsupported syscall: %d\n", num
);
12044 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
12045 unimplemented_nowarn
:
12047 ret
= -TARGET_ENOSYS
;
12052 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
12055 print_syscall_ret(num
, ret
);
12056 trace_guest_user_syscall_ret(cpu
, num
, ret
);
12059 ret
= -TARGET_EFAULT
;