4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
36 #include <linux/capability.h>
38 #include <sys/timex.h>
/* Prototype for the clone variant that takes a stack base plus an explicit
 * stack size rather than a stack top pointer -- presumably for hosts (e.g.
 * ia64) where libc does not declare it; TODO confirm the guarding #ifdef
 * that was lost in extraction. */
int __clone2(int (*fn)(void *), void *child_stack_base,
             size_t stack_size, int flags, void *arg, ...);
43 #include <sys/socket.h>
47 #include <sys/times.h>
50 #include <sys/statfs.h>
52 #include <sys/sysinfo.h>
53 #include <sys/signalfd.h>
54 //#include <sys/user.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <linux/wireless.h>
58 #include <linux/icmp.h>
59 #include "qemu-common.h"
61 #include <sys/timerfd.h>
67 #include <sys/eventfd.h>
70 #include <sys/epoll.h>
73 #include "qemu/xattr.h"
75 #ifdef CONFIG_SENDFILE
76 #include <sys/sendfile.h>
79 #define termios host_termios
80 #define winsize host_winsize
81 #define termio host_termio
82 #define sgttyb host_sgttyb /* same as target */
83 #define tchars host_tchars /* same as target */
84 #define ltchars host_ltchars /* same as target */
86 #include <linux/termios.h>
87 #include <linux/unistd.h>
88 #include <linux/cdrom.h>
89 #include <linux/hdreg.h>
90 #include <linux/soundcard.h>
92 #include <linux/mtio.h>
94 #if defined(CONFIG_FIEMAP)
95 #include <linux/fiemap.h>
99 #include <linux/dm-ioctl.h>
100 #include <linux/reboot.h>
101 #include <linux/route.h>
102 #include <linux/filter.h>
103 #include <linux/blkpg.h>
104 #include <netpacket/packet.h>
105 #include <linux/netlink.h>
106 #ifdef CONFIG_RTNETLINK
107 #include <linux/rtnetlink.h>
108 #include <linux/if_bridge.h>
110 #include <linux/audit.h>
111 #include "linux_loop.h"
/* Not provided by older host kernel headers, so define it ourselves;
 * guard against newer headers that do provide it. */
#ifndef CLONE_IO
#define CLONE_IO 0x80000000 /* Clone io context */
#endif
/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */
/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)
/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS               \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                                     \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |                               \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
/* Mask of flag bits that are not acceptable in a fork-style clone:
 * everything except the exit-signal bits and the flags we can either
 * implement ourselves or safely ignore. */
#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
/* Mask of flag bits that are not acceptable in a pthread_create-style
 * clone: everything except the exit-signal bits, the mandatory thread
 * flags, and the flags we can implement or ignore. */
#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))
/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */
/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS
//#include <linux/msdos_fs.h>
/* Copied from linux/msdos_fs.h, which we cannot include directly:
 * VFAT directory-read ioctls returning both long and short names. */
#define VFAT_IOCTL_READDIR_BOTH  _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
185 #define _syscall0(type,name) \
186 static type name (void) \
188 return syscall(__NR_##name); \
191 #define _syscall1(type,name,type1,arg1) \
192 static type name (type1 arg1) \
194 return syscall(__NR_##name, arg1); \
197 #define _syscall2(type,name,type1,arg1,type2,arg2) \
198 static type name (type1 arg1,type2 arg2) \
200 return syscall(__NR_##name, arg1, arg2); \
203 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
204 static type name (type1 arg1,type2 arg2,type3 arg3) \
206 return syscall(__NR_##name, arg1, arg2, arg3); \
209 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
210 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
212 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
215 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
217 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
219 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
223 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
224 type5,arg5,type6,arg6) \
225 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
228 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
/* Aliases so the _syscallN() wrappers can generate sys_xxx() functions
 * (via __NR_##name token pasting) without colliding with libc wrappers
 * of the same name. */
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
244 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
246 #define __NR__llseek __NR_lseek
249 /* Newer kernel ports have llseek() instead of _llseek() */
250 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
251 #define TARGET_NR__llseek TARGET_NR_llseek
255 _syscall0(int, gettid
)
257 /* This is a replacement for the host gettid() and must return a host
259 static int gettid(void) {
263 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
264 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
266 #if !defined(__NR_getdents) || \
267 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
268 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
270 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
271 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
272 loff_t
*, res
, uint
, wh
);
274 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
275 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
276 #ifdef __NR_exit_group
277 _syscall1(int,exit_group
,int,error_code
)
279 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
280 _syscall1(int,set_tid_address
,int *,tidptr
)
282 #if defined(TARGET_NR_futex) && defined(__NR_futex)
283 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
284 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
286 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
287 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
288 unsigned long *, user_mask_ptr
);
289 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
290 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
291 unsigned long *, user_mask_ptr
);
292 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
294 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
295 struct __user_cap_data_struct
*, data
);
296 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
297 struct __user_cap_data_struct
*, data
);
298 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
299 _syscall2(int, ioprio_get
, int, which
, int, who
)
301 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
302 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
304 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
305 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
308 static bitmask_transtbl fcntl_flags_tbl
[] = {
309 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
310 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
311 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
312 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
313 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
314 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
315 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
316 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
317 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
318 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
319 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
320 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
321 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
322 #if defined(O_DIRECT)
323 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
325 #if defined(O_NOATIME)
326 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
328 #if defined(O_CLOEXEC)
329 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
332 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
334 /* Don't terminate the list prematurely on 64-bit host+guest. */
335 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
336 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
343 QEMU_IFLA_BR_FORWARD_DELAY
,
344 QEMU_IFLA_BR_HELLO_TIME
,
345 QEMU_IFLA_BR_MAX_AGE
,
346 QEMU_IFLA_BR_AGEING_TIME
,
347 QEMU_IFLA_BR_STP_STATE
,
348 QEMU_IFLA_BR_PRIORITY
,
349 QEMU_IFLA_BR_VLAN_FILTERING
,
350 QEMU_IFLA_BR_VLAN_PROTOCOL
,
351 QEMU_IFLA_BR_GROUP_FWD_MASK
,
352 QEMU_IFLA_BR_ROOT_ID
,
353 QEMU_IFLA_BR_BRIDGE_ID
,
354 QEMU_IFLA_BR_ROOT_PORT
,
355 QEMU_IFLA_BR_ROOT_PATH_COST
,
356 QEMU_IFLA_BR_TOPOLOGY_CHANGE
,
357 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED
,
358 QEMU_IFLA_BR_HELLO_TIMER
,
359 QEMU_IFLA_BR_TCN_TIMER
,
360 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER
,
361 QEMU_IFLA_BR_GC_TIMER
,
362 QEMU_IFLA_BR_GROUP_ADDR
,
363 QEMU_IFLA_BR_FDB_FLUSH
,
364 QEMU_IFLA_BR_MCAST_ROUTER
,
365 QEMU_IFLA_BR_MCAST_SNOOPING
,
366 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR
,
367 QEMU_IFLA_BR_MCAST_QUERIER
,
368 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY
,
369 QEMU_IFLA_BR_MCAST_HASH_MAX
,
370 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT
,
371 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT
,
372 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL
,
373 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL
,
374 QEMU_IFLA_BR_MCAST_QUERIER_INTVL
,
375 QEMU_IFLA_BR_MCAST_QUERY_INTVL
,
376 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
,
377 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL
,
378 QEMU_IFLA_BR_NF_CALL_IPTABLES
,
379 QEMU_IFLA_BR_NF_CALL_IP6TABLES
,
380 QEMU_IFLA_BR_NF_CALL_ARPTABLES
,
381 QEMU_IFLA_BR_VLAN_DEFAULT_PVID
,
383 QEMU_IFLA_BR_VLAN_STATS_ENABLED
,
384 QEMU_IFLA_BR_MCAST_STATS_ENABLED
,
408 QEMU_IFLA_NET_NS_PID
,
411 QEMU_IFLA_VFINFO_LIST
,
419 QEMU_IFLA_PROMISCUITY
,
420 QEMU_IFLA_NUM_TX_QUEUES
,
421 QEMU_IFLA_NUM_RX_QUEUES
,
423 QEMU_IFLA_PHYS_PORT_ID
,
424 QEMU_IFLA_CARRIER_CHANGES
,
425 QEMU_IFLA_PHYS_SWITCH_ID
,
426 QEMU_IFLA_LINK_NETNSID
,
427 QEMU_IFLA_PHYS_PORT_NAME
,
428 QEMU_IFLA_PROTO_DOWN
,
429 QEMU_IFLA_GSO_MAX_SEGS
,
430 QEMU_IFLA_GSO_MAX_SIZE
,
437 QEMU_IFLA_BRPORT_UNSPEC
,
438 QEMU_IFLA_BRPORT_STATE
,
439 QEMU_IFLA_BRPORT_PRIORITY
,
440 QEMU_IFLA_BRPORT_COST
,
441 QEMU_IFLA_BRPORT_MODE
,
442 QEMU_IFLA_BRPORT_GUARD
,
443 QEMU_IFLA_BRPORT_PROTECT
,
444 QEMU_IFLA_BRPORT_FAST_LEAVE
,
445 QEMU_IFLA_BRPORT_LEARNING
,
446 QEMU_IFLA_BRPORT_UNICAST_FLOOD
,
447 QEMU_IFLA_BRPORT_PROXYARP
,
448 QEMU_IFLA_BRPORT_LEARNING_SYNC
,
449 QEMU_IFLA_BRPORT_PROXYARP_WIFI
,
450 QEMU_IFLA_BRPORT_ROOT_ID
,
451 QEMU_IFLA_BRPORT_BRIDGE_ID
,
452 QEMU_IFLA_BRPORT_DESIGNATED_PORT
,
453 QEMU_IFLA_BRPORT_DESIGNATED_COST
,
456 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
,
457 QEMU_IFLA_BRPORT_CONFIG_PENDING
,
458 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER
,
459 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER
,
460 QEMU_IFLA_BRPORT_HOLD_TIMER
,
461 QEMU_IFLA_BRPORT_FLUSH
,
462 QEMU_IFLA_BRPORT_MULTICAST_ROUTER
,
463 QEMU_IFLA_BRPORT_PAD
,
464 QEMU___IFLA_BRPORT_MAX
468 QEMU_IFLA_INFO_UNSPEC
,
471 QEMU_IFLA_INFO_XSTATS
,
472 QEMU_IFLA_INFO_SLAVE_KIND
,
473 QEMU_IFLA_INFO_SLAVE_DATA
,
474 QEMU___IFLA_INFO_MAX
,
478 QEMU_IFLA_INET_UNSPEC
,
480 QEMU___IFLA_INET_MAX
,
484 QEMU_IFLA_INET6_UNSPEC
,
485 QEMU_IFLA_INET6_FLAGS
,
486 QEMU_IFLA_INET6_CONF
,
487 QEMU_IFLA_INET6_STATS
,
488 QEMU_IFLA_INET6_MCAST
,
489 QEMU_IFLA_INET6_CACHEINFO
,
490 QEMU_IFLA_INET6_ICMP6STATS
,
491 QEMU_IFLA_INET6_TOKEN
,
492 QEMU_IFLA_INET6_ADDR_GEN_MODE
,
493 QEMU___IFLA_INET6_MAX
496 typedef abi_long (*TargetFdDataFunc
)(void *, size_t);
497 typedef abi_long (*TargetFdAddrFunc
)(void *, abi_ulong
, socklen_t
);
498 typedef struct TargetFdTrans
{
499 TargetFdDataFunc host_to_target_data
;
500 TargetFdDataFunc target_to_host_data
;
501 TargetFdAddrFunc target_to_host_addr
;
504 static TargetFdTrans
**target_fd_trans
;
506 static unsigned int target_fd_max
;
508 static TargetFdDataFunc
fd_trans_target_to_host_data(int fd
)
510 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
511 return target_fd_trans
[fd
]->target_to_host_data
;
516 static TargetFdDataFunc
fd_trans_host_to_target_data(int fd
)
518 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
519 return target_fd_trans
[fd
]->host_to_target_data
;
524 static TargetFdAddrFunc
fd_trans_target_to_host_addr(int fd
)
526 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
527 return target_fd_trans
[fd
]->target_to_host_addr
;
532 static void fd_trans_register(int fd
, TargetFdTrans
*trans
)
536 if (fd
>= target_fd_max
) {
537 oldmax
= target_fd_max
;
538 target_fd_max
= ((fd
>> 6) + 1) << 6; /* by slice of 64 entries */
539 target_fd_trans
= g_renew(TargetFdTrans
*,
540 target_fd_trans
, target_fd_max
);
541 memset((void *)(target_fd_trans
+ oldmax
), 0,
542 (target_fd_max
- oldmax
) * sizeof(TargetFdTrans
*));
544 target_fd_trans
[fd
] = trans
;
547 static void fd_trans_unregister(int fd
)
549 if (fd
>= 0 && fd
< target_fd_max
) {
550 target_fd_trans
[fd
] = NULL
;
554 static void fd_trans_dup(int oldfd
, int newfd
)
556 fd_trans_unregister(newfd
);
557 if (oldfd
< target_fd_max
&& target_fd_trans
[oldfd
]) {
558 fd_trans_register(newfd
, target_fd_trans
[oldfd
]);
562 static int sys_getcwd1(char *buf
, size_t size
)
564 if (getcwd(buf
, size
) == NULL
) {
565 /* getcwd() sets errno */
568 return strlen(buf
)+1;
571 #ifdef TARGET_NR_utimensat
572 #if defined(__NR_utimensat)
573 #define __NR_sys_utimensat __NR_utimensat
574 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
575 const struct timespec
*,tsp
,int,flags
)
577 static int sys_utimensat(int dirfd
, const char *pathname
,
578 const struct timespec times
[2], int flags
)
584 #endif /* TARGET_NR_utimensat */
586 #ifdef CONFIG_INOTIFY
587 #include <sys/inotify.h>
589 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
590 static int sys_inotify_init(void)
592 return (inotify_init());
595 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
596 static int sys_inotify_add_watch(int fd
,const char *pathname
, int32_t mask
)
598 return (inotify_add_watch(fd
, pathname
, mask
));
601 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
602 static int sys_inotify_rm_watch(int fd
, int32_t wd
)
604 return (inotify_rm_watch(fd
, wd
));
607 #ifdef CONFIG_INOTIFY1
608 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
609 static int sys_inotify_init1(int flags
)
611 return (inotify_init1(flags
));
616 /* Userspace can usually survive runtime without inotify */
617 #undef TARGET_NR_inotify_init
618 #undef TARGET_NR_inotify_init1
619 #undef TARGET_NR_inotify_add_watch
620 #undef TARGET_NR_inotify_rm_watch
621 #endif /* CONFIG_INOTIFY */
623 #if defined(TARGET_NR_prlimit64)
624 #ifndef __NR_prlimit64
625 # define __NR_prlimit64 -1
627 #define __NR_sys_prlimit64 __NR_prlimit64
628 /* The glibc rlimit structure may not be that used by the underlying syscall */
629 struct host_rlimit64
{
633 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
634 const struct host_rlimit64
*, new_limit
,
635 struct host_rlimit64
*, old_limit
)
639 #if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
641 static timer_t g_posix_timers
[32] = { 0, } ;
643 static inline int next_free_host_timer(void)
646 /* FIXME: Does finding the next free slot require a lock? */
647 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
648 if (g_posix_timers
[k
] == 0) {
649 g_posix_timers
[k
] = (timer_t
) 1;
/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
659 static inline int regpairs_aligned(void *cpu_env
) {
660 return ((((CPUARMState
*)cpu_env
)->eabi
) == 1) ;
662 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
663 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
664 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers, which translates to the same as ARM/MIPS. */
668 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
670 static inline int regpairs_aligned(void *cpu_env
) { return 0; }
673 #define ERRNO_TABLE_SIZE 1200
675 /* target_to_host_errno_table[] is initialized from
676 * host_to_target_errno_table[] in syscall_init(). */
677 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
681 * This list is the union of errno values overridden in asm-<arch>/errno.h
682 * minus the errnos that are not actually generic to all archs.
684 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
685 [EAGAIN
] = TARGET_EAGAIN
,
686 [EIDRM
] = TARGET_EIDRM
,
687 [ECHRNG
] = TARGET_ECHRNG
,
688 [EL2NSYNC
] = TARGET_EL2NSYNC
,
689 [EL3HLT
] = TARGET_EL3HLT
,
690 [EL3RST
] = TARGET_EL3RST
,
691 [ELNRNG
] = TARGET_ELNRNG
,
692 [EUNATCH
] = TARGET_EUNATCH
,
693 [ENOCSI
] = TARGET_ENOCSI
,
694 [EL2HLT
] = TARGET_EL2HLT
,
695 [EDEADLK
] = TARGET_EDEADLK
,
696 [ENOLCK
] = TARGET_ENOLCK
,
697 [EBADE
] = TARGET_EBADE
,
698 [EBADR
] = TARGET_EBADR
,
699 [EXFULL
] = TARGET_EXFULL
,
700 [ENOANO
] = TARGET_ENOANO
,
701 [EBADRQC
] = TARGET_EBADRQC
,
702 [EBADSLT
] = TARGET_EBADSLT
,
703 [EBFONT
] = TARGET_EBFONT
,
704 [ENOSTR
] = TARGET_ENOSTR
,
705 [ENODATA
] = TARGET_ENODATA
,
706 [ETIME
] = TARGET_ETIME
,
707 [ENOSR
] = TARGET_ENOSR
,
708 [ENONET
] = TARGET_ENONET
,
709 [ENOPKG
] = TARGET_ENOPKG
,
710 [EREMOTE
] = TARGET_EREMOTE
,
711 [ENOLINK
] = TARGET_ENOLINK
,
712 [EADV
] = TARGET_EADV
,
713 [ESRMNT
] = TARGET_ESRMNT
,
714 [ECOMM
] = TARGET_ECOMM
,
715 [EPROTO
] = TARGET_EPROTO
,
716 [EDOTDOT
] = TARGET_EDOTDOT
,
717 [EMULTIHOP
] = TARGET_EMULTIHOP
,
718 [EBADMSG
] = TARGET_EBADMSG
,
719 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
720 [EOVERFLOW
] = TARGET_EOVERFLOW
,
721 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
722 [EBADFD
] = TARGET_EBADFD
,
723 [EREMCHG
] = TARGET_EREMCHG
,
724 [ELIBACC
] = TARGET_ELIBACC
,
725 [ELIBBAD
] = TARGET_ELIBBAD
,
726 [ELIBSCN
] = TARGET_ELIBSCN
,
727 [ELIBMAX
] = TARGET_ELIBMAX
,
728 [ELIBEXEC
] = TARGET_ELIBEXEC
,
729 [EILSEQ
] = TARGET_EILSEQ
,
730 [ENOSYS
] = TARGET_ENOSYS
,
731 [ELOOP
] = TARGET_ELOOP
,
732 [ERESTART
] = TARGET_ERESTART
,
733 [ESTRPIPE
] = TARGET_ESTRPIPE
,
734 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
735 [EUSERS
] = TARGET_EUSERS
,
736 [ENOTSOCK
] = TARGET_ENOTSOCK
,
737 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
738 [EMSGSIZE
] = TARGET_EMSGSIZE
,
739 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
740 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
741 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
742 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
743 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
744 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
745 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
746 [EADDRINUSE
] = TARGET_EADDRINUSE
,
747 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
748 [ENETDOWN
] = TARGET_ENETDOWN
,
749 [ENETUNREACH
] = TARGET_ENETUNREACH
,
750 [ENETRESET
] = TARGET_ENETRESET
,
751 [ECONNABORTED
] = TARGET_ECONNABORTED
,
752 [ECONNRESET
] = TARGET_ECONNRESET
,
753 [ENOBUFS
] = TARGET_ENOBUFS
,
754 [EISCONN
] = TARGET_EISCONN
,
755 [ENOTCONN
] = TARGET_ENOTCONN
,
756 [EUCLEAN
] = TARGET_EUCLEAN
,
757 [ENOTNAM
] = TARGET_ENOTNAM
,
758 [ENAVAIL
] = TARGET_ENAVAIL
,
759 [EISNAM
] = TARGET_EISNAM
,
760 [EREMOTEIO
] = TARGET_EREMOTEIO
,
761 [EDQUOT
] = TARGET_EDQUOT
,
762 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
763 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
764 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
765 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
766 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
767 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
768 [EALREADY
] = TARGET_EALREADY
,
769 [EINPROGRESS
] = TARGET_EINPROGRESS
,
770 [ESTALE
] = TARGET_ESTALE
,
771 [ECANCELED
] = TARGET_ECANCELED
,
772 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
773 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
775 [ENOKEY
] = TARGET_ENOKEY
,
778 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
781 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
784 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
787 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
789 #ifdef ENOTRECOVERABLE
790 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
794 static inline int host_to_target_errno(int err
)
796 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
797 host_to_target_errno_table
[err
]) {
798 return host_to_target_errno_table
[err
];
803 static inline int target_to_host_errno(int err
)
805 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
806 target_to_host_errno_table
[err
]) {
807 return target_to_host_errno_table
[err
];
812 static inline abi_long
get_errno(abi_long ret
)
815 return -host_to_target_errno(errno
);
820 static inline int is_error(abi_long ret
)
822 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
825 const char *target_strerror(int err
)
827 if (err
== TARGET_ERESTARTSYS
) {
828 return "To be restarted";
830 if (err
== TARGET_QEMU_ESIGRETURN
) {
831 return "Successful exit from sigreturn";
834 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
837 return strerror(target_to_host_errno(err
));
840 #define safe_syscall0(type, name) \
841 static type safe_##name(void) \
843 return safe_syscall(__NR_##name); \
846 #define safe_syscall1(type, name, type1, arg1) \
847 static type safe_##name(type1 arg1) \
849 return safe_syscall(__NR_##name, arg1); \
852 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
853 static type safe_##name(type1 arg1, type2 arg2) \
855 return safe_syscall(__NR_##name, arg1, arg2); \
858 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
859 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
861 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
864 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
866 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
868 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
871 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
872 type4, arg4, type5, arg5) \
873 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
876 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
879 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
880 type4, arg4, type5, arg5, type6, arg6) \
881 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
882 type5 arg5, type6 arg6) \
884 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
887 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
888 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
889 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
890 int, flags
, mode_t
, mode
)
891 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
892 struct rusage
*, rusage
)
893 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
894 int, options
, struct rusage
*, rusage
)
895 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
896 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
897 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
898 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
899 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
901 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
902 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
904 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
905 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
906 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
907 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
908 safe_syscall2(int, tkill
, int, tid
, int, sig
)
909 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
910 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
911 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
912 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
914 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
915 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
916 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
917 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
918 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
919 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
920 safe_syscall2(int, flock
, int, fd
, int, operation
)
921 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
922 const struct timespec
*, uts
, size_t, sigsetsize
)
923 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
925 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
926 struct timespec
*, rem
)
927 #ifdef TARGET_NR_clock_nanosleep
928 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
929 const struct timespec
*, req
, struct timespec
*, rem
)
932 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
934 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
935 long, msgtype
, int, flags
)
936 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
937 unsigned, nsops
, const struct timespec
*, timeout
)
/* This host kernel architecture uses a single ipc syscall; fake up
 * wrappers for the sub-operations to hide this implementation detail.
 * Annoyingly we can't include linux/ipc.h to get the constant definitions
 * for the call parameter because some structs in there conflict with the
 * sys/ipc.h ones. So we just define them here, and rely on them being
 * the same for all host architectures.
 */
/* Sub-operation numbers for the multiplexed ipc syscall. Q_MSGSND and
 * Q_MSGRCV are used by safe_msgsnd()/safe_msgrcv() below but their
 * definitions were dropped; values follow linux/ipc.h (MSGSND = 11,
 * MSGRCV = 12). */
#define Q_SEMTIMEDOP 4
#define Q_MSGSND 11
#define Q_MSGRCV 12

#define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
951 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
952 void *, ptr
, long, fifth
)
953 static int safe_msgsnd(int msgid
, const void *msgp
, size_t sz
, int flags
)
955 return safe_ipc(Q_IPCCALL(0, Q_MSGSND
), msgid
, sz
, flags
, (void *)msgp
, 0);
957 static int safe_msgrcv(int msgid
, void *msgp
, size_t sz
, long type
, int flags
)
959 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV
), msgid
, sz
, flags
, msgp
, type
);
961 static int safe_semtimedop(int semid
, struct sembuf
*tsops
, unsigned nsops
,
962 const struct timespec
*timeout
)
964 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP
), semid
, nsops
, 0, tsops
,
968 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
969 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
970 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
971 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
972 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the ioctl syscall.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
985 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
987 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
990 static inline int host_to_target_sock_type(int host_type
)
994 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
996 target_type
= TARGET_SOCK_DGRAM
;
999 target_type
= TARGET_SOCK_STREAM
;
1002 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
1006 #if defined(SOCK_CLOEXEC)
1007 if (host_type
& SOCK_CLOEXEC
) {
1008 target_type
|= TARGET_SOCK_CLOEXEC
;
1012 #if defined(SOCK_NONBLOCK)
1013 if (host_type
& SOCK_NONBLOCK
) {
1014 target_type
|= TARGET_SOCK_NONBLOCK
;
1021 static abi_ulong target_brk
;
1022 static abi_ulong target_original_brk
;
1023 static abi_ulong brk_page
;
1025 void target_set_brk(abi_ulong new_brk
)
1027 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
1028 brk_page
= HOST_PAGE_ALIGN(target_brk
);
/* Debug trace for do_brk(); expands to nothing unless the fprintf
 * variant below is re-enabled. */
//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)
1034 /* do_brk() must return target values and target errnos. */
1035 abi_long
do_brk(abi_ulong new_brk
)
1037 abi_long mapped_addr
;
1038 abi_ulong new_alloc_size
;
1040 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
1043 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
1046 if (new_brk
< target_original_brk
) {
1047 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
1052 /* If the new brk is less than the highest page reserved to the
1053 * target heap allocation, set it and we're almost done... */
1054 if (new_brk
<= brk_page
) {
1055 /* Heap contents are initialized to zero, as for anonymous
1057 if (new_brk
> target_brk
) {
1058 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
1060 target_brk
= new_brk
;
1061 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
1065 /* We need to allocate more memory after the brk... Note that
1066 * we don't use MAP_FIXED because that will map over the top of
1067 * any existing mapping (like the one with the host libc or qemu
1068 * itself); instead we treat "mapped but at wrong address" as
1069 * a failure and unmap again.
1071 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
1072 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
1073 PROT_READ
|PROT_WRITE
,
1074 MAP_ANON
|MAP_PRIVATE
, 0, 0));
1076 if (mapped_addr
== brk_page
) {
1077 /* Heap contents are initialized to zero, as for anonymous
1078 * mapped pages. Technically the new pages are already
1079 * initialized to zero since they *are* anonymous mapped
1080 * pages, however we have to take care with the contents that
1081 * come from the remaining part of the previous page: it may
1082 * contains garbage data due to a previous heap usage (grown
1083 * then shrunken). */
1084 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
1086 target_brk
= new_brk
;
1087 brk_page
= HOST_PAGE_ALIGN(target_brk
);
1088 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
1091 } else if (mapped_addr
!= -1) {
1092 /* Mapped but at wrong address, meaning there wasn't actually
1093 * enough space for this brk.
1095 target_munmap(mapped_addr
, new_alloc_size
);
1097 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
1100 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
1103 #if defined(TARGET_ALPHA)
1104 /* We (partially) emulate OSF/1 on Alpha, which requires we
1105 return a proper errno, not an unchanged brk value. */
1106 return -TARGET_ENOMEM
;
1108 /* For everything else, return the previous break. */
1112 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
1113 abi_ulong target_fds_addr
,
1117 abi_ulong b
, *target_fds
;
1119 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1120 if (!(target_fds
= lock_user(VERIFY_READ
,
1122 sizeof(abi_ulong
) * nw
,
1124 return -TARGET_EFAULT
;
1128 for (i
= 0; i
< nw
; i
++) {
1129 /* grab the abi_ulong */
1130 __get_user(b
, &target_fds
[i
]);
1131 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1132 /* check the bit inside the abi_ulong */
1139 unlock_user(target_fds
, target_fds_addr
, 0);
1144 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
1145 abi_ulong target_fds_addr
,
1148 if (target_fds_addr
) {
1149 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
1150 return -TARGET_EFAULT
;
1158 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
1164 abi_ulong
*target_fds
;
1166 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1167 if (!(target_fds
= lock_user(VERIFY_WRITE
,
1169 sizeof(abi_ulong
) * nw
,
1171 return -TARGET_EFAULT
;
1174 for (i
= 0; i
< nw
; i
++) {
1176 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1177 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
1180 __put_user(v
, &target_fds
[i
]);
1183 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
1188 #if defined(__alpha__)
1189 #define HOST_HZ 1024
1194 static inline abi_long
host_to_target_clock_t(long ticks
)
1196 #if HOST_HZ == TARGET_HZ
1199 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1203 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1204 const struct rusage
*rusage
)
1206 struct target_rusage
*target_rusage
;
1208 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1209 return -TARGET_EFAULT
;
1210 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1211 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1212 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1213 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1214 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1215 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1216 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1217 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1218 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1219 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1220 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1221 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1222 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1223 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1224 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1225 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1226 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1227 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1228 unlock_user_struct(target_rusage
, target_addr
, 1);
1233 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1235 abi_ulong target_rlim_swap
;
1238 target_rlim_swap
= tswapal(target_rlim
);
1239 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1240 return RLIM_INFINITY
;
1242 result
= target_rlim_swap
;
1243 if (target_rlim_swap
!= (rlim_t
)result
)
1244 return RLIM_INFINITY
;
1249 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1251 abi_ulong target_rlim_swap
;
1254 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1255 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1257 target_rlim_swap
= rlim
;
1258 result
= tswapal(target_rlim_swap
);
1263 static inline int target_to_host_resource(int code
)
1266 case TARGET_RLIMIT_AS
:
1268 case TARGET_RLIMIT_CORE
:
1270 case TARGET_RLIMIT_CPU
:
1272 case TARGET_RLIMIT_DATA
:
1274 case TARGET_RLIMIT_FSIZE
:
1275 return RLIMIT_FSIZE
;
1276 case TARGET_RLIMIT_LOCKS
:
1277 return RLIMIT_LOCKS
;
1278 case TARGET_RLIMIT_MEMLOCK
:
1279 return RLIMIT_MEMLOCK
;
1280 case TARGET_RLIMIT_MSGQUEUE
:
1281 return RLIMIT_MSGQUEUE
;
1282 case TARGET_RLIMIT_NICE
:
1284 case TARGET_RLIMIT_NOFILE
:
1285 return RLIMIT_NOFILE
;
1286 case TARGET_RLIMIT_NPROC
:
1287 return RLIMIT_NPROC
;
1288 case TARGET_RLIMIT_RSS
:
1290 case TARGET_RLIMIT_RTPRIO
:
1291 return RLIMIT_RTPRIO
;
1292 case TARGET_RLIMIT_SIGPENDING
:
1293 return RLIMIT_SIGPENDING
;
1294 case TARGET_RLIMIT_STACK
:
1295 return RLIMIT_STACK
;
1301 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1302 abi_ulong target_tv_addr
)
1304 struct target_timeval
*target_tv
;
1306 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
1307 return -TARGET_EFAULT
;
1309 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1310 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1312 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1317 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1318 const struct timeval
*tv
)
1320 struct target_timeval
*target_tv
;
1322 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
1323 return -TARGET_EFAULT
;
1325 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1326 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1328 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1333 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1334 abi_ulong target_tz_addr
)
1336 struct target_timezone
*target_tz
;
1338 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1339 return -TARGET_EFAULT
;
1342 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1343 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1345 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1350 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1353 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1354 abi_ulong target_mq_attr_addr
)
1356 struct target_mq_attr
*target_mq_attr
;
1358 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1359 target_mq_attr_addr
, 1))
1360 return -TARGET_EFAULT
;
1362 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1363 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1364 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1365 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1367 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1372 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1373 const struct mq_attr
*attr
)
1375 struct target_mq_attr
*target_mq_attr
;
1377 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1378 target_mq_attr_addr
, 0))
1379 return -TARGET_EFAULT
;
1381 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1382 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1383 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1384 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1386 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1392 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1393 /* do_select() must return target values and target errnos. */
1394 static abi_long
do_select(int n
,
1395 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1396 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1398 fd_set rfds
, wfds
, efds
;
1399 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1401 struct timespec ts
, *ts_ptr
;
1404 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1408 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1412 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1417 if (target_tv_addr
) {
1418 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1419 return -TARGET_EFAULT
;
1420 ts
.tv_sec
= tv
.tv_sec
;
1421 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1427 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1430 if (!is_error(ret
)) {
1431 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1432 return -TARGET_EFAULT
;
1433 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1434 return -TARGET_EFAULT
;
1435 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1436 return -TARGET_EFAULT
;
1438 if (target_tv_addr
) {
1439 tv
.tv_sec
= ts
.tv_sec
;
1440 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1441 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1442 return -TARGET_EFAULT
;
1450 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1451 static abi_long
do_old_select(abi_ulong arg1
)
1453 struct target_sel_arg_struct
*sel
;
1454 abi_ulong inp
, outp
, exp
, tvp
;
1457 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1)) {
1458 return -TARGET_EFAULT
;
1461 nsel
= tswapal(sel
->n
);
1462 inp
= tswapal(sel
->inp
);
1463 outp
= tswapal(sel
->outp
);
1464 exp
= tswapal(sel
->exp
);
1465 tvp
= tswapal(sel
->tvp
);
1467 unlock_user_struct(sel
, arg1
, 0);
1469 return do_select(nsel
, inp
, outp
, exp
, tvp
);
1474 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1477 return pipe2(host_pipe
, flags
);
1483 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1484 int flags
, int is_pipe2
)
1488 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1491 return get_errno(ret
);
1493 /* Several targets have special calling conventions for the original
1494 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1496 #if defined(TARGET_ALPHA)
1497 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1498 return host_pipe
[0];
1499 #elif defined(TARGET_MIPS)
1500 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1501 return host_pipe
[0];
1502 #elif defined(TARGET_SH4)
1503 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1504 return host_pipe
[0];
1505 #elif defined(TARGET_SPARC)
1506 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1507 return host_pipe
[0];
1511 if (put_user_s32(host_pipe
[0], pipedes
)
1512 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1513 return -TARGET_EFAULT
;
1514 return get_errno(ret
);
1517 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1518 abi_ulong target_addr
,
1521 struct target_ip_mreqn
*target_smreqn
;
1523 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1525 return -TARGET_EFAULT
;
1526 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1527 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1528 if (len
== sizeof(struct target_ip_mreqn
))
1529 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1530 unlock_user(target_smreqn
, target_addr
, 0);
1535 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1536 abi_ulong target_addr
,
1539 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1540 sa_family_t sa_family
;
1541 struct target_sockaddr
*target_saddr
;
1543 if (fd_trans_target_to_host_addr(fd
)) {
1544 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1547 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1549 return -TARGET_EFAULT
;
1551 sa_family
= tswap16(target_saddr
->sa_family
);
1553 /* Oops. The caller might send a incomplete sun_path; sun_path
1554 * must be terminated by \0 (see the manual page), but
1555 * unfortunately it is quite common to specify sockaddr_un
1556 * length as "strlen(x->sun_path)" while it should be
1557 * "strlen(...) + 1". We'll fix that here if needed.
1558 * Linux kernel has a similar feature.
1561 if (sa_family
== AF_UNIX
) {
1562 if (len
< unix_maxlen
&& len
> 0) {
1563 char *cp
= (char*)target_saddr
;
1565 if ( cp
[len
-1] && !cp
[len
] )
1568 if (len
> unix_maxlen
)
1572 memcpy(addr
, target_saddr
, len
);
1573 addr
->sa_family
= sa_family
;
1574 if (sa_family
== AF_NETLINK
) {
1575 struct sockaddr_nl
*nladdr
;
1577 nladdr
= (struct sockaddr_nl
*)addr
;
1578 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1579 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1580 } else if (sa_family
== AF_PACKET
) {
1581 struct target_sockaddr_ll
*lladdr
;
1583 lladdr
= (struct target_sockaddr_ll
*)addr
;
1584 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1585 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1587 unlock_user(target_saddr
, target_addr
, 0);
1592 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1593 struct sockaddr
*addr
,
1596 struct target_sockaddr
*target_saddr
;
1602 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1604 return -TARGET_EFAULT
;
1605 memcpy(target_saddr
, addr
, len
);
1606 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1607 sizeof(target_saddr
->sa_family
)) {
1608 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1610 if (addr
->sa_family
== AF_NETLINK
&& len
>= sizeof(struct sockaddr_nl
)) {
1611 struct sockaddr_nl
*target_nl
= (struct sockaddr_nl
*)target_saddr
;
1612 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1613 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1614 } else if (addr
->sa_family
== AF_PACKET
) {
1615 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1616 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1617 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1619 unlock_user(target_saddr
, target_addr
, len
);
1624 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1625 struct target_msghdr
*target_msgh
)
1627 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1628 abi_long msg_controllen
;
1629 abi_ulong target_cmsg_addr
;
1630 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1631 socklen_t space
= 0;
1633 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1634 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1636 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1637 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1638 target_cmsg_start
= target_cmsg
;
1640 return -TARGET_EFAULT
;
1642 while (cmsg
&& target_cmsg
) {
1643 void *data
= CMSG_DATA(cmsg
);
1644 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1646 int len
= tswapal(target_cmsg
->cmsg_len
)
1647 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1649 space
+= CMSG_SPACE(len
);
1650 if (space
> msgh
->msg_controllen
) {
1651 space
-= CMSG_SPACE(len
);
1652 /* This is a QEMU bug, since we allocated the payload
1653 * area ourselves (unlike overflow in host-to-target
1654 * conversion, which is just the guest giving us a buffer
1655 * that's too small). It can't happen for the payload types
1656 * we currently support; if it becomes an issue in future
1657 * we would need to improve our allocation strategy to
1658 * something more intelligent than "twice the size of the
1659 * target buffer we're reading from".
1661 gemu_log("Host cmsg overflow\n");
1665 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1666 cmsg
->cmsg_level
= SOL_SOCKET
;
1668 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1670 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1671 cmsg
->cmsg_len
= CMSG_LEN(len
);
1673 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1674 int *fd
= (int *)data
;
1675 int *target_fd
= (int *)target_data
;
1676 int i
, numfds
= len
/ sizeof(int);
1678 for (i
= 0; i
< numfds
; i
++) {
1679 __get_user(fd
[i
], target_fd
+ i
);
1681 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1682 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1683 struct ucred
*cred
= (struct ucred
*)data
;
1684 struct target_ucred
*target_cred
=
1685 (struct target_ucred
*)target_data
;
1687 __get_user(cred
->pid
, &target_cred
->pid
);
1688 __get_user(cred
->uid
, &target_cred
->uid
);
1689 __get_user(cred
->gid
, &target_cred
->gid
);
1691 gemu_log("Unsupported ancillary data: %d/%d\n",
1692 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1693 memcpy(data
, target_data
, len
);
1696 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1697 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1700 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1702 msgh
->msg_controllen
= space
;
1706 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1707 struct msghdr
*msgh
)
1709 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1710 abi_long msg_controllen
;
1711 abi_ulong target_cmsg_addr
;
1712 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1713 socklen_t space
= 0;
1715 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1716 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1718 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1719 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1720 target_cmsg_start
= target_cmsg
;
1722 return -TARGET_EFAULT
;
1724 while (cmsg
&& target_cmsg
) {
1725 void *data
= CMSG_DATA(cmsg
);
1726 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1728 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1729 int tgt_len
, tgt_space
;
1731 /* We never copy a half-header but may copy half-data;
1732 * this is Linux's behaviour in put_cmsg(). Note that
1733 * truncation here is a guest problem (which we report
1734 * to the guest via the CTRUNC bit), unlike truncation
1735 * in target_to_host_cmsg, which is a QEMU bug.
1737 if (msg_controllen
< sizeof(struct cmsghdr
)) {
1738 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1742 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1743 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1745 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1747 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1749 tgt_len
= TARGET_CMSG_LEN(len
);
1751 /* Payload types which need a different size of payload on
1752 * the target must adjust tgt_len here.
1754 switch (cmsg
->cmsg_level
) {
1756 switch (cmsg
->cmsg_type
) {
1758 tgt_len
= sizeof(struct target_timeval
);
1767 if (msg_controllen
< tgt_len
) {
1768 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1769 tgt_len
= msg_controllen
;
1772 /* We must now copy-and-convert len bytes of payload
1773 * into tgt_len bytes of destination space. Bear in mind
1774 * that in both source and destination we may be dealing
1775 * with a truncated value!
1777 switch (cmsg
->cmsg_level
) {
1779 switch (cmsg
->cmsg_type
) {
1782 int *fd
= (int *)data
;
1783 int *target_fd
= (int *)target_data
;
1784 int i
, numfds
= tgt_len
/ sizeof(int);
1786 for (i
= 0; i
< numfds
; i
++) {
1787 __put_user(fd
[i
], target_fd
+ i
);
1793 struct timeval
*tv
= (struct timeval
*)data
;
1794 struct target_timeval
*target_tv
=
1795 (struct target_timeval
*)target_data
;
1797 if (len
!= sizeof(struct timeval
) ||
1798 tgt_len
!= sizeof(struct target_timeval
)) {
1802 /* copy struct timeval to target */
1803 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1804 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1807 case SCM_CREDENTIALS
:
1809 struct ucred
*cred
= (struct ucred
*)data
;
1810 struct target_ucred
*target_cred
=
1811 (struct target_ucred
*)target_data
;
1813 __put_user(cred
->pid
, &target_cred
->pid
);
1814 __put_user(cred
->uid
, &target_cred
->uid
);
1815 __put_user(cred
->gid
, &target_cred
->gid
);
1825 gemu_log("Unsupported ancillary data: %d/%d\n",
1826 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1827 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1828 if (tgt_len
> len
) {
1829 memset(target_data
+ len
, 0, tgt_len
- len
);
1833 target_cmsg
->cmsg_len
= tswapal(tgt_len
);
1834 tgt_space
= TARGET_CMSG_SPACE(len
);
1835 if (msg_controllen
< tgt_space
) {
1836 tgt_space
= msg_controllen
;
1838 msg_controllen
-= tgt_space
;
1840 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1841 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1844 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1846 target_msgh
->msg_controllen
= tswapal(space
);
/* Byte-swap every field of a netlink message header in place. */
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
    nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
    nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
}
1859 static abi_long
host_to_target_for_each_nlmsg(struct nlmsghdr
*nlh
,
1861 abi_long (*host_to_target_nlmsg
)
1862 (struct nlmsghdr
*))
1867 while (len
> sizeof(struct nlmsghdr
)) {
1869 nlmsg_len
= nlh
->nlmsg_len
;
1870 if (nlmsg_len
< sizeof(struct nlmsghdr
) ||
1875 switch (nlh
->nlmsg_type
) {
1877 tswap_nlmsghdr(nlh
);
1883 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
1884 e
->error
= tswap32(e
->error
);
1885 tswap_nlmsghdr(&e
->msg
);
1886 tswap_nlmsghdr(nlh
);
1890 ret
= host_to_target_nlmsg(nlh
);
1892 tswap_nlmsghdr(nlh
);
1897 tswap_nlmsghdr(nlh
);
1898 len
-= NLMSG_ALIGN(nlmsg_len
);
1899 nlh
= (struct nlmsghdr
*)(((char*)nlh
) + NLMSG_ALIGN(nlmsg_len
));
1904 static abi_long
target_to_host_for_each_nlmsg(struct nlmsghdr
*nlh
,
1906 abi_long (*target_to_host_nlmsg
)
1907 (struct nlmsghdr
*))
1911 while (len
> sizeof(struct nlmsghdr
)) {
1912 if (tswap32(nlh
->nlmsg_len
) < sizeof(struct nlmsghdr
) ||
1913 tswap32(nlh
->nlmsg_len
) > len
) {
1916 tswap_nlmsghdr(nlh
);
1917 switch (nlh
->nlmsg_type
) {
1924 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
1925 e
->error
= tswap32(e
->error
);
1926 tswap_nlmsghdr(&e
->msg
);
1930 ret
= target_to_host_nlmsg(nlh
);
1935 len
-= NLMSG_ALIGN(nlh
->nlmsg_len
);
1936 nlh
= (struct nlmsghdr
*)(((char *)nlh
) + NLMSG_ALIGN(nlh
->nlmsg_len
));
1941 #ifdef CONFIG_RTNETLINK
1942 static abi_long
host_to_target_for_each_nlattr(struct nlattr
*nlattr
,
1943 size_t len
, void *context
,
1944 abi_long (*host_to_target_nlattr
)
1948 unsigned short nla_len
;
1951 while (len
> sizeof(struct nlattr
)) {
1952 nla_len
= nlattr
->nla_len
;
1953 if (nla_len
< sizeof(struct nlattr
) ||
1957 ret
= host_to_target_nlattr(nlattr
, context
);
1958 nlattr
->nla_len
= tswap16(nlattr
->nla_len
);
1959 nlattr
->nla_type
= tswap16(nlattr
->nla_type
);
1963 len
-= NLA_ALIGN(nla_len
);
1964 nlattr
= (struct nlattr
*)(((char *)nlattr
) + NLA_ALIGN(nla_len
));
1969 static abi_long
host_to_target_for_each_rtattr(struct rtattr
*rtattr
,
1971 abi_long (*host_to_target_rtattr
)
1974 unsigned short rta_len
;
1977 while (len
> sizeof(struct rtattr
)) {
1978 rta_len
= rtattr
->rta_len
;
1979 if (rta_len
< sizeof(struct rtattr
) ||
1983 ret
= host_to_target_rtattr(rtattr
);
1984 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
1985 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
1989 len
-= RTA_ALIGN(rta_len
);
1990 rtattr
= (struct rtattr
*)(((char *)rtattr
) + RTA_ALIGN(rta_len
));
1995 #define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
1997 static abi_long
host_to_target_data_bridge_nlattr(struct nlattr
*nlattr
,
2004 switch (nlattr
->nla_type
) {
2006 case QEMU_IFLA_BR_FDB_FLUSH
:
2009 case QEMU_IFLA_BR_GROUP_ADDR
:
2012 case QEMU_IFLA_BR_VLAN_FILTERING
:
2013 case QEMU_IFLA_BR_TOPOLOGY_CHANGE
:
2014 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED
:
2015 case QEMU_IFLA_BR_MCAST_ROUTER
:
2016 case QEMU_IFLA_BR_MCAST_SNOOPING
:
2017 case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR
:
2018 case QEMU_IFLA_BR_MCAST_QUERIER
:
2019 case QEMU_IFLA_BR_NF_CALL_IPTABLES
:
2020 case QEMU_IFLA_BR_NF_CALL_IP6TABLES
:
2021 case QEMU_IFLA_BR_NF_CALL_ARPTABLES
:
2024 case QEMU_IFLA_BR_PRIORITY
:
2025 case QEMU_IFLA_BR_VLAN_PROTOCOL
:
2026 case QEMU_IFLA_BR_GROUP_FWD_MASK
:
2027 case QEMU_IFLA_BR_ROOT_PORT
:
2028 case QEMU_IFLA_BR_VLAN_DEFAULT_PVID
:
2029 u16
= NLA_DATA(nlattr
);
2030 *u16
= tswap16(*u16
);
2033 case QEMU_IFLA_BR_FORWARD_DELAY
:
2034 case QEMU_IFLA_BR_HELLO_TIME
:
2035 case QEMU_IFLA_BR_MAX_AGE
:
2036 case QEMU_IFLA_BR_AGEING_TIME
:
2037 case QEMU_IFLA_BR_STP_STATE
:
2038 case QEMU_IFLA_BR_ROOT_PATH_COST
:
2039 case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY
:
2040 case QEMU_IFLA_BR_MCAST_HASH_MAX
:
2041 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT
:
2042 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT
:
2043 u32
= NLA_DATA(nlattr
);
2044 *u32
= tswap32(*u32
);
2047 case QEMU_IFLA_BR_HELLO_TIMER
:
2048 case QEMU_IFLA_BR_TCN_TIMER
:
2049 case QEMU_IFLA_BR_GC_TIMER
:
2050 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER
:
2051 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL
:
2052 case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL
:
2053 case QEMU_IFLA_BR_MCAST_QUERIER_INTVL
:
2054 case QEMU_IFLA_BR_MCAST_QUERY_INTVL
:
2055 case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
:
2056 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL
:
2057 u64
= NLA_DATA(nlattr
);
2058 *u64
= tswap64(*u64
);
2060 /* ifla_bridge_id: uin8_t[] */
2061 case QEMU_IFLA_BR_ROOT_ID
:
2062 case QEMU_IFLA_BR_BRIDGE_ID
:
2065 gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr
->nla_type
);
2071 static abi_long
host_to_target_slave_data_bridge_nlattr(struct nlattr
*nlattr
,
2078 switch (nlattr
->nla_type
) {
2080 case QEMU_IFLA_BRPORT_STATE
:
2081 case QEMU_IFLA_BRPORT_MODE
:
2082 case QEMU_IFLA_BRPORT_GUARD
:
2083 case QEMU_IFLA_BRPORT_PROTECT
:
2084 case QEMU_IFLA_BRPORT_FAST_LEAVE
:
2085 case QEMU_IFLA_BRPORT_LEARNING
:
2086 case QEMU_IFLA_BRPORT_UNICAST_FLOOD
:
2087 case QEMU_IFLA_BRPORT_PROXYARP
:
2088 case QEMU_IFLA_BRPORT_LEARNING_SYNC
:
2089 case QEMU_IFLA_BRPORT_PROXYARP_WIFI
:
2090 case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
:
2091 case QEMU_IFLA_BRPORT_CONFIG_PENDING
:
2092 case QEMU_IFLA_BRPORT_MULTICAST_ROUTER
:
2095 case QEMU_IFLA_BRPORT_PRIORITY
:
2096 case QEMU_IFLA_BRPORT_DESIGNATED_PORT
:
2097 case QEMU_IFLA_BRPORT_DESIGNATED_COST
:
2098 case QEMU_IFLA_BRPORT_ID
:
2099 case QEMU_IFLA_BRPORT_NO
:
2100 u16
= NLA_DATA(nlattr
);
2101 *u16
= tswap16(*u16
);
2104 case QEMU_IFLA_BRPORT_COST
:
2105 u32
= NLA_DATA(nlattr
);
2106 *u32
= tswap32(*u32
);
2109 case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER
:
2110 case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER
:
2111 case QEMU_IFLA_BRPORT_HOLD_TIMER
:
2112 u64
= NLA_DATA(nlattr
);
2113 *u64
= tswap64(*u64
);
2115 /* ifla_bridge_id: uint8_t[] */
2116 case QEMU_IFLA_BRPORT_ROOT_ID
:
2117 case QEMU_IFLA_BRPORT_BRIDGE_ID
:
2120 gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr
->nla_type
);
/* Carries the IFLA_INFO_KIND / IFLA_INFO_SLAVE_KIND strings between
 * linkinfo attributes so nested INFO_DATA payloads can be dispatched.
 * NOTE(review): interior fields were not visible in this chunk; the
 * layout below matches what host_to_target_data_linkinfo_nlattr
 * reads/writes (name/len, slave_name/slave_len) — confirm upstream. */
struct linkinfo_context {
    int len;
    char *name;
    int slave_len;
    char *slave_name;
};
2133 static abi_long
host_to_target_data_linkinfo_nlattr(struct nlattr
*nlattr
,
2136 struct linkinfo_context
*li_context
= context
;
2138 switch (nlattr
->nla_type
) {
2140 case QEMU_IFLA_INFO_KIND
:
2141 li_context
->name
= NLA_DATA(nlattr
);
2142 li_context
->len
= nlattr
->nla_len
- NLA_HDRLEN
;
2144 case QEMU_IFLA_INFO_SLAVE_KIND
:
2145 li_context
->slave_name
= NLA_DATA(nlattr
);
2146 li_context
->slave_len
= nlattr
->nla_len
- NLA_HDRLEN
;
2149 case QEMU_IFLA_INFO_XSTATS
:
2150 /* FIXME: only used by CAN */
2153 case QEMU_IFLA_INFO_DATA
:
2154 if (strncmp(li_context
->name
, "bridge",
2155 li_context
->len
) == 0) {
2156 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
2159 host_to_target_data_bridge_nlattr
);
2161 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context
->name
);
2164 case QEMU_IFLA_INFO_SLAVE_DATA
:
2165 if (strncmp(li_context
->slave_name
, "bridge",
2166 li_context
->slave_len
) == 0) {
2167 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
2170 host_to_target_slave_data_bridge_nlattr
);
2172 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2173 li_context
->slave_name
);
2177 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr
->nla_type
);
2184 static abi_long
host_to_target_data_inet_nlattr(struct nlattr
*nlattr
,
2190 switch (nlattr
->nla_type
) {
2191 case QEMU_IFLA_INET_CONF
:
2192 u32
= NLA_DATA(nlattr
);
2193 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u32
);
2195 u32
[i
] = tswap32(u32
[i
]);
2199 gemu_log("Unknown host AF_INET type: %d\n", nlattr
->nla_type
);
2204 static abi_long
host_to_target_data_inet6_nlattr(struct nlattr
*nlattr
,
2209 struct ifla_cacheinfo
*ci
;
2212 switch (nlattr
->nla_type
) {
2214 case QEMU_IFLA_INET6_TOKEN
:
2217 case QEMU_IFLA_INET6_ADDR_GEN_MODE
:
2220 case QEMU_IFLA_INET6_FLAGS
:
2221 u32
= NLA_DATA(nlattr
);
2222 *u32
= tswap32(*u32
);
2225 case QEMU_IFLA_INET6_CONF
:
2226 u32
= NLA_DATA(nlattr
);
2227 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u32
);
2229 u32
[i
] = tswap32(u32
[i
]);
2232 /* ifla_cacheinfo */
2233 case QEMU_IFLA_INET6_CACHEINFO
:
2234 ci
= NLA_DATA(nlattr
);
2235 ci
->max_reasm_len
= tswap32(ci
->max_reasm_len
);
2236 ci
->tstamp
= tswap32(ci
->tstamp
);
2237 ci
->reachable_time
= tswap32(ci
->reachable_time
);
2238 ci
->retrans_time
= tswap32(ci
->retrans_time
);
2241 case QEMU_IFLA_INET6_STATS
:
2242 case QEMU_IFLA_INET6_ICMP6STATS
:
2243 u64
= NLA_DATA(nlattr
);
2244 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u64
);
2246 u64
[i
] = tswap64(u64
[i
]);
2250 gemu_log("Unknown host AF_INET6 type: %d\n", nlattr
->nla_type
);
2255 static abi_long
host_to_target_data_spec_nlattr(struct nlattr
*nlattr
,
2258 switch (nlattr
->nla_type
) {
2260 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2262 host_to_target_data_inet_nlattr
);
2264 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2266 host_to_target_data_inet6_nlattr
);
2268 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr
->nla_type
);
2274 static abi_long
host_to_target_data_link_rtattr(struct rtattr
*rtattr
)
2277 struct rtnl_link_stats
*st
;
2278 struct rtnl_link_stats64
*st64
;
2279 struct rtnl_link_ifmap
*map
;
2280 struct linkinfo_context li_context
;
2282 switch (rtattr
->rta_type
) {
2284 case QEMU_IFLA_ADDRESS
:
2285 case QEMU_IFLA_BROADCAST
:
2287 case QEMU_IFLA_IFNAME
:
2288 case QEMU_IFLA_QDISC
:
2291 case QEMU_IFLA_OPERSTATE
:
2292 case QEMU_IFLA_LINKMODE
:
2293 case QEMU_IFLA_CARRIER
:
2294 case QEMU_IFLA_PROTO_DOWN
:
2298 case QEMU_IFLA_LINK
:
2299 case QEMU_IFLA_WEIGHT
:
2300 case QEMU_IFLA_TXQLEN
:
2301 case QEMU_IFLA_CARRIER_CHANGES
:
2302 case QEMU_IFLA_NUM_RX_QUEUES
:
2303 case QEMU_IFLA_NUM_TX_QUEUES
:
2304 case QEMU_IFLA_PROMISCUITY
:
2305 case QEMU_IFLA_EXT_MASK
:
2306 case QEMU_IFLA_LINK_NETNSID
:
2307 case QEMU_IFLA_GROUP
:
2308 case QEMU_IFLA_MASTER
:
2309 case QEMU_IFLA_NUM_VF
:
2310 u32
= RTA_DATA(rtattr
);
2311 *u32
= tswap32(*u32
);
2313 /* struct rtnl_link_stats */
2314 case QEMU_IFLA_STATS
:
2315 st
= RTA_DATA(rtattr
);
2316 st
->rx_packets
= tswap32(st
->rx_packets
);
2317 st
->tx_packets
= tswap32(st
->tx_packets
);
2318 st
->rx_bytes
= tswap32(st
->rx_bytes
);
2319 st
->tx_bytes
= tswap32(st
->tx_bytes
);
2320 st
->rx_errors
= tswap32(st
->rx_errors
);
2321 st
->tx_errors
= tswap32(st
->tx_errors
);
2322 st
->rx_dropped
= tswap32(st
->rx_dropped
);
2323 st
->tx_dropped
= tswap32(st
->tx_dropped
);
2324 st
->multicast
= tswap32(st
->multicast
);
2325 st
->collisions
= tswap32(st
->collisions
);
2327 /* detailed rx_errors: */
2328 st
->rx_length_errors
= tswap32(st
->rx_length_errors
);
2329 st
->rx_over_errors
= tswap32(st
->rx_over_errors
);
2330 st
->rx_crc_errors
= tswap32(st
->rx_crc_errors
);
2331 st
->rx_frame_errors
= tswap32(st
->rx_frame_errors
);
2332 st
->rx_fifo_errors
= tswap32(st
->rx_fifo_errors
);
2333 st
->rx_missed_errors
= tswap32(st
->rx_missed_errors
);
2335 /* detailed tx_errors */
2336 st
->tx_aborted_errors
= tswap32(st
->tx_aborted_errors
);
2337 st
->tx_carrier_errors
= tswap32(st
->tx_carrier_errors
);
2338 st
->tx_fifo_errors
= tswap32(st
->tx_fifo_errors
);
2339 st
->tx_heartbeat_errors
= tswap32(st
->tx_heartbeat_errors
);
2340 st
->tx_window_errors
= tswap32(st
->tx_window_errors
);
2343 st
->rx_compressed
= tswap32(st
->rx_compressed
);
2344 st
->tx_compressed
= tswap32(st
->tx_compressed
);
2346 /* struct rtnl_link_stats64 */
2347 case QEMU_IFLA_STATS64
:
2348 st64
= RTA_DATA(rtattr
);
2349 st64
->rx_packets
= tswap64(st64
->rx_packets
);
2350 st64
->tx_packets
= tswap64(st64
->tx_packets
);
2351 st64
->rx_bytes
= tswap64(st64
->rx_bytes
);
2352 st64
->tx_bytes
= tswap64(st64
->tx_bytes
);
2353 st64
->rx_errors
= tswap64(st64
->rx_errors
);
2354 st64
->tx_errors
= tswap64(st64
->tx_errors
);
2355 st64
->rx_dropped
= tswap64(st64
->rx_dropped
);
2356 st64
->tx_dropped
= tswap64(st64
->tx_dropped
);
2357 st64
->multicast
= tswap64(st64
->multicast
);
2358 st64
->collisions
= tswap64(st64
->collisions
);
2360 /* detailed rx_errors: */
2361 st64
->rx_length_errors
= tswap64(st64
->rx_length_errors
);
2362 st64
->rx_over_errors
= tswap64(st64
->rx_over_errors
);
2363 st64
->rx_crc_errors
= tswap64(st64
->rx_crc_errors
);
2364 st64
->rx_frame_errors
= tswap64(st64
->rx_frame_errors
);
2365 st64
->rx_fifo_errors
= tswap64(st64
->rx_fifo_errors
);
2366 st64
->rx_missed_errors
= tswap64(st64
->rx_missed_errors
);
2368 /* detailed tx_errors */
2369 st64
->tx_aborted_errors
= tswap64(st64
->tx_aborted_errors
);
2370 st64
->tx_carrier_errors
= tswap64(st64
->tx_carrier_errors
);
2371 st64
->tx_fifo_errors
= tswap64(st64
->tx_fifo_errors
);
2372 st64
->tx_heartbeat_errors
= tswap64(st64
->tx_heartbeat_errors
);
2373 st64
->tx_window_errors
= tswap64(st64
->tx_window_errors
);
2376 st64
->rx_compressed
= tswap64(st64
->rx_compressed
);
2377 st64
->tx_compressed
= tswap64(st64
->tx_compressed
);
2379 /* struct rtnl_link_ifmap */
2381 map
= RTA_DATA(rtattr
);
2382 map
->mem_start
= tswap64(map
->mem_start
);
2383 map
->mem_end
= tswap64(map
->mem_end
);
2384 map
->base_addr
= tswap64(map
->base_addr
);
2385 map
->irq
= tswap16(map
->irq
);
2388 case QEMU_IFLA_LINKINFO
:
2389 memset(&li_context
, 0, sizeof(li_context
));
2390 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2392 host_to_target_data_linkinfo_nlattr
);
2393 case QEMU_IFLA_AF_SPEC
:
2394 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2396 host_to_target_data_spec_nlattr
);
2398 gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr
->rta_type
);
2404 static abi_long
host_to_target_data_addr_rtattr(struct rtattr
*rtattr
)
2407 struct ifa_cacheinfo
*ci
;
2409 switch (rtattr
->rta_type
) {
2410 /* binary: depends on family type */
2420 u32
= RTA_DATA(rtattr
);
2421 *u32
= tswap32(*u32
);
2423 /* struct ifa_cacheinfo */
2425 ci
= RTA_DATA(rtattr
);
2426 ci
->ifa_prefered
= tswap32(ci
->ifa_prefered
);
2427 ci
->ifa_valid
= tswap32(ci
->ifa_valid
);
2428 ci
->cstamp
= tswap32(ci
->cstamp
);
2429 ci
->tstamp
= tswap32(ci
->tstamp
);
2432 gemu_log("Unknown host IFA type: %d\n", rtattr
->rta_type
);
2438 static abi_long
host_to_target_data_route_rtattr(struct rtattr
*rtattr
)
2441 switch (rtattr
->rta_type
) {
2442 /* binary: depends on family type */
2451 u32
= RTA_DATA(rtattr
);
2452 *u32
= tswap32(*u32
);
2455 gemu_log("Unknown host RTA type: %d\n", rtattr
->rta_type
);
2461 static abi_long
host_to_target_link_rtattr(struct rtattr
*rtattr
,
2462 uint32_t rtattr_len
)
2464 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2465 host_to_target_data_link_rtattr
);
2468 static abi_long
host_to_target_addr_rtattr(struct rtattr
*rtattr
,
2469 uint32_t rtattr_len
)
2471 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2472 host_to_target_data_addr_rtattr
);
2475 static abi_long
host_to_target_route_rtattr(struct rtattr
*rtattr
,
2476 uint32_t rtattr_len
)
2478 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2479 host_to_target_data_route_rtattr
);
2482 static abi_long
host_to_target_data_route(struct nlmsghdr
*nlh
)
2485 struct ifinfomsg
*ifi
;
2486 struct ifaddrmsg
*ifa
;
2489 nlmsg_len
= nlh
->nlmsg_len
;
2490 switch (nlh
->nlmsg_type
) {
2494 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2495 ifi
= NLMSG_DATA(nlh
);
2496 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2497 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2498 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2499 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2500 host_to_target_link_rtattr(IFLA_RTA(ifi
),
2501 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifi
)));
2507 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2508 ifa
= NLMSG_DATA(nlh
);
2509 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2510 host_to_target_addr_rtattr(IFA_RTA(ifa
),
2511 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifa
)));
2517 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2518 rtm
= NLMSG_DATA(nlh
);
2519 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2520 host_to_target_route_rtattr(RTM_RTA(rtm
),
2521 nlmsg_len
- NLMSG_LENGTH(sizeof(*rtm
)));
2525 return -TARGET_EINVAL
;
2530 static inline abi_long
host_to_target_nlmsg_route(struct nlmsghdr
*nlh
,
2533 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_route
);
2536 static abi_long
target_to_host_for_each_rtattr(struct rtattr
*rtattr
,
2538 abi_long (*target_to_host_rtattr
)
2543 while (len
>= sizeof(struct rtattr
)) {
2544 if (tswap16(rtattr
->rta_len
) < sizeof(struct rtattr
) ||
2545 tswap16(rtattr
->rta_len
) > len
) {
2548 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
2549 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
2550 ret
= target_to_host_rtattr(rtattr
);
2554 len
-= RTA_ALIGN(rtattr
->rta_len
);
2555 rtattr
= (struct rtattr
*)(((char *)rtattr
) +
2556 RTA_ALIGN(rtattr
->rta_len
));
2561 static abi_long
target_to_host_data_link_rtattr(struct rtattr
*rtattr
)
2563 switch (rtattr
->rta_type
) {
2565 gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr
->rta_type
);
2571 static abi_long
target_to_host_data_addr_rtattr(struct rtattr
*rtattr
)
2573 switch (rtattr
->rta_type
) {
2574 /* binary: depends on family type */
2579 gemu_log("Unknown target IFA type: %d\n", rtattr
->rta_type
);
2585 static abi_long
target_to_host_data_route_rtattr(struct rtattr
*rtattr
)
2588 switch (rtattr
->rta_type
) {
2589 /* binary: depends on family type */
2596 u32
= RTA_DATA(rtattr
);
2597 *u32
= tswap32(*u32
);
2600 gemu_log("Unknown target RTA type: %d\n", rtattr
->rta_type
);
2606 static void target_to_host_link_rtattr(struct rtattr
*rtattr
,
2607 uint32_t rtattr_len
)
2609 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2610 target_to_host_data_link_rtattr
);
2613 static void target_to_host_addr_rtattr(struct rtattr
*rtattr
,
2614 uint32_t rtattr_len
)
2616 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2617 target_to_host_data_addr_rtattr
);
2620 static void target_to_host_route_rtattr(struct rtattr
*rtattr
,
2621 uint32_t rtattr_len
)
2623 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2624 target_to_host_data_route_rtattr
);
2627 static abi_long
target_to_host_data_route(struct nlmsghdr
*nlh
)
2629 struct ifinfomsg
*ifi
;
2630 struct ifaddrmsg
*ifa
;
2633 switch (nlh
->nlmsg_type
) {
2638 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2639 ifi
= NLMSG_DATA(nlh
);
2640 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2641 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2642 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2643 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2644 target_to_host_link_rtattr(IFLA_RTA(ifi
), nlh
->nlmsg_len
-
2645 NLMSG_LENGTH(sizeof(*ifi
)));
2651 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2652 ifa
= NLMSG_DATA(nlh
);
2653 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2654 target_to_host_addr_rtattr(IFA_RTA(ifa
), nlh
->nlmsg_len
-
2655 NLMSG_LENGTH(sizeof(*ifa
)));
2662 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2663 rtm
= NLMSG_DATA(nlh
);
2664 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2665 target_to_host_route_rtattr(RTM_RTA(rtm
), nlh
->nlmsg_len
-
2666 NLMSG_LENGTH(sizeof(*rtm
)));
2670 return -TARGET_EOPNOTSUPP
;
2675 static abi_long
target_to_host_nlmsg_route(struct nlmsghdr
*nlh
, size_t len
)
2677 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_route
);
2679 #endif /* CONFIG_RTNETLINK */
2681 static abi_long
host_to_target_data_audit(struct nlmsghdr
*nlh
)
2683 switch (nlh
->nlmsg_type
) {
2685 gemu_log("Unknown host audit message type %d\n",
2687 return -TARGET_EINVAL
;
2692 static inline abi_long
host_to_target_nlmsg_audit(struct nlmsghdr
*nlh
,
2695 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_audit
);
2698 static abi_long
target_to_host_data_audit(struct nlmsghdr
*nlh
)
2700 switch (nlh
->nlmsg_type
) {
2702 case AUDIT_FIRST_USER_MSG
... AUDIT_LAST_USER_MSG
:
2703 case AUDIT_FIRST_USER_MSG2
... AUDIT_LAST_USER_MSG2
:
2706 gemu_log("Unknown target audit message type %d\n",
2708 return -TARGET_EINVAL
;
2714 static abi_long
target_to_host_nlmsg_audit(struct nlmsghdr
*nlh
, size_t len
)
2716 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_audit
);
2719 /* do_setsockopt() Must return target values and target errnos. */
2720 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
2721 abi_ulong optval_addr
, socklen_t optlen
)
2725 struct ip_mreqn
*ip_mreq
;
2726 struct ip_mreq_source
*ip_mreq_source
;
2730 /* TCP options all take an 'int' value. */
2731 if (optlen
< sizeof(uint32_t))
2732 return -TARGET_EINVAL
;
2734 if (get_user_u32(val
, optval_addr
))
2735 return -TARGET_EFAULT
;
2736 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2743 case IP_ROUTER_ALERT
:
2747 case IP_MTU_DISCOVER
:
2753 case IP_MULTICAST_TTL
:
2754 case IP_MULTICAST_LOOP
:
2756 if (optlen
>= sizeof(uint32_t)) {
2757 if (get_user_u32(val
, optval_addr
))
2758 return -TARGET_EFAULT
;
2759 } else if (optlen
>= 1) {
2760 if (get_user_u8(val
, optval_addr
))
2761 return -TARGET_EFAULT
;
2763 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2765 case IP_ADD_MEMBERSHIP
:
2766 case IP_DROP_MEMBERSHIP
:
2767 if (optlen
< sizeof (struct target_ip_mreq
) ||
2768 optlen
> sizeof (struct target_ip_mreqn
))
2769 return -TARGET_EINVAL
;
2771 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
2772 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
2773 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
2776 case IP_BLOCK_SOURCE
:
2777 case IP_UNBLOCK_SOURCE
:
2778 case IP_ADD_SOURCE_MEMBERSHIP
:
2779 case IP_DROP_SOURCE_MEMBERSHIP
:
2780 if (optlen
!= sizeof (struct target_ip_mreq_source
))
2781 return -TARGET_EINVAL
;
2783 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2784 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2785 unlock_user (ip_mreq_source
, optval_addr
, 0);
2794 case IPV6_MTU_DISCOVER
:
2797 case IPV6_RECVPKTINFO
:
2799 if (optlen
< sizeof(uint32_t)) {
2800 return -TARGET_EINVAL
;
2802 if (get_user_u32(val
, optval_addr
)) {
2803 return -TARGET_EFAULT
;
2805 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2806 &val
, sizeof(val
)));
2815 /* struct icmp_filter takes an u32 value */
2816 if (optlen
< sizeof(uint32_t)) {
2817 return -TARGET_EINVAL
;
2820 if (get_user_u32(val
, optval_addr
)) {
2821 return -TARGET_EFAULT
;
2823 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2824 &val
, sizeof(val
)));
2831 case TARGET_SOL_SOCKET
:
2833 case TARGET_SO_RCVTIMEO
:
2837 optname
= SO_RCVTIMEO
;
2840 if (optlen
!= sizeof(struct target_timeval
)) {
2841 return -TARGET_EINVAL
;
2844 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2845 return -TARGET_EFAULT
;
2848 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2852 case TARGET_SO_SNDTIMEO
:
2853 optname
= SO_SNDTIMEO
;
2855 case TARGET_SO_ATTACH_FILTER
:
2857 struct target_sock_fprog
*tfprog
;
2858 struct target_sock_filter
*tfilter
;
2859 struct sock_fprog fprog
;
2860 struct sock_filter
*filter
;
2863 if (optlen
!= sizeof(*tfprog
)) {
2864 return -TARGET_EINVAL
;
2866 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2867 return -TARGET_EFAULT
;
2869 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2870 tswapal(tfprog
->filter
), 0)) {
2871 unlock_user_struct(tfprog
, optval_addr
, 1);
2872 return -TARGET_EFAULT
;
2875 fprog
.len
= tswap16(tfprog
->len
);
2876 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2877 if (filter
== NULL
) {
2878 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2879 unlock_user_struct(tfprog
, optval_addr
, 1);
2880 return -TARGET_ENOMEM
;
2882 for (i
= 0; i
< fprog
.len
; i
++) {
2883 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2884 filter
[i
].jt
= tfilter
[i
].jt
;
2885 filter
[i
].jf
= tfilter
[i
].jf
;
2886 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2888 fprog
.filter
= filter
;
2890 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2891 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2894 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2895 unlock_user_struct(tfprog
, optval_addr
, 1);
2898 case TARGET_SO_BINDTODEVICE
:
2900 char *dev_ifname
, *addr_ifname
;
2902 if (optlen
> IFNAMSIZ
- 1) {
2903 optlen
= IFNAMSIZ
- 1;
2905 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2907 return -TARGET_EFAULT
;
2909 optname
= SO_BINDTODEVICE
;
2910 addr_ifname
= alloca(IFNAMSIZ
);
2911 memcpy(addr_ifname
, dev_ifname
, optlen
);
2912 addr_ifname
[optlen
] = 0;
2913 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2914 addr_ifname
, optlen
));
2915 unlock_user (dev_ifname
, optval_addr
, 0);
2918 /* Options with 'int' argument. */
2919 case TARGET_SO_DEBUG
:
2922 case TARGET_SO_REUSEADDR
:
2923 optname
= SO_REUSEADDR
;
2925 case TARGET_SO_TYPE
:
2928 case TARGET_SO_ERROR
:
2931 case TARGET_SO_DONTROUTE
:
2932 optname
= SO_DONTROUTE
;
2934 case TARGET_SO_BROADCAST
:
2935 optname
= SO_BROADCAST
;
2937 case TARGET_SO_SNDBUF
:
2938 optname
= SO_SNDBUF
;
2940 case TARGET_SO_SNDBUFFORCE
:
2941 optname
= SO_SNDBUFFORCE
;
2943 case TARGET_SO_RCVBUF
:
2944 optname
= SO_RCVBUF
;
2946 case TARGET_SO_RCVBUFFORCE
:
2947 optname
= SO_RCVBUFFORCE
;
2949 case TARGET_SO_KEEPALIVE
:
2950 optname
= SO_KEEPALIVE
;
2952 case TARGET_SO_OOBINLINE
:
2953 optname
= SO_OOBINLINE
;
2955 case TARGET_SO_NO_CHECK
:
2956 optname
= SO_NO_CHECK
;
2958 case TARGET_SO_PRIORITY
:
2959 optname
= SO_PRIORITY
;
2962 case TARGET_SO_BSDCOMPAT
:
2963 optname
= SO_BSDCOMPAT
;
2966 case TARGET_SO_PASSCRED
:
2967 optname
= SO_PASSCRED
;
2969 case TARGET_SO_PASSSEC
:
2970 optname
= SO_PASSSEC
;
2972 case TARGET_SO_TIMESTAMP
:
2973 optname
= SO_TIMESTAMP
;
2975 case TARGET_SO_RCVLOWAT
:
2976 optname
= SO_RCVLOWAT
;
2982 if (optlen
< sizeof(uint32_t))
2983 return -TARGET_EINVAL
;
2985 if (get_user_u32(val
, optval_addr
))
2986 return -TARGET_EFAULT
;
2987 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2991 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
2992 ret
= -TARGET_ENOPROTOOPT
;
2997 /* do_getsockopt() Must return target values and target errnos. */
2998 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2999 abi_ulong optval_addr
, abi_ulong optlen
)
3006 case TARGET_SOL_SOCKET
:
3009 /* These don't just return a single integer */
3010 case TARGET_SO_LINGER
:
3011 case TARGET_SO_RCVTIMEO
:
3012 case TARGET_SO_SNDTIMEO
:
3013 case TARGET_SO_PEERNAME
:
3015 case TARGET_SO_PEERCRED
: {
3018 struct target_ucred
*tcr
;
3020 if (get_user_u32(len
, optlen
)) {
3021 return -TARGET_EFAULT
;
3024 return -TARGET_EINVAL
;
3028 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
3036 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
3037 return -TARGET_EFAULT
;
3039 __put_user(cr
.pid
, &tcr
->pid
);
3040 __put_user(cr
.uid
, &tcr
->uid
);
3041 __put_user(cr
.gid
, &tcr
->gid
);
3042 unlock_user_struct(tcr
, optval_addr
, 1);
3043 if (put_user_u32(len
, optlen
)) {
3044 return -TARGET_EFAULT
;
3048 /* Options with 'int' argument. */
3049 case TARGET_SO_DEBUG
:
3052 case TARGET_SO_REUSEADDR
:
3053 optname
= SO_REUSEADDR
;
3055 case TARGET_SO_TYPE
:
3058 case TARGET_SO_ERROR
:
3061 case TARGET_SO_DONTROUTE
:
3062 optname
= SO_DONTROUTE
;
3064 case TARGET_SO_BROADCAST
:
3065 optname
= SO_BROADCAST
;
3067 case TARGET_SO_SNDBUF
:
3068 optname
= SO_SNDBUF
;
3070 case TARGET_SO_RCVBUF
:
3071 optname
= SO_RCVBUF
;
3073 case TARGET_SO_KEEPALIVE
:
3074 optname
= SO_KEEPALIVE
;
3076 case TARGET_SO_OOBINLINE
:
3077 optname
= SO_OOBINLINE
;
3079 case TARGET_SO_NO_CHECK
:
3080 optname
= SO_NO_CHECK
;
3082 case TARGET_SO_PRIORITY
:
3083 optname
= SO_PRIORITY
;
3086 case TARGET_SO_BSDCOMPAT
:
3087 optname
= SO_BSDCOMPAT
;
3090 case TARGET_SO_PASSCRED
:
3091 optname
= SO_PASSCRED
;
3093 case TARGET_SO_TIMESTAMP
:
3094 optname
= SO_TIMESTAMP
;
3096 case TARGET_SO_RCVLOWAT
:
3097 optname
= SO_RCVLOWAT
;
3099 case TARGET_SO_ACCEPTCONN
:
3100 optname
= SO_ACCEPTCONN
;
3107 /* TCP options all take an 'int' value. */
3109 if (get_user_u32(len
, optlen
))
3110 return -TARGET_EFAULT
;
3112 return -TARGET_EINVAL
;
3114 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3117 if (optname
== SO_TYPE
) {
3118 val
= host_to_target_sock_type(val
);
3123 if (put_user_u32(val
, optval_addr
))
3124 return -TARGET_EFAULT
;
3126 if (put_user_u8(val
, optval_addr
))
3127 return -TARGET_EFAULT
;
3129 if (put_user_u32(len
, optlen
))
3130 return -TARGET_EFAULT
;
3137 case IP_ROUTER_ALERT
:
3141 case IP_MTU_DISCOVER
:
3147 case IP_MULTICAST_TTL
:
3148 case IP_MULTICAST_LOOP
:
3149 if (get_user_u32(len
, optlen
))
3150 return -TARGET_EFAULT
;
3152 return -TARGET_EINVAL
;
3154 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3157 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
3159 if (put_user_u32(len
, optlen
)
3160 || put_user_u8(val
, optval_addr
))
3161 return -TARGET_EFAULT
;
3163 if (len
> sizeof(int))
3165 if (put_user_u32(len
, optlen
)
3166 || put_user_u32(val
, optval_addr
))
3167 return -TARGET_EFAULT
;
3171 ret
= -TARGET_ENOPROTOOPT
;
3177 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
3179 ret
= -TARGET_EOPNOTSUPP
;
3185 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
3186 abi_ulong count
, int copy
)
3188 struct target_iovec
*target_vec
;
3190 abi_ulong total_len
, max_len
;
3193 bool bad_address
= false;
3199 if (count
> IOV_MAX
) {
3204 vec
= g_try_new0(struct iovec
, count
);
3210 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3211 count
* sizeof(struct target_iovec
), 1);
3212 if (target_vec
== NULL
) {
3217 /* ??? If host page size > target page size, this will result in a
3218 value larger than what we can actually support. */
3219 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
3222 for (i
= 0; i
< count
; i
++) {
3223 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3224 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3229 } else if (len
== 0) {
3230 /* Zero length pointer is ignored. */
3231 vec
[i
].iov_base
= 0;
3233 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
3234 /* If the first buffer pointer is bad, this is a fault. But
3235 * subsequent bad buffers will result in a partial write; this
3236 * is realized by filling the vector with null pointers and
3238 if (!vec
[i
].iov_base
) {
3249 if (len
> max_len
- total_len
) {
3250 len
= max_len
- total_len
;
3253 vec
[i
].iov_len
= len
;
3257 unlock_user(target_vec
, target_addr
, 0);
3262 if (tswapal(target_vec
[i
].iov_len
) > 0) {
3263 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
3266 unlock_user(target_vec
, target_addr
, 0);
3273 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
3274 abi_ulong count
, int copy
)
3276 struct target_iovec
*target_vec
;
3279 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3280 count
* sizeof(struct target_iovec
), 1);
3282 for (i
= 0; i
< count
; i
++) {
3283 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3284 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3288 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
3290 unlock_user(target_vec
, target_addr
, 0);
3296 static inline int target_to_host_sock_type(int *type
)
3299 int target_type
= *type
;
3301 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
3302 case TARGET_SOCK_DGRAM
:
3303 host_type
= SOCK_DGRAM
;
3305 case TARGET_SOCK_STREAM
:
3306 host_type
= SOCK_STREAM
;
3309 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
3312 if (target_type
& TARGET_SOCK_CLOEXEC
) {
3313 #if defined(SOCK_CLOEXEC)
3314 host_type
|= SOCK_CLOEXEC
;
3316 return -TARGET_EINVAL
;
3319 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3320 #if defined(SOCK_NONBLOCK)
3321 host_type
|= SOCK_NONBLOCK
;
3322 #elif !defined(O_NONBLOCK)
3323 return -TARGET_EINVAL
;
3330 /* Try to emulate socket type flags after socket creation. */
3331 static int sock_flags_fixup(int fd
, int target_type
)
3333 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3334 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3335 int flags
= fcntl(fd
, F_GETFL
);
3336 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
3338 return -TARGET_EINVAL
;
3345 static abi_long
packet_target_to_host_sockaddr(void *host_addr
,
3346 abi_ulong target_addr
,
3349 struct sockaddr
*addr
= host_addr
;
3350 struct target_sockaddr
*target_saddr
;
3352 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
3353 if (!target_saddr
) {
3354 return -TARGET_EFAULT
;
3357 memcpy(addr
, target_saddr
, len
);
3358 addr
->sa_family
= tswap16(target_saddr
->sa_family
);
3359 /* spkt_protocol is big-endian */
3361 unlock_user(target_saddr
, target_addr
, 0);
3365 static TargetFdTrans target_packet_trans
= {
3366 .target_to_host_addr
= packet_target_to_host_sockaddr
,
3369 #ifdef CONFIG_RTNETLINK
3370 static abi_long
netlink_route_target_to_host(void *buf
, size_t len
)
3374 ret
= target_to_host_nlmsg_route(buf
, len
);
3382 static abi_long
netlink_route_host_to_target(void *buf
, size_t len
)
3386 ret
= host_to_target_nlmsg_route(buf
, len
);
3394 static TargetFdTrans target_netlink_route_trans
= {
3395 .target_to_host_data
= netlink_route_target_to_host
,
3396 .host_to_target_data
= netlink_route_host_to_target
,
3398 #endif /* CONFIG_RTNETLINK */
3400 static abi_long
netlink_audit_target_to_host(void *buf
, size_t len
)
3404 ret
= target_to_host_nlmsg_audit(buf
, len
);
3412 static abi_long
netlink_audit_host_to_target(void *buf
, size_t len
)
3416 ret
= host_to_target_nlmsg_audit(buf
, len
);
3424 static TargetFdTrans target_netlink_audit_trans
= {
3425 .target_to_host_data
= netlink_audit_target_to_host
,
3426 .host_to_target_data
= netlink_audit_host_to_target
,
3429 /* do_socket() Must return target values and target errnos. */
3430 static abi_long
do_socket(int domain
, int type
, int protocol
)
3432 int target_type
= type
;
3435 ret
= target_to_host_sock_type(&type
);
3440 if (domain
== PF_NETLINK
&& !(
3441 #ifdef CONFIG_RTNETLINK
3442 protocol
== NETLINK_ROUTE
||
3444 protocol
== NETLINK_KOBJECT_UEVENT
||
3445 protocol
== NETLINK_AUDIT
)) {
3446 return -EPFNOSUPPORT
;
3449 if (domain
== AF_PACKET
||
3450 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
3451 protocol
= tswap16(protocol
);
3454 ret
= get_errno(socket(domain
, type
, protocol
));
3456 ret
= sock_flags_fixup(ret
, target_type
);
3457 if (type
== SOCK_PACKET
) {
3458 /* Manage an obsolete case :
3459 * if socket type is SOCK_PACKET, bind by name
3461 fd_trans_register(ret
, &target_packet_trans
);
3462 } else if (domain
== PF_NETLINK
) {
3464 #ifdef CONFIG_RTNETLINK
3466 fd_trans_register(ret
, &target_netlink_route_trans
);
3469 case NETLINK_KOBJECT_UEVENT
:
3470 /* nothing to do: messages are strings */
3473 fd_trans_register(ret
, &target_netlink_audit_trans
);
3476 g_assert_not_reached();
3483 /* do_bind() Must return target values and target errnos. */
3484 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
3490 if ((int)addrlen
< 0) {
3491 return -TARGET_EINVAL
;
3494 addr
= alloca(addrlen
+1);
3496 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3500 return get_errno(bind(sockfd
, addr
, addrlen
));
3503 /* do_connect() Must return target values and target errnos. */
3504 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
3510 if ((int)addrlen
< 0) {
3511 return -TARGET_EINVAL
;
3514 addr
= alloca(addrlen
+1);
3516 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3520 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
3523 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3524 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
3525 int flags
, int send
)
3531 abi_ulong target_vec
;
3533 if (msgp
->msg_name
) {
3534 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
3535 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
3536 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
3537 tswapal(msgp
->msg_name
),
3539 if (ret
== -TARGET_EFAULT
) {
3540 /* For connected sockets msg_name and msg_namelen must
3541 * be ignored, so returning EFAULT immediately is wrong.
3542 * Instead, pass a bad msg_name to the host kernel, and
3543 * let it decide whether to return EFAULT or not.
3545 msg
.msg_name
= (void *)-1;
3550 msg
.msg_name
= NULL
;
3551 msg
.msg_namelen
= 0;
3553 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3554 msg
.msg_control
= alloca(msg
.msg_controllen
);
3555 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3557 count
= tswapal(msgp
->msg_iovlen
);
3558 target_vec
= tswapal(msgp
->msg_iov
);
3560 if (count
> IOV_MAX
) {
3561 /* sendrcvmsg returns a different errno for this condition than
3562 * readv/writev, so we must catch it here before lock_iovec() does.
3564 ret
= -TARGET_EMSGSIZE
;
3568 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3569 target_vec
, count
, send
);
3571 ret
= -host_to_target_errno(errno
);
3574 msg
.msg_iovlen
= count
;
3578 if (fd_trans_target_to_host_data(fd
)) {
3581 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3582 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3583 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3584 msg
.msg_iov
->iov_len
);
3586 msg
.msg_iov
->iov_base
= host_msg
;
3587 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3591 ret
= target_to_host_cmsg(&msg
, msgp
);
3593 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3597 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3598 if (!is_error(ret
)) {
3600 if (fd_trans_host_to_target_data(fd
)) {
3601 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3604 ret
= host_to_target_cmsg(msgp
, &msg
);
3606 if (!is_error(ret
)) {
3607 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3608 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
3609 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3610 msg
.msg_name
, msg
.msg_namelen
);
3622 unlock_iovec(vec
, target_vec
, count
, !send
);
3627 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3628 int flags
, int send
)
3631 struct target_msghdr
*msgp
;
3633 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3637 return -TARGET_EFAULT
;
3639 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3640 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3644 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3645 * so it might not have this *mmsg-specific flag either.
3647 #ifndef MSG_WAITFORONE
3648 #define MSG_WAITFORONE 0x10000
3651 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3652 unsigned int vlen
, unsigned int flags
,
3655 struct target_mmsghdr
*mmsgp
;
3659 if (vlen
> UIO_MAXIOV
) {
3663 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3665 return -TARGET_EFAULT
;
3668 for (i
= 0; i
< vlen
; i
++) {
3669 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3670 if (is_error(ret
)) {
3673 mmsgp
[i
].msg_len
= tswap32(ret
);
3674 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3675 if (flags
& MSG_WAITFORONE
) {
3676 flags
|= MSG_DONTWAIT
;
3680 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3682 /* Return number of datagrams sent if we sent any at all;
3683 * otherwise return the error.
3691 /* do_accept4() Must return target values and target errnos. */
3692 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3693 abi_ulong target_addrlen_addr
, int flags
)
3700 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3702 if (target_addr
== 0) {
3703 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3706 /* linux returns EINVAL if addrlen pointer is invalid */
3707 if (get_user_u32(addrlen
, target_addrlen_addr
))
3708 return -TARGET_EINVAL
;
3710 if ((int)addrlen
< 0) {
3711 return -TARGET_EINVAL
;
3714 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3715 return -TARGET_EINVAL
;
3717 addr
= alloca(addrlen
);
3719 ret
= get_errno(safe_accept4(fd
, addr
, &addrlen
, host_flags
));
3720 if (!is_error(ret
)) {
3721 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3722 if (put_user_u32(addrlen
, target_addrlen_addr
))
3723 ret
= -TARGET_EFAULT
;
3728 /* do_getpeername() Must return target values and target errnos. */
3729 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3730 abi_ulong target_addrlen_addr
)
3736 if (get_user_u32(addrlen
, target_addrlen_addr
))
3737 return -TARGET_EFAULT
;
3739 if ((int)addrlen
< 0) {
3740 return -TARGET_EINVAL
;
3743 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3744 return -TARGET_EFAULT
;
3746 addr
= alloca(addrlen
);
3748 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
3749 if (!is_error(ret
)) {
3750 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3751 if (put_user_u32(addrlen
, target_addrlen_addr
))
3752 ret
= -TARGET_EFAULT
;
3757 /* do_getsockname() Must return target values and target errnos. */
3758 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3759 abi_ulong target_addrlen_addr
)
3765 if (get_user_u32(addrlen
, target_addrlen_addr
))
3766 return -TARGET_EFAULT
;
3768 if ((int)addrlen
< 0) {
3769 return -TARGET_EINVAL
;
3772 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3773 return -TARGET_EFAULT
;
3775 addr
= alloca(addrlen
);
3777 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
3778 if (!is_error(ret
)) {
3779 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3780 if (put_user_u32(addrlen
, target_addrlen_addr
))
3781 ret
= -TARGET_EFAULT
;
3786 /* do_socketpair() Must return target values and target errnos. */
3787 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3788 abi_ulong target_tab_addr
)
3793 target_to_host_sock_type(&type
);
3795 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3796 if (!is_error(ret
)) {
3797 if (put_user_s32(tab
[0], target_tab_addr
)
3798 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3799 ret
= -TARGET_EFAULT
;
3804 /* do_sendto() Must return target values and target errnos. */
3805 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3806 abi_ulong target_addr
, socklen_t addrlen
)
3810 void *copy_msg
= NULL
;
3813 if ((int)addrlen
< 0) {
3814 return -TARGET_EINVAL
;
3817 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3819 return -TARGET_EFAULT
;
3820 if (fd_trans_target_to_host_data(fd
)) {
3821 copy_msg
= host_msg
;
3822 host_msg
= g_malloc(len
);
3823 memcpy(host_msg
, copy_msg
, len
);
3824 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3830 addr
= alloca(addrlen
+1);
3831 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3835 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3837 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3842 host_msg
= copy_msg
;
3844 unlock_user(host_msg
, msg
, 0);
3848 /* do_recvfrom() Must return target values and target errnos. */
3849 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3850 abi_ulong target_addr
,
3851 abi_ulong target_addrlen
)
3858 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3860 return -TARGET_EFAULT
;
3862 if (get_user_u32(addrlen
, target_addrlen
)) {
3863 ret
= -TARGET_EFAULT
;
3866 if ((int)addrlen
< 0) {
3867 ret
= -TARGET_EINVAL
;
3870 addr
= alloca(addrlen
);
3871 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
3874 addr
= NULL
; /* To keep compiler quiet. */
3875 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
3877 if (!is_error(ret
)) {
3878 if (fd_trans_host_to_target_data(fd
)) {
3879 ret
= fd_trans_host_to_target_data(fd
)(host_msg
, ret
);
3882 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3883 if (put_user_u32(addrlen
, target_addrlen
)) {
3884 ret
= -TARGET_EFAULT
;
3888 unlock_user(host_msg
, msg
, len
);
3891 unlock_user(host_msg
, msg
, 0);
3896 #ifdef TARGET_NR_socketcall
3897 /* do_socketcall() Must return target values and target errnos. */
3898 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
3900 static const unsigned ac
[] = { /* number of arguments per call */
3901 [SOCKOP_socket
] = 3, /* domain, type, protocol */
3902 [SOCKOP_bind
] = 3, /* sockfd, addr, addrlen */
3903 [SOCKOP_connect
] = 3, /* sockfd, addr, addrlen */
3904 [SOCKOP_listen
] = 2, /* sockfd, backlog */
3905 [SOCKOP_accept
] = 3, /* sockfd, addr, addrlen */
3906 [SOCKOP_accept4
] = 4, /* sockfd, addr, addrlen, flags */
3907 [SOCKOP_getsockname
] = 3, /* sockfd, addr, addrlen */
3908 [SOCKOP_getpeername
] = 3, /* sockfd, addr, addrlen */
3909 [SOCKOP_socketpair
] = 4, /* domain, type, protocol, tab */
3910 [SOCKOP_send
] = 4, /* sockfd, msg, len, flags */
3911 [SOCKOP_recv
] = 4, /* sockfd, msg, len, flags */
3912 [SOCKOP_sendto
] = 6, /* sockfd, msg, len, flags, addr, addrlen */
3913 [SOCKOP_recvfrom
] = 6, /* sockfd, msg, len, flags, addr, addrlen */
3914 [SOCKOP_shutdown
] = 2, /* sockfd, how */
3915 [SOCKOP_sendmsg
] = 3, /* sockfd, msg, flags */
3916 [SOCKOP_recvmsg
] = 3, /* sockfd, msg, flags */
3917 [SOCKOP_sendmmsg
] = 4, /* sockfd, msgvec, vlen, flags */
3918 [SOCKOP_recvmmsg
] = 4, /* sockfd, msgvec, vlen, flags */
3919 [SOCKOP_setsockopt
] = 5, /* sockfd, level, optname, optval, optlen */
3920 [SOCKOP_getsockopt
] = 5, /* sockfd, level, optname, optval, optlen */
3922 abi_long a
[6]; /* max 6 args */
3924 /* first, collect the arguments in a[] according to ac[] */
3925 if (num
>= 0 && num
< ARRAY_SIZE(ac
)) {
3927 assert(ARRAY_SIZE(a
) >= ac
[num
]); /* ensure we have space for args */
3928 for (i
= 0; i
< ac
[num
]; ++i
) {
3929 if (get_user_ual(a
[i
], vptr
+ i
* sizeof(abi_long
)) != 0) {
3930 return -TARGET_EFAULT
;
3935 /* now when we have the args, actually handle the call */
3937 case SOCKOP_socket
: /* domain, type, protocol */
3938 return do_socket(a
[0], a
[1], a
[2]);
3939 case SOCKOP_bind
: /* sockfd, addr, addrlen */
3940 return do_bind(a
[0], a
[1], a
[2]);
3941 case SOCKOP_connect
: /* sockfd, addr, addrlen */
3942 return do_connect(a
[0], a
[1], a
[2]);
3943 case SOCKOP_listen
: /* sockfd, backlog */
3944 return get_errno(listen(a
[0], a
[1]));
3945 case SOCKOP_accept
: /* sockfd, addr, addrlen */
3946 return do_accept4(a
[0], a
[1], a
[2], 0);
3947 case SOCKOP_accept4
: /* sockfd, addr, addrlen, flags */
3948 return do_accept4(a
[0], a
[1], a
[2], a
[3]);
3949 case SOCKOP_getsockname
: /* sockfd, addr, addrlen */
3950 return do_getsockname(a
[0], a
[1], a
[2]);
3951 case SOCKOP_getpeername
: /* sockfd, addr, addrlen */
3952 return do_getpeername(a
[0], a
[1], a
[2]);
3953 case SOCKOP_socketpair
: /* domain, type, protocol, tab */
3954 return do_socketpair(a
[0], a
[1], a
[2], a
[3]);
3955 case SOCKOP_send
: /* sockfd, msg, len, flags */
3956 return do_sendto(a
[0], a
[1], a
[2], a
[3], 0, 0);
3957 case SOCKOP_recv
: /* sockfd, msg, len, flags */
3958 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], 0, 0);
3959 case SOCKOP_sendto
: /* sockfd, msg, len, flags, addr, addrlen */
3960 return do_sendto(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3961 case SOCKOP_recvfrom
: /* sockfd, msg, len, flags, addr, addrlen */
3962 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3963 case SOCKOP_shutdown
: /* sockfd, how */
3964 return get_errno(shutdown(a
[0], a
[1]));
3965 case SOCKOP_sendmsg
: /* sockfd, msg, flags */
3966 return do_sendrecvmsg(a
[0], a
[1], a
[2], 1);
3967 case SOCKOP_recvmsg
: /* sockfd, msg, flags */
3968 return do_sendrecvmsg(a
[0], a
[1], a
[2], 0);
3969 case SOCKOP_sendmmsg
: /* sockfd, msgvec, vlen, flags */
3970 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 1);
3971 case SOCKOP_recvmmsg
: /* sockfd, msgvec, vlen, flags */
3972 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 0);
3973 case SOCKOP_setsockopt
: /* sockfd, level, optname, optval, optlen */
3974 return do_setsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3975 case SOCKOP_getsockopt
: /* sockfd, level, optname, optval, optlen */
3976 return do_getsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3978 gemu_log("Unsupported socketcall: %d\n", num
);
3979 return -TARGET_ENOSYS
;
3984 #define N_SHM_REGIONS 32
3986 static struct shm_region
{
3990 } shm_regions
[N_SHM_REGIONS
];
3992 #ifndef TARGET_SEMID64_DS
3993 /* asm-generic version of this struct */
3994 struct target_semid64_ds
3996 struct target_ipc_perm sem_perm
;
3997 abi_ulong sem_otime
;
3998 #if TARGET_ABI_BITS == 32
3999 abi_ulong __unused1
;
4001 abi_ulong sem_ctime
;
4002 #if TARGET_ABI_BITS == 32
4003 abi_ulong __unused2
;
4005 abi_ulong sem_nsems
;
4006 abi_ulong __unused3
;
4007 abi_ulong __unused4
;
4011 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
4012 abi_ulong target_addr
)
4014 struct target_ipc_perm
*target_ip
;
4015 struct target_semid64_ds
*target_sd
;
4017 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4018 return -TARGET_EFAULT
;
4019 target_ip
= &(target_sd
->sem_perm
);
4020 host_ip
->__key
= tswap32(target_ip
->__key
);
4021 host_ip
->uid
= tswap32(target_ip
->uid
);
4022 host_ip
->gid
= tswap32(target_ip
->gid
);
4023 host_ip
->cuid
= tswap32(target_ip
->cuid
);
4024 host_ip
->cgid
= tswap32(target_ip
->cgid
);
4025 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4026 host_ip
->mode
= tswap32(target_ip
->mode
);
4028 host_ip
->mode
= tswap16(target_ip
->mode
);
4030 #if defined(TARGET_PPC)
4031 host_ip
->__seq
= tswap32(target_ip
->__seq
);
4033 host_ip
->__seq
= tswap16(target_ip
->__seq
);
4035 unlock_user_struct(target_sd
, target_addr
, 0);
4039 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
4040 struct ipc_perm
*host_ip
)
4042 struct target_ipc_perm
*target_ip
;
4043 struct target_semid64_ds
*target_sd
;
4045 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4046 return -TARGET_EFAULT
;
4047 target_ip
= &(target_sd
->sem_perm
);
4048 target_ip
->__key
= tswap32(host_ip
->__key
);
4049 target_ip
->uid
= tswap32(host_ip
->uid
);
4050 target_ip
->gid
= tswap32(host_ip
->gid
);
4051 target_ip
->cuid
= tswap32(host_ip
->cuid
);
4052 target_ip
->cgid
= tswap32(host_ip
->cgid
);
4053 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4054 target_ip
->mode
= tswap32(host_ip
->mode
);
4056 target_ip
->mode
= tswap16(host_ip
->mode
);
4058 #if defined(TARGET_PPC)
4059 target_ip
->__seq
= tswap32(host_ip
->__seq
);
4061 target_ip
->__seq
= tswap16(host_ip
->__seq
);
4063 unlock_user_struct(target_sd
, target_addr
, 1);
4067 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
4068 abi_ulong target_addr
)
4070 struct target_semid64_ds
*target_sd
;
4072 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4073 return -TARGET_EFAULT
;
4074 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
4075 return -TARGET_EFAULT
;
4076 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
4077 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
4078 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
4079 unlock_user_struct(target_sd
, target_addr
, 0);
4083 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
4084 struct semid_ds
*host_sd
)
4086 struct target_semid64_ds
*target_sd
;
4088 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4089 return -TARGET_EFAULT
;
4090 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
4091 return -TARGET_EFAULT
;
4092 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
4093 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
4094 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
4095 unlock_user_struct(target_sd
, target_addr
, 1);
4099 struct target_seminfo
{
4112 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
4113 struct seminfo
*host_seminfo
)
4115 struct target_seminfo
*target_seminfo
;
4116 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
4117 return -TARGET_EFAULT
;
4118 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
4119 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
4120 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
4121 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
4122 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
4123 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
4124 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
4125 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
4126 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
4127 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
4128 unlock_user_struct(target_seminfo
, target_addr
, 1);
4134 struct semid_ds
*buf
;
4135 unsigned short *array
;
4136 struct seminfo
*__buf
;
4139 union target_semun
{
4146 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
4147 abi_ulong target_addr
)
4150 unsigned short *array
;
4152 struct semid_ds semid_ds
;
4155 semun
.buf
= &semid_ds
;
4157 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4159 return get_errno(ret
);
4161 nsems
= semid_ds
.sem_nsems
;
4163 *host_array
= g_try_new(unsigned short, nsems
);
4165 return -TARGET_ENOMEM
;
4167 array
= lock_user(VERIFY_READ
, target_addr
,
4168 nsems
*sizeof(unsigned short), 1);
4170 g_free(*host_array
);
4171 return -TARGET_EFAULT
;
4174 for(i
=0; i
<nsems
; i
++) {
4175 __get_user((*host_array
)[i
], &array
[i
]);
4177 unlock_user(array
, target_addr
, 0);
4182 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
4183 unsigned short **host_array
)
4186 unsigned short *array
;
4188 struct semid_ds semid_ds
;
4191 semun
.buf
= &semid_ds
;
4193 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4195 return get_errno(ret
);
4197 nsems
= semid_ds
.sem_nsems
;
4199 array
= lock_user(VERIFY_WRITE
, target_addr
,
4200 nsems
*sizeof(unsigned short), 0);
4202 return -TARGET_EFAULT
;
4204 for(i
=0; i
<nsems
; i
++) {
4205 __put_user((*host_array
)[i
], &array
[i
]);
4207 g_free(*host_array
);
4208 unlock_user(array
, target_addr
, 1);
4213 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
4214 abi_ulong target_arg
)
4216 union target_semun target_su
= { .buf
= target_arg
};
4218 struct semid_ds dsarg
;
4219 unsigned short *array
= NULL
;
4220 struct seminfo seminfo
;
4221 abi_long ret
= -TARGET_EINVAL
;
4228 /* In 64 bit cross-endian situations, we will erroneously pick up
4229 * the wrong half of the union for the "val" element. To rectify
4230 * this, the entire 8-byte structure is byteswapped, followed by
4231 * a swap of the 4 byte val field. In other cases, the data is
4232 * already in proper host byte order. */
4233 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
4234 target_su
.buf
= tswapal(target_su
.buf
);
4235 arg
.val
= tswap32(target_su
.val
);
4237 arg
.val
= target_su
.val
;
4239 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4243 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
4247 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4248 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
4255 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
4259 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4260 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
4266 arg
.__buf
= &seminfo
;
4267 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4268 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
4276 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
4283 struct target_sembuf
{
4284 unsigned short sem_num
;
4289 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
4290 abi_ulong target_addr
,
4293 struct target_sembuf
*target_sembuf
;
4296 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
4297 nsops
*sizeof(struct target_sembuf
), 1);
4299 return -TARGET_EFAULT
;
4301 for(i
=0; i
<nsops
; i
++) {
4302 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
4303 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
4304 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
4307 unlock_user(target_sembuf
, target_addr
, 0);
4312 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
4314 struct sembuf sops
[nsops
];
4316 if (target_to_host_sembuf(sops
, ptr
, nsops
))
4317 return -TARGET_EFAULT
;
4319 return get_errno(safe_semtimedop(semid
, sops
, nsops
, NULL
));
4322 struct target_msqid_ds
4324 struct target_ipc_perm msg_perm
;
4325 abi_ulong msg_stime
;
4326 #if TARGET_ABI_BITS == 32
4327 abi_ulong __unused1
;
4329 abi_ulong msg_rtime
;
4330 #if TARGET_ABI_BITS == 32
4331 abi_ulong __unused2
;
4333 abi_ulong msg_ctime
;
4334 #if TARGET_ABI_BITS == 32
4335 abi_ulong __unused3
;
4337 abi_ulong __msg_cbytes
;
4339 abi_ulong msg_qbytes
;
4340 abi_ulong msg_lspid
;
4341 abi_ulong msg_lrpid
;
4342 abi_ulong __unused4
;
4343 abi_ulong __unused5
;
4346 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
4347 abi_ulong target_addr
)
4349 struct target_msqid_ds
*target_md
;
4351 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
4352 return -TARGET_EFAULT
;
4353 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
4354 return -TARGET_EFAULT
;
4355 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
4356 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
4357 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
4358 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
4359 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
4360 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
4361 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
4362 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
4363 unlock_user_struct(target_md
, target_addr
, 0);
4367 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
4368 struct msqid_ds
*host_md
)
4370 struct target_msqid_ds
*target_md
;
4372 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
4373 return -TARGET_EFAULT
;
4374 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
4375 return -TARGET_EFAULT
;
4376 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
4377 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
4378 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
4379 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
4380 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
4381 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
4382 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
4383 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
4384 unlock_user_struct(target_md
, target_addr
, 1);
4388 struct target_msginfo
{
4396 unsigned short int msgseg
;
4399 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
4400 struct msginfo
*host_msginfo
)
4402 struct target_msginfo
*target_msginfo
;
4403 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
4404 return -TARGET_EFAULT
;
4405 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
4406 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
4407 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
4408 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
4409 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
4410 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
4411 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
4412 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
4413 unlock_user_struct(target_msginfo
, target_addr
, 1);
4417 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
4419 struct msqid_ds dsarg
;
4420 struct msginfo msginfo
;
4421 abi_long ret
= -TARGET_EINVAL
;
4429 if (target_to_host_msqid_ds(&dsarg
,ptr
))
4430 return -TARGET_EFAULT
;
4431 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
4432 if (host_to_target_msqid_ds(ptr
,&dsarg
))
4433 return -TARGET_EFAULT
;
4436 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
4440 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
4441 if (host_to_target_msginfo(ptr
, &msginfo
))
4442 return -TARGET_EFAULT
;
4449 struct target_msgbuf
{
4454 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
4455 ssize_t msgsz
, int msgflg
)
4457 struct target_msgbuf
*target_mb
;
4458 struct msgbuf
*host_mb
;
4462 return -TARGET_EINVAL
;
4465 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
4466 return -TARGET_EFAULT
;
4467 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4469 unlock_user_struct(target_mb
, msgp
, 0);
4470 return -TARGET_ENOMEM
;
4472 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
4473 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
4474 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
4476 unlock_user_struct(target_mb
, msgp
, 0);
4481 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
4482 ssize_t msgsz
, abi_long msgtyp
,
4485 struct target_msgbuf
*target_mb
;
4487 struct msgbuf
*host_mb
;
4491 return -TARGET_EINVAL
;
4494 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
4495 return -TARGET_EFAULT
;
4497 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4499 ret
= -TARGET_ENOMEM
;
4502 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
4505 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
4506 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
4507 if (!target_mtext
) {
4508 ret
= -TARGET_EFAULT
;
4511 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
4512 unlock_user(target_mtext
, target_mtext_addr
, ret
);
4515 target_mb
->mtype
= tswapal(host_mb
->mtype
);
4519 unlock_user_struct(target_mb
, msgp
, 1);
4524 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
4525 abi_ulong target_addr
)
4527 struct target_shmid_ds
*target_sd
;
4529 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4530 return -TARGET_EFAULT
;
4531 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
4532 return -TARGET_EFAULT
;
4533 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4534 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4535 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4536 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4537 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4538 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4539 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4540 unlock_user_struct(target_sd
, target_addr
, 0);
4544 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
4545 struct shmid_ds
*host_sd
)
4547 struct target_shmid_ds
*target_sd
;
4549 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4550 return -TARGET_EFAULT
;
4551 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
4552 return -TARGET_EFAULT
;
4553 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4554 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4555 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4556 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4557 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4558 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4559 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4560 unlock_user_struct(target_sd
, target_addr
, 1);
4564 struct target_shminfo
{
4572 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
4573 struct shminfo
*host_shminfo
)
4575 struct target_shminfo
*target_shminfo
;
4576 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
4577 return -TARGET_EFAULT
;
4578 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
4579 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
4580 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
4581 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
4582 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
4583 unlock_user_struct(target_shminfo
, target_addr
, 1);
4587 struct target_shm_info
{
4592 abi_ulong swap_attempts
;
4593 abi_ulong swap_successes
;
4596 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
4597 struct shm_info
*host_shm_info
)
4599 struct target_shm_info
*target_shm_info
;
4600 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
4601 return -TARGET_EFAULT
;
4602 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
4603 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
4604 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
4605 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
4606 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
4607 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
4608 unlock_user_struct(target_shm_info
, target_addr
, 1);
4612 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
4614 struct shmid_ds dsarg
;
4615 struct shminfo shminfo
;
4616 struct shm_info shm_info
;
4617 abi_long ret
= -TARGET_EINVAL
;
4625 if (target_to_host_shmid_ds(&dsarg
, buf
))
4626 return -TARGET_EFAULT
;
4627 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
4628 if (host_to_target_shmid_ds(buf
, &dsarg
))
4629 return -TARGET_EFAULT
;
4632 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
4633 if (host_to_target_shminfo(buf
, &shminfo
))
4634 return -TARGET_EFAULT
;
4637 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
4638 if (host_to_target_shm_info(buf
, &shm_info
))
4639 return -TARGET_EFAULT
;
4644 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
4651 #ifndef TARGET_FORCE_SHMLBA
4652 /* For most architectures, SHMLBA is the same as the page size;
4653 * some architectures have larger values, in which case they should
4654 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4655 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4656 * and defining its own value for SHMLBA.
4658 * The kernel also permits SHMLBA to be set by the architecture to a
4659 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4660 * this means that addresses are rounded to the large size if
4661 * SHM_RND is set but addresses not aligned to that size are not rejected
4662 * as long as they are at least page-aligned. Since the only architecture
4663 * which uses this is ia64 this code doesn't provide for that oddity.
4665 static inline abi_ulong
target_shmlba(CPUArchState
*cpu_env
)
4667 return TARGET_PAGE_SIZE
;
4671 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
4672 int shmid
, abi_ulong shmaddr
, int shmflg
)
4676 struct shmid_ds shm_info
;
4680 /* find out the length of the shared memory segment */
4681 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4682 if (is_error(ret
)) {
4683 /* can't get length, bail out */
4687 shmlba
= target_shmlba(cpu_env
);
4689 if (shmaddr
& (shmlba
- 1)) {
4690 if (shmflg
& SHM_RND
) {
4691 shmaddr
&= ~(shmlba
- 1);
4693 return -TARGET_EINVAL
;
4700 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
4702 abi_ulong mmap_start
;
4704 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
4706 if (mmap_start
== -1) {
4708 host_raddr
= (void *)-1;
4710 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
4713 if (host_raddr
== (void *)-1) {
4715 return get_errno((long)host_raddr
);
4717 raddr
=h2g((unsigned long)host_raddr
);
4719 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
4720 PAGE_VALID
| PAGE_READ
|
4721 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
4723 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4724 if (!shm_regions
[i
].in_use
) {
4725 shm_regions
[i
].in_use
= true;
4726 shm_regions
[i
].start
= raddr
;
4727 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4737 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4741 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4742 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4743 shm_regions
[i
].in_use
= false;
4744 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
4749 return get_errno(shmdt(g2h(shmaddr
)));
4752 #ifdef TARGET_NR_ipc
4753 /* ??? This only works with linear mappings. */
4754 /* do_ipc() must return target values and target errnos. */
4755 static abi_long
do_ipc(CPUArchState
*cpu_env
,
4756 unsigned int call
, abi_long first
,
4757 abi_long second
, abi_long third
,
4758 abi_long ptr
, abi_long fifth
)
4763 version
= call
>> 16;
4768 ret
= do_semop(first
, ptr
, second
);
4772 ret
= get_errno(semget(first
, second
, third
));
4775 case IPCOP_semctl
: {
4776 /* The semun argument to semctl is passed by value, so dereference the
4779 get_user_ual(atptr
, ptr
);
4780 ret
= do_semctl(first
, second
, third
, atptr
);
4785 ret
= get_errno(msgget(first
, second
));
4789 ret
= do_msgsnd(first
, ptr
, second
, third
);
4793 ret
= do_msgctl(first
, second
, ptr
);
4800 struct target_ipc_kludge
{
4805 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
4806 ret
= -TARGET_EFAULT
;
4810 ret
= do_msgrcv(first
, tswapal(tmp
->msgp
), second
, tswapal(tmp
->msgtyp
), third
);
4812 unlock_user_struct(tmp
, ptr
, 0);
4816 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
4825 raddr
= do_shmat(cpu_env
, first
, ptr
, second
);
4826 if (is_error(raddr
))
4827 return get_errno(raddr
);
4828 if (put_user_ual(raddr
, third
))
4829 return -TARGET_EFAULT
;
4833 ret
= -TARGET_EINVAL
;
4838 ret
= do_shmdt(ptr
);
4842 /* IPC_* flag values are the same on all linux platforms */
4843 ret
= get_errno(shmget(first
, second
, third
));
4846 /* IPC_* and SHM_* command values are the same on all linux platforms */
4848 ret
= do_shmctl(first
, second
, ptr
);
4851 gemu_log("Unsupported ipc call: %d (version %d)\n", call
, version
);
4852 ret
= -TARGET_ENOSYS
;
4859 /* kernel structure types definitions */
4861 #define STRUCT(name, ...) STRUCT_ ## name,
4862 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4864 #include "syscall_types.h"
4868 #undef STRUCT_SPECIAL
4870 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4871 #define STRUCT_SPECIAL(name)
4872 #include "syscall_types.h"
4874 #undef STRUCT_SPECIAL
4876 typedef struct IOCTLEntry IOCTLEntry
;
4878 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4879 int fd
, int cmd
, abi_long arg
);
4883 unsigned int host_cmd
;
4886 do_ioctl_fn
*do_ioctl
;
4887 const argtype arg_type
[5];
4890 #define IOC_R 0x0001
4891 #define IOC_W 0x0002
4892 #define IOC_RW (IOC_R | IOC_W)
4894 #define MAX_STRUCT_SIZE 4096
4896 #ifdef CONFIG_FIEMAP
4897 /* So fiemap access checks don't overflow on 32 bit systems.
4898 * This is very slightly smaller than the limit imposed by
4899 * the underlying kernel.
4901 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4902 / sizeof(struct fiemap_extent))
4904 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4905 int fd
, int cmd
, abi_long arg
)
4907 /* The parameter for this ioctl is a struct fiemap followed
4908 * by an array of struct fiemap_extent whose size is set
4909 * in fiemap->fm_extent_count. The array is filled in by the
4912 int target_size_in
, target_size_out
;
4914 const argtype
*arg_type
= ie
->arg_type
;
4915 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
4918 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
4922 assert(arg_type
[0] == TYPE_PTR
);
4923 assert(ie
->access
== IOC_RW
);
4925 target_size_in
= thunk_type_size(arg_type
, 0);
4926 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
4928 return -TARGET_EFAULT
;
4930 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4931 unlock_user(argptr
, arg
, 0);
4932 fm
= (struct fiemap
*)buf_temp
;
4933 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
4934 return -TARGET_EINVAL
;
4937 outbufsz
= sizeof (*fm
) +
4938 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
4940 if (outbufsz
> MAX_STRUCT_SIZE
) {
4941 /* We can't fit all the extents into the fixed size buffer.
4942 * Allocate one that is large enough and use it instead.
4944 fm
= g_try_malloc(outbufsz
);
4946 return -TARGET_ENOMEM
;
4948 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
4951 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, fm
));
4952 if (!is_error(ret
)) {
4953 target_size_out
= target_size_in
;
4954 /* An extent_count of 0 means we were only counting the extents
4955 * so there are no structs to copy
4957 if (fm
->fm_extent_count
!= 0) {
4958 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
4960 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
4962 ret
= -TARGET_EFAULT
;
4964 /* Convert the struct fiemap */
4965 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
4966 if (fm
->fm_extent_count
!= 0) {
4967 p
= argptr
+ target_size_in
;
4968 /* ...and then all the struct fiemap_extents */
4969 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
4970 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
4975 unlock_user(argptr
, arg
, target_size_out
);
4985 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4986 int fd
, int cmd
, abi_long arg
)
4988 const argtype
*arg_type
= ie
->arg_type
;
4992 struct ifconf
*host_ifconf
;
4994 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4995 int target_ifreq_size
;
5000 abi_long target_ifc_buf
;
5004 assert(arg_type
[0] == TYPE_PTR
);
5005 assert(ie
->access
== IOC_RW
);
5008 target_size
= thunk_type_size(arg_type
, 0);
5010 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5012 return -TARGET_EFAULT
;
5013 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5014 unlock_user(argptr
, arg
, 0);
5016 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
5017 target_ifc_len
= host_ifconf
->ifc_len
;
5018 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
5020 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
5021 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
5022 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
5024 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
5025 if (outbufsz
> MAX_STRUCT_SIZE
) {
5026 /* We can't fit all the extents into the fixed size buffer.
5027 * Allocate one that is large enough and use it instead.
5029 host_ifconf
= malloc(outbufsz
);
5031 return -TARGET_ENOMEM
;
5033 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
5036 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
5038 host_ifconf
->ifc_len
= host_ifc_len
;
5039 host_ifconf
->ifc_buf
= host_ifc_buf
;
5041 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
5042 if (!is_error(ret
)) {
5043 /* convert host ifc_len to target ifc_len */
5045 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
5046 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
5047 host_ifconf
->ifc_len
= target_ifc_len
;
5049 /* restore target ifc_buf */
5051 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
5053 /* copy struct ifconf to target user */
5055 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5057 return -TARGET_EFAULT
;
5058 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
5059 unlock_user(argptr
, arg
, target_size
);
5061 /* copy ifreq[] to target user */
5063 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
5064 for (i
= 0; i
< nb_ifreq
; i
++) {
5065 thunk_convert(argptr
+ i
* target_ifreq_size
,
5066 host_ifc_buf
+ i
* sizeof(struct ifreq
),
5067 ifreq_arg_type
, THUNK_TARGET
);
5069 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
5079 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5080 int cmd
, abi_long arg
)
5083 struct dm_ioctl
*host_dm
;
5084 abi_long guest_data
;
5085 uint32_t guest_data_size
;
5087 const argtype
*arg_type
= ie
->arg_type
;
5089 void *big_buf
= NULL
;
5093 target_size
= thunk_type_size(arg_type
, 0);
5094 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5096 ret
= -TARGET_EFAULT
;
5099 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5100 unlock_user(argptr
, arg
, 0);
5102 /* buf_temp is too small, so fetch things into a bigger buffer */
5103 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
5104 memcpy(big_buf
, buf_temp
, target_size
);
5108 guest_data
= arg
+ host_dm
->data_start
;
5109 if ((guest_data
- arg
) < 0) {
5110 ret
= -TARGET_EINVAL
;
5113 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5114 host_data
= (char*)host_dm
+ host_dm
->data_start
;
5116 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
5118 ret
= -TARGET_EFAULT
;
5122 switch (ie
->host_cmd
) {
5124 case DM_LIST_DEVICES
:
5127 case DM_DEV_SUSPEND
:
5130 case DM_TABLE_STATUS
:
5131 case DM_TABLE_CLEAR
:
5133 case DM_LIST_VERSIONS
:
5137 case DM_DEV_SET_GEOMETRY
:
5138 /* data contains only strings */
5139 memcpy(host_data
, argptr
, guest_data_size
);
5142 memcpy(host_data
, argptr
, guest_data_size
);
5143 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
5147 void *gspec
= argptr
;
5148 void *cur_data
= host_data
;
5149 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5150 int spec_size
= thunk_type_size(arg_type
, 0);
5153 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5154 struct dm_target_spec
*spec
= cur_data
;
5158 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
5159 slen
= strlen((char*)gspec
+ spec_size
) + 1;
5161 spec
->next
= sizeof(*spec
) + slen
;
5162 strcpy((char*)&spec
[1], gspec
+ spec_size
);
5164 cur_data
+= spec
->next
;
5169 ret
= -TARGET_EINVAL
;
5170 unlock_user(argptr
, guest_data
, 0);
5173 unlock_user(argptr
, guest_data
, 0);
5175 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5176 if (!is_error(ret
)) {
5177 guest_data
= arg
+ host_dm
->data_start
;
5178 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5179 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
5180 switch (ie
->host_cmd
) {
5185 case DM_DEV_SUSPEND
:
5188 case DM_TABLE_CLEAR
:
5190 case DM_DEV_SET_GEOMETRY
:
5191 /* no return data */
5193 case DM_LIST_DEVICES
:
5195 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
5196 uint32_t remaining_data
= guest_data_size
;
5197 void *cur_data
= argptr
;
5198 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
5199 int nl_size
= 12; /* can't use thunk_size due to alignment */
5202 uint32_t next
= nl
->next
;
5204 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
5206 if (remaining_data
< nl
->next
) {
5207 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5210 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
5211 strcpy(cur_data
+ nl_size
, nl
->name
);
5212 cur_data
+= nl
->next
;
5213 remaining_data
-= nl
->next
;
5217 nl
= (void*)nl
+ next
;
5222 case DM_TABLE_STATUS
:
5224 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
5225 void *cur_data
= argptr
;
5226 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5227 int spec_size
= thunk_type_size(arg_type
, 0);
5230 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5231 uint32_t next
= spec
->next
;
5232 int slen
= strlen((char*)&spec
[1]) + 1;
5233 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
5234 if (guest_data_size
< spec
->next
) {
5235 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5238 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
5239 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
5240 cur_data
= argptr
+ spec
->next
;
5241 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
5247 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
5248 int count
= *(uint32_t*)hdata
;
5249 uint64_t *hdev
= hdata
+ 8;
5250 uint64_t *gdev
= argptr
+ 8;
5253 *(uint32_t*)argptr
= tswap32(count
);
5254 for (i
= 0; i
< count
; i
++) {
5255 *gdev
= tswap64(*hdev
);
5261 case DM_LIST_VERSIONS
:
5263 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
5264 uint32_t remaining_data
= guest_data_size
;
5265 void *cur_data
= argptr
;
5266 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
5267 int vers_size
= thunk_type_size(arg_type
, 0);
5270 uint32_t next
= vers
->next
;
5272 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
5274 if (remaining_data
< vers
->next
) {
5275 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5278 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
5279 strcpy(cur_data
+ vers_size
, vers
->name
);
5280 cur_data
+= vers
->next
;
5281 remaining_data
-= vers
->next
;
5285 vers
= (void*)vers
+ next
;
5290 unlock_user(argptr
, guest_data
, 0);
5291 ret
= -TARGET_EINVAL
;
5294 unlock_user(argptr
, guest_data
, guest_data_size
);
5296 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5298 ret
= -TARGET_EFAULT
;
5301 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5302 unlock_user(argptr
, arg
, target_size
);
5309 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5310 int cmd
, abi_long arg
)
5314 const argtype
*arg_type
= ie
->arg_type
;
5315 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
5318 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
5319 struct blkpg_partition host_part
;
5321 /* Read and convert blkpg */
5323 target_size
= thunk_type_size(arg_type
, 0);
5324 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5326 ret
= -TARGET_EFAULT
;
5329 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5330 unlock_user(argptr
, arg
, 0);
5332 switch (host_blkpg
->op
) {
5333 case BLKPG_ADD_PARTITION
:
5334 case BLKPG_DEL_PARTITION
:
5335 /* payload is struct blkpg_partition */
5338 /* Unknown opcode */
5339 ret
= -TARGET_EINVAL
;
5343 /* Read and convert blkpg->data */
5344 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
5345 target_size
= thunk_type_size(part_arg_type
, 0);
5346 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5348 ret
= -TARGET_EFAULT
;
5351 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
5352 unlock_user(argptr
, arg
, 0);
5354 /* Swizzle the data pointer to our local copy and call! */
5355 host_blkpg
->data
= &host_part
;
5356 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
5362 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5363 int fd
, int cmd
, abi_long arg
)
5365 const argtype
*arg_type
= ie
->arg_type
;
5366 const StructEntry
*se
;
5367 const argtype
*field_types
;
5368 const int *dst_offsets
, *src_offsets
;
5371 abi_ulong
*target_rt_dev_ptr
;
5372 unsigned long *host_rt_dev_ptr
;
5376 assert(ie
->access
== IOC_W
);
5377 assert(*arg_type
== TYPE_PTR
);
5379 assert(*arg_type
== TYPE_STRUCT
);
5380 target_size
= thunk_type_size(arg_type
, 0);
5381 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5383 return -TARGET_EFAULT
;
5386 assert(*arg_type
== (int)STRUCT_rtentry
);
5387 se
= struct_entries
+ *arg_type
++;
5388 assert(se
->convert
[0] == NULL
);
5389 /* convert struct here to be able to catch rt_dev string */
5390 field_types
= se
->field_types
;
5391 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
5392 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
5393 for (i
= 0; i
< se
->nb_fields
; i
++) {
5394 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
5395 assert(*field_types
== TYPE_PTRVOID
);
5396 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
5397 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
5398 if (*target_rt_dev_ptr
!= 0) {
5399 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
5400 tswapal(*target_rt_dev_ptr
));
5401 if (!*host_rt_dev_ptr
) {
5402 unlock_user(argptr
, arg
, 0);
5403 return -TARGET_EFAULT
;
5406 *host_rt_dev_ptr
= 0;
5411 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
5412 argptr
+ src_offsets
[i
],
5413 field_types
, THUNK_HOST
);
5415 unlock_user(argptr
, arg
, 0);
5417 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5418 if (*host_rt_dev_ptr
!= 0) {
5419 unlock_user((void *)*host_rt_dev_ptr
,
5420 *target_rt_dev_ptr
, 0);
5425 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5426 int fd
, int cmd
, abi_long arg
)
5428 int sig
= target_to_host_signal(arg
);
5429 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
5432 static IOCTLEntry ioctl_entries
[] = {
5433 #define IOCTL(cmd, access, ...) \
5434 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5435 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5436 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5441 /* ??? Implement proper locking for ioctls. */
5442 /* do_ioctl() Must return target values and target errnos. */
5443 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
5445 const IOCTLEntry
*ie
;
5446 const argtype
*arg_type
;
5448 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
5454 if (ie
->target_cmd
== 0) {
5455 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
5456 return -TARGET_ENOSYS
;
5458 if (ie
->target_cmd
== cmd
)
5462 arg_type
= ie
->arg_type
;
5464 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
5467 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
5470 switch(arg_type
[0]) {
5473 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
5477 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
5481 target_size
= thunk_type_size(arg_type
, 0);
5482 switch(ie
->access
) {
5484 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5485 if (!is_error(ret
)) {
5486 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5488 return -TARGET_EFAULT
;
5489 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5490 unlock_user(argptr
, arg
, target_size
);
5494 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5496 return -TARGET_EFAULT
;
5497 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5498 unlock_user(argptr
, arg
, 0);
5499 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5503 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5505 return -TARGET_EFAULT
;
5506 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5507 unlock_user(argptr
, arg
, 0);
5508 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5509 if (!is_error(ret
)) {
5510 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5512 return -TARGET_EFAULT
;
5513 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5514 unlock_user(argptr
, arg
, target_size
);
5520 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5521 (long)cmd
, arg_type
[0]);
5522 ret
= -TARGET_ENOSYS
;
5528 static const bitmask_transtbl iflag_tbl
[] = {
5529 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
5530 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
5531 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
5532 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
5533 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
5534 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
5535 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
5536 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
5537 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
5538 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
5539 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
5540 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
5541 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
5542 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
5546 static const bitmask_transtbl oflag_tbl
[] = {
5547 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
5548 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
5549 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
5550 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
5551 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
5552 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
5553 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
5554 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
5555 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
5556 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
5557 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
5558 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
5559 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
5560 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
5561 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
5562 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
5563 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
5564 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
5565 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
5566 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
5567 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
5568 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
5569 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
5570 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
5574 static const bitmask_transtbl cflag_tbl
[] = {
5575 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
5576 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
5577 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
5578 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
5579 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
5580 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
5581 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
5582 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
5583 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
5584 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
5585 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
5586 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
5587 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
5588 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
5589 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
5590 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
5591 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
5592 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
5593 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
5594 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
5595 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
5596 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
5597 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
5598 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
5599 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
5600 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
5601 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
5602 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
5603 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
5604 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
5605 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
5609 static const bitmask_transtbl lflag_tbl
[] = {
5610 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
5611 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
5612 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
5613 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
5614 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
5615 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
5616 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
5617 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
5618 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
5619 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
5620 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
5621 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
5622 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
5623 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
5624 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5628 static void target_to_host_termios (void *dst
, const void *src
)
5630 struct host_termios
*host
= dst
;
5631 const struct target_termios
*target
= src
;
5634 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
5636 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
5638 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
5640 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
5641 host
->c_line
= target
->c_line
;
5643 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
5644 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
5645 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
5646 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
5647 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
5648 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
5649 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
5650 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
5651 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
5652 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
5653 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
5654 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
5655 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
5656 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
5657 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
5658 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
5659 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
5660 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
5663 static void host_to_target_termios (void *dst
, const void *src
)
5665 struct target_termios
*target
= dst
;
5666 const struct host_termios
*host
= src
;
5669 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
5671 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
5673 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
5675 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
5676 target
->c_line
= host
->c_line
;
5678 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
5679 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
5680 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
5681 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
5682 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
5683 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
5684 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
5685 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
5686 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
5687 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
5688 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
5689 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
5690 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
5691 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
5692 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
5693 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
5694 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
5695 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
5698 static const StructEntry struct_termios_def
= {
5699 .convert
= { host_to_target_termios
, target_to_host_termios
},
5700 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
5701 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
5704 static bitmask_transtbl mmap_flags_tbl
[] = {
5705 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
5706 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
5707 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
5708 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
5709 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
5710 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
5711 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
5712 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
5713 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
, MAP_NORESERVE
,
5718 #if defined(TARGET_I386)
5720 /* NOTE: there is really one LDT for all the threads */
5721 static uint8_t *ldt_table
;
5723 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
5730 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
5731 if (size
> bytecount
)
5733 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
5735 return -TARGET_EFAULT
;
5736 /* ??? Should this by byteswapped? */
5737 memcpy(p
, ldt_table
, size
);
5738 unlock_user(p
, ptr
, size
);
5742 /* XXX: add locking support */
5743 static abi_long
write_ldt(CPUX86State
*env
,
5744 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
5746 struct target_modify_ldt_ldt_s ldt_info
;
5747 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5748 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5749 int seg_not_present
, useable
, lm
;
5750 uint32_t *lp
, entry_1
, entry_2
;
5752 if (bytecount
!= sizeof(ldt_info
))
5753 return -TARGET_EINVAL
;
5754 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
5755 return -TARGET_EFAULT
;
5756 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5757 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5758 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5759 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5760 unlock_user_struct(target_ldt_info
, ptr
, 0);
5762 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
5763 return -TARGET_EINVAL
;
5764 seg_32bit
= ldt_info
.flags
& 1;
5765 contents
= (ldt_info
.flags
>> 1) & 3;
5766 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5767 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5768 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5769 useable
= (ldt_info
.flags
>> 6) & 1;
5773 lm
= (ldt_info
.flags
>> 7) & 1;
5775 if (contents
== 3) {
5777 return -TARGET_EINVAL
;
5778 if (seg_not_present
== 0)
5779 return -TARGET_EINVAL
;
5781 /* allocate the LDT */
5783 env
->ldt
.base
= target_mmap(0,
5784 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
5785 PROT_READ
|PROT_WRITE
,
5786 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
5787 if (env
->ldt
.base
== -1)
5788 return -TARGET_ENOMEM
;
5789 memset(g2h(env
->ldt
.base
), 0,
5790 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
5791 env
->ldt
.limit
= 0xffff;
5792 ldt_table
= g2h(env
->ldt
.base
);
5795 /* NOTE: same code as Linux kernel */
5796 /* Allow LDTs to be cleared by the user. */
5797 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5800 read_exec_only
== 1 &&
5802 limit_in_pages
== 0 &&
5803 seg_not_present
== 1 &&
5811 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5812 (ldt_info
.limit
& 0x0ffff);
5813 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5814 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5815 (ldt_info
.limit
& 0xf0000) |
5816 ((read_exec_only
^ 1) << 9) |
5818 ((seg_not_present
^ 1) << 15) |
5820 (limit_in_pages
<< 23) |
5824 entry_2
|= (useable
<< 20);
5826 /* Install the new entry ... */
5828 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
5829 lp
[0] = tswap32(entry_1
);
5830 lp
[1] = tswap32(entry_2
);
5834 /* specific and weird i386 syscalls */
5835 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
5836 unsigned long bytecount
)
5842 ret
= read_ldt(ptr
, bytecount
);
5845 ret
= write_ldt(env
, ptr
, bytecount
, 1);
5848 ret
= write_ldt(env
, ptr
, bytecount
, 0);
5851 ret
= -TARGET_ENOSYS
;
5857 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5858 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5860 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5861 struct target_modify_ldt_ldt_s ldt_info
;
5862 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5863 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5864 int seg_not_present
, useable
, lm
;
5865 uint32_t *lp
, entry_1
, entry_2
;
5868 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5869 if (!target_ldt_info
)
5870 return -TARGET_EFAULT
;
5871 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5872 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5873 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5874 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5875 if (ldt_info
.entry_number
== -1) {
5876 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
5877 if (gdt_table
[i
] == 0) {
5878 ldt_info
.entry_number
= i
;
5879 target_ldt_info
->entry_number
= tswap32(i
);
5884 unlock_user_struct(target_ldt_info
, ptr
, 1);
5886 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
5887 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
5888 return -TARGET_EINVAL
;
5889 seg_32bit
= ldt_info
.flags
& 1;
5890 contents
= (ldt_info
.flags
>> 1) & 3;
5891 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5892 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5893 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5894 useable
= (ldt_info
.flags
>> 6) & 1;
5898 lm
= (ldt_info
.flags
>> 7) & 1;
5901 if (contents
== 3) {
5902 if (seg_not_present
== 0)
5903 return -TARGET_EINVAL
;
5906 /* NOTE: same code as Linux kernel */
5907 /* Allow LDTs to be cleared by the user. */
5908 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5909 if ((contents
== 0 &&
5910 read_exec_only
== 1 &&
5912 limit_in_pages
== 0 &&
5913 seg_not_present
== 1 &&
5921 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5922 (ldt_info
.limit
& 0x0ffff);
5923 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5924 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5925 (ldt_info
.limit
& 0xf0000) |
5926 ((read_exec_only
^ 1) << 9) |
5928 ((seg_not_present
^ 1) << 15) |
5930 (limit_in_pages
<< 23) |
5935 /* Install the new entry ... */
5937 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
5938 lp
[0] = tswap32(entry_1
);
5939 lp
[1] = tswap32(entry_2
);
5943 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5945 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5946 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5947 uint32_t base_addr
, limit
, flags
;
5948 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
5949 int seg_not_present
, useable
, lm
;
5950 uint32_t *lp
, entry_1
, entry_2
;
5952 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5953 if (!target_ldt_info
)
5954 return -TARGET_EFAULT
;
5955 idx
= tswap32(target_ldt_info
->entry_number
);
5956 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
5957 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
5958 unlock_user_struct(target_ldt_info
, ptr
, 1);
5959 return -TARGET_EINVAL
;
5961 lp
= (uint32_t *)(gdt_table
+ idx
);
5962 entry_1
= tswap32(lp
[0]);
5963 entry_2
= tswap32(lp
[1]);
5965 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
5966 contents
= (entry_2
>> 10) & 3;
5967 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
5968 seg_32bit
= (entry_2
>> 22) & 1;
5969 limit_in_pages
= (entry_2
>> 23) & 1;
5970 useable
= (entry_2
>> 20) & 1;
5974 lm
= (entry_2
>> 21) & 1;
5976 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
5977 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
5978 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
5979 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
5980 base_addr
= (entry_1
>> 16) |
5981 (entry_2
& 0xff000000) |
5982 ((entry_2
& 0xff) << 16);
5983 target_ldt_info
->base_addr
= tswapal(base_addr
);
5984 target_ldt_info
->limit
= tswap32(limit
);
5985 target_ldt_info
->flags
= tswap32(flags
);
5986 unlock_user_struct(target_ldt_info
, ptr
, 1);
5989 #endif /* TARGET_I386 && TARGET_ABI32 */
5991 #ifndef TARGET_ABI32
5992 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
5999 case TARGET_ARCH_SET_GS
:
6000 case TARGET_ARCH_SET_FS
:
6001 if (code
== TARGET_ARCH_SET_GS
)
6005 cpu_x86_load_seg(env
, idx
, 0);
6006 env
->segs
[idx
].base
= addr
;
6008 case TARGET_ARCH_GET_GS
:
6009 case TARGET_ARCH_GET_FS
:
6010 if (code
== TARGET_ARCH_GET_GS
)
6014 val
= env
->segs
[idx
].base
;
6015 if (put_user(val
, addr
, abi_ulong
))
6016 ret
= -TARGET_EFAULT
;
6019 ret
= -TARGET_EINVAL
;
6026 #endif /* defined(TARGET_I386) */
6028 #define NEW_STACK_SIZE 0x40000
6031 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
6034 pthread_mutex_t mutex
;
6035 pthread_cond_t cond
;
6038 abi_ulong child_tidptr
;
6039 abi_ulong parent_tidptr
;
6043 static void *clone_func(void *arg
)
6045 new_thread_info
*info
= arg
;
6050 rcu_register_thread();
6052 cpu
= ENV_GET_CPU(env
);
6054 ts
= (TaskState
*)cpu
->opaque
;
6055 info
->tid
= gettid();
6056 cpu
->host_tid
= info
->tid
;
6058 if (info
->child_tidptr
)
6059 put_user_u32(info
->tid
, info
->child_tidptr
);
6060 if (info
->parent_tidptr
)
6061 put_user_u32(info
->tid
, info
->parent_tidptr
);
6062 /* Enable signals. */
6063 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
6064 /* Signal to the parent that we're ready. */
6065 pthread_mutex_lock(&info
->mutex
);
6066 pthread_cond_broadcast(&info
->cond
);
6067 pthread_mutex_unlock(&info
->mutex
);
6068 /* Wait until the parent has finshed initializing the tls state. */
6069 pthread_mutex_lock(&clone_lock
);
6070 pthread_mutex_unlock(&clone_lock
);
6076 /* do_fork() Must return host values and target errnos (unlike most
6077 do_*() functions). */
6078 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
6079 abi_ulong parent_tidptr
, target_ulong newtls
,
6080 abi_ulong child_tidptr
)
6082 CPUState
*cpu
= ENV_GET_CPU(env
);
6086 CPUArchState
*new_env
;
6089 flags
&= ~CLONE_IGNORED_FLAGS
;
6091 /* Emulate vfork() with fork() */
6092 if (flags
& CLONE_VFORK
)
6093 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
6095 if (flags
& CLONE_VM
) {
6096 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
6097 new_thread_info info
;
6098 pthread_attr_t attr
;
6100 if (((flags
& CLONE_THREAD_FLAGS
) != CLONE_THREAD_FLAGS
) ||
6101 (flags
& CLONE_INVALID_THREAD_FLAGS
)) {
6102 return -TARGET_EINVAL
;
6105 ts
= g_new0(TaskState
, 1);
6106 init_task_state(ts
);
6107 /* we create a new CPU instance. */
6108 new_env
= cpu_copy(env
);
6109 /* Init regs that differ from the parent. */
6110 cpu_clone_regs(new_env
, newsp
);
6111 new_cpu
= ENV_GET_CPU(new_env
);
6112 new_cpu
->opaque
= ts
;
6113 ts
->bprm
= parent_ts
->bprm
;
6114 ts
->info
= parent_ts
->info
;
6115 ts
->signal_mask
= parent_ts
->signal_mask
;
6117 if (flags
& CLONE_CHILD_CLEARTID
) {
6118 ts
->child_tidptr
= child_tidptr
;
6121 if (flags
& CLONE_SETTLS
) {
6122 cpu_set_tls (new_env
, newtls
);
6125 /* Grab a mutex so that thread setup appears atomic. */
6126 pthread_mutex_lock(&clone_lock
);
6128 memset(&info
, 0, sizeof(info
));
6129 pthread_mutex_init(&info
.mutex
, NULL
);
6130 pthread_mutex_lock(&info
.mutex
);
6131 pthread_cond_init(&info
.cond
, NULL
);
6133 if (flags
& CLONE_CHILD_SETTID
) {
6134 info
.child_tidptr
= child_tidptr
;
6136 if (flags
& CLONE_PARENT_SETTID
) {
6137 info
.parent_tidptr
= parent_tidptr
;
6140 ret
= pthread_attr_init(&attr
);
6141 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
6142 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
6143 /* It is not safe to deliver signals until the child has finished
6144 initializing, so temporarily block all signals. */
6145 sigfillset(&sigmask
);
6146 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
6148 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
6149 /* TODO: Free new CPU state if thread creation failed. */
6151 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
6152 pthread_attr_destroy(&attr
);
6154 /* Wait for the child to initialize. */
6155 pthread_cond_wait(&info
.cond
, &info
.mutex
);
6160 pthread_mutex_unlock(&info
.mutex
);
6161 pthread_cond_destroy(&info
.cond
);
6162 pthread_mutex_destroy(&info
.mutex
);
6163 pthread_mutex_unlock(&clone_lock
);
6165 /* if no CLONE_VM, we consider it is a fork */
6166 if (flags
& CLONE_INVALID_FORK_FLAGS
) {
6167 return -TARGET_EINVAL
;
6170 /* We can't support custom termination signals */
6171 if ((flags
& CSIGNAL
) != TARGET_SIGCHLD
) {
6172 return -TARGET_EINVAL
;
6175 if (block_signals()) {
6176 return -TARGET_ERESTARTSYS
;
6182 /* Child Process. */
6184 cpu_clone_regs(env
, newsp
);
6186 /* There is a race condition here. The parent process could
6187 theoretically read the TID in the child process before the child
6188 tid is set. This would require using either ptrace
6189 (not implemented) or having *_tidptr to point at a shared memory
6190 mapping. We can't repeat the spinlock hack used above because
6191 the child process gets its own copy of the lock. */
6192 if (flags
& CLONE_CHILD_SETTID
)
6193 put_user_u32(gettid(), child_tidptr
);
6194 if (flags
& CLONE_PARENT_SETTID
)
6195 put_user_u32(gettid(), parent_tidptr
);
6196 ts
= (TaskState
*)cpu
->opaque
;
6197 if (flags
& CLONE_SETTLS
)
6198 cpu_set_tls (env
, newtls
);
6199 if (flags
& CLONE_CHILD_CLEARTID
)
6200 ts
->child_tidptr
= child_tidptr
;
6208 /* warning : doesn't handle linux specific flags... */
6209 static int target_to_host_fcntl_cmd(int cmd
)
6212 case TARGET_F_DUPFD
:
6213 case TARGET_F_GETFD
:
6214 case TARGET_F_SETFD
:
6215 case TARGET_F_GETFL
:
6216 case TARGET_F_SETFL
:
6218 case TARGET_F_GETLK
:
6220 case TARGET_F_SETLK
:
6222 case TARGET_F_SETLKW
:
6224 case TARGET_F_GETOWN
:
6226 case TARGET_F_SETOWN
:
6228 case TARGET_F_GETSIG
:
6230 case TARGET_F_SETSIG
:
6232 #if TARGET_ABI_BITS == 32
6233 case TARGET_F_GETLK64
:
6235 case TARGET_F_SETLK64
:
6237 case TARGET_F_SETLKW64
:
6240 case TARGET_F_SETLEASE
:
6242 case TARGET_F_GETLEASE
:
6244 #ifdef F_DUPFD_CLOEXEC
6245 case TARGET_F_DUPFD_CLOEXEC
:
6246 return F_DUPFD_CLOEXEC
;
6248 case TARGET_F_NOTIFY
:
6251 case TARGET_F_GETOWN_EX
:
6255 case TARGET_F_SETOWN_EX
:
6259 case TARGET_F_SETPIPE_SZ
:
6260 return F_SETPIPE_SZ
;
6261 case TARGET_F_GETPIPE_SZ
:
6262 return F_GETPIPE_SZ
;
6265 return -TARGET_EINVAL
;
6267 return -TARGET_EINVAL
;
6270 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
6271 static const bitmask_transtbl flock_tbl
[] = {
6272 TRANSTBL_CONVERT(F_RDLCK
),
6273 TRANSTBL_CONVERT(F_WRLCK
),
6274 TRANSTBL_CONVERT(F_UNLCK
),
6275 TRANSTBL_CONVERT(F_EXLCK
),
6276 TRANSTBL_CONVERT(F_SHLCK
),
6280 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
6281 abi_ulong target_flock_addr
)
6283 struct target_flock
*target_fl
;
6286 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6287 return -TARGET_EFAULT
;
6290 __get_user(l_type
, &target_fl
->l_type
);
6291 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6292 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6293 __get_user(fl
->l_start
, &target_fl
->l_start
);
6294 __get_user(fl
->l_len
, &target_fl
->l_len
);
6295 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6296 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6300 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
6301 const struct flock64
*fl
)
6303 struct target_flock
*target_fl
;
6306 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6307 return -TARGET_EFAULT
;
6310 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6311 __put_user(l_type
, &target_fl
->l_type
);
6312 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6313 __put_user(fl
->l_start
, &target_fl
->l_start
);
6314 __put_user(fl
->l_len
, &target_fl
->l_len
);
6315 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6316 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6320 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
6321 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
6323 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6324 static inline abi_long
copy_from_user_eabi_flock64(struct flock64
*fl
,
6325 abi_ulong target_flock_addr
)
6327 struct target_eabi_flock64
*target_fl
;
6330 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6331 return -TARGET_EFAULT
;
6334 __get_user(l_type
, &target_fl
->l_type
);
6335 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6336 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6337 __get_user(fl
->l_start
, &target_fl
->l_start
);
6338 __get_user(fl
->l_len
, &target_fl
->l_len
);
6339 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6340 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6344 static inline abi_long
copy_to_user_eabi_flock64(abi_ulong target_flock_addr
,
6345 const struct flock64
*fl
)
6347 struct target_eabi_flock64
*target_fl
;
6350 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6351 return -TARGET_EFAULT
;
6354 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6355 __put_user(l_type
, &target_fl
->l_type
);
6356 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6357 __put_user(fl
->l_start
, &target_fl
->l_start
);
6358 __put_user(fl
->l_len
, &target_fl
->l_len
);
6359 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6360 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6365 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
6366 abi_ulong target_flock_addr
)
6368 struct target_flock64
*target_fl
;
6371 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6372 return -TARGET_EFAULT
;
6375 __get_user(l_type
, &target_fl
->l_type
);
6376 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6377 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6378 __get_user(fl
->l_start
, &target_fl
->l_start
);
6379 __get_user(fl
->l_len
, &target_fl
->l_len
);
6380 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6381 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6385 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
6386 const struct flock64
*fl
)
6388 struct target_flock64
*target_fl
;
6391 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6392 return -TARGET_EFAULT
;
6395 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6396 __put_user(l_type
, &target_fl
->l_type
);
6397 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6398 __put_user(fl
->l_start
, &target_fl
->l_start
);
6399 __put_user(fl
->l_len
, &target_fl
->l_len
);
6400 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6401 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6405 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
6407 struct flock64 fl64
;
6409 struct f_owner_ex fox
;
6410 struct target_f_owner_ex
*target_fox
;
6413 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
6415 if (host_cmd
== -TARGET_EINVAL
)
6419 case TARGET_F_GETLK
:
6420 ret
= copy_from_user_flock(&fl64
, arg
);
6424 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6426 ret
= copy_to_user_flock(arg
, &fl64
);
6430 case TARGET_F_SETLK
:
6431 case TARGET_F_SETLKW
:
6432 ret
= copy_from_user_flock(&fl64
, arg
);
6436 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6439 case TARGET_F_GETLK64
:
6440 ret
= copy_from_user_flock64(&fl64
, arg
);
6444 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6446 ret
= copy_to_user_flock64(arg
, &fl64
);
6449 case TARGET_F_SETLK64
:
6450 case TARGET_F_SETLKW64
:
6451 ret
= copy_from_user_flock64(&fl64
, arg
);
6455 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6458 case TARGET_F_GETFL
:
6459 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6461 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
6465 case TARGET_F_SETFL
:
6466 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
6467 target_to_host_bitmask(arg
,
6472 case TARGET_F_GETOWN_EX
:
6473 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6475 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
6476 return -TARGET_EFAULT
;
6477 target_fox
->type
= tswap32(fox
.type
);
6478 target_fox
->pid
= tswap32(fox
.pid
);
6479 unlock_user_struct(target_fox
, arg
, 1);
6485 case TARGET_F_SETOWN_EX
:
6486 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
6487 return -TARGET_EFAULT
;
6488 fox
.type
= tswap32(target_fox
->type
);
6489 fox
.pid
= tswap32(target_fox
->pid
);
6490 unlock_user_struct(target_fox
, arg
, 0);
6491 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6495 case TARGET_F_SETOWN
:
6496 case TARGET_F_GETOWN
:
6497 case TARGET_F_SETSIG
:
6498 case TARGET_F_GETSIG
:
6499 case TARGET_F_SETLEASE
:
6500 case TARGET_F_GETLEASE
:
6501 case TARGET_F_SETPIPE_SZ
:
6502 case TARGET_F_GETPIPE_SZ
:
6503 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6507 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
#ifdef USE_UID16

/* Squash a 32-bit uid into the legacy 16-bit range; out-of-range ids map
 * to the kernel's "overflow" id 65534. */
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

/* Widen a 16-bit uid; the 16-bit all-ones "no change" sentinel must stay
 * -1 after widening, hence the explicit int16_t comparison. */
static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}

static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */

/* 32-bit uid ABI: all conversions are identities. */
static inline int high2lowuid(int uid)
{
    return uid;
}

static inline int high2lowgid(int gid)
{
    return gid;
}

static inline int low2highuid(int uid)
{
    return uid;
}

static inline int low2highgid(int gid)
{
    return gid;
}

static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
6579 /* We must do direct syscalls for setting UID/GID, because we want to
6580 * implement the Linux system call semantics of "change only for this thread",
6581 * not the libc/POSIX semantics of "change for all threads in process".
6582 * (See http://ewontfix.com/17/ for more details.)
6583 * We use the 32-bit version of the syscalls if present; if it is not
6584 * then either the host architecture supports 32-bit UIDs natively with
6585 * the standard syscall, or the 16-bit UID is the best we can do.
6587 #ifdef __NR_setuid32
6588 #define __NR_sys_setuid __NR_setuid32
6590 #define __NR_sys_setuid __NR_setuid
6592 #ifdef __NR_setgid32
6593 #define __NR_sys_setgid __NR_setgid32
6595 #define __NR_sys_setgid __NR_setgid
6597 #ifdef __NR_setresuid32
6598 #define __NR_sys_setresuid __NR_setresuid32
6600 #define __NR_sys_setresuid __NR_setresuid
6602 #ifdef __NR_setresgid32
6603 #define __NR_sys_setresgid __NR_setresgid32
6605 #define __NR_sys_setresgid __NR_setresgid
6608 _syscall1(int, sys_setuid
, uid_t
, uid
)
6609 _syscall1(int, sys_setgid
, gid_t
, gid
)
6610 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
6611 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
6613 void syscall_init(void)
6616 const argtype
*arg_type
;
6620 thunk_init(STRUCT_MAX
);
6622 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6623 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6624 #include "syscall_types.h"
6626 #undef STRUCT_SPECIAL
6628 /* Build target_to_host_errno_table[] table from
6629 * host_to_target_errno_table[]. */
6630 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
6631 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
6634 /* we patch the ioctl size if necessary. We rely on the fact that
6635 no ioctl has all the bits at '1' in the size field */
6637 while (ie
->target_cmd
!= 0) {
6638 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
6639 TARGET_IOC_SIZEMASK
) {
6640 arg_type
= ie
->arg_type
;
6641 if (arg_type
[0] != TYPE_PTR
) {
6642 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
6647 size
= thunk_type_size(arg_type
, 0);
6648 ie
->target_cmd
= (ie
->target_cmd
&
6649 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
6650 (size
<< TARGET_IOC_SIZESHIFT
);
6653 /* automatic consistency check if same arch */
6654 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6655 (defined(__x86_64__) && defined(TARGET_X86_64))
6656 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
6657 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6658 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#if TARGET_ABI_BITS == 32
/* Reassemble a 64-bit offset passed to the guest syscall as a register
 * pair; which register holds the high half depends on guest endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABI: the offset already fits in one register. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* truncate64: on ABIs that align 64-bit register pairs the offset halves
 * arrive one slot later, so shift the arguments down first. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair alignment fixup as target_truncate64. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
6709 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
6710 abi_ulong target_addr
)
6712 struct target_timespec
*target_ts
;
6714 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
6715 return -TARGET_EFAULT
;
6716 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6717 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6718 unlock_user_struct(target_ts
, target_addr
, 0);
6722 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
6723 struct timespec
*host_ts
)
6725 struct target_timespec
*target_ts
;
6727 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
6728 return -TARGET_EFAULT
;
6729 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6730 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6731 unlock_user_struct(target_ts
, target_addr
, 1);
6735 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
6736 abi_ulong target_addr
)
6738 struct target_itimerspec
*target_itspec
;
6740 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
6741 return -TARGET_EFAULT
;
6744 host_itspec
->it_interval
.tv_sec
=
6745 tswapal(target_itspec
->it_interval
.tv_sec
);
6746 host_itspec
->it_interval
.tv_nsec
=
6747 tswapal(target_itspec
->it_interval
.tv_nsec
);
6748 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
6749 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
6751 unlock_user_struct(target_itspec
, target_addr
, 1);
6755 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
6756 struct itimerspec
*host_its
)
6758 struct target_itimerspec
*target_itspec
;
6760 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
6761 return -TARGET_EFAULT
;
6764 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
6765 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
6767 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
6768 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
6770 unlock_user_struct(target_itspec
, target_addr
, 0);
6774 static inline abi_long
target_to_host_timex(struct timex
*host_tx
,
6775 abi_long target_addr
)
6777 struct target_timex
*target_tx
;
6779 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
6780 return -TARGET_EFAULT
;
6783 __get_user(host_tx
->modes
, &target_tx
->modes
);
6784 __get_user(host_tx
->offset
, &target_tx
->offset
);
6785 __get_user(host_tx
->freq
, &target_tx
->freq
);
6786 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6787 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
6788 __get_user(host_tx
->status
, &target_tx
->status
);
6789 __get_user(host_tx
->constant
, &target_tx
->constant
);
6790 __get_user(host_tx
->precision
, &target_tx
->precision
);
6791 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6792 __get_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6793 __get_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6794 __get_user(host_tx
->tick
, &target_tx
->tick
);
6795 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6796 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
6797 __get_user(host_tx
->shift
, &target_tx
->shift
);
6798 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
6799 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
6800 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
6801 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6802 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6803 __get_user(host_tx
->tai
, &target_tx
->tai
);
6805 unlock_user_struct(target_tx
, target_addr
, 0);
6809 static inline abi_long
host_to_target_timex(abi_long target_addr
,
6810 struct timex
*host_tx
)
6812 struct target_timex
*target_tx
;
6814 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
6815 return -TARGET_EFAULT
;
6818 __put_user(host_tx
->modes
, &target_tx
->modes
);
6819 __put_user(host_tx
->offset
, &target_tx
->offset
);
6820 __put_user(host_tx
->freq
, &target_tx
->freq
);
6821 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6822 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
6823 __put_user(host_tx
->status
, &target_tx
->status
);
6824 __put_user(host_tx
->constant
, &target_tx
->constant
);
6825 __put_user(host_tx
->precision
, &target_tx
->precision
);
6826 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6827 __put_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6828 __put_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6829 __put_user(host_tx
->tick
, &target_tx
->tick
);
6830 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6831 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
6832 __put_user(host_tx
->shift
, &target_tx
->shift
);
6833 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
6834 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
6835 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
6836 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6837 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6838 __put_user(host_tx
->tai
, &target_tx
->tai
);
6840 unlock_user_struct(target_tx
, target_addr
, 1);
6845 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
6846 abi_ulong target_addr
)
6848 struct target_sigevent
*target_sevp
;
6850 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
6851 return -TARGET_EFAULT
;
6854 /* This union is awkward on 64 bit systems because it has a 32 bit
6855 * integer and a pointer in it; we follow the conversion approach
6856 * used for handling sigval types in signal.c so the guest should get
6857 * the correct value back even if we did a 64 bit byteswap and it's
6858 * using the 32 bit integer.
6860 host_sevp
->sigev_value
.sival_ptr
=
6861 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
6862 host_sevp
->sigev_signo
=
6863 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
6864 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
6865 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
6867 unlock_user_struct(target_sevp
, target_addr
, 1);
#if defined(TARGET_NR_mlockall)
/* Translate the guest mlockall() flag bits into host MCL_* bits. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }

    return result;
}
#endif
6886 static inline abi_long
host_to_target_stat64(void *cpu_env
,
6887 abi_ulong target_addr
,
6888 struct stat
*host_st
)
6890 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6891 if (((CPUARMState
*)cpu_env
)->eabi
) {
6892 struct target_eabi_stat64
*target_st
;
6894 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
6895 return -TARGET_EFAULT
;
6896 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
6897 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
6898 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
6899 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6900 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
6902 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
6903 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
6904 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
6905 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
6906 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
6907 __put_user(host_st
->st_size
, &target_st
->st_size
);
6908 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
6909 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
6910 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
6911 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
6912 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
6913 unlock_user_struct(target_st
, target_addr
, 1);
6917 #if defined(TARGET_HAS_STRUCT_STAT64)
6918 struct target_stat64
*target_st
;
6920 struct target_stat
*target_st
;
6923 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
6924 return -TARGET_EFAULT
;
6925 memset(target_st
, 0, sizeof(*target_st
));
6926 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
6927 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
6928 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6929 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
6931 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
6932 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
6933 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
6934 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
6935 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
6936 /* XXX: better use of kernel struct */
6937 __put_user(host_st
->st_size
, &target_st
->st_size
);
6938 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
6939 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
6940 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
6941 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
6942 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
6943 unlock_user_struct(target_st
, target_addr
, 1);
6949 /* ??? Using host futex calls even when target atomic operations
6950 are not really atomic probably breaks things. However implementing
6951 futexes locally would make futexes shared between multiple processes
6952 tricky. However they're probably useless because guest atomic
6953 operations won't work either. */
6954 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
6955 target_ulong uaddr2
, int val3
)
6957 struct timespec ts
, *pts
;
6960 /* ??? We assume FUTEX_* constants are the same on both host
6962 #ifdef FUTEX_CMD_MASK
6963 base_op
= op
& FUTEX_CMD_MASK
;
6969 case FUTEX_WAIT_BITSET
:
6972 target_to_host_timespec(pts
, timeout
);
6976 return get_errno(safe_futex(g2h(uaddr
), op
, tswap32(val
),
6979 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6981 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6983 case FUTEX_CMP_REQUEUE
:
6985 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6986 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6987 But the prototype takes a `struct timespec *'; insert casts
6988 to satisfy the compiler. We do not need to tswap TIMEOUT
6989 since it's not compared to guest memory. */
6990 pts
= (struct timespec
*)(uintptr_t) timeout
;
6991 return get_errno(safe_futex(g2h(uaddr
), op
, val
, pts
,
6993 (base_op
== FUTEX_CMP_REQUEUE
6997 return -TARGET_ENOSYS
;
7000 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7001 static abi_long
do_name_to_handle_at(abi_long dirfd
, abi_long pathname
,
7002 abi_long handle
, abi_long mount_id
,
7005 struct file_handle
*target_fh
;
7006 struct file_handle
*fh
;
7010 unsigned int size
, total_size
;
7012 if (get_user_s32(size
, handle
)) {
7013 return -TARGET_EFAULT
;
7016 name
= lock_user_string(pathname
);
7018 return -TARGET_EFAULT
;
7021 total_size
= sizeof(struct file_handle
) + size
;
7022 target_fh
= lock_user(VERIFY_WRITE
, handle
, total_size
, 0);
7024 unlock_user(name
, pathname
, 0);
7025 return -TARGET_EFAULT
;
7028 fh
= g_malloc0(total_size
);
7029 fh
->handle_bytes
= size
;
7031 ret
= get_errno(name_to_handle_at(dirfd
, path(name
), fh
, &mid
, flags
));
7032 unlock_user(name
, pathname
, 0);
7034 /* man name_to_handle_at(2):
7035 * Other than the use of the handle_bytes field, the caller should treat
7036 * the file_handle structure as an opaque data type
7039 memcpy(target_fh
, fh
, total_size
);
7040 target_fh
->handle_bytes
= tswap32(fh
->handle_bytes
);
7041 target_fh
->handle_type
= tswap32(fh
->handle_type
);
7043 unlock_user(target_fh
, handle
, total_size
);
7045 if (put_user_s32(mid
, mount_id
)) {
7046 return -TARGET_EFAULT
;
7054 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7055 static abi_long
do_open_by_handle_at(abi_long mount_fd
, abi_long handle
,
7058 struct file_handle
*target_fh
;
7059 struct file_handle
*fh
;
7060 unsigned int size
, total_size
;
7063 if (get_user_s32(size
, handle
)) {
7064 return -TARGET_EFAULT
;
7067 total_size
= sizeof(struct file_handle
) + size
;
7068 target_fh
= lock_user(VERIFY_READ
, handle
, total_size
, 1);
7070 return -TARGET_EFAULT
;
7073 fh
= g_memdup(target_fh
, total_size
);
7074 fh
->handle_bytes
= size
;
7075 fh
->handle_type
= tswap32(target_fh
->handle_type
);
7077 ret
= get_errno(open_by_handle_at(mount_fd
, fh
,
7078 target_to_host_bitmask(flags
, fcntl_flags_tbl
)));
7082 unlock_user(target_fh
, handle
, total_size
);
7088 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7090 /* signalfd siginfo conversion */
7093 host_to_target_signalfd_siginfo(struct signalfd_siginfo
*tinfo
,
7094 const struct signalfd_siginfo
*info
)
7096 int sig
= host_to_target_signal(info
->ssi_signo
);
7098 /* linux/signalfd.h defines a ssi_addr_lsb
7099 * not defined in sys/signalfd.h but used by some kernels
7102 #ifdef BUS_MCEERR_AO
7103 if (tinfo
->ssi_signo
== SIGBUS
&&
7104 (tinfo
->ssi_code
== BUS_MCEERR_AR
||
7105 tinfo
->ssi_code
== BUS_MCEERR_AO
)) {
7106 uint16_t *ssi_addr_lsb
= (uint16_t *)(&info
->ssi_addr
+ 1);
7107 uint16_t *tssi_addr_lsb
= (uint16_t *)(&tinfo
->ssi_addr
+ 1);
7108 *tssi_addr_lsb
= tswap16(*ssi_addr_lsb
);
7112 tinfo
->ssi_signo
= tswap32(sig
);
7113 tinfo
->ssi_errno
= tswap32(tinfo
->ssi_errno
);
7114 tinfo
->ssi_code
= tswap32(info
->ssi_code
);
7115 tinfo
->ssi_pid
= tswap32(info
->ssi_pid
);
7116 tinfo
->ssi_uid
= tswap32(info
->ssi_uid
);
7117 tinfo
->ssi_fd
= tswap32(info
->ssi_fd
);
7118 tinfo
->ssi_tid
= tswap32(info
->ssi_tid
);
7119 tinfo
->ssi_band
= tswap32(info
->ssi_band
);
7120 tinfo
->ssi_overrun
= tswap32(info
->ssi_overrun
);
7121 tinfo
->ssi_trapno
= tswap32(info
->ssi_trapno
);
7122 tinfo
->ssi_status
= tswap32(info
->ssi_status
);
7123 tinfo
->ssi_int
= tswap32(info
->ssi_int
);
7124 tinfo
->ssi_ptr
= tswap64(info
->ssi_ptr
);
7125 tinfo
->ssi_utime
= tswap64(info
->ssi_utime
);
7126 tinfo
->ssi_stime
= tswap64(info
->ssi_stime
);
7127 tinfo
->ssi_addr
= tswap64(info
->ssi_addr
);
7130 static abi_long
host_to_target_data_signalfd(void *buf
, size_t len
)
7134 for (i
= 0; i
< len
; i
+= sizeof(struct signalfd_siginfo
)) {
7135 host_to_target_signalfd_siginfo(buf
+ i
, buf
+ i
);
7141 static TargetFdTrans target_signalfd_trans
= {
7142 .host_to_target_data
= host_to_target_data_signalfd
,
7145 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
7148 target_sigset_t
*target_mask
;
7152 if (flags
& ~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
)) {
7153 return -TARGET_EINVAL
;
7155 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
7156 return -TARGET_EFAULT
;
7159 target_to_host_sigset(&host_mask
, target_mask
);
7161 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
7163 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
7165 fd_trans_register(ret
, &target_signalfd_trans
);
7168 unlock_user_struct(target_mask
, mask
, 0);
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Terminating signal lives in the low 7 bits. */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Stop signal lives in bits 8-15. */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
7188 static int open_self_cmdline(void *cpu_env
, int fd
)
7191 bool word_skipped
= false;
7193 fd_orig
= open("/proc/self/cmdline", O_RDONLY
);
7203 nb_read
= read(fd_orig
, buf
, sizeof(buf
));
7206 fd_orig
= close(fd_orig
);
7209 } else if (nb_read
== 0) {
7213 if (!word_skipped
) {
7214 /* Skip the first string, which is the path to qemu-*-static
7215 instead of the actual command. */
7216 cp_buf
= memchr(buf
, 0, nb_read
);
7218 /* Null byte found, skip one string */
7220 nb_read
-= cp_buf
- buf
;
7221 word_skipped
= true;
7226 if (write(fd
, cp_buf
, nb_read
) != nb_read
) {
7235 return close(fd_orig
);
7238 static int open_self_maps(void *cpu_env
, int fd
)
7240 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7241 TaskState
*ts
= cpu
->opaque
;
7247 fp
= fopen("/proc/self/maps", "r");
7252 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7253 int fields
, dev_maj
, dev_min
, inode
;
7254 uint64_t min
, max
, offset
;
7255 char flag_r
, flag_w
, flag_x
, flag_p
;
7256 char path
[512] = "";
7257 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
7258 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
7259 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
7261 if ((fields
< 10) || (fields
> 11)) {
7264 if (h2g_valid(min
)) {
7265 int flags
= page_get_flags(h2g(min
));
7266 max
= h2g_valid(max
- 1) ? max
: (uintptr_t)g2h(GUEST_ADDR_MAX
);
7267 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
7270 if (h2g(min
) == ts
->info
->stack_limit
) {
7271 pstrcpy(path
, sizeof(path
), " [stack]");
7273 dprintf(fd
, TARGET_ABI_FMT_lx
"-" TARGET_ABI_FMT_lx
7274 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
7275 h2g(min
), h2g(max
- 1) + 1, flag_r
, flag_w
,
7276 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
7277 path
[0] ? " " : "", path
);
7287 static int open_self_stat(void *cpu_env
, int fd
)
7289 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7290 TaskState
*ts
= cpu
->opaque
;
7291 abi_ulong start_stack
= ts
->info
->start_stack
;
7294 for (i
= 0; i
< 44; i
++) {
7302 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
7303 } else if (i
== 1) {
7305 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
7306 } else if (i
== 27) {
7309 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
7311 /* for the rest, there is MasterCard */
7312 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
7316 if (write(fd
, buf
, len
) != len
) {
7324 static int open_self_auxv(void *cpu_env
, int fd
)
7326 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7327 TaskState
*ts
= cpu
->opaque
;
7328 abi_ulong auxv
= ts
->info
->saved_auxv
;
7329 abi_ulong len
= ts
->info
->auxv_len
;
7333 * Auxiliary vector is stored in target process stack.
7334 * read in whole auxv vector and copy it to file
7336 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
7340 r
= write(fd
, ptr
, len
);
7347 lseek(fd
, 0, SEEK_SET
);
7348 unlock_user(ptr
, auxv
, len
);
/* Return non-zero if 'filename' refers to this process's own procfs entry
 * named 'entry' — i.e. "/proc/self/<entry>" or "/proc/<our pid>/<entry>".
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            /* Numeric pid directory: only matches if it is OUR pid. */
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}
7378 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/*
 * is_proc: exact-match predicate for the fake_open table, used for
 * entries keyed by a full pathname (e.g. "/proc/net/route") rather
 * than a per-process /proc/self component.
 */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
/*
 * open_net_route: emulate /proc/net/route for a guest whose endianness
 * differs from the host (only compiled in that configuration).  Reads
 * the host's routing table line by line and rewrites the address
 * fields byte-swapped so the guest sees them in its own byte order.
 * NOTE(review): elided listing — error handling/cleanup lines between
 * the fused numbers are missing from this excerpt.
 */
7384 static int open_net_route(void *cpu_env
, int fd
)
7391 fp
= fopen("/proc/net/route", "r");
/* First line is the column header: copy it through verbatim. */
7398 read
= getline(&line
, &len
, fp
);
7399 dprintf(fd
, "%s", line
);
/* One routing entry per remaining line. */
7403 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7405 uint32_t dest
, gw
, mask
;
7406 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
/* Parse the host-format entry ... */
7407 sscanf(line
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7408 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
7409 &mask
, &mtu
, &window
, &irtt
);
/* ... and re-emit it with dest/gw/mask byte-swapped for the guest;
 * the remaining integer fields are plain decimals and need no swap. */
7410 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7411 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
7412 metric
, tswap32(mask
), mtu
, window
, irtt
);
/*
 * do_openat: openat(2) for the guest, intercepting paths that must be
 * faked because the host's real /proc would describe the wrong process
 * or the wrong byte order.  Matching entries are materialized into an
 * unlinked temporary file filled by a per-entry callback; everything
 * else falls through to a real safe_openat().
 * NOTE(review): elided listing — several lines (error paths, unlink of
 * the temp file, the no-match bailout) are missing from this excerpt.
 */
7422 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
7425 const char *filename
;
/* fill: writes the fake contents into the temp fd.
 * cmp:  decides whether a guest pathname matches a table entry. */
7426 int (*fill
)(void *cpu_env
, int fd
);
7427 int (*cmp
)(const char *s1
, const char *s2
);
7429 const struct fake_open
*fake_open
;
/* Table of /proc entries that must be emulated; NULL-terminated. */
7430 static const struct fake_open fakes
[] = {
7431 { "maps", open_self_maps
, is_proc_myself
},
7432 { "stat", open_self_stat
, is_proc_myself
},
7433 { "auxv", open_self_auxv
, is_proc_myself
},
7434 { "cmdline", open_self_cmdline
, is_proc_myself
},
/* /proc/net/route only needs faking when endianness differs. */
7435 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7436 { "/proc/net/route", open_net_route
, is_proc
},
7438 { NULL
, NULL
, NULL
}
/* /proc/self/exe: reopen the guest binary itself, preferring the fd
 * the loader left us in AT_EXECFD over re-resolving exec_path. */
7441 if (is_proc_myself(pathname
, "exe")) {
7442 int execfd
= qemu_getauxval(AT_EXECFD
);
7443 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
/* Scan the fake table for a pathname match. */
7446 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
7447 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
7452 if (fake_open
->filename
) {
7454 char filename
[PATH_MAX
];
7457 /* create temporary file to map stat to */
7458 tmpdir
= getenv("TMPDIR");
7461 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
7462 fd
= mkstemp(filename
);
/* Populate the temp file; a nonzero fill() result is an error. */
7468 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
/* Rewind so the guest reads the fake contents from the start. */
7474 lseek(fd
, 0, SEEK_SET
);
/* No interception needed: do the real openat on the (remapped) path. */
7479 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
7482 #define TIMER_MAGIC 0x0caf0000
7483 #define TIMER_MAGIC_MASK 0xffff0000
7485 /* Convert QEMU provided timer ID back to internal 16bit index format */
/*
 * get_timer_id: validate a guest-visible POSIX timer id and map it back
 * to the internal g_posix_timers[] index.  Guest ids carry TIMER_MAGIC
 * in their high 16 bits; anything else is rejected with -TARGET_EINVAL.
 * NOTE(review): elided listing — the lines that strip the magic bits
 * and return the final index are missing from this excerpt.
 */
7486 static target_timer_t
get_timer_id(abi_long arg
)
7488 target_timer_t timerid
= arg
;
/* High half must be exactly the magic cookie we handed out. */
7490 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
7491 return -TARGET_EINVAL
;
/* Index (low half) must fall inside the timer table. */
7496 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
7497 return -TARGET_EINVAL
;
7503 /* do_syscall() should always have a single exit point at the end so
7504 that actions, such as logging of syscall results, can be performed.
7505 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7506 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
7507 abi_long arg2
, abi_long arg3
, abi_long arg4
,
7508 abi_long arg5
, abi_long arg6
, abi_long arg7
,
7511 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
7517 #if defined(DEBUG_ERESTARTSYS)
7518 /* Debug-only code for exercising the syscall-restart code paths
7519 * in the per-architecture cpu main loops: restart every syscall
7520 * the guest makes once before letting it through.
7527 return -TARGET_ERESTARTSYS
;
7533 gemu_log("syscall %d", num
);
7535 trace_guest_user_syscall(cpu
, num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
7537 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7540 case TARGET_NR_exit
:
7541 /* In old applications this may be used to implement _exit(2).
7542 However in threaded applictions it is used for thread termination,
7543 and _exit_group is used for application termination.
7544 Do thread termination if we have more then one thread. */
7546 if (block_signals()) {
7547 ret
= -TARGET_ERESTARTSYS
;
7553 if (CPU_NEXT(first_cpu
)) {
7556 /* Remove the CPU from the list. */
7557 QTAILQ_REMOVE(&cpus
, cpu
, node
);
7562 if (ts
->child_tidptr
) {
7563 put_user_u32(0, ts
->child_tidptr
);
7564 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
7568 object_unref(OBJECT(cpu
));
7570 rcu_unregister_thread();
7578 gdb_exit(cpu_env
, arg1
);
7580 ret
= 0; /* avoid warning */
7582 case TARGET_NR_read
:
7586 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7588 ret
= get_errno(safe_read(arg1
, p
, arg3
));
7590 fd_trans_host_to_target_data(arg1
)) {
7591 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
7593 unlock_user(p
, arg2
, ret
);
7596 case TARGET_NR_write
:
7597 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
7599 ret
= get_errno(safe_write(arg1
, p
, arg3
));
7600 unlock_user(p
, arg2
, 0);
7602 #ifdef TARGET_NR_open
7603 case TARGET_NR_open
:
7604 if (!(p
= lock_user_string(arg1
)))
7606 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
7607 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
7609 fd_trans_unregister(ret
);
7610 unlock_user(p
, arg1
, 0);
7613 case TARGET_NR_openat
:
7614 if (!(p
= lock_user_string(arg2
)))
7616 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
7617 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
7619 fd_trans_unregister(ret
);
7620 unlock_user(p
, arg2
, 0);
7622 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7623 case TARGET_NR_name_to_handle_at
:
7624 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
7627 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7628 case TARGET_NR_open_by_handle_at
:
7629 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
7630 fd_trans_unregister(ret
);
7633 case TARGET_NR_close
:
7634 fd_trans_unregister(arg1
);
7635 ret
= get_errno(close(arg1
));
7640 #ifdef TARGET_NR_fork
7641 case TARGET_NR_fork
:
7642 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
7645 #ifdef TARGET_NR_waitpid
7646 case TARGET_NR_waitpid
:
7649 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
7650 if (!is_error(ret
) && arg2
&& ret
7651 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
7656 #ifdef TARGET_NR_waitid
7657 case TARGET_NR_waitid
:
7661 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
7662 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
7663 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
7665 host_to_target_siginfo(p
, &info
);
7666 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
7671 #ifdef TARGET_NR_creat /* not on alpha */
7672 case TARGET_NR_creat
:
7673 if (!(p
= lock_user_string(arg1
)))
7675 ret
= get_errno(creat(p
, arg2
));
7676 fd_trans_unregister(ret
);
7677 unlock_user(p
, arg1
, 0);
7680 #ifdef TARGET_NR_link
7681 case TARGET_NR_link
:
7684 p
= lock_user_string(arg1
);
7685 p2
= lock_user_string(arg2
);
7687 ret
= -TARGET_EFAULT
;
7689 ret
= get_errno(link(p
, p2
));
7690 unlock_user(p2
, arg2
, 0);
7691 unlock_user(p
, arg1
, 0);
7695 #if defined(TARGET_NR_linkat)
7696 case TARGET_NR_linkat
:
7701 p
= lock_user_string(arg2
);
7702 p2
= lock_user_string(arg4
);
7704 ret
= -TARGET_EFAULT
;
7706 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
7707 unlock_user(p
, arg2
, 0);
7708 unlock_user(p2
, arg4
, 0);
7712 #ifdef TARGET_NR_unlink
7713 case TARGET_NR_unlink
:
7714 if (!(p
= lock_user_string(arg1
)))
7716 ret
= get_errno(unlink(p
));
7717 unlock_user(p
, arg1
, 0);
7720 #if defined(TARGET_NR_unlinkat)
7721 case TARGET_NR_unlinkat
:
7722 if (!(p
= lock_user_string(arg2
)))
7724 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
7725 unlock_user(p
, arg2
, 0);
7728 case TARGET_NR_execve
:
7730 char **argp
, **envp
;
7733 abi_ulong guest_argp
;
7734 abi_ulong guest_envp
;
7741 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
7742 if (get_user_ual(addr
, gp
))
7750 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
7751 if (get_user_ual(addr
, gp
))
7758 argp
= alloca((argc
+ 1) * sizeof(void *));
7759 envp
= alloca((envc
+ 1) * sizeof(void *));
7761 for (gp
= guest_argp
, q
= argp
; gp
;
7762 gp
+= sizeof(abi_ulong
), q
++) {
7763 if (get_user_ual(addr
, gp
))
7767 if (!(*q
= lock_user_string(addr
)))
7769 total_size
+= strlen(*q
) + 1;
7773 for (gp
= guest_envp
, q
= envp
; gp
;
7774 gp
+= sizeof(abi_ulong
), q
++) {
7775 if (get_user_ual(addr
, gp
))
7779 if (!(*q
= lock_user_string(addr
)))
7781 total_size
+= strlen(*q
) + 1;
7785 if (!(p
= lock_user_string(arg1
)))
7787 /* Although execve() is not an interruptible syscall it is
7788 * a special case where we must use the safe_syscall wrapper:
7789 * if we allow a signal to happen before we make the host
7790 * syscall then we will 'lose' it, because at the point of
7791 * execve the process leaves QEMU's control. So we use the
7792 * safe syscall wrapper to ensure that we either take the
7793 * signal as a guest signal, or else it does not happen
7794 * before the execve completes and makes it the other
7795 * program's problem.
7797 ret
= get_errno(safe_execve(p
, argp
, envp
));
7798 unlock_user(p
, arg1
, 0);
7803 ret
= -TARGET_EFAULT
;
7806 for (gp
= guest_argp
, q
= argp
; *q
;
7807 gp
+= sizeof(abi_ulong
), q
++) {
7808 if (get_user_ual(addr
, gp
)
7811 unlock_user(*q
, addr
, 0);
7813 for (gp
= guest_envp
, q
= envp
; *q
;
7814 gp
+= sizeof(abi_ulong
), q
++) {
7815 if (get_user_ual(addr
, gp
)
7818 unlock_user(*q
, addr
, 0);
7822 case TARGET_NR_chdir
:
7823 if (!(p
= lock_user_string(arg1
)))
7825 ret
= get_errno(chdir(p
));
7826 unlock_user(p
, arg1
, 0);
7828 #ifdef TARGET_NR_time
7829 case TARGET_NR_time
:
7832 ret
= get_errno(time(&host_time
));
7835 && put_user_sal(host_time
, arg1
))
7840 #ifdef TARGET_NR_mknod
7841 case TARGET_NR_mknod
:
7842 if (!(p
= lock_user_string(arg1
)))
7844 ret
= get_errno(mknod(p
, arg2
, arg3
));
7845 unlock_user(p
, arg1
, 0);
7848 #if defined(TARGET_NR_mknodat)
7849 case TARGET_NR_mknodat
:
7850 if (!(p
= lock_user_string(arg2
)))
7852 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
7853 unlock_user(p
, arg2
, 0);
7856 #ifdef TARGET_NR_chmod
7857 case TARGET_NR_chmod
:
7858 if (!(p
= lock_user_string(arg1
)))
7860 ret
= get_errno(chmod(p
, arg2
));
7861 unlock_user(p
, arg1
, 0);
7864 #ifdef TARGET_NR_break
7865 case TARGET_NR_break
:
7868 #ifdef TARGET_NR_oldstat
7869 case TARGET_NR_oldstat
:
7872 case TARGET_NR_lseek
:
7873 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
7875 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7876 /* Alpha specific */
7877 case TARGET_NR_getxpid
:
7878 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
7879 ret
= get_errno(getpid());
7882 #ifdef TARGET_NR_getpid
7883 case TARGET_NR_getpid
:
7884 ret
= get_errno(getpid());
7887 case TARGET_NR_mount
:
7889 /* need to look at the data field */
7893 p
= lock_user_string(arg1
);
7901 p2
= lock_user_string(arg2
);
7904 unlock_user(p
, arg1
, 0);
7910 p3
= lock_user_string(arg3
);
7913 unlock_user(p
, arg1
, 0);
7915 unlock_user(p2
, arg2
, 0);
7922 /* FIXME - arg5 should be locked, but it isn't clear how to
7923 * do that since it's not guaranteed to be a NULL-terminated
7927 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
7929 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
7931 ret
= get_errno(ret
);
7934 unlock_user(p
, arg1
, 0);
7936 unlock_user(p2
, arg2
, 0);
7938 unlock_user(p3
, arg3
, 0);
7942 #ifdef TARGET_NR_umount
7943 case TARGET_NR_umount
:
7944 if (!(p
= lock_user_string(arg1
)))
7946 ret
= get_errno(umount(p
));
7947 unlock_user(p
, arg1
, 0);
7950 #ifdef TARGET_NR_stime /* not on alpha */
7951 case TARGET_NR_stime
:
7954 if (get_user_sal(host_time
, arg1
))
7956 ret
= get_errno(stime(&host_time
));
7960 case TARGET_NR_ptrace
:
7962 #ifdef TARGET_NR_alarm /* not on alpha */
7963 case TARGET_NR_alarm
:
7967 #ifdef TARGET_NR_oldfstat
7968 case TARGET_NR_oldfstat
:
7971 #ifdef TARGET_NR_pause /* not on alpha */
7972 case TARGET_NR_pause
:
7973 if (!block_signals()) {
7974 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
7976 ret
= -TARGET_EINTR
;
7979 #ifdef TARGET_NR_utime
7980 case TARGET_NR_utime
:
7982 struct utimbuf tbuf
, *host_tbuf
;
7983 struct target_utimbuf
*target_tbuf
;
7985 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
7987 tbuf
.actime
= tswapal(target_tbuf
->actime
);
7988 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
7989 unlock_user_struct(target_tbuf
, arg2
, 0);
7994 if (!(p
= lock_user_string(arg1
)))
7996 ret
= get_errno(utime(p
, host_tbuf
));
7997 unlock_user(p
, arg1
, 0);
8001 #ifdef TARGET_NR_utimes
8002 case TARGET_NR_utimes
:
8004 struct timeval
*tvp
, tv
[2];
8006 if (copy_from_user_timeval(&tv
[0], arg2
)
8007 || copy_from_user_timeval(&tv
[1],
8008 arg2
+ sizeof(struct target_timeval
)))
8014 if (!(p
= lock_user_string(arg1
)))
8016 ret
= get_errno(utimes(p
, tvp
));
8017 unlock_user(p
, arg1
, 0);
8021 #if defined(TARGET_NR_futimesat)
8022 case TARGET_NR_futimesat
:
8024 struct timeval
*tvp
, tv
[2];
8026 if (copy_from_user_timeval(&tv
[0], arg3
)
8027 || copy_from_user_timeval(&tv
[1],
8028 arg3
+ sizeof(struct target_timeval
)))
8034 if (!(p
= lock_user_string(arg2
)))
8036 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
8037 unlock_user(p
, arg2
, 0);
8041 #ifdef TARGET_NR_stty
8042 case TARGET_NR_stty
:
8045 #ifdef TARGET_NR_gtty
8046 case TARGET_NR_gtty
:
8049 #ifdef TARGET_NR_access
8050 case TARGET_NR_access
:
8051 if (!(p
= lock_user_string(arg1
)))
8053 ret
= get_errno(access(path(p
), arg2
));
8054 unlock_user(p
, arg1
, 0);
8057 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8058 case TARGET_NR_faccessat
:
8059 if (!(p
= lock_user_string(arg2
)))
8061 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
8062 unlock_user(p
, arg2
, 0);
8065 #ifdef TARGET_NR_nice /* not on alpha */
8066 case TARGET_NR_nice
:
8067 ret
= get_errno(nice(arg1
));
8070 #ifdef TARGET_NR_ftime
8071 case TARGET_NR_ftime
:
8074 case TARGET_NR_sync
:
8078 case TARGET_NR_kill
:
8079 ret
= get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
8081 #ifdef TARGET_NR_rename
8082 case TARGET_NR_rename
:
8085 p
= lock_user_string(arg1
);
8086 p2
= lock_user_string(arg2
);
8088 ret
= -TARGET_EFAULT
;
8090 ret
= get_errno(rename(p
, p2
));
8091 unlock_user(p2
, arg2
, 0);
8092 unlock_user(p
, arg1
, 0);
8096 #if defined(TARGET_NR_renameat)
8097 case TARGET_NR_renameat
:
8100 p
= lock_user_string(arg2
);
8101 p2
= lock_user_string(arg4
);
8103 ret
= -TARGET_EFAULT
;
8105 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
8106 unlock_user(p2
, arg4
, 0);
8107 unlock_user(p
, arg2
, 0);
8111 #ifdef TARGET_NR_mkdir
8112 case TARGET_NR_mkdir
:
8113 if (!(p
= lock_user_string(arg1
)))
8115 ret
= get_errno(mkdir(p
, arg2
));
8116 unlock_user(p
, arg1
, 0);
8119 #if defined(TARGET_NR_mkdirat)
8120 case TARGET_NR_mkdirat
:
8121 if (!(p
= lock_user_string(arg2
)))
8123 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
8124 unlock_user(p
, arg2
, 0);
8127 #ifdef TARGET_NR_rmdir
8128 case TARGET_NR_rmdir
:
8129 if (!(p
= lock_user_string(arg1
)))
8131 ret
= get_errno(rmdir(p
));
8132 unlock_user(p
, arg1
, 0);
8136 ret
= get_errno(dup(arg1
));
8138 fd_trans_dup(arg1
, ret
);
8141 #ifdef TARGET_NR_pipe
8142 case TARGET_NR_pipe
:
8143 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
8146 #ifdef TARGET_NR_pipe2
8147 case TARGET_NR_pipe2
:
8148 ret
= do_pipe(cpu_env
, arg1
,
8149 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
8152 case TARGET_NR_times
:
8154 struct target_tms
*tmsp
;
8156 ret
= get_errno(times(&tms
));
8158 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
8161 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
8162 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
8163 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
8164 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
8167 ret
= host_to_target_clock_t(ret
);
8170 #ifdef TARGET_NR_prof
8171 case TARGET_NR_prof
:
8174 #ifdef TARGET_NR_signal
8175 case TARGET_NR_signal
:
8178 case TARGET_NR_acct
:
8180 ret
= get_errno(acct(NULL
));
8182 if (!(p
= lock_user_string(arg1
)))
8184 ret
= get_errno(acct(path(p
)));
8185 unlock_user(p
, arg1
, 0);
8188 #ifdef TARGET_NR_umount2
8189 case TARGET_NR_umount2
:
8190 if (!(p
= lock_user_string(arg1
)))
8192 ret
= get_errno(umount2(p
, arg2
));
8193 unlock_user(p
, arg1
, 0);
8196 #ifdef TARGET_NR_lock
8197 case TARGET_NR_lock
:
8200 case TARGET_NR_ioctl
:
8201 ret
= do_ioctl(arg1
, arg2
, arg3
);
8203 case TARGET_NR_fcntl
:
8204 ret
= do_fcntl(arg1
, arg2
, arg3
);
8206 #ifdef TARGET_NR_mpx
8210 case TARGET_NR_setpgid
:
8211 ret
= get_errno(setpgid(arg1
, arg2
));
8213 #ifdef TARGET_NR_ulimit
8214 case TARGET_NR_ulimit
:
8217 #ifdef TARGET_NR_oldolduname
8218 case TARGET_NR_oldolduname
:
8221 case TARGET_NR_umask
:
8222 ret
= get_errno(umask(arg1
));
8224 case TARGET_NR_chroot
:
8225 if (!(p
= lock_user_string(arg1
)))
8227 ret
= get_errno(chroot(p
));
8228 unlock_user(p
, arg1
, 0);
8230 #ifdef TARGET_NR_ustat
8231 case TARGET_NR_ustat
:
8234 #ifdef TARGET_NR_dup2
8235 case TARGET_NR_dup2
:
8236 ret
= get_errno(dup2(arg1
, arg2
));
8238 fd_trans_dup(arg1
, arg2
);
8242 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8243 case TARGET_NR_dup3
:
8244 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
8246 fd_trans_dup(arg1
, arg2
);
8250 #ifdef TARGET_NR_getppid /* not on alpha */
8251 case TARGET_NR_getppid
:
8252 ret
= get_errno(getppid());
8255 #ifdef TARGET_NR_getpgrp
8256 case TARGET_NR_getpgrp
:
8257 ret
= get_errno(getpgrp());
8260 case TARGET_NR_setsid
:
8261 ret
= get_errno(setsid());
8263 #ifdef TARGET_NR_sigaction
8264 case TARGET_NR_sigaction
:
8266 #if defined(TARGET_ALPHA)
8267 struct target_sigaction act
, oact
, *pact
= 0;
8268 struct target_old_sigaction
*old_act
;
8270 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8272 act
._sa_handler
= old_act
->_sa_handler
;
8273 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8274 act
.sa_flags
= old_act
->sa_flags
;
8275 act
.sa_restorer
= 0;
8276 unlock_user_struct(old_act
, arg2
, 0);
8279 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8280 if (!is_error(ret
) && arg3
) {
8281 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8283 old_act
->_sa_handler
= oact
._sa_handler
;
8284 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8285 old_act
->sa_flags
= oact
.sa_flags
;
8286 unlock_user_struct(old_act
, arg3
, 1);
8288 #elif defined(TARGET_MIPS)
8289 struct target_sigaction act
, oact
, *pact
, *old_act
;
8292 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8294 act
._sa_handler
= old_act
->_sa_handler
;
8295 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
8296 act
.sa_flags
= old_act
->sa_flags
;
8297 unlock_user_struct(old_act
, arg2
, 0);
8303 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8305 if (!is_error(ret
) && arg3
) {
8306 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8308 old_act
->_sa_handler
= oact
._sa_handler
;
8309 old_act
->sa_flags
= oact
.sa_flags
;
8310 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
8311 old_act
->sa_mask
.sig
[1] = 0;
8312 old_act
->sa_mask
.sig
[2] = 0;
8313 old_act
->sa_mask
.sig
[3] = 0;
8314 unlock_user_struct(old_act
, arg3
, 1);
8317 struct target_old_sigaction
*old_act
;
8318 struct target_sigaction act
, oact
, *pact
;
8320 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8322 act
._sa_handler
= old_act
->_sa_handler
;
8323 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8324 act
.sa_flags
= old_act
->sa_flags
;
8325 act
.sa_restorer
= old_act
->sa_restorer
;
8326 unlock_user_struct(old_act
, arg2
, 0);
8331 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8332 if (!is_error(ret
) && arg3
) {
8333 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8335 old_act
->_sa_handler
= oact
._sa_handler
;
8336 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8337 old_act
->sa_flags
= oact
.sa_flags
;
8338 old_act
->sa_restorer
= oact
.sa_restorer
;
8339 unlock_user_struct(old_act
, arg3
, 1);
8345 case TARGET_NR_rt_sigaction
:
8347 #if defined(TARGET_ALPHA)
8348 struct target_sigaction act
, oact
, *pact
= 0;
8349 struct target_rt_sigaction
*rt_act
;
8351 if (arg4
!= sizeof(target_sigset_t
)) {
8352 ret
= -TARGET_EINVAL
;
8356 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
8358 act
._sa_handler
= rt_act
->_sa_handler
;
8359 act
.sa_mask
= rt_act
->sa_mask
;
8360 act
.sa_flags
= rt_act
->sa_flags
;
8361 act
.sa_restorer
= arg5
;
8362 unlock_user_struct(rt_act
, arg2
, 0);
8365 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8366 if (!is_error(ret
) && arg3
) {
8367 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
8369 rt_act
->_sa_handler
= oact
._sa_handler
;
8370 rt_act
->sa_mask
= oact
.sa_mask
;
8371 rt_act
->sa_flags
= oact
.sa_flags
;
8372 unlock_user_struct(rt_act
, arg3
, 1);
8375 struct target_sigaction
*act
;
8376 struct target_sigaction
*oact
;
8378 if (arg4
!= sizeof(target_sigset_t
)) {
8379 ret
= -TARGET_EINVAL
;
8383 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
8388 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
8389 ret
= -TARGET_EFAULT
;
8390 goto rt_sigaction_fail
;
8394 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
8397 unlock_user_struct(act
, arg2
, 0);
8399 unlock_user_struct(oact
, arg3
, 1);
8403 #ifdef TARGET_NR_sgetmask /* not on alpha */
8404 case TARGET_NR_sgetmask
:
8407 abi_ulong target_set
;
8408 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8410 host_to_target_old_sigset(&target_set
, &cur_set
);
8416 #ifdef TARGET_NR_ssetmask /* not on alpha */
8417 case TARGET_NR_ssetmask
:
8419 sigset_t set
, oset
, cur_set
;
8420 abi_ulong target_set
= arg1
;
8421 /* We only have one word of the new mask so we must read
8422 * the rest of it with do_sigprocmask() and OR in this word.
8423 * We are guaranteed that a do_sigprocmask() that only queries
8424 * the signal mask will not fail.
8426 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8428 target_to_host_old_sigset(&set
, &target_set
);
8429 sigorset(&set
, &set
, &cur_set
);
8430 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
8432 host_to_target_old_sigset(&target_set
, &oset
);
8438 #ifdef TARGET_NR_sigprocmask
8439 case TARGET_NR_sigprocmask
:
8441 #if defined(TARGET_ALPHA)
8442 sigset_t set
, oldset
;
8447 case TARGET_SIG_BLOCK
:
8450 case TARGET_SIG_UNBLOCK
:
8453 case TARGET_SIG_SETMASK
:
8457 ret
= -TARGET_EINVAL
;
8461 target_to_host_old_sigset(&set
, &mask
);
8463 ret
= do_sigprocmask(how
, &set
, &oldset
);
8464 if (!is_error(ret
)) {
8465 host_to_target_old_sigset(&mask
, &oldset
);
8467 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
8470 sigset_t set
, oldset
, *set_ptr
;
8475 case TARGET_SIG_BLOCK
:
8478 case TARGET_SIG_UNBLOCK
:
8481 case TARGET_SIG_SETMASK
:
8485 ret
= -TARGET_EINVAL
;
8488 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8490 target_to_host_old_sigset(&set
, p
);
8491 unlock_user(p
, arg2
, 0);
8497 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8498 if (!is_error(ret
) && arg3
) {
8499 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8501 host_to_target_old_sigset(p
, &oldset
);
8502 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8508 case TARGET_NR_rt_sigprocmask
:
8511 sigset_t set
, oldset
, *set_ptr
;
8513 if (arg4
!= sizeof(target_sigset_t
)) {
8514 ret
= -TARGET_EINVAL
;
8520 case TARGET_SIG_BLOCK
:
8523 case TARGET_SIG_UNBLOCK
:
8526 case TARGET_SIG_SETMASK
:
8530 ret
= -TARGET_EINVAL
;
8533 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8535 target_to_host_sigset(&set
, p
);
8536 unlock_user(p
, arg2
, 0);
8542 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8543 if (!is_error(ret
) && arg3
) {
8544 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8546 host_to_target_sigset(p
, &oldset
);
8547 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8551 #ifdef TARGET_NR_sigpending
8552 case TARGET_NR_sigpending
:
8555 ret
= get_errno(sigpending(&set
));
8556 if (!is_error(ret
)) {
8557 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8559 host_to_target_old_sigset(p
, &set
);
8560 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8565 case TARGET_NR_rt_sigpending
:
8569 /* Yes, this check is >, not != like most. We follow the kernel's
8570 * logic and it does it like this because it implements
8571 * NR_sigpending through the same code path, and in that case
8572 * the old_sigset_t is smaller in size.
8574 if (arg2
> sizeof(target_sigset_t
)) {
8575 ret
= -TARGET_EINVAL
;
8579 ret
= get_errno(sigpending(&set
));
8580 if (!is_error(ret
)) {
8581 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8583 host_to_target_sigset(p
, &set
);
8584 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8588 #ifdef TARGET_NR_sigsuspend
8589 case TARGET_NR_sigsuspend
:
8591 TaskState
*ts
= cpu
->opaque
;
8592 #if defined(TARGET_ALPHA)
8593 abi_ulong mask
= arg1
;
8594 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
8596 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8598 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
8599 unlock_user(p
, arg1
, 0);
8601 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8603 if (ret
!= -TARGET_ERESTARTSYS
) {
8604 ts
->in_sigsuspend
= 1;
8609 case TARGET_NR_rt_sigsuspend
:
8611 TaskState
*ts
= cpu
->opaque
;
8613 if (arg2
!= sizeof(target_sigset_t
)) {
8614 ret
= -TARGET_EINVAL
;
8617 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8619 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
8620 unlock_user(p
, arg1
, 0);
8621 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8623 if (ret
!= -TARGET_ERESTARTSYS
) {
8624 ts
->in_sigsuspend
= 1;
8628 case TARGET_NR_rt_sigtimedwait
:
8631 struct timespec uts
, *puts
;
8634 if (arg4
!= sizeof(target_sigset_t
)) {
8635 ret
= -TARGET_EINVAL
;
8639 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8641 target_to_host_sigset(&set
, p
);
8642 unlock_user(p
, arg1
, 0);
8645 target_to_host_timespec(puts
, arg3
);
8649 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
8651 if (!is_error(ret
)) {
8653 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
8658 host_to_target_siginfo(p
, &uinfo
);
8659 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
8661 ret
= host_to_target_signal(ret
);
8665 case TARGET_NR_rt_sigqueueinfo
:
8669 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
8673 target_to_host_siginfo(&uinfo
, p
);
8674 unlock_user(p
, arg1
, 0);
8675 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
8678 #ifdef TARGET_NR_sigreturn
8679 case TARGET_NR_sigreturn
:
8680 if (block_signals()) {
8681 ret
= -TARGET_ERESTARTSYS
;
8683 ret
= do_sigreturn(cpu_env
);
8687 case TARGET_NR_rt_sigreturn
:
8688 if (block_signals()) {
8689 ret
= -TARGET_ERESTARTSYS
;
8691 ret
= do_rt_sigreturn(cpu_env
);
8694 case TARGET_NR_sethostname
:
8695 if (!(p
= lock_user_string(arg1
)))
8697 ret
= get_errno(sethostname(p
, arg2
));
8698 unlock_user(p
, arg1
, 0);
8700 case TARGET_NR_setrlimit
:
8702 int resource
= target_to_host_resource(arg1
);
8703 struct target_rlimit
*target_rlim
;
8705 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
8707 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
8708 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
8709 unlock_user_struct(target_rlim
, arg2
, 0);
8710 ret
= get_errno(setrlimit(resource
, &rlim
));
8713 case TARGET_NR_getrlimit
:
8715 int resource
= target_to_host_resource(arg1
);
8716 struct target_rlimit
*target_rlim
;
8719 ret
= get_errno(getrlimit(resource
, &rlim
));
8720 if (!is_error(ret
)) {
8721 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
8723 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
8724 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
8725 unlock_user_struct(target_rlim
, arg2
, 1);
8729 case TARGET_NR_getrusage
:
8731 struct rusage rusage
;
8732 ret
= get_errno(getrusage(arg1
, &rusage
));
8733 if (!is_error(ret
)) {
8734 ret
= host_to_target_rusage(arg2
, &rusage
);
8738 case TARGET_NR_gettimeofday
:
8741 ret
= get_errno(gettimeofday(&tv
, NULL
));
8742 if (!is_error(ret
)) {
8743 if (copy_to_user_timeval(arg1
, &tv
))
8748 case TARGET_NR_settimeofday
:
8750 struct timeval tv
, *ptv
= NULL
;
8751 struct timezone tz
, *ptz
= NULL
;
8754 if (copy_from_user_timeval(&tv
, arg1
)) {
8761 if (copy_from_user_timezone(&tz
, arg2
)) {
8767 ret
= get_errno(settimeofday(ptv
, ptz
));
8770 #if defined(TARGET_NR_select)
8771 case TARGET_NR_select
:
8772 #if defined(TARGET_WANT_NI_OLD_SELECT)
8773 /* some architectures used to have old_select here
8774 * but now ENOSYS it.
8776 ret
= -TARGET_ENOSYS
;
8777 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8778 ret
= do_old_select(arg1
);
8780 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
8784 #ifdef TARGET_NR_pselect6
8785 case TARGET_NR_pselect6
:
8787 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
8788 fd_set rfds
, wfds
, efds
;
8789 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
8790 struct timespec ts
, *ts_ptr
;
8793 * The 6th arg is actually two args smashed together,
8794 * so we cannot use the C library.
8802 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
8803 target_sigset_t
*target_sigset
;
8811 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
8815 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
8819 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
8825 * This takes a timespec, and not a timeval, so we cannot
8826 * use the do_select() helper ...
8829 if (target_to_host_timespec(&ts
, ts_addr
)) {
8837 /* Extract the two packed args for the sigset */
8840 sig
.size
= SIGSET_T_SIZE
;
8842 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
8846 arg_sigset
= tswapal(arg7
[0]);
8847 arg_sigsize
= tswapal(arg7
[1]);
8848 unlock_user(arg7
, arg6
, 0);
8852 if (arg_sigsize
!= sizeof(*target_sigset
)) {
8853 /* Like the kernel, we enforce correct size sigsets */
8854 ret
= -TARGET_EINVAL
;
8857 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
8858 sizeof(*target_sigset
), 1);
8859 if (!target_sigset
) {
8862 target_to_host_sigset(&set
, target_sigset
);
8863 unlock_user(target_sigset
, arg_sigset
, 0);
8871 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
8874 if (!is_error(ret
)) {
8875 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
8877 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
8879 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
8882 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
8888 #ifdef TARGET_NR_symlink
8889 case TARGET_NR_symlink
:
8892 p
= lock_user_string(arg1
);
8893 p2
= lock_user_string(arg2
);
8895 ret
= -TARGET_EFAULT
;
8897 ret
= get_errno(symlink(p
, p2
));
8898 unlock_user(p2
, arg2
, 0);
8899 unlock_user(p
, arg1
, 0);
8903 #if defined(TARGET_NR_symlinkat)
8904 case TARGET_NR_symlinkat
:
8907 p
= lock_user_string(arg1
);
8908 p2
= lock_user_string(arg3
);
8910 ret
= -TARGET_EFAULT
;
8912 ret
= get_errno(symlinkat(p
, arg2
, p2
));
8913 unlock_user(p2
, arg3
, 0);
8914 unlock_user(p
, arg1
, 0);
8918 #ifdef TARGET_NR_oldlstat
8919 case TARGET_NR_oldlstat
:
8922 #ifdef TARGET_NR_readlink
8923 case TARGET_NR_readlink
:
8926 p
= lock_user_string(arg1
);
8927 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8929 ret
= -TARGET_EFAULT
;
8931 /* Short circuit this for the magic exe check. */
8932 ret
= -TARGET_EINVAL
;
8933 } else if (is_proc_myself((const char *)p
, "exe")) {
8934 char real
[PATH_MAX
], *temp
;
8935 temp
= realpath(exec_path
, real
);
8936 /* Return value is # of bytes that we wrote to the buffer. */
8938 ret
= get_errno(-1);
8940 /* Don't worry about sign mismatch as earlier mapping
8941 * logic would have thrown a bad address error. */
8942 ret
= MIN(strlen(real
), arg3
);
8943 /* We cannot NUL terminate the string. */
8944 memcpy(p2
, real
, ret
);
8947 ret
= get_errno(readlink(path(p
), p2
, arg3
));
8949 unlock_user(p2
, arg2
, ret
);
8950 unlock_user(p
, arg1
, 0);
8954 #if defined(TARGET_NR_readlinkat)
8955 case TARGET_NR_readlinkat
:
8958 p
= lock_user_string(arg2
);
8959 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8961 ret
= -TARGET_EFAULT
;
8962 } else if (is_proc_myself((const char *)p
, "exe")) {
8963 char real
[PATH_MAX
], *temp
;
8964 temp
= realpath(exec_path
, real
);
8965 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
8966 snprintf((char *)p2
, arg4
, "%s", real
);
8968 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
8970 unlock_user(p2
, arg3
, ret
);
8971 unlock_user(p
, arg2
, 0);
8975 #ifdef TARGET_NR_uselib
8976 case TARGET_NR_uselib
:
8979 #ifdef TARGET_NR_swapon
8980 case TARGET_NR_swapon
:
8981 if (!(p
= lock_user_string(arg1
)))
8983 ret
= get_errno(swapon(p
, arg2
));
8984 unlock_user(p
, arg1
, 0);
8987 case TARGET_NR_reboot
:
8988 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
8989 /* arg4 must be ignored in all other cases */
8990 p
= lock_user_string(arg4
);
8994 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
8995 unlock_user(p
, arg4
, 0);
8997 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
9000 #ifdef TARGET_NR_readdir
9001 case TARGET_NR_readdir
:
9004 #ifdef TARGET_NR_mmap
9005 case TARGET_NR_mmap
:
9006 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9007 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9008 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9009 || defined(TARGET_S390X)
9012 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
9013 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
9021 unlock_user(v
, arg1
, 0);
9022 ret
= get_errno(target_mmap(v1
, v2
, v3
,
9023 target_to_host_bitmask(v4
, mmap_flags_tbl
),
9027 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9028 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9034 #ifdef TARGET_NR_mmap2
9035 case TARGET_NR_mmap2
:
9037 #define MMAP_SHIFT 12
9039 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9040 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9042 arg6
<< MMAP_SHIFT
));
9045 case TARGET_NR_munmap
:
9046 ret
= get_errno(target_munmap(arg1
, arg2
));
9048 case TARGET_NR_mprotect
:
9050 TaskState
*ts
= cpu
->opaque
;
9051 /* Special hack to detect libc making the stack executable. */
9052 if ((arg3
& PROT_GROWSDOWN
)
9053 && arg1
>= ts
->info
->stack_limit
9054 && arg1
<= ts
->info
->start_stack
) {
9055 arg3
&= ~PROT_GROWSDOWN
;
9056 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
9057 arg1
= ts
->info
->stack_limit
;
9060 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
9062 #ifdef TARGET_NR_mremap
9063 case TARGET_NR_mremap
:
9064 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
9067 /* ??? msync/mlock/munlock are broken for softmmu. */
9068 #ifdef TARGET_NR_msync
9069 case TARGET_NR_msync
:
9070 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
9073 #ifdef TARGET_NR_mlock
9074 case TARGET_NR_mlock
:
9075 ret
= get_errno(mlock(g2h(arg1
), arg2
));
9078 #ifdef TARGET_NR_munlock
9079 case TARGET_NR_munlock
:
9080 ret
= get_errno(munlock(g2h(arg1
), arg2
));
9083 #ifdef TARGET_NR_mlockall
9084 case TARGET_NR_mlockall
:
9085 ret
= get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
9088 #ifdef TARGET_NR_munlockall
9089 case TARGET_NR_munlockall
:
9090 ret
= get_errno(munlockall());
9093 case TARGET_NR_truncate
:
9094 if (!(p
= lock_user_string(arg1
)))
9096 ret
= get_errno(truncate(p
, arg2
));
9097 unlock_user(p
, arg1
, 0);
9099 case TARGET_NR_ftruncate
:
9100 ret
= get_errno(ftruncate(arg1
, arg2
));
9102 case TARGET_NR_fchmod
:
9103 ret
= get_errno(fchmod(arg1
, arg2
));
9105 #if defined(TARGET_NR_fchmodat)
9106 case TARGET_NR_fchmodat
:
9107 if (!(p
= lock_user_string(arg2
)))
9109 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
9110 unlock_user(p
, arg2
, 0);
9113 case TARGET_NR_getpriority
:
9114 /* Note that negative values are valid for getpriority, so we must
9115 differentiate based on errno settings. */
9117 ret
= getpriority(arg1
, arg2
);
9118 if (ret
== -1 && errno
!= 0) {
9119 ret
= -host_to_target_errno(errno
);
9123 /* Return value is the unbiased priority. Signal no error. */
9124 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
9126 /* Return value is a biased priority to avoid negative numbers. */
9130 case TARGET_NR_setpriority
:
9131 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
9133 #ifdef TARGET_NR_profil
9134 case TARGET_NR_profil
:
9137 case TARGET_NR_statfs
:
9138 if (!(p
= lock_user_string(arg1
)))
9140 ret
= get_errno(statfs(path(p
), &stfs
));
9141 unlock_user(p
, arg1
, 0);
9143 if (!is_error(ret
)) {
9144 struct target_statfs
*target_stfs
;
9146 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
9148 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9149 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9150 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9151 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9152 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9153 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9154 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9155 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9156 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9157 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9158 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9159 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9160 unlock_user_struct(target_stfs
, arg2
, 1);
9163 case TARGET_NR_fstatfs
:
9164 ret
= get_errno(fstatfs(arg1
, &stfs
));
9165 goto convert_statfs
;
9166 #ifdef TARGET_NR_statfs64
9167 case TARGET_NR_statfs64
:
9168 if (!(p
= lock_user_string(arg1
)))
9170 ret
= get_errno(statfs(path(p
), &stfs
));
9171 unlock_user(p
, arg1
, 0);
9173 if (!is_error(ret
)) {
9174 struct target_statfs64
*target_stfs
;
9176 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
9178 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9179 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9180 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9181 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9182 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9183 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9184 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9185 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9186 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9187 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9188 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9189 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9190 unlock_user_struct(target_stfs
, arg3
, 1);
9193 case TARGET_NR_fstatfs64
:
9194 ret
= get_errno(fstatfs(arg1
, &stfs
));
9195 goto convert_statfs64
;
9197 #ifdef TARGET_NR_ioperm
9198 case TARGET_NR_ioperm
:
9201 #ifdef TARGET_NR_socketcall
9202 case TARGET_NR_socketcall
:
9203 ret
= do_socketcall(arg1
, arg2
);
9206 #ifdef TARGET_NR_accept
9207 case TARGET_NR_accept
:
9208 ret
= do_accept4(arg1
, arg2
, arg3
, 0);
9211 #ifdef TARGET_NR_accept4
9212 case TARGET_NR_accept4
:
9213 ret
= do_accept4(arg1
, arg2
, arg3
, arg4
);
9216 #ifdef TARGET_NR_bind
9217 case TARGET_NR_bind
:
9218 ret
= do_bind(arg1
, arg2
, arg3
);
9221 #ifdef TARGET_NR_connect
9222 case TARGET_NR_connect
:
9223 ret
= do_connect(arg1
, arg2
, arg3
);
9226 #ifdef TARGET_NR_getpeername
9227 case TARGET_NR_getpeername
:
9228 ret
= do_getpeername(arg1
, arg2
, arg3
);
9231 #ifdef TARGET_NR_getsockname
9232 case TARGET_NR_getsockname
:
9233 ret
= do_getsockname(arg1
, arg2
, arg3
);
9236 #ifdef TARGET_NR_getsockopt
9237 case TARGET_NR_getsockopt
:
9238 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
9241 #ifdef TARGET_NR_listen
9242 case TARGET_NR_listen
:
9243 ret
= get_errno(listen(arg1
, arg2
));
9246 #ifdef TARGET_NR_recv
9247 case TARGET_NR_recv
:
9248 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
9251 #ifdef TARGET_NR_recvfrom
9252 case TARGET_NR_recvfrom
:
9253 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9256 #ifdef TARGET_NR_recvmsg
9257 case TARGET_NR_recvmsg
:
9258 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
9261 #ifdef TARGET_NR_send
9262 case TARGET_NR_send
:
9263 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
9266 #ifdef TARGET_NR_sendmsg
9267 case TARGET_NR_sendmsg
:
9268 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
9271 #ifdef TARGET_NR_sendmmsg
9272 case TARGET_NR_sendmmsg
:
9273 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
9275 case TARGET_NR_recvmmsg
:
9276 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
9279 #ifdef TARGET_NR_sendto
9280 case TARGET_NR_sendto
:
9281 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9284 #ifdef TARGET_NR_shutdown
9285 case TARGET_NR_shutdown
:
9286 ret
= get_errno(shutdown(arg1
, arg2
));
9289 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9290 case TARGET_NR_getrandom
:
9291 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
9295 ret
= get_errno(getrandom(p
, arg2
, arg3
));
9296 unlock_user(p
, arg1
, ret
);
9299 #ifdef TARGET_NR_socket
9300 case TARGET_NR_socket
:
9301 ret
= do_socket(arg1
, arg2
, arg3
);
9302 fd_trans_unregister(ret
);
9305 #ifdef TARGET_NR_socketpair
9306 case TARGET_NR_socketpair
:
9307 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
9310 #ifdef TARGET_NR_setsockopt
9311 case TARGET_NR_setsockopt
:
9312 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
9316 case TARGET_NR_syslog
:
9317 if (!(p
= lock_user_string(arg2
)))
9319 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
9320 unlock_user(p
, arg2
, 0);
9323 case TARGET_NR_setitimer
:
9325 struct itimerval value
, ovalue
, *pvalue
;
9329 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
9330 || copy_from_user_timeval(&pvalue
->it_value
,
9331 arg2
+ sizeof(struct target_timeval
)))
9336 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
9337 if (!is_error(ret
) && arg3
) {
9338 if (copy_to_user_timeval(arg3
,
9339 &ovalue
.it_interval
)
9340 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
9346 case TARGET_NR_getitimer
:
9348 struct itimerval value
;
9350 ret
= get_errno(getitimer(arg1
, &value
));
9351 if (!is_error(ret
) && arg2
) {
9352 if (copy_to_user_timeval(arg2
,
9354 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
9360 #ifdef TARGET_NR_stat
9361 case TARGET_NR_stat
:
9362 if (!(p
= lock_user_string(arg1
)))
9364 ret
= get_errno(stat(path(p
), &st
));
9365 unlock_user(p
, arg1
, 0);
9368 #ifdef TARGET_NR_lstat
9369 case TARGET_NR_lstat
:
9370 if (!(p
= lock_user_string(arg1
)))
9372 ret
= get_errno(lstat(path(p
), &st
));
9373 unlock_user(p
, arg1
, 0);
9376 case TARGET_NR_fstat
:
9378 ret
= get_errno(fstat(arg1
, &st
));
9379 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9382 if (!is_error(ret
)) {
9383 struct target_stat
*target_st
;
9385 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
9387 memset(target_st
, 0, sizeof(*target_st
));
9388 __put_user(st
.st_dev
, &target_st
->st_dev
);
9389 __put_user(st
.st_ino
, &target_st
->st_ino
);
9390 __put_user(st
.st_mode
, &target_st
->st_mode
);
9391 __put_user(st
.st_uid
, &target_st
->st_uid
);
9392 __put_user(st
.st_gid
, &target_st
->st_gid
);
9393 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
9394 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
9395 __put_user(st
.st_size
, &target_st
->st_size
);
9396 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
9397 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
9398 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
9399 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
9400 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
9401 unlock_user_struct(target_st
, arg2
, 1);
9405 #ifdef TARGET_NR_olduname
9406 case TARGET_NR_olduname
:
9409 #ifdef TARGET_NR_iopl
9410 case TARGET_NR_iopl
:
9413 case TARGET_NR_vhangup
:
9414 ret
= get_errno(vhangup());
9416 #ifdef TARGET_NR_idle
9417 case TARGET_NR_idle
:
9420 #ifdef TARGET_NR_syscall
9421 case TARGET_NR_syscall
:
9422 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
9423 arg6
, arg7
, arg8
, 0);
9426 case TARGET_NR_wait4
:
9429 abi_long status_ptr
= arg2
;
9430 struct rusage rusage
, *rusage_ptr
;
9431 abi_ulong target_rusage
= arg4
;
9432 abi_long rusage_err
;
9434 rusage_ptr
= &rusage
;
9437 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
9438 if (!is_error(ret
)) {
9439 if (status_ptr
&& ret
) {
9440 status
= host_to_target_waitstatus(status
);
9441 if (put_user_s32(status
, status_ptr
))
9444 if (target_rusage
) {
9445 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
9453 #ifdef TARGET_NR_swapoff
9454 case TARGET_NR_swapoff
:
9455 if (!(p
= lock_user_string(arg1
)))
9457 ret
= get_errno(swapoff(p
));
9458 unlock_user(p
, arg1
, 0);
9461 case TARGET_NR_sysinfo
:
9463 struct target_sysinfo
*target_value
;
9464 struct sysinfo value
;
9465 ret
= get_errno(sysinfo(&value
));
9466 if (!is_error(ret
) && arg1
)
9468 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
9470 __put_user(value
.uptime
, &target_value
->uptime
);
9471 __put_user(value
.loads
[0], &target_value
->loads
[0]);
9472 __put_user(value
.loads
[1], &target_value
->loads
[1]);
9473 __put_user(value
.loads
[2], &target_value
->loads
[2]);
9474 __put_user(value
.totalram
, &target_value
->totalram
);
9475 __put_user(value
.freeram
, &target_value
->freeram
);
9476 __put_user(value
.sharedram
, &target_value
->sharedram
);
9477 __put_user(value
.bufferram
, &target_value
->bufferram
);
9478 __put_user(value
.totalswap
, &target_value
->totalswap
);
9479 __put_user(value
.freeswap
, &target_value
->freeswap
);
9480 __put_user(value
.procs
, &target_value
->procs
);
9481 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
9482 __put_user(value
.freehigh
, &target_value
->freehigh
);
9483 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
9484 unlock_user_struct(target_value
, arg1
, 1);
9488 #ifdef TARGET_NR_ipc
9490 ret
= do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9493 #ifdef TARGET_NR_semget
9494 case TARGET_NR_semget
:
9495 ret
= get_errno(semget(arg1
, arg2
, arg3
));
9498 #ifdef TARGET_NR_semop
9499 case TARGET_NR_semop
:
9500 ret
= do_semop(arg1
, arg2
, arg3
);
9503 #ifdef TARGET_NR_semctl
9504 case TARGET_NR_semctl
:
9505 ret
= do_semctl(arg1
, arg2
, arg3
, arg4
);
9508 #ifdef TARGET_NR_msgctl
9509 case TARGET_NR_msgctl
:
9510 ret
= do_msgctl(arg1
, arg2
, arg3
);
9513 #ifdef TARGET_NR_msgget
9514 case TARGET_NR_msgget
:
9515 ret
= get_errno(msgget(arg1
, arg2
));
9518 #ifdef TARGET_NR_msgrcv
9519 case TARGET_NR_msgrcv
:
9520 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
9523 #ifdef TARGET_NR_msgsnd
9524 case TARGET_NR_msgsnd
:
9525 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
9528 #ifdef TARGET_NR_shmget
9529 case TARGET_NR_shmget
:
9530 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
9533 #ifdef TARGET_NR_shmctl
9534 case TARGET_NR_shmctl
:
9535 ret
= do_shmctl(arg1
, arg2
, arg3
);
9538 #ifdef TARGET_NR_shmat
9539 case TARGET_NR_shmat
:
9540 ret
= do_shmat(cpu_env
, arg1
, arg2
, arg3
);
9543 #ifdef TARGET_NR_shmdt
9544 case TARGET_NR_shmdt
:
9545 ret
= do_shmdt(arg1
);
9548 case TARGET_NR_fsync
:
9549 ret
= get_errno(fsync(arg1
));
9551 case TARGET_NR_clone
:
9552 /* Linux manages to have three different orderings for its
9553 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9554 * match the kernel's CONFIG_CLONE_* settings.
9555 * Microblaze is further special in that it uses a sixth
9556 * implicit argument to clone for the TLS pointer.
9558 #if defined(TARGET_MICROBLAZE)
9559 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
9560 #elif defined(TARGET_CLONE_BACKWARDS)
9561 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
9562 #elif defined(TARGET_CLONE_BACKWARDS2)
9563 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
9565 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
9568 #ifdef __NR_exit_group
9569 /* new thread calls */
9570 case TARGET_NR_exit_group
:
9574 gdb_exit(cpu_env
, arg1
);
9575 ret
= get_errno(exit_group(arg1
));
9578 case TARGET_NR_setdomainname
:
9579 if (!(p
= lock_user_string(arg1
)))
9581 ret
= get_errno(setdomainname(p
, arg2
));
9582 unlock_user(p
, arg1
, 0);
9584 case TARGET_NR_uname
:
9585 /* no need to transcode because we use the linux syscall */
9587 struct new_utsname
* buf
;
9589 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
9591 ret
= get_errno(sys_uname(buf
));
9592 if (!is_error(ret
)) {
9593 /* Overwrite the native machine name with whatever is being
9595 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
9596 /* Allow the user to override the reported release. */
9597 if (qemu_uname_release
&& *qemu_uname_release
) {
9598 g_strlcpy(buf
->release
, qemu_uname_release
,
9599 sizeof(buf
->release
));
9602 unlock_user_struct(buf
, arg1
, 1);
9606 case TARGET_NR_modify_ldt
:
9607 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
9609 #if !defined(TARGET_X86_64)
9610 case TARGET_NR_vm86old
:
9612 case TARGET_NR_vm86
:
9613 ret
= do_vm86(cpu_env
, arg1
, arg2
);
9617 case TARGET_NR_adjtimex
:
9619 struct timex host_buf
;
9621 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
9624 ret
= get_errno(adjtimex(&host_buf
));
9625 if (!is_error(ret
)) {
9626 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
9632 #ifdef TARGET_NR_create_module
9633 case TARGET_NR_create_module
:
9635 case TARGET_NR_init_module
:
9636 case TARGET_NR_delete_module
:
9637 #ifdef TARGET_NR_get_kernel_syms
9638 case TARGET_NR_get_kernel_syms
:
9641 case TARGET_NR_quotactl
:
9643 case TARGET_NR_getpgid
:
9644 ret
= get_errno(getpgid(arg1
));
9646 case TARGET_NR_fchdir
:
9647 ret
= get_errno(fchdir(arg1
));
9649 #ifdef TARGET_NR_bdflush /* not on x86_64 */
9650 case TARGET_NR_bdflush
:
9653 #ifdef TARGET_NR_sysfs
9654 case TARGET_NR_sysfs
:
9657 case TARGET_NR_personality
:
9658 ret
= get_errno(personality(arg1
));
9660 #ifdef TARGET_NR_afs_syscall
9661 case TARGET_NR_afs_syscall
:
9664 #ifdef TARGET_NR__llseek /* Not on alpha */
9665 case TARGET_NR__llseek
:
9668 #if !defined(__NR_llseek)
9669 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
9671 ret
= get_errno(res
);
9676 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
9678 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
9684 #ifdef TARGET_NR_getdents
9685 case TARGET_NR_getdents
:
9686 #ifdef __NR_getdents
9687 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9689 struct target_dirent
*target_dirp
;
9690 struct linux_dirent
*dirp
;
9691 abi_long count
= arg3
;
9693 dirp
= g_try_malloc(count
);
9695 ret
= -TARGET_ENOMEM
;
9699 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9700 if (!is_error(ret
)) {
9701 struct linux_dirent
*de
;
9702 struct target_dirent
*tde
;
9704 int reclen
, treclen
;
9705 int count1
, tnamelen
;
9709 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9713 reclen
= de
->d_reclen
;
9714 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
9715 assert(tnamelen
>= 0);
9716 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
9717 assert(count1
+ treclen
<= count
);
9718 tde
->d_reclen
= tswap16(treclen
);
9719 tde
->d_ino
= tswapal(de
->d_ino
);
9720 tde
->d_off
= tswapal(de
->d_off
);
9721 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
9722 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9724 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9728 unlock_user(target_dirp
, arg2
, ret
);
9734 struct linux_dirent
*dirp
;
9735 abi_long count
= arg3
;
9737 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9739 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9740 if (!is_error(ret
)) {
9741 struct linux_dirent
*de
;
9746 reclen
= de
->d_reclen
;
9749 de
->d_reclen
= tswap16(reclen
);
9750 tswapls(&de
->d_ino
);
9751 tswapls(&de
->d_off
);
9752 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9756 unlock_user(dirp
, arg2
, ret
);
9760 /* Implement getdents in terms of getdents64 */
9762 struct linux_dirent64
*dirp
;
9763 abi_long count
= arg3
;
9765 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
9769 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9770 if (!is_error(ret
)) {
9771 /* Convert the dirent64 structs to target dirent. We do this
9772 * in-place, since we can guarantee that a target_dirent is no
9773 * larger than a dirent64; however this means we have to be
9774 * careful to read everything before writing in the new format.
9776 struct linux_dirent64
*de
;
9777 struct target_dirent
*tde
;
9782 tde
= (struct target_dirent
*)dirp
;
9784 int namelen
, treclen
;
9785 int reclen
= de
->d_reclen
;
9786 uint64_t ino
= de
->d_ino
;
9787 int64_t off
= de
->d_off
;
9788 uint8_t type
= de
->d_type
;
9790 namelen
= strlen(de
->d_name
);
9791 treclen
= offsetof(struct target_dirent
, d_name
)
9793 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
9795 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
9796 tde
->d_ino
= tswapal(ino
);
9797 tde
->d_off
= tswapal(off
);
9798 tde
->d_reclen
= tswap16(treclen
);
9799 /* The target_dirent type is in what was formerly a padding
9800 * byte at the end of the structure:
9802 *(((char *)tde
) + treclen
- 1) = type
;
9804 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9805 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9811 unlock_user(dirp
, arg2
, ret
);
9815 #endif /* TARGET_NR_getdents */
9816 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9817 case TARGET_NR_getdents64
:
9819 struct linux_dirent64
*dirp
;
9820 abi_long count
= arg3
;
9821 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9823 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9824 if (!is_error(ret
)) {
9825 struct linux_dirent64
*de
;
9830 reclen
= de
->d_reclen
;
9833 de
->d_reclen
= tswap16(reclen
);
9834 tswap64s((uint64_t *)&de
->d_ino
);
9835 tswap64s((uint64_t *)&de
->d_off
);
9836 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9840 unlock_user(dirp
, arg2
, ret
);
9843 #endif /* TARGET_NR_getdents64 */
9844 #if defined(TARGET_NR__newselect)
9845 case TARGET_NR__newselect
:
9846 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9849 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9850 # ifdef TARGET_NR_poll
9851 case TARGET_NR_poll
:
9853 # ifdef TARGET_NR_ppoll
9854 case TARGET_NR_ppoll
:
9857 struct target_pollfd
*target_pfd
;
9858 unsigned int nfds
= arg2
;
9865 if (nfds
> (INT_MAX
/ sizeof(struct target_pollfd
))) {
9866 ret
= -TARGET_EINVAL
;
9870 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
9871 sizeof(struct target_pollfd
) * nfds
, 1);
9876 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
9877 for (i
= 0; i
< nfds
; i
++) {
9878 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
9879 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
9884 # ifdef TARGET_NR_ppoll
9885 case TARGET_NR_ppoll
:
9887 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
9888 target_sigset_t
*target_set
;
9889 sigset_t _set
, *set
= &_set
;
9892 if (target_to_host_timespec(timeout_ts
, arg3
)) {
9893 unlock_user(target_pfd
, arg1
, 0);
9901 if (arg5
!= sizeof(target_sigset_t
)) {
9902 unlock_user(target_pfd
, arg1
, 0);
9903 ret
= -TARGET_EINVAL
;
9907 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
9909 unlock_user(target_pfd
, arg1
, 0);
9912 target_to_host_sigset(set
, target_set
);
9917 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
9918 set
, SIGSET_T_SIZE
));
9920 if (!is_error(ret
) && arg3
) {
9921 host_to_target_timespec(arg3
, timeout_ts
);
9924 unlock_user(target_set
, arg4
, 0);
9929 # ifdef TARGET_NR_poll
9930 case TARGET_NR_poll
:
9932 struct timespec ts
, *pts
;
9935 /* Convert ms to secs, ns */
9936 ts
.tv_sec
= arg3
/ 1000;
9937 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
9940 /* -ve poll() timeout means "infinite" */
9943 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
9948 g_assert_not_reached();
9951 if (!is_error(ret
)) {
9952 for(i
= 0; i
< nfds
; i
++) {
9953 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
9956 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
9960 case TARGET_NR_flock
:
9961 /* NOTE: the flock constant seems to be the same for every
9963 ret
= get_errno(safe_flock(arg1
, arg2
));
9965 case TARGET_NR_readv
:
9967 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
9969 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
9970 unlock_iovec(vec
, arg2
, arg3
, 1);
9972 ret
= -host_to_target_errno(errno
);
9976 case TARGET_NR_writev
:
9978 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
9980 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
9981 unlock_iovec(vec
, arg2
, arg3
, 0);
9983 ret
= -host_to_target_errno(errno
);
9987 case TARGET_NR_getsid
:
9988 ret
= get_errno(getsid(arg1
));
9990 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9991 case TARGET_NR_fdatasync
:
9992 ret
= get_errno(fdatasync(arg1
));
9995 #ifdef TARGET_NR__sysctl
9996 case TARGET_NR__sysctl
:
9997 /* We don't implement this, but ENOTDIR is always a safe
9999 ret
= -TARGET_ENOTDIR
;
10002 case TARGET_NR_sched_getaffinity
:
10004 unsigned int mask_size
;
10005 unsigned long *mask
;
10008 * sched_getaffinity needs multiples of ulong, so need to take
10009 * care of mismatches between target ulong and host ulong sizes.
10011 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10012 ret
= -TARGET_EINVAL
;
10015 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10017 mask
= alloca(mask_size
);
10018 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
10020 if (!is_error(ret
)) {
10022 /* More data returned than the caller's buffer will fit.
10023 * This only happens if sizeof(abi_long) < sizeof(long)
10024 * and the caller passed us a buffer holding an odd number
10025 * of abi_longs. If the host kernel is actually using the
10026 * extra 4 bytes then fail EINVAL; otherwise we can just
10027 * ignore them and only copy the interesting part.
10029 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
10030 if (numcpus
> arg2
* 8) {
10031 ret
= -TARGET_EINVAL
;
10037 if (copy_to_user(arg3
, mask
, ret
)) {
10043 case TARGET_NR_sched_setaffinity
:
10045 unsigned int mask_size
;
10046 unsigned long *mask
;
10049 * sched_setaffinity needs multiples of ulong, so need to take
10050 * care of mismatches between target ulong and host ulong sizes.
10052 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10053 ret
= -TARGET_EINVAL
;
10056 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10058 mask
= alloca(mask_size
);
10059 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
10062 memcpy(mask
, p
, arg2
);
10063 unlock_user_struct(p
, arg2
, 0);
10065 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
10068 case TARGET_NR_sched_setparam
:
10070 struct sched_param
*target_schp
;
10071 struct sched_param schp
;
10074 return -TARGET_EINVAL
;
10076 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
10078 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10079 unlock_user_struct(target_schp
, arg2
, 0);
10080 ret
= get_errno(sched_setparam(arg1
, &schp
));
10083 case TARGET_NR_sched_getparam
:
10085 struct sched_param
*target_schp
;
10086 struct sched_param schp
;
10089 return -TARGET_EINVAL
;
10091 ret
= get_errno(sched_getparam(arg1
, &schp
));
10092 if (!is_error(ret
)) {
10093 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
10095 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
10096 unlock_user_struct(target_schp
, arg2
, 1);
10100 case TARGET_NR_sched_setscheduler
:
10102 struct sched_param
*target_schp
;
10103 struct sched_param schp
;
10105 return -TARGET_EINVAL
;
10107 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
10109 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10110 unlock_user_struct(target_schp
, arg3
, 0);
10111 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
10114 case TARGET_NR_sched_getscheduler
:
10115 ret
= get_errno(sched_getscheduler(arg1
));
10117 case TARGET_NR_sched_yield
:
10118 ret
= get_errno(sched_yield());
10120 case TARGET_NR_sched_get_priority_max
:
10121 ret
= get_errno(sched_get_priority_max(arg1
));
10123 case TARGET_NR_sched_get_priority_min
:
10124 ret
= get_errno(sched_get_priority_min(arg1
));
10126 case TARGET_NR_sched_rr_get_interval
:
10128 struct timespec ts
;
10129 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10130 if (!is_error(ret
)) {
10131 ret
= host_to_target_timespec(arg2
, &ts
);
10135 case TARGET_NR_nanosleep
:
10137 struct timespec req
, rem
;
10138 target_to_host_timespec(&req
, arg1
);
10139 ret
= get_errno(safe_nanosleep(&req
, &rem
));
10140 if (is_error(ret
) && arg2
) {
10141 host_to_target_timespec(arg2
, &rem
);
10145 #ifdef TARGET_NR_query_module
10146 case TARGET_NR_query_module
:
10147 goto unimplemented
;
10149 #ifdef TARGET_NR_nfsservctl
10150 case TARGET_NR_nfsservctl
:
10151 goto unimplemented
;
10153 case TARGET_NR_prctl
:
10155 case PR_GET_PDEATHSIG
:
10158 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
10159 if (!is_error(ret
) && arg2
10160 && put_user_ual(deathsig
, arg2
)) {
10168 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
10172 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10173 arg3
, arg4
, arg5
));
10174 unlock_user(name
, arg2
, 16);
10179 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
10183 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10184 arg3
, arg4
, arg5
));
10185 unlock_user(name
, arg2
, 0);
10190 /* Most prctl options have no pointer arguments */
10191 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
10195 #ifdef TARGET_NR_arch_prctl
10196 case TARGET_NR_arch_prctl
:
10197 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10198 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
10201 goto unimplemented
;
10204 #ifdef TARGET_NR_pread64
10205 case TARGET_NR_pread64
:
10206 if (regpairs_aligned(cpu_env
)) {
10210 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
10212 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10213 unlock_user(p
, arg2
, ret
);
10215 case TARGET_NR_pwrite64
:
10216 if (regpairs_aligned(cpu_env
)) {
10220 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
10222 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10223 unlock_user(p
, arg2
, 0);
10226 case TARGET_NR_getcwd
:
10227 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
10229 ret
= get_errno(sys_getcwd1(p
, arg2
));
10230 unlock_user(p
, arg1
, ret
);
10232 case TARGET_NR_capget
:
10233 case TARGET_NR_capset
:
10235 struct target_user_cap_header
*target_header
;
10236 struct target_user_cap_data
*target_data
= NULL
;
10237 struct __user_cap_header_struct header
;
10238 struct __user_cap_data_struct data
[2];
10239 struct __user_cap_data_struct
*dataptr
= NULL
;
10240 int i
, target_datalen
;
10241 int data_items
= 1;
10243 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
10246 header
.version
= tswap32(target_header
->version
);
10247 header
.pid
= tswap32(target_header
->pid
);
10249 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
10250 /* Version 2 and up takes pointer to two user_data structs */
10254 target_datalen
= sizeof(*target_data
) * data_items
;
10257 if (num
== TARGET_NR_capget
) {
10258 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
10260 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
10262 if (!target_data
) {
10263 unlock_user_struct(target_header
, arg1
, 0);
10267 if (num
== TARGET_NR_capset
) {
10268 for (i
= 0; i
< data_items
; i
++) {
10269 data
[i
].effective
= tswap32(target_data
[i
].effective
);
10270 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
10271 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
10278 if (num
== TARGET_NR_capget
) {
10279 ret
= get_errno(capget(&header
, dataptr
));
10281 ret
= get_errno(capset(&header
, dataptr
));
10284 /* The kernel always updates version for both capget and capset */
10285 target_header
->version
= tswap32(header
.version
);
10286 unlock_user_struct(target_header
, arg1
, 1);
10289 if (num
== TARGET_NR_capget
) {
10290 for (i
= 0; i
< data_items
; i
++) {
10291 target_data
[i
].effective
= tswap32(data
[i
].effective
);
10292 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
10293 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
10295 unlock_user(target_data
, arg2
, target_datalen
);
10297 unlock_user(target_data
, arg2
, 0);
10302 case TARGET_NR_sigaltstack
:
10303 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
10306 #ifdef CONFIG_SENDFILE
10307 case TARGET_NR_sendfile
:
10309 off_t
*offp
= NULL
;
10312 ret
= get_user_sal(off
, arg3
);
10313 if (is_error(ret
)) {
10318 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10319 if (!is_error(ret
) && arg3
) {
10320 abi_long ret2
= put_user_sal(off
, arg3
);
10321 if (is_error(ret2
)) {
10327 #ifdef TARGET_NR_sendfile64
10328 case TARGET_NR_sendfile64
:
10330 off_t
*offp
= NULL
;
10333 ret
= get_user_s64(off
, arg3
);
10334 if (is_error(ret
)) {
10339 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10340 if (!is_error(ret
) && arg3
) {
10341 abi_long ret2
= put_user_s64(off
, arg3
);
10342 if (is_error(ret2
)) {
10350 case TARGET_NR_sendfile
:
10351 #ifdef TARGET_NR_sendfile64
10352 case TARGET_NR_sendfile64
:
10354 goto unimplemented
;
10357 #ifdef TARGET_NR_getpmsg
10358 case TARGET_NR_getpmsg
:
10359 goto unimplemented
;
10361 #ifdef TARGET_NR_putpmsg
10362 case TARGET_NR_putpmsg
:
10363 goto unimplemented
;
10365 #ifdef TARGET_NR_vfork
10366 case TARGET_NR_vfork
:
10367 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
10371 #ifdef TARGET_NR_ugetrlimit
10372 case TARGET_NR_ugetrlimit
:
10374 struct rlimit rlim
;
10375 int resource
= target_to_host_resource(arg1
);
10376 ret
= get_errno(getrlimit(resource
, &rlim
));
10377 if (!is_error(ret
)) {
10378 struct target_rlimit
*target_rlim
;
10379 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
10381 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
10382 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
10383 unlock_user_struct(target_rlim
, arg2
, 1);
10388 #ifdef TARGET_NR_truncate64
10389 case TARGET_NR_truncate64
:
10390 if (!(p
= lock_user_string(arg1
)))
10392 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
10393 unlock_user(p
, arg1
, 0);
10396 #ifdef TARGET_NR_ftruncate64
10397 case TARGET_NR_ftruncate64
:
10398 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
10401 #ifdef TARGET_NR_stat64
10402 case TARGET_NR_stat64
:
10403 if (!(p
= lock_user_string(arg1
)))
10405 ret
= get_errno(stat(path(p
), &st
));
10406 unlock_user(p
, arg1
, 0);
10407 if (!is_error(ret
))
10408 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10411 #ifdef TARGET_NR_lstat64
10412 case TARGET_NR_lstat64
:
10413 if (!(p
= lock_user_string(arg1
)))
10415 ret
= get_errno(lstat(path(p
), &st
));
10416 unlock_user(p
, arg1
, 0);
10417 if (!is_error(ret
))
10418 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10421 #ifdef TARGET_NR_fstat64
10422 case TARGET_NR_fstat64
:
10423 ret
= get_errno(fstat(arg1
, &st
));
10424 if (!is_error(ret
))
10425 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10428 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10429 #ifdef TARGET_NR_fstatat64
10430 case TARGET_NR_fstatat64
:
10432 #ifdef TARGET_NR_newfstatat
10433 case TARGET_NR_newfstatat
:
10435 if (!(p
= lock_user_string(arg2
)))
10437 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
10438 if (!is_error(ret
))
10439 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
10442 #ifdef TARGET_NR_lchown
10443 case TARGET_NR_lchown
:
10444 if (!(p
= lock_user_string(arg1
)))
10446 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10447 unlock_user(p
, arg1
, 0);
10450 #ifdef TARGET_NR_getuid
10451 case TARGET_NR_getuid
:
10452 ret
= get_errno(high2lowuid(getuid()));
10455 #ifdef TARGET_NR_getgid
10456 case TARGET_NR_getgid
:
10457 ret
= get_errno(high2lowgid(getgid()));
10460 #ifdef TARGET_NR_geteuid
10461 case TARGET_NR_geteuid
:
10462 ret
= get_errno(high2lowuid(geteuid()));
10465 #ifdef TARGET_NR_getegid
10466 case TARGET_NR_getegid
:
10467 ret
= get_errno(high2lowgid(getegid()));
10470 case TARGET_NR_setreuid
:
10471 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
10473 case TARGET_NR_setregid
:
10474 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
10476 case TARGET_NR_getgroups
:
10478 int gidsetsize
= arg1
;
10479 target_id
*target_grouplist
;
10483 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10484 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10485 if (gidsetsize
== 0)
10487 if (!is_error(ret
)) {
10488 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
10489 if (!target_grouplist
)
10491 for(i
= 0;i
< ret
; i
++)
10492 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
10493 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
10497 case TARGET_NR_setgroups
:
10499 int gidsetsize
= arg1
;
10500 target_id
*target_grouplist
;
10501 gid_t
*grouplist
= NULL
;
10504 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10505 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
10506 if (!target_grouplist
) {
10507 ret
= -TARGET_EFAULT
;
10510 for (i
= 0; i
< gidsetsize
; i
++) {
10511 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
10513 unlock_user(target_grouplist
, arg2
, 0);
10515 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
10518 case TARGET_NR_fchown
:
10519 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
10521 #if defined(TARGET_NR_fchownat)
10522 case TARGET_NR_fchownat
:
10523 if (!(p
= lock_user_string(arg2
)))
10525 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
10526 low2highgid(arg4
), arg5
));
10527 unlock_user(p
, arg2
, 0);
10530 #ifdef TARGET_NR_setresuid
10531 case TARGET_NR_setresuid
:
10532 ret
= get_errno(sys_setresuid(low2highuid(arg1
),
10534 low2highuid(arg3
)));
10537 #ifdef TARGET_NR_getresuid
10538 case TARGET_NR_getresuid
:
10540 uid_t ruid
, euid
, suid
;
10541 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10542 if (!is_error(ret
)) {
10543 if (put_user_id(high2lowuid(ruid
), arg1
)
10544 || put_user_id(high2lowuid(euid
), arg2
)
10545 || put_user_id(high2lowuid(suid
), arg3
))
10551 #ifdef TARGET_NR_getresgid
10552 case TARGET_NR_setresgid
:
10553 ret
= get_errno(sys_setresgid(low2highgid(arg1
),
10555 low2highgid(arg3
)));
10558 #ifdef TARGET_NR_getresgid
10559 case TARGET_NR_getresgid
:
10561 gid_t rgid
, egid
, sgid
;
10562 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10563 if (!is_error(ret
)) {
10564 if (put_user_id(high2lowgid(rgid
), arg1
)
10565 || put_user_id(high2lowgid(egid
), arg2
)
10566 || put_user_id(high2lowgid(sgid
), arg3
))
10572 #ifdef TARGET_NR_chown
10573 case TARGET_NR_chown
:
10574 if (!(p
= lock_user_string(arg1
)))
10576 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10577 unlock_user(p
, arg1
, 0);
10580 case TARGET_NR_setuid
:
10581 ret
= get_errno(sys_setuid(low2highuid(arg1
)));
10583 case TARGET_NR_setgid
:
10584 ret
= get_errno(sys_setgid(low2highgid(arg1
)));
10586 case TARGET_NR_setfsuid
:
10587 ret
= get_errno(setfsuid(arg1
));
10589 case TARGET_NR_setfsgid
:
10590 ret
= get_errno(setfsgid(arg1
));
10593 #ifdef TARGET_NR_lchown32
10594 case TARGET_NR_lchown32
:
10595 if (!(p
= lock_user_string(arg1
)))
10597 ret
= get_errno(lchown(p
, arg2
, arg3
));
10598 unlock_user(p
, arg1
, 0);
10601 #ifdef TARGET_NR_getuid32
10602 case TARGET_NR_getuid32
:
10603 ret
= get_errno(getuid());
10607 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10608 /* Alpha specific */
10609 case TARGET_NR_getxuid
:
10613 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
10615 ret
= get_errno(getuid());
10618 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10619 /* Alpha specific */
10620 case TARGET_NR_getxgid
:
10624 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
10626 ret
= get_errno(getgid());
10629 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10630 /* Alpha specific */
10631 case TARGET_NR_osf_getsysinfo
:
10632 ret
= -TARGET_EOPNOTSUPP
;
10634 case TARGET_GSI_IEEE_FP_CONTROL
:
10636 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
10638 /* Copied from linux ieee_fpcr_to_swcr. */
10639 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
10640 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
10641 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
10642 | SWCR_TRAP_ENABLE_DZE
10643 | SWCR_TRAP_ENABLE_OVF
);
10644 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
10645 | SWCR_TRAP_ENABLE_INE
);
10646 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
10647 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
10649 if (put_user_u64 (swcr
, arg2
))
10655 /* case GSI_IEEE_STATE_AT_SIGNAL:
10656 -- Not implemented in linux kernel.
10658 -- Retrieves current unaligned access state; not much used.
10659 case GSI_PROC_TYPE:
10660 -- Retrieves implver information; surely not used.
10661 case GSI_GET_HWRPB:
10662 -- Grabs a copy of the HWRPB; surely not used.
10667 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10668 /* Alpha specific */
10669 case TARGET_NR_osf_setsysinfo
:
10670 ret
= -TARGET_EOPNOTSUPP
;
10672 case TARGET_SSI_IEEE_FP_CONTROL
:
10674 uint64_t swcr
, fpcr
, orig_fpcr
;
10676 if (get_user_u64 (swcr
, arg2
)) {
10679 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10680 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
10682 /* Copied from linux ieee_swcr_to_fpcr. */
10683 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
10684 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
10685 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
10686 | SWCR_TRAP_ENABLE_DZE
10687 | SWCR_TRAP_ENABLE_OVF
)) << 48;
10688 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
10689 | SWCR_TRAP_ENABLE_INE
)) << 57;
10690 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
10691 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
10693 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10698 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
10700 uint64_t exc
, fpcr
, orig_fpcr
;
10703 if (get_user_u64(exc
, arg2
)) {
10707 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10709 /* We only add to the exception status here. */
10710 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
10712 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10715 /* Old exceptions are not signaled. */
10716 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
10718 /* If any exceptions set by this call,
10719 and are unmasked, send a signal. */
10721 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
10722 si_code
= TARGET_FPE_FLTRES
;
10724 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
10725 si_code
= TARGET_FPE_FLTUND
;
10727 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
10728 si_code
= TARGET_FPE_FLTOVF
;
10730 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
10731 si_code
= TARGET_FPE_FLTDIV
;
10733 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
10734 si_code
= TARGET_FPE_FLTINV
;
10736 if (si_code
!= 0) {
10737 target_siginfo_t info
;
10738 info
.si_signo
= SIGFPE
;
10740 info
.si_code
= si_code
;
10741 info
._sifields
._sigfault
._addr
10742 = ((CPUArchState
*)cpu_env
)->pc
;
10743 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
10744 QEMU_SI_FAULT
, &info
);
10749 /* case SSI_NVPAIRS:
10750 -- Used with SSIN_UACPROC to enable unaligned accesses.
10751 case SSI_IEEE_STATE_AT_SIGNAL:
10752 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10753 -- Not implemented in linux kernel
10758 #ifdef TARGET_NR_osf_sigprocmask
10759 /* Alpha specific. */
10760 case TARGET_NR_osf_sigprocmask
:
10764 sigset_t set
, oldset
;
10767 case TARGET_SIG_BLOCK
:
10770 case TARGET_SIG_UNBLOCK
:
10773 case TARGET_SIG_SETMASK
:
10777 ret
= -TARGET_EINVAL
;
10781 target_to_host_old_sigset(&set
, &mask
);
10782 ret
= do_sigprocmask(how
, &set
, &oldset
);
10784 host_to_target_old_sigset(&mask
, &oldset
);
10791 #ifdef TARGET_NR_getgid32
10792 case TARGET_NR_getgid32
:
10793 ret
= get_errno(getgid());
10796 #ifdef TARGET_NR_geteuid32
10797 case TARGET_NR_geteuid32
:
10798 ret
= get_errno(geteuid());
10801 #ifdef TARGET_NR_getegid32
10802 case TARGET_NR_getegid32
:
10803 ret
= get_errno(getegid());
10806 #ifdef TARGET_NR_setreuid32
10807 case TARGET_NR_setreuid32
:
10808 ret
= get_errno(setreuid(arg1
, arg2
));
10811 #ifdef TARGET_NR_setregid32
10812 case TARGET_NR_setregid32
:
10813 ret
= get_errno(setregid(arg1
, arg2
));
10816 #ifdef TARGET_NR_getgroups32
10817 case TARGET_NR_getgroups32
:
10819 int gidsetsize
= arg1
;
10820 uint32_t *target_grouplist
;
10824 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10825 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10826 if (gidsetsize
== 0)
10828 if (!is_error(ret
)) {
10829 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
10830 if (!target_grouplist
) {
10831 ret
= -TARGET_EFAULT
;
10834 for(i
= 0;i
< ret
; i
++)
10835 target_grouplist
[i
] = tswap32(grouplist
[i
]);
10836 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
10841 #ifdef TARGET_NR_setgroups32
10842 case TARGET_NR_setgroups32
:
10844 int gidsetsize
= arg1
;
10845 uint32_t *target_grouplist
;
10849 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10850 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
10851 if (!target_grouplist
) {
10852 ret
= -TARGET_EFAULT
;
10855 for(i
= 0;i
< gidsetsize
; i
++)
10856 grouplist
[i
] = tswap32(target_grouplist
[i
]);
10857 unlock_user(target_grouplist
, arg2
, 0);
10858 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
10862 #ifdef TARGET_NR_fchown32
10863 case TARGET_NR_fchown32
:
10864 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
10867 #ifdef TARGET_NR_setresuid32
10868 case TARGET_NR_setresuid32
:
10869 ret
= get_errno(sys_setresuid(arg1
, arg2
, arg3
));
10872 #ifdef TARGET_NR_getresuid32
10873 case TARGET_NR_getresuid32
:
10875 uid_t ruid
, euid
, suid
;
10876 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10877 if (!is_error(ret
)) {
10878 if (put_user_u32(ruid
, arg1
)
10879 || put_user_u32(euid
, arg2
)
10880 || put_user_u32(suid
, arg3
))
10886 #ifdef TARGET_NR_setresgid32
10887 case TARGET_NR_setresgid32
:
10888 ret
= get_errno(sys_setresgid(arg1
, arg2
, arg3
));
10891 #ifdef TARGET_NR_getresgid32
10892 case TARGET_NR_getresgid32
:
10894 gid_t rgid
, egid
, sgid
;
10895 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10896 if (!is_error(ret
)) {
10897 if (put_user_u32(rgid
, arg1
)
10898 || put_user_u32(egid
, arg2
)
10899 || put_user_u32(sgid
, arg3
))
10905 #ifdef TARGET_NR_chown32
10906 case TARGET_NR_chown32
:
10907 if (!(p
= lock_user_string(arg1
)))
10909 ret
= get_errno(chown(p
, arg2
, arg3
));
10910 unlock_user(p
, arg1
, 0);
10913 #ifdef TARGET_NR_setuid32
10914 case TARGET_NR_setuid32
:
10915 ret
= get_errno(sys_setuid(arg1
));
10918 #ifdef TARGET_NR_setgid32
10919 case TARGET_NR_setgid32
:
10920 ret
= get_errno(sys_setgid(arg1
));
10923 #ifdef TARGET_NR_setfsuid32
10924 case TARGET_NR_setfsuid32
:
10925 ret
= get_errno(setfsuid(arg1
));
10928 #ifdef TARGET_NR_setfsgid32
10929 case TARGET_NR_setfsgid32
:
10930 ret
= get_errno(setfsgid(arg1
));
10934 case TARGET_NR_pivot_root
:
10935 goto unimplemented
;
10936 #ifdef TARGET_NR_mincore
10937 case TARGET_NR_mincore
:
10940 ret
= -TARGET_EFAULT
;
10941 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
10943 if (!(p
= lock_user_string(arg3
)))
10945 ret
= get_errno(mincore(a
, arg2
, p
));
10946 unlock_user(p
, arg3
, ret
);
10948 unlock_user(a
, arg1
, 0);
10952 #ifdef TARGET_NR_arm_fadvise64_64
10953 case TARGET_NR_arm_fadvise64_64
:
10954 /* arm_fadvise64_64 looks like fadvise64_64 but
10955 * with different argument order: fd, advice, offset, len
10956 * rather than the usual fd, offset, len, advice.
10957 * Note that offset and len are both 64-bit so appear as
10958 * pairs of 32-bit registers.
10960 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
10961 target_offset64(arg5
, arg6
), arg2
);
10962 ret
= -host_to_target_errno(ret
);
10966 #if TARGET_ABI_BITS == 32
10968 #ifdef TARGET_NR_fadvise64_64
10969 case TARGET_NR_fadvise64_64
:
10970 /* 6 args: fd, offset (high, low), len (high, low), advice */
10971 if (regpairs_aligned(cpu_env
)) {
10972 /* offset is in (3,4), len in (5,6) and advice in 7 */
10979 ret
= -host_to_target_errno(posix_fadvise(arg1
,
10980 target_offset64(arg2
, arg3
),
10981 target_offset64(arg4
, arg5
),
10986 #ifdef TARGET_NR_fadvise64
10987 case TARGET_NR_fadvise64
:
10988 /* 5 args: fd, offset (high, low), len, advice */
10989 if (regpairs_aligned(cpu_env
)) {
10990 /* offset is in (3,4), len in 5 and advice in 6 */
10996 ret
= -host_to_target_errno(posix_fadvise(arg1
,
10997 target_offset64(arg2
, arg3
),
11002 #else /* not a 32-bit ABI */
11003 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11004 #ifdef TARGET_NR_fadvise64_64
11005 case TARGET_NR_fadvise64_64
:
11007 #ifdef TARGET_NR_fadvise64
11008 case TARGET_NR_fadvise64
:
11010 #ifdef TARGET_S390X
11012 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
11013 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
11014 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
11015 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
11019 ret
= -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
11022 #endif /* end of 64-bit ABI fadvise handling */
11024 #ifdef TARGET_NR_madvise
11025 case TARGET_NR_madvise
:
11026 /* A straight passthrough may not be safe because qemu sometimes
11027 turns private file-backed mappings into anonymous mappings.
11028 This will break MADV_DONTNEED.
11029 This is a hint, so ignoring and returning success is ok. */
11030 ret
= get_errno(0);
11033 #if TARGET_ABI_BITS == 32
11034 case TARGET_NR_fcntl64
:
11038 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
11039 to_flock64_fn
*copyto
= copy_to_user_flock64
;
11042 if (((CPUARMState
*)cpu_env
)->eabi
) {
11043 copyfrom
= copy_from_user_eabi_flock64
;
11044 copyto
= copy_to_user_eabi_flock64
;
11048 cmd
= target_to_host_fcntl_cmd(arg2
);
11049 if (cmd
== -TARGET_EINVAL
) {
11055 case TARGET_F_GETLK64
:
11056 ret
= copyfrom(&fl
, arg3
);
11060 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
11062 ret
= copyto(arg3
, &fl
);
11066 case TARGET_F_SETLK64
:
11067 case TARGET_F_SETLKW64
:
11068 ret
= copyfrom(&fl
, arg3
);
11072 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11075 ret
= do_fcntl(arg1
, arg2
, arg3
);
11081 #ifdef TARGET_NR_cacheflush
11082 case TARGET_NR_cacheflush
:
11083 /* self-modifying code is handled automatically, so nothing needed */
11087 #ifdef TARGET_NR_security
11088 case TARGET_NR_security
:
11089 goto unimplemented
;
11091 #ifdef TARGET_NR_getpagesize
11092 case TARGET_NR_getpagesize
:
11093 ret
= TARGET_PAGE_SIZE
;
11096 case TARGET_NR_gettid
:
11097 ret
= get_errno(gettid());
11099 #ifdef TARGET_NR_readahead
11100 case TARGET_NR_readahead
:
11101 #if TARGET_ABI_BITS == 32
11102 if (regpairs_aligned(cpu_env
)) {
11107 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
11109 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
11114 #ifdef TARGET_NR_setxattr
11115 case TARGET_NR_listxattr
:
11116 case TARGET_NR_llistxattr
:
11120 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11122 ret
= -TARGET_EFAULT
;
11126 p
= lock_user_string(arg1
);
11128 if (num
== TARGET_NR_listxattr
) {
11129 ret
= get_errno(listxattr(p
, b
, arg3
));
11131 ret
= get_errno(llistxattr(p
, b
, arg3
));
11134 ret
= -TARGET_EFAULT
;
11136 unlock_user(p
, arg1
, 0);
11137 unlock_user(b
, arg2
, arg3
);
11140 case TARGET_NR_flistxattr
:
11144 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11146 ret
= -TARGET_EFAULT
;
11150 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
11151 unlock_user(b
, arg2
, arg3
);
11154 case TARGET_NR_setxattr
:
11155 case TARGET_NR_lsetxattr
:
11157 void *p
, *n
, *v
= 0;
11159 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11161 ret
= -TARGET_EFAULT
;
11165 p
= lock_user_string(arg1
);
11166 n
= lock_user_string(arg2
);
11168 if (num
== TARGET_NR_setxattr
) {
11169 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
11171 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
11174 ret
= -TARGET_EFAULT
;
11176 unlock_user(p
, arg1
, 0);
11177 unlock_user(n
, arg2
, 0);
11178 unlock_user(v
, arg3
, 0);
11181 case TARGET_NR_fsetxattr
:
11185 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11187 ret
= -TARGET_EFAULT
;
11191 n
= lock_user_string(arg2
);
11193 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
11195 ret
= -TARGET_EFAULT
;
11197 unlock_user(n
, arg2
, 0);
11198 unlock_user(v
, arg3
, 0);
11201 case TARGET_NR_getxattr
:
11202 case TARGET_NR_lgetxattr
:
11204 void *p
, *n
, *v
= 0;
11206 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11208 ret
= -TARGET_EFAULT
;
11212 p
= lock_user_string(arg1
);
11213 n
= lock_user_string(arg2
);
11215 if (num
== TARGET_NR_getxattr
) {
11216 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
11218 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
11221 ret
= -TARGET_EFAULT
;
11223 unlock_user(p
, arg1
, 0);
11224 unlock_user(n
, arg2
, 0);
11225 unlock_user(v
, arg3
, arg4
);
11228 case TARGET_NR_fgetxattr
:
11232 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11234 ret
= -TARGET_EFAULT
;
11238 n
= lock_user_string(arg2
);
11240 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
11242 ret
= -TARGET_EFAULT
;
11244 unlock_user(n
, arg2
, 0);
11245 unlock_user(v
, arg3
, arg4
);
11248 case TARGET_NR_removexattr
:
11249 case TARGET_NR_lremovexattr
:
11252 p
= lock_user_string(arg1
);
11253 n
= lock_user_string(arg2
);
11255 if (num
== TARGET_NR_removexattr
) {
11256 ret
= get_errno(removexattr(p
, n
));
11258 ret
= get_errno(lremovexattr(p
, n
));
11261 ret
= -TARGET_EFAULT
;
11263 unlock_user(p
, arg1
, 0);
11264 unlock_user(n
, arg2
, 0);
11267 case TARGET_NR_fremovexattr
:
11270 n
= lock_user_string(arg2
);
11272 ret
= get_errno(fremovexattr(arg1
, n
));
11274 ret
= -TARGET_EFAULT
;
11276 unlock_user(n
, arg2
, 0);
11280 #endif /* CONFIG_ATTR */
11281 #ifdef TARGET_NR_set_thread_area
11282 case TARGET_NR_set_thread_area
:
11283 #if defined(TARGET_MIPS)
11284 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
11287 #elif defined(TARGET_CRIS)
11289 ret
= -TARGET_EINVAL
;
11291 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
11295 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11296 ret
= do_set_thread_area(cpu_env
, arg1
);
11298 #elif defined(TARGET_M68K)
11300 TaskState
*ts
= cpu
->opaque
;
11301 ts
->tp_value
= arg1
;
11306 goto unimplemented_nowarn
;
11309 #ifdef TARGET_NR_get_thread_area
11310 case TARGET_NR_get_thread_area
:
11311 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11312 ret
= do_get_thread_area(cpu_env
, arg1
);
11314 #elif defined(TARGET_M68K)
11316 TaskState
*ts
= cpu
->opaque
;
11317 ret
= ts
->tp_value
;
11321 goto unimplemented_nowarn
;
11324 #ifdef TARGET_NR_getdomainname
11325 case TARGET_NR_getdomainname
:
11326 goto unimplemented_nowarn
;
11329 #ifdef TARGET_NR_clock_gettime
11330 case TARGET_NR_clock_gettime
:
11332 struct timespec ts
;
11333 ret
= get_errno(clock_gettime(arg1
, &ts
));
11334 if (!is_error(ret
)) {
11335 host_to_target_timespec(arg2
, &ts
);
11340 #ifdef TARGET_NR_clock_getres
11341 case TARGET_NR_clock_getres
:
11343 struct timespec ts
;
11344 ret
= get_errno(clock_getres(arg1
, &ts
));
11345 if (!is_error(ret
)) {
11346 host_to_target_timespec(arg2
, &ts
);
11351 #ifdef TARGET_NR_clock_nanosleep
11352 case TARGET_NR_clock_nanosleep
:
11354 struct timespec ts
;
11355 target_to_host_timespec(&ts
, arg3
);
11356 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
11357 &ts
, arg4
? &ts
: NULL
));
11359 host_to_target_timespec(arg4
, &ts
);
11361 #if defined(TARGET_PPC)
11362 /* clock_nanosleep is odd in that it returns positive errno values.
11363 * On PPC, CR0 bit 3 should be set in such a situation. */
11364 if (ret
&& ret
!= -TARGET_ERESTARTSYS
) {
11365 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
11372 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11373 case TARGET_NR_set_tid_address
:
11374 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
11378 case TARGET_NR_tkill
:
11379 ret
= get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
11382 case TARGET_NR_tgkill
:
11383 ret
= get_errno(safe_tgkill((int)arg1
, (int)arg2
,
11384 target_to_host_signal(arg3
)));
11387 #ifdef TARGET_NR_set_robust_list
11388 case TARGET_NR_set_robust_list
:
11389 case TARGET_NR_get_robust_list
:
11390 /* The ABI for supporting robust futexes has userspace pass
11391 * the kernel a pointer to a linked list which is updated by
11392 * userspace after the syscall; the list is walked by the kernel
11393 * when the thread exits. Since the linked list in QEMU guest
11394 * memory isn't a valid linked list for the host and we have
11395 * no way to reliably intercept the thread-death event, we can't
11396 * support these. Silently return ENOSYS so that guest userspace
11397 * falls back to a non-robust futex implementation (which should
11398 * be OK except in the corner case of the guest crashing while
11399 * holding a mutex that is shared with another process via
11402 goto unimplemented_nowarn
;
11405 #if defined(TARGET_NR_utimensat)
11406 case TARGET_NR_utimensat
:
11408 struct timespec
*tsp
, ts
[2];
11412 target_to_host_timespec(ts
, arg3
);
11413 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
11417 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
11419 if (!(p
= lock_user_string(arg2
))) {
11420 ret
= -TARGET_EFAULT
;
11423 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
11424 unlock_user(p
, arg2
, 0);
11429 case TARGET_NR_futex
:
11430 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11432 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11433 case TARGET_NR_inotify_init
:
11434 ret
= get_errno(sys_inotify_init());
11437 #ifdef CONFIG_INOTIFY1
11438 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11439 case TARGET_NR_inotify_init1
:
11440 ret
= get_errno(sys_inotify_init1(arg1
));
11444 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11445 case TARGET_NR_inotify_add_watch
:
11446 p
= lock_user_string(arg2
);
11447 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
11448 unlock_user(p
, arg2
, 0);
11451 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11452 case TARGET_NR_inotify_rm_watch
:
11453 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
11457 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11458 case TARGET_NR_mq_open
:
11460 struct mq_attr posix_mq_attr
;
11463 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
11464 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
11467 p
= lock_user_string(arg1
- 1);
11471 ret
= get_errno(mq_open(p
, host_flags
, arg3
, &posix_mq_attr
));
11472 unlock_user (p
, arg1
, 0);
11476 case TARGET_NR_mq_unlink
:
11477 p
= lock_user_string(arg1
- 1);
11479 ret
= -TARGET_EFAULT
;
11482 ret
= get_errno(mq_unlink(p
));
11483 unlock_user (p
, arg1
, 0);
11486 case TARGET_NR_mq_timedsend
:
11488 struct timespec ts
;
11490 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11492 target_to_host_timespec(&ts
, arg5
);
11493 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
11494 host_to_target_timespec(arg5
, &ts
);
11496 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
11498 unlock_user (p
, arg2
, arg3
);
11502 case TARGET_NR_mq_timedreceive
:
11504 struct timespec ts
;
11507 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11509 target_to_host_timespec(&ts
, arg5
);
11510 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11512 host_to_target_timespec(arg5
, &ts
);
11514 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11517 unlock_user (p
, arg2
, arg3
);
11519 put_user_u32(prio
, arg4
);
11523 /* Not implemented for now... */
11524 /* case TARGET_NR_mq_notify: */
11527 case TARGET_NR_mq_getsetattr
:
11529 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
11532 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
11533 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
11536 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
11537 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
11544 #ifdef CONFIG_SPLICE
11545 #ifdef TARGET_NR_tee
11546 case TARGET_NR_tee
:
11548 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
11552 #ifdef TARGET_NR_splice
11553 case TARGET_NR_splice
:
11555 loff_t loff_in
, loff_out
;
11556 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
11558 if (get_user_u64(loff_in
, arg2
)) {
11561 ploff_in
= &loff_in
;
11564 if (get_user_u64(loff_out
, arg4
)) {
11567 ploff_out
= &loff_out
;
11569 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
11571 if (put_user_u64(loff_in
, arg2
)) {
11576 if (put_user_u64(loff_out
, arg4
)) {
11583 #ifdef TARGET_NR_vmsplice
11584 case TARGET_NR_vmsplice
:
11586 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
11588 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
11589 unlock_iovec(vec
, arg2
, arg3
, 0);
11591 ret
= -host_to_target_errno(errno
);
11596 #endif /* CONFIG_SPLICE */
11597 #ifdef CONFIG_EVENTFD
11598 #if defined(TARGET_NR_eventfd)
11599 case TARGET_NR_eventfd
:
11600 ret
= get_errno(eventfd(arg1
, 0));
11601 fd_trans_unregister(ret
);
11604 #if defined(TARGET_NR_eventfd2)
11605 case TARGET_NR_eventfd2
:
11607 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
11608 if (arg2
& TARGET_O_NONBLOCK
) {
11609 host_flags
|= O_NONBLOCK
;
11611 if (arg2
& TARGET_O_CLOEXEC
) {
11612 host_flags
|= O_CLOEXEC
;
11614 ret
= get_errno(eventfd(arg1
, host_flags
));
11615 fd_trans_unregister(ret
);
11619 #endif /* CONFIG_EVENTFD */
11620 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11621 case TARGET_NR_fallocate
:
11622 #if TARGET_ABI_BITS == 32
11623 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
11624 target_offset64(arg5
, arg6
)));
11626 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
11630 #if defined(CONFIG_SYNC_FILE_RANGE)
11631 #if defined(TARGET_NR_sync_file_range)
11632 case TARGET_NR_sync_file_range
:
11633 #if TARGET_ABI_BITS == 32
11634 #if defined(TARGET_MIPS)
11635 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11636 target_offset64(arg5
, arg6
), arg7
));
11638 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
11639 target_offset64(arg4
, arg5
), arg6
));
11640 #endif /* !TARGET_MIPS */
11642 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
11646 #if defined(TARGET_NR_sync_file_range2)
11647 case TARGET_NR_sync_file_range2
:
11648 /* This is like sync_file_range but the arguments are reordered */
11649 #if TARGET_ABI_BITS == 32
11650 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11651 target_offset64(arg5
, arg6
), arg2
));
11653 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
11658 #if defined(TARGET_NR_signalfd4)
11659 case TARGET_NR_signalfd4
:
11660 ret
= do_signalfd4(arg1
, arg2
, arg4
);
11663 #if defined(TARGET_NR_signalfd)
11664 case TARGET_NR_signalfd
:
11665 ret
= do_signalfd4(arg1
, arg2
, 0);
11668 #if defined(CONFIG_EPOLL)
11669 #if defined(TARGET_NR_epoll_create)
11670 case TARGET_NR_epoll_create
:
11671 ret
= get_errno(epoll_create(arg1
));
11674 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11675 case TARGET_NR_epoll_create1
:
11676 ret
= get_errno(epoll_create1(arg1
));
11679 #if defined(TARGET_NR_epoll_ctl)
11680 case TARGET_NR_epoll_ctl
:
11682 struct epoll_event ep
;
11683 struct epoll_event
*epp
= 0;
11685 struct target_epoll_event
*target_ep
;
11686 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
11689 ep
.events
= tswap32(target_ep
->events
);
11690 /* The epoll_data_t union is just opaque data to the kernel,
11691 * so we transfer all 64 bits across and need not worry what
11692 * actual data type it is.
11694 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
11695 unlock_user_struct(target_ep
, arg4
, 0);
11698 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
11703 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11704 #if defined(TARGET_NR_epoll_wait)
11705 case TARGET_NR_epoll_wait
:
11707 #if defined(TARGET_NR_epoll_pwait)
11708 case TARGET_NR_epoll_pwait
:
11711 struct target_epoll_event
*target_ep
;
11712 struct epoll_event
*ep
;
11714 int maxevents
= arg3
;
11715 int timeout
= arg4
;
11717 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
11718 ret
= -TARGET_EINVAL
;
11722 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
11723 maxevents
* sizeof(struct target_epoll_event
), 1);
11728 ep
= alloca(maxevents
* sizeof(struct epoll_event
));
11731 #if defined(TARGET_NR_epoll_pwait)
11732 case TARGET_NR_epoll_pwait
:
11734 target_sigset_t
*target_set
;
11735 sigset_t _set
, *set
= &_set
;
11738 if (arg6
!= sizeof(target_sigset_t
)) {
11739 ret
= -TARGET_EINVAL
;
11743 target_set
= lock_user(VERIFY_READ
, arg5
,
11744 sizeof(target_sigset_t
), 1);
11746 unlock_user(target_ep
, arg2
, 0);
11749 target_to_host_sigset(set
, target_set
);
11750 unlock_user(target_set
, arg5
, 0);
11755 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
11756 set
, SIGSET_T_SIZE
));
11760 #if defined(TARGET_NR_epoll_wait)
11761 case TARGET_NR_epoll_wait
:
11762 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
11767 ret
= -TARGET_ENOSYS
;
11769 if (!is_error(ret
)) {
11771 for (i
= 0; i
< ret
; i
++) {
11772 target_ep
[i
].events
= tswap32(ep
[i
].events
);
11773 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
11776 unlock_user(target_ep
, arg2
, ret
* sizeof(struct target_epoll_event
));
11781 #ifdef TARGET_NR_prlimit64
11782 case TARGET_NR_prlimit64
:
11784 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11785 struct target_rlimit64
*target_rnew
, *target_rold
;
11786 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
11787 int resource
= target_to_host_resource(arg2
);
11789 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
11792 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
11793 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
11794 unlock_user_struct(target_rnew
, arg3
, 0);
11798 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
11799 if (!is_error(ret
) && arg4
) {
11800 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
11803 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
11804 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
11805 unlock_user_struct(target_rold
, arg4
, 1);
11810 #ifdef TARGET_NR_gethostname
11811 case TARGET_NR_gethostname
:
11813 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
11815 ret
= get_errno(gethostname(name
, arg2
));
11816 unlock_user(name
, arg1
, arg2
);
11818 ret
= -TARGET_EFAULT
;
11823 #ifdef TARGET_NR_atomic_cmpxchg_32
11824 case TARGET_NR_atomic_cmpxchg_32
:
11826 /* should use start_exclusive from main.c */
11827 abi_ulong mem_value
;
11828 if (get_user_u32(mem_value
, arg6
)) {
11829 target_siginfo_t info
;
11830 info
.si_signo
= SIGSEGV
;
11832 info
.si_code
= TARGET_SEGV_MAPERR
;
11833 info
._sifields
._sigfault
._addr
= arg6
;
11834 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
11835 QEMU_SI_FAULT
, &info
);
11839 if (mem_value
== arg2
)
11840 put_user_u32(arg1
, arg6
);
11845 #ifdef TARGET_NR_atomic_barrier
11846 case TARGET_NR_atomic_barrier
:
11848 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
11854 #ifdef TARGET_NR_timer_create
11855 case TARGET_NR_timer_create
:
11857 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11859 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
11862 int timer_index
= next_free_host_timer();
11864 if (timer_index
< 0) {
11865 ret
= -TARGET_EAGAIN
;
11867 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
11870 phost_sevp
= &host_sevp
;
11871 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
11877 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
11881 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
11890 #ifdef TARGET_NR_timer_settime
11891 case TARGET_NR_timer_settime
:
11893 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11894 * struct itimerspec * old_value */
11895 target_timer_t timerid
= get_timer_id(arg1
);
11899 } else if (arg3
== 0) {
11900 ret
= -TARGET_EINVAL
;
11902 timer_t htimer
= g_posix_timers
[timerid
];
11903 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
11905 target_to_host_itimerspec(&hspec_new
, arg3
);
11907 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
11908 host_to_target_itimerspec(arg2
, &hspec_old
);
11914 #ifdef TARGET_NR_timer_gettime
11915 case TARGET_NR_timer_gettime
:
11917 /* args: timer_t timerid, struct itimerspec *curr_value */
11918 target_timer_t timerid
= get_timer_id(arg1
);
11922 } else if (!arg2
) {
11923 ret
= -TARGET_EFAULT
;
11925 timer_t htimer
= g_posix_timers
[timerid
];
11926 struct itimerspec hspec
;
11927 ret
= get_errno(timer_gettime(htimer
, &hspec
));
11929 if (host_to_target_itimerspec(arg2
, &hspec
)) {
11930 ret
= -TARGET_EFAULT
;
11937 #ifdef TARGET_NR_timer_getoverrun
11938 case TARGET_NR_timer_getoverrun
:
11940 /* args: timer_t timerid */
11941 target_timer_t timerid
= get_timer_id(arg1
);
11946 timer_t htimer
= g_posix_timers
[timerid
];
11947 ret
= get_errno(timer_getoverrun(htimer
));
11949 fd_trans_unregister(ret
);
11954 #ifdef TARGET_NR_timer_delete
11955 case TARGET_NR_timer_delete
:
11957 /* args: timer_t timerid */
11958 target_timer_t timerid
= get_timer_id(arg1
);
11963 timer_t htimer
= g_posix_timers
[timerid
];
11964 ret
= get_errno(timer_delete(htimer
));
11965 g_posix_timers
[timerid
] = 0;
11971 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11972 case TARGET_NR_timerfd_create
:
11973 ret
= get_errno(timerfd_create(arg1
,
11974 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
11978 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11979 case TARGET_NR_timerfd_gettime
:
11981 struct itimerspec its_curr
;
11983 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
11985 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
11992 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11993 case TARGET_NR_timerfd_settime
:
11995 struct itimerspec its_new
, its_old
, *p_new
;
11998 if (target_to_host_itimerspec(&its_new
, arg3
)) {
12006 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
12008 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
12015 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12016 case TARGET_NR_ioprio_get
:
12017 ret
= get_errno(ioprio_get(arg1
, arg2
));
12021 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12022 case TARGET_NR_ioprio_set
:
12023 ret
= get_errno(ioprio_set(arg1
, arg2
, arg3
));
12027 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12028 case TARGET_NR_setns
:
12029 ret
= get_errno(setns(arg1
, arg2
));
12032 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12033 case TARGET_NR_unshare
:
12034 ret
= get_errno(unshare(arg1
));
12040 gemu_log("qemu: Unsupported syscall: %d\n", num
);
12041 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
12042 unimplemented_nowarn
:
12044 ret
= -TARGET_ENOSYS
;
12049 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
12052 print_syscall_ret(num
, ret
);
12053 trace_guest_user_syscall_ret(cpu
, num
, ret
);
12056 ret
= -TARGET_EFAULT
;