4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
36 #include <linux/capability.h>
38 #include <sys/timex.h>
/* IA-64 host prototype: glibc provides __clone2() instead of clone().
 * Reassembled from the fragmented declaration (extraction had split it
 * across multiple lines). */
int __clone2(int (*fn)(void *), void *child_stack_base,
             size_t stack_size, int flags, void *arg, ...);
43 #include <sys/socket.h>
47 #include <sys/times.h>
50 #include <sys/statfs.h>
52 #include <sys/sysinfo.h>
53 #include <sys/signalfd.h>
54 //#include <sys/user.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <linux/wireless.h>
58 #include <linux/icmp.h>
59 #include "qemu-common.h"
61 #include <sys/timerfd.h>
67 #include <sys/eventfd.h>
70 #include <sys/epoll.h>
73 #include "qemu/xattr.h"
75 #ifdef CONFIG_SENDFILE
76 #include <sys/sendfile.h>
79 #define termios host_termios
80 #define winsize host_winsize
81 #define termio host_termio
82 #define sgttyb host_sgttyb /* same as target */
83 #define tchars host_tchars /* same as target */
84 #define ltchars host_ltchars /* same as target */
86 #include <linux/termios.h>
87 #include <linux/unistd.h>
88 #include <linux/cdrom.h>
89 #include <linux/hdreg.h>
90 #include <linux/soundcard.h>
92 #include <linux/mtio.h>
94 #if defined(CONFIG_FIEMAP)
95 #include <linux/fiemap.h>
99 #include <linux/dm-ioctl.h>
100 #include <linux/reboot.h>
101 #include <linux/route.h>
102 #include <linux/filter.h>
103 #include <linux/blkpg.h>
104 #include <netpacket/packet.h>
105 #include <linux/netlink.h>
106 #ifdef CONFIG_RTNETLINK
107 #include <linux/rtnetlink.h>
108 #include <linux/if_bridge.h>
110 #include <linux/audit.h>
111 #include "linux_loop.h"
#define CLONE_IO                0x80000000      /* Clone io context */

/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */
/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS               \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))

/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */

/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
/* Wrapper macros defining a static host function "name" that invokes the
 * raw host syscall of the same name with 0..6 arguments.  Host errno
 * conventions are preserved (syscall() returns -1 and sets errno). */
#define _syscall0(type,name)            \
static type name (void)                 \
{                                       \
    return syscall(__NR_##name);        \
}

#define _syscall1(type,name,type1,arg1)         \
static type name (type1 arg1)                   \
{                                               \
    return syscall(__NR_##name, arg1);          \
}

#define _syscall2(type,name,type1,arg1,type2,arg2)      \
static type name (type1 arg1,type2 arg2)                \
{                                                       \
    return syscall(__NR_##name, arg1, arg2);            \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)   \
static type name (type1 arg1,type2 arg2,type3 arg3)             \
{                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3);              \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)    \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)              \
{                                                                           \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4);                    \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,    \
                  type5,arg5)                                               \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)   \
{                                                                           \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);              \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,    \
                  type5,arg5,type6,arg6)                                    \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,   \
                  type6 arg6)                                               \
{                                                                           \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);        \
}
232 #define __NR_sys_uname __NR_uname
233 #define __NR_sys_getcwd1 __NR_getcwd
234 #define __NR_sys_getdents __NR_getdents
235 #define __NR_sys_getdents64 __NR_getdents64
236 #define __NR_sys_getpriority __NR_getpriority
237 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
238 #define __NR_sys_syslog __NR_syslog
239 #define __NR_sys_futex __NR_futex
240 #define __NR_sys_inotify_init __NR_inotify_init
241 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
242 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
244 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
246 #define __NR__llseek __NR_lseek
249 /* Newer kernel ports have llseek() instead of _llseek() */
250 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
251 #define TARGET_NR__llseek TARGET_NR_llseek
255 _syscall0(int, gettid
)
257 /* This is a replacement for the host gettid() and must return a host
259 static int gettid(void) {
263 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
264 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
266 #if !defined(__NR_getdents) || \
267 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
268 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
270 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
271 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
272 loff_t
*, res
, uint
, wh
);
274 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
275 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
276 #ifdef __NR_exit_group
277 _syscall1(int,exit_group
,int,error_code
)
279 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
280 _syscall1(int,set_tid_address
,int *,tidptr
)
282 #if defined(TARGET_NR_futex) && defined(__NR_futex)
283 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
284 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
286 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
287 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
288 unsigned long *, user_mask_ptr
);
289 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
290 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
291 unsigned long *, user_mask_ptr
);
292 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
294 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
295 struct __user_cap_data_struct
*, data
);
296 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
297 struct __user_cap_data_struct
*, data
);
298 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
299 _syscall2(int, ioprio_get
, int, which
, int, who
)
301 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
302 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
304 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
305 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
308 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
309 _syscall5(int, kcmp
, pid_t
, pid1
, pid_t
, pid2
, int, type
,
310 unsigned long, idx1
, unsigned long, idx2
)
313 static bitmask_transtbl fcntl_flags_tbl
[] = {
314 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
315 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
316 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
317 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
318 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
319 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
320 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
321 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
322 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
323 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
324 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
325 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
326 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
327 #if defined(O_DIRECT)
328 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
330 #if defined(O_NOATIME)
331 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
333 #if defined(O_CLOEXEC)
334 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
337 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
339 /* Don't terminate the list prematurely on 64-bit host+guest. */
340 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
341 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
348 QEMU_IFLA_BR_FORWARD_DELAY
,
349 QEMU_IFLA_BR_HELLO_TIME
,
350 QEMU_IFLA_BR_MAX_AGE
,
351 QEMU_IFLA_BR_AGEING_TIME
,
352 QEMU_IFLA_BR_STP_STATE
,
353 QEMU_IFLA_BR_PRIORITY
,
354 QEMU_IFLA_BR_VLAN_FILTERING
,
355 QEMU_IFLA_BR_VLAN_PROTOCOL
,
356 QEMU_IFLA_BR_GROUP_FWD_MASK
,
357 QEMU_IFLA_BR_ROOT_ID
,
358 QEMU_IFLA_BR_BRIDGE_ID
,
359 QEMU_IFLA_BR_ROOT_PORT
,
360 QEMU_IFLA_BR_ROOT_PATH_COST
,
361 QEMU_IFLA_BR_TOPOLOGY_CHANGE
,
362 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED
,
363 QEMU_IFLA_BR_HELLO_TIMER
,
364 QEMU_IFLA_BR_TCN_TIMER
,
365 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER
,
366 QEMU_IFLA_BR_GC_TIMER
,
367 QEMU_IFLA_BR_GROUP_ADDR
,
368 QEMU_IFLA_BR_FDB_FLUSH
,
369 QEMU_IFLA_BR_MCAST_ROUTER
,
370 QEMU_IFLA_BR_MCAST_SNOOPING
,
371 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR
,
372 QEMU_IFLA_BR_MCAST_QUERIER
,
373 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY
,
374 QEMU_IFLA_BR_MCAST_HASH_MAX
,
375 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT
,
376 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT
,
377 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL
,
378 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL
,
379 QEMU_IFLA_BR_MCAST_QUERIER_INTVL
,
380 QEMU_IFLA_BR_MCAST_QUERY_INTVL
,
381 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
,
382 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL
,
383 QEMU_IFLA_BR_NF_CALL_IPTABLES
,
384 QEMU_IFLA_BR_NF_CALL_IP6TABLES
,
385 QEMU_IFLA_BR_NF_CALL_ARPTABLES
,
386 QEMU_IFLA_BR_VLAN_DEFAULT_PVID
,
388 QEMU_IFLA_BR_VLAN_STATS_ENABLED
,
389 QEMU_IFLA_BR_MCAST_STATS_ENABLED
,
413 QEMU_IFLA_NET_NS_PID
,
416 QEMU_IFLA_VFINFO_LIST
,
424 QEMU_IFLA_PROMISCUITY
,
425 QEMU_IFLA_NUM_TX_QUEUES
,
426 QEMU_IFLA_NUM_RX_QUEUES
,
428 QEMU_IFLA_PHYS_PORT_ID
,
429 QEMU_IFLA_CARRIER_CHANGES
,
430 QEMU_IFLA_PHYS_SWITCH_ID
,
431 QEMU_IFLA_LINK_NETNSID
,
432 QEMU_IFLA_PHYS_PORT_NAME
,
433 QEMU_IFLA_PROTO_DOWN
,
434 QEMU_IFLA_GSO_MAX_SEGS
,
435 QEMU_IFLA_GSO_MAX_SIZE
,
442 QEMU_IFLA_BRPORT_UNSPEC
,
443 QEMU_IFLA_BRPORT_STATE
,
444 QEMU_IFLA_BRPORT_PRIORITY
,
445 QEMU_IFLA_BRPORT_COST
,
446 QEMU_IFLA_BRPORT_MODE
,
447 QEMU_IFLA_BRPORT_GUARD
,
448 QEMU_IFLA_BRPORT_PROTECT
,
449 QEMU_IFLA_BRPORT_FAST_LEAVE
,
450 QEMU_IFLA_BRPORT_LEARNING
,
451 QEMU_IFLA_BRPORT_UNICAST_FLOOD
,
452 QEMU_IFLA_BRPORT_PROXYARP
,
453 QEMU_IFLA_BRPORT_LEARNING_SYNC
,
454 QEMU_IFLA_BRPORT_PROXYARP_WIFI
,
455 QEMU_IFLA_BRPORT_ROOT_ID
,
456 QEMU_IFLA_BRPORT_BRIDGE_ID
,
457 QEMU_IFLA_BRPORT_DESIGNATED_PORT
,
458 QEMU_IFLA_BRPORT_DESIGNATED_COST
,
461 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
,
462 QEMU_IFLA_BRPORT_CONFIG_PENDING
,
463 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER
,
464 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER
,
465 QEMU_IFLA_BRPORT_HOLD_TIMER
,
466 QEMU_IFLA_BRPORT_FLUSH
,
467 QEMU_IFLA_BRPORT_MULTICAST_ROUTER
,
468 QEMU_IFLA_BRPORT_PAD
,
469 QEMU___IFLA_BRPORT_MAX
473 QEMU_IFLA_INFO_UNSPEC
,
476 QEMU_IFLA_INFO_XSTATS
,
477 QEMU_IFLA_INFO_SLAVE_KIND
,
478 QEMU_IFLA_INFO_SLAVE_DATA
,
479 QEMU___IFLA_INFO_MAX
,
483 QEMU_IFLA_INET_UNSPEC
,
485 QEMU___IFLA_INET_MAX
,
489 QEMU_IFLA_INET6_UNSPEC
,
490 QEMU_IFLA_INET6_FLAGS
,
491 QEMU_IFLA_INET6_CONF
,
492 QEMU_IFLA_INET6_STATS
,
493 QEMU_IFLA_INET6_MCAST
,
494 QEMU_IFLA_INET6_CACHEINFO
,
495 QEMU_IFLA_INET6_ICMP6STATS
,
496 QEMU_IFLA_INET6_TOKEN
,
497 QEMU_IFLA_INET6_ADDR_GEN_MODE
,
498 QEMU___IFLA_INET6_MAX
501 typedef abi_long (*TargetFdDataFunc
)(void *, size_t);
502 typedef abi_long (*TargetFdAddrFunc
)(void *, abi_ulong
, socklen_t
);
503 typedef struct TargetFdTrans
{
504 TargetFdDataFunc host_to_target_data
;
505 TargetFdDataFunc target_to_host_data
;
506 TargetFdAddrFunc target_to_host_addr
;
509 static TargetFdTrans
**target_fd_trans
;
511 static unsigned int target_fd_max
;
513 static TargetFdDataFunc
fd_trans_target_to_host_data(int fd
)
515 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
516 return target_fd_trans
[fd
]->target_to_host_data
;
521 static TargetFdDataFunc
fd_trans_host_to_target_data(int fd
)
523 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
524 return target_fd_trans
[fd
]->host_to_target_data
;
529 static TargetFdAddrFunc
fd_trans_target_to_host_addr(int fd
)
531 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
532 return target_fd_trans
[fd
]->target_to_host_addr
;
537 static void fd_trans_register(int fd
, TargetFdTrans
*trans
)
541 if (fd
>= target_fd_max
) {
542 oldmax
= target_fd_max
;
543 target_fd_max
= ((fd
>> 6) + 1) << 6; /* by slice of 64 entries */
544 target_fd_trans
= g_renew(TargetFdTrans
*,
545 target_fd_trans
, target_fd_max
);
546 memset((void *)(target_fd_trans
+ oldmax
), 0,
547 (target_fd_max
- oldmax
) * sizeof(TargetFdTrans
*));
549 target_fd_trans
[fd
] = trans
;
552 static void fd_trans_unregister(int fd
)
554 if (fd
>= 0 && fd
< target_fd_max
) {
555 target_fd_trans
[fd
] = NULL
;
559 static void fd_trans_dup(int oldfd
, int newfd
)
561 fd_trans_unregister(newfd
);
562 if (oldfd
< target_fd_max
&& target_fd_trans
[oldfd
]) {
563 fd_trans_register(newfd
, target_fd_trans
[oldfd
]);
/* getcwd() wrapper matching the getcwd syscall convention: on success
 * return the string length including the NUL terminator; on failure
 * return -1 with errno set by getcwd(). */
static int sys_getcwd1(char *buf, size_t size)
{
  if (getcwd(buf, size) == NULL) {
      /* getcwd() sets errno */
      return (-1);
  }
  return strlen(buf)+1;
}
#ifdef TARGET_NR_utimensat
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* NOTE(review): the #else fallback body was dropped by extraction;
 * restored per upstream — fails with ENOSYS when the host lacks the
 * utimensat syscall. Confirm against the original tree. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
591 #ifdef CONFIG_INOTIFY
592 #include <sys/inotify.h>
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper over the libc inotify_init(). */
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Thin wrapper over the libc inotify_add_watch(). */
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Thin wrapper over the libc inotify_rm_watch(). */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
612 #ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Thin wrapper over the libc inotify_init1() (flags variant). */
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
621 /* Userspace can usually survive runtime without inotify */
622 #undef TARGET_NR_inotify_init
623 #undef TARGET_NR_inotify_init1
624 #undef TARGET_NR_inotify_add_watch
625 #undef TARGET_NR_inotify_rm_watch
626 #endif /* CONFIG_INOTIFY */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall.
 * Restored the dropped struct members (fixed 64-bit limits) and #endif. */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};

_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

/* Claim the first free slot in g_posix_timers (marking it in use with a
 * dummy non-zero handle) and return its index, or -1 if all are taken. */
static inline int next_free_host_timer(void)
{
    int k ;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
/* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
678 #define ERRNO_TABLE_SIZE 1200
680 /* target_to_host_errno_table[] is initialized from
681 * host_to_target_errno_table[] in syscall_init(). */
682 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
686 * This list is the union of errno values overridden in asm-<arch>/errno.h
687 * minus the errnos that are not actually generic to all archs.
689 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
690 [EAGAIN
] = TARGET_EAGAIN
,
691 [EIDRM
] = TARGET_EIDRM
,
692 [ECHRNG
] = TARGET_ECHRNG
,
693 [EL2NSYNC
] = TARGET_EL2NSYNC
,
694 [EL3HLT
] = TARGET_EL3HLT
,
695 [EL3RST
] = TARGET_EL3RST
,
696 [ELNRNG
] = TARGET_ELNRNG
,
697 [EUNATCH
] = TARGET_EUNATCH
,
698 [ENOCSI
] = TARGET_ENOCSI
,
699 [EL2HLT
] = TARGET_EL2HLT
,
700 [EDEADLK
] = TARGET_EDEADLK
,
701 [ENOLCK
] = TARGET_ENOLCK
,
702 [EBADE
] = TARGET_EBADE
,
703 [EBADR
] = TARGET_EBADR
,
704 [EXFULL
] = TARGET_EXFULL
,
705 [ENOANO
] = TARGET_ENOANO
,
706 [EBADRQC
] = TARGET_EBADRQC
,
707 [EBADSLT
] = TARGET_EBADSLT
,
708 [EBFONT
] = TARGET_EBFONT
,
709 [ENOSTR
] = TARGET_ENOSTR
,
710 [ENODATA
] = TARGET_ENODATA
,
711 [ETIME
] = TARGET_ETIME
,
712 [ENOSR
] = TARGET_ENOSR
,
713 [ENONET
] = TARGET_ENONET
,
714 [ENOPKG
] = TARGET_ENOPKG
,
715 [EREMOTE
] = TARGET_EREMOTE
,
716 [ENOLINK
] = TARGET_ENOLINK
,
717 [EADV
] = TARGET_EADV
,
718 [ESRMNT
] = TARGET_ESRMNT
,
719 [ECOMM
] = TARGET_ECOMM
,
720 [EPROTO
] = TARGET_EPROTO
,
721 [EDOTDOT
] = TARGET_EDOTDOT
,
722 [EMULTIHOP
] = TARGET_EMULTIHOP
,
723 [EBADMSG
] = TARGET_EBADMSG
,
724 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
725 [EOVERFLOW
] = TARGET_EOVERFLOW
,
726 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
727 [EBADFD
] = TARGET_EBADFD
,
728 [EREMCHG
] = TARGET_EREMCHG
,
729 [ELIBACC
] = TARGET_ELIBACC
,
730 [ELIBBAD
] = TARGET_ELIBBAD
,
731 [ELIBSCN
] = TARGET_ELIBSCN
,
732 [ELIBMAX
] = TARGET_ELIBMAX
,
733 [ELIBEXEC
] = TARGET_ELIBEXEC
,
734 [EILSEQ
] = TARGET_EILSEQ
,
735 [ENOSYS
] = TARGET_ENOSYS
,
736 [ELOOP
] = TARGET_ELOOP
,
737 [ERESTART
] = TARGET_ERESTART
,
738 [ESTRPIPE
] = TARGET_ESTRPIPE
,
739 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
740 [EUSERS
] = TARGET_EUSERS
,
741 [ENOTSOCK
] = TARGET_ENOTSOCK
,
742 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
743 [EMSGSIZE
] = TARGET_EMSGSIZE
,
744 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
745 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
746 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
747 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
748 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
749 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
750 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
751 [EADDRINUSE
] = TARGET_EADDRINUSE
,
752 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
753 [ENETDOWN
] = TARGET_ENETDOWN
,
754 [ENETUNREACH
] = TARGET_ENETUNREACH
,
755 [ENETRESET
] = TARGET_ENETRESET
,
756 [ECONNABORTED
] = TARGET_ECONNABORTED
,
757 [ECONNRESET
] = TARGET_ECONNRESET
,
758 [ENOBUFS
] = TARGET_ENOBUFS
,
759 [EISCONN
] = TARGET_EISCONN
,
760 [ENOTCONN
] = TARGET_ENOTCONN
,
761 [EUCLEAN
] = TARGET_EUCLEAN
,
762 [ENOTNAM
] = TARGET_ENOTNAM
,
763 [ENAVAIL
] = TARGET_ENAVAIL
,
764 [EISNAM
] = TARGET_EISNAM
,
765 [EREMOTEIO
] = TARGET_EREMOTEIO
,
766 [EDQUOT
] = TARGET_EDQUOT
,
767 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
768 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
769 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
770 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
771 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
772 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
773 [EALREADY
] = TARGET_EALREADY
,
774 [EINPROGRESS
] = TARGET_EINPROGRESS
,
775 [ESTALE
] = TARGET_ESTALE
,
776 [ECANCELED
] = TARGET_ECANCELED
,
777 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
778 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
780 [ENOKEY
] = TARGET_ENOKEY
,
783 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
786 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
789 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
792 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
794 #ifdef ENOTRECOVERABLE
795 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
798 [ENOMSG
] = TARGET_ENOMSG
,
802 static inline int host_to_target_errno(int err
)
804 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
805 host_to_target_errno_table
[err
]) {
806 return host_to_target_errno_table
[err
];
811 static inline int target_to_host_errno(int err
)
813 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
814 target_to_host_errno_table
[err
]) {
815 return target_to_host_errno_table
[err
];
820 static inline abi_long
get_errno(abi_long ret
)
823 return -host_to_target_errno(errno
);
828 static inline int is_error(abi_long ret
)
830 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
833 const char *target_strerror(int err
)
835 if (err
== TARGET_ERESTARTSYS
) {
836 return "To be restarted";
838 if (err
== TARGET_QEMU_ESIGRETURN
) {
839 return "Successful exit from sigreturn";
842 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
845 return strerror(target_to_host_errno(err
));
/* Wrapper macros defining safe_<name>() functions that invoke the host
 * syscall through safe_syscall(), which guarantees correct interaction
 * with guest signal delivery (restartable, no lost wakeups). */
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
895 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
896 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
897 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
898 int, flags
, mode_t
, mode
)
899 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
900 struct rusage
*, rusage
)
901 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
902 int, options
, struct rusage
*, rusage
)
903 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
904 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
905 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
906 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
907 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
909 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
910 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
912 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
913 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
914 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
915 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
916 safe_syscall2(int, tkill
, int, tid
, int, sig
)
917 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
918 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
919 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
920 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
922 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
923 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
924 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
925 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
926 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
927 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
928 safe_syscall2(int, flock
, int, fd
, int, operation
)
929 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
930 const struct timespec
*, uts
, size_t, sigsetsize
)
931 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
933 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
934 struct timespec
*, rem
)
935 #ifdef TARGET_NR_clock_nanosleep
936 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
937 const struct timespec
*, req
, struct timespec
*, rem
)
940 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
942 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
943 long, msgtype
, int, flags
)
944 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
945 unsigned, nsops
, const struct timespec
*, timeout
)
947 /* This host kernel architecture uses a single ipc syscall; fake up
948 * wrappers for the sub-operations to hide this implementation detail.
949 * Annoyingly we can't include linux/ipc.h to get the constant definitions
950 * for the call parameter because some structs in there conflict with the
951 * sys/ipc.h ones. So we just define them here, and rely on them being
952 * the same for all host architectures.
954 #define Q_SEMTIMEDOP 4
957 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
959 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
960 void *, ptr
, long, fifth
)
961 static int safe_msgsnd(int msgid
, const void *msgp
, size_t sz
, int flags
)
963 return safe_ipc(Q_IPCCALL(0, Q_MSGSND
), msgid
, sz
, flags
, (void *)msgp
, 0);
965 static int safe_msgrcv(int msgid
, void *msgp
, size_t sz
, long type
, int flags
)
967 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV
), msgid
, sz
, flags
, msgp
, type
);
969 static int safe_semtimedop(int semid
, struct sembuf
*tsops
, unsigned nsops
,
970 const struct timespec
*timeout
)
972 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP
), semid
, nsops
, 0, tsops
,
976 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
977 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
978 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
979 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
980 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
982 /* We do ioctl like this rather than via safe_syscall3 to preserve the
983 * "third argument might be integer or pointer or not present" behaviour of
986 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
987 /* Similarly for fcntl. Note that callers must always:
988 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
989 * use the flock64 struct rather than unsuffixed flock
990 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
993 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
995 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
998 static inline int host_to_target_sock_type(int host_type
)
1002 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
1004 target_type
= TARGET_SOCK_DGRAM
;
1007 target_type
= TARGET_SOCK_STREAM
;
1010 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
1014 #if defined(SOCK_CLOEXEC)
1015 if (host_type
& SOCK_CLOEXEC
) {
1016 target_type
|= TARGET_SOCK_CLOEXEC
;
1020 #if defined(SOCK_NONBLOCK)
1021 if (host_type
& SOCK_NONBLOCK
) {
1022 target_type
|= TARGET_SOCK_NONBLOCK
;
1029 static abi_ulong target_brk
;
1030 static abi_ulong target_original_brk
;
1031 static abi_ulong brk_page
;
1033 void target_set_brk(abi_ulong new_brk
)
1035 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
1036 brk_page
= HOST_PAGE_ALIGN(target_brk
);
1039 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
1040 #define DEBUGF_BRK(message, args...)
1042 /* do_brk() must return target values and target errnos. */
1043 abi_long
do_brk(abi_ulong new_brk
)
1045 abi_long mapped_addr
;
1046 abi_ulong new_alloc_size
;
1048 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
1051 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
1054 if (new_brk
< target_original_brk
) {
1055 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
1060 /* If the new brk is less than the highest page reserved to the
1061 * target heap allocation, set it and we're almost done... */
1062 if (new_brk
<= brk_page
) {
1063 /* Heap contents are initialized to zero, as for anonymous
1065 if (new_brk
> target_brk
) {
1066 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
1068 target_brk
= new_brk
;
1069 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
1073 /* We need to allocate more memory after the brk... Note that
1074 * we don't use MAP_FIXED because that will map over the top of
1075 * any existing mapping (like the one with the host libc or qemu
1076 * itself); instead we treat "mapped but at wrong address" as
1077 * a failure and unmap again.
1079 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
1080 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
1081 PROT_READ
|PROT_WRITE
,
1082 MAP_ANON
|MAP_PRIVATE
, 0, 0));
1084 if (mapped_addr
== brk_page
) {
1085 /* Heap contents are initialized to zero, as for anonymous
1086 * mapped pages. Technically the new pages are already
1087 * initialized to zero since they *are* anonymous mapped
1088 * pages, however we have to take care with the contents that
1089 * come from the remaining part of the previous page: it may
1090 * contains garbage data due to a previous heap usage (grown
1091 * then shrunken). */
1092 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
1094 target_brk
= new_brk
;
1095 brk_page
= HOST_PAGE_ALIGN(target_brk
);
1096 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
1099 } else if (mapped_addr
!= -1) {
1100 /* Mapped but at wrong address, meaning there wasn't actually
1101 * enough space for this brk.
1103 target_munmap(mapped_addr
, new_alloc_size
);
1105 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
1108 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
1111 #if defined(TARGET_ALPHA)
1112 /* We (partially) emulate OSF/1 on Alpha, which requires we
1113 return a proper errno, not an unchanged brk value. */
1114 return -TARGET_ENOMEM
;
1116 /* For everything else, return the previous break. */
1120 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
1121 abi_ulong target_fds_addr
,
1125 abi_ulong b
, *target_fds
;
1127 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1128 if (!(target_fds
= lock_user(VERIFY_READ
,
1130 sizeof(abi_ulong
) * nw
,
1132 return -TARGET_EFAULT
;
1136 for (i
= 0; i
< nw
; i
++) {
1137 /* grab the abi_ulong */
1138 __get_user(b
, &target_fds
[i
]);
1139 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1140 /* check the bit inside the abi_ulong */
1147 unlock_user(target_fds
, target_fds_addr
, 0);
1152 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
1153 abi_ulong target_fds_addr
,
1156 if (target_fds_addr
) {
1157 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
1158 return -TARGET_EFAULT
;
1166 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
1172 abi_ulong
*target_fds
;
1174 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1175 if (!(target_fds
= lock_user(VERIFY_WRITE
,
1177 sizeof(abi_ulong
) * nw
,
1179 return -TARGET_EFAULT
;
1182 for (i
= 0; i
< nw
; i
++) {
1184 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1185 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
1188 __put_user(v
, &target_fds
[i
]);
1191 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
1196 #if defined(__alpha__)
1197 #define HOST_HZ 1024
1202 static inline abi_long
host_to_target_clock_t(long ticks
)
1204 #if HOST_HZ == TARGET_HZ
1207 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1211 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1212 const struct rusage
*rusage
)
1214 struct target_rusage
*target_rusage
;
1216 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1217 return -TARGET_EFAULT
;
1218 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1219 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1220 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1221 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1222 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1223 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1224 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1225 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1226 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1227 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1228 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1229 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1230 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1231 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1232 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1233 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1234 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1235 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1236 unlock_user_struct(target_rusage
, target_addr
, 1);
1241 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1243 abi_ulong target_rlim_swap
;
1246 target_rlim_swap
= tswapal(target_rlim
);
1247 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1248 return RLIM_INFINITY
;
1250 result
= target_rlim_swap
;
1251 if (target_rlim_swap
!= (rlim_t
)result
)
1252 return RLIM_INFINITY
;
1257 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1259 abi_ulong target_rlim_swap
;
1262 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1263 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1265 target_rlim_swap
= rlim
;
1266 result
= tswapal(target_rlim_swap
);
1271 static inline int target_to_host_resource(int code
)
1274 case TARGET_RLIMIT_AS
:
1276 case TARGET_RLIMIT_CORE
:
1278 case TARGET_RLIMIT_CPU
:
1280 case TARGET_RLIMIT_DATA
:
1282 case TARGET_RLIMIT_FSIZE
:
1283 return RLIMIT_FSIZE
;
1284 case TARGET_RLIMIT_LOCKS
:
1285 return RLIMIT_LOCKS
;
1286 case TARGET_RLIMIT_MEMLOCK
:
1287 return RLIMIT_MEMLOCK
;
1288 case TARGET_RLIMIT_MSGQUEUE
:
1289 return RLIMIT_MSGQUEUE
;
1290 case TARGET_RLIMIT_NICE
:
1292 case TARGET_RLIMIT_NOFILE
:
1293 return RLIMIT_NOFILE
;
1294 case TARGET_RLIMIT_NPROC
:
1295 return RLIMIT_NPROC
;
1296 case TARGET_RLIMIT_RSS
:
1298 case TARGET_RLIMIT_RTPRIO
:
1299 return RLIMIT_RTPRIO
;
1300 case TARGET_RLIMIT_SIGPENDING
:
1301 return RLIMIT_SIGPENDING
;
1302 case TARGET_RLIMIT_STACK
:
1303 return RLIMIT_STACK
;
1309 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1310 abi_ulong target_tv_addr
)
1312 struct target_timeval
*target_tv
;
1314 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
1315 return -TARGET_EFAULT
;
1317 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1318 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1320 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1325 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1326 const struct timeval
*tv
)
1328 struct target_timeval
*target_tv
;
1330 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
1331 return -TARGET_EFAULT
;
1333 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1334 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1336 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1341 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1342 abi_ulong target_tz_addr
)
1344 struct target_timezone
*target_tz
;
1346 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1347 return -TARGET_EFAULT
;
1350 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1351 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1353 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1358 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1361 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1362 abi_ulong target_mq_attr_addr
)
1364 struct target_mq_attr
*target_mq_attr
;
1366 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1367 target_mq_attr_addr
, 1))
1368 return -TARGET_EFAULT
;
1370 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1371 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1372 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1373 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1375 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1380 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1381 const struct mq_attr
*attr
)
1383 struct target_mq_attr
*target_mq_attr
;
1385 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1386 target_mq_attr_addr
, 0))
1387 return -TARGET_EFAULT
;
1389 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1390 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1391 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1392 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1394 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1400 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1401 /* do_select() must return target values and target errnos. */
1402 static abi_long
do_select(int n
,
1403 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1404 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1406 fd_set rfds
, wfds
, efds
;
1407 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1409 struct timespec ts
, *ts_ptr
;
1412 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1416 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1420 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1425 if (target_tv_addr
) {
1426 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1427 return -TARGET_EFAULT
;
1428 ts
.tv_sec
= tv
.tv_sec
;
1429 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1435 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1438 if (!is_error(ret
)) {
1439 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1440 return -TARGET_EFAULT
;
1441 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1442 return -TARGET_EFAULT
;
1443 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1444 return -TARGET_EFAULT
;
1446 if (target_tv_addr
) {
1447 tv
.tv_sec
= ts
.tv_sec
;
1448 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1449 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1450 return -TARGET_EFAULT
;
1458 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1459 static abi_long
do_old_select(abi_ulong arg1
)
1461 struct target_sel_arg_struct
*sel
;
1462 abi_ulong inp
, outp
, exp
, tvp
;
1465 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1)) {
1466 return -TARGET_EFAULT
;
1469 nsel
= tswapal(sel
->n
);
1470 inp
= tswapal(sel
->inp
);
1471 outp
= tswapal(sel
->outp
);
1472 exp
= tswapal(sel
->exp
);
1473 tvp
= tswapal(sel
->tvp
);
1475 unlock_user_struct(sel
, arg1
, 0);
1477 return do_select(nsel
, inp
, outp
, exp
, tvp
);
1482 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1485 return pipe2(host_pipe
, flags
);
1491 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1492 int flags
, int is_pipe2
)
1496 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1499 return get_errno(ret
);
1501 /* Several targets have special calling conventions for the original
1502 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1504 #if defined(TARGET_ALPHA)
1505 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1506 return host_pipe
[0];
1507 #elif defined(TARGET_MIPS)
1508 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1509 return host_pipe
[0];
1510 #elif defined(TARGET_SH4)
1511 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1512 return host_pipe
[0];
1513 #elif defined(TARGET_SPARC)
1514 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1515 return host_pipe
[0];
1519 if (put_user_s32(host_pipe
[0], pipedes
)
1520 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1521 return -TARGET_EFAULT
;
1522 return get_errno(ret
);
1525 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1526 abi_ulong target_addr
,
1529 struct target_ip_mreqn
*target_smreqn
;
1531 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1533 return -TARGET_EFAULT
;
1534 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1535 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1536 if (len
== sizeof(struct target_ip_mreqn
))
1537 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1538 unlock_user(target_smreqn
, target_addr
, 0);
1543 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1544 abi_ulong target_addr
,
1547 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1548 sa_family_t sa_family
;
1549 struct target_sockaddr
*target_saddr
;
1551 if (fd_trans_target_to_host_addr(fd
)) {
1552 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1555 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1557 return -TARGET_EFAULT
;
1559 sa_family
= tswap16(target_saddr
->sa_family
);
1561 /* Oops. The caller might send a incomplete sun_path; sun_path
1562 * must be terminated by \0 (see the manual page), but
1563 * unfortunately it is quite common to specify sockaddr_un
1564 * length as "strlen(x->sun_path)" while it should be
1565 * "strlen(...) + 1". We'll fix that here if needed.
1566 * Linux kernel has a similar feature.
1569 if (sa_family
== AF_UNIX
) {
1570 if (len
< unix_maxlen
&& len
> 0) {
1571 char *cp
= (char*)target_saddr
;
1573 if ( cp
[len
-1] && !cp
[len
] )
1576 if (len
> unix_maxlen
)
1580 memcpy(addr
, target_saddr
, len
);
1581 addr
->sa_family
= sa_family
;
1582 if (sa_family
== AF_NETLINK
) {
1583 struct sockaddr_nl
*nladdr
;
1585 nladdr
= (struct sockaddr_nl
*)addr
;
1586 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1587 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1588 } else if (sa_family
== AF_PACKET
) {
1589 struct target_sockaddr_ll
*lladdr
;
1591 lladdr
= (struct target_sockaddr_ll
*)addr
;
1592 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1593 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1595 unlock_user(target_saddr
, target_addr
, 0);
1600 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1601 struct sockaddr
*addr
,
1604 struct target_sockaddr
*target_saddr
;
1610 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1612 return -TARGET_EFAULT
;
1613 memcpy(target_saddr
, addr
, len
);
1614 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1615 sizeof(target_saddr
->sa_family
)) {
1616 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1618 if (addr
->sa_family
== AF_NETLINK
&& len
>= sizeof(struct sockaddr_nl
)) {
1619 struct sockaddr_nl
*target_nl
= (struct sockaddr_nl
*)target_saddr
;
1620 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1621 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1622 } else if (addr
->sa_family
== AF_PACKET
) {
1623 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1624 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1625 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1627 unlock_user(target_saddr
, target_addr
, len
);
1632 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1633 struct target_msghdr
*target_msgh
)
1635 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1636 abi_long msg_controllen
;
1637 abi_ulong target_cmsg_addr
;
1638 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1639 socklen_t space
= 0;
1641 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1642 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1644 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1645 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1646 target_cmsg_start
= target_cmsg
;
1648 return -TARGET_EFAULT
;
1650 while (cmsg
&& target_cmsg
) {
1651 void *data
= CMSG_DATA(cmsg
);
1652 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1654 int len
= tswapal(target_cmsg
->cmsg_len
)
1655 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1657 space
+= CMSG_SPACE(len
);
1658 if (space
> msgh
->msg_controllen
) {
1659 space
-= CMSG_SPACE(len
);
1660 /* This is a QEMU bug, since we allocated the payload
1661 * area ourselves (unlike overflow in host-to-target
1662 * conversion, which is just the guest giving us a buffer
1663 * that's too small). It can't happen for the payload types
1664 * we currently support; if it becomes an issue in future
1665 * we would need to improve our allocation strategy to
1666 * something more intelligent than "twice the size of the
1667 * target buffer we're reading from".
1669 gemu_log("Host cmsg overflow\n");
1673 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1674 cmsg
->cmsg_level
= SOL_SOCKET
;
1676 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1678 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1679 cmsg
->cmsg_len
= CMSG_LEN(len
);
1681 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1682 int *fd
= (int *)data
;
1683 int *target_fd
= (int *)target_data
;
1684 int i
, numfds
= len
/ sizeof(int);
1686 for (i
= 0; i
< numfds
; i
++) {
1687 __get_user(fd
[i
], target_fd
+ i
);
1689 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1690 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1691 struct ucred
*cred
= (struct ucred
*)data
;
1692 struct target_ucred
*target_cred
=
1693 (struct target_ucred
*)target_data
;
1695 __get_user(cred
->pid
, &target_cred
->pid
);
1696 __get_user(cred
->uid
, &target_cred
->uid
);
1697 __get_user(cred
->gid
, &target_cred
->gid
);
1699 gemu_log("Unsupported ancillary data: %d/%d\n",
1700 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1701 memcpy(data
, target_data
, len
);
1704 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1705 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1708 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1710 msgh
->msg_controllen
= space
;
1714 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1715 struct msghdr
*msgh
)
1717 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1718 abi_long msg_controllen
;
1719 abi_ulong target_cmsg_addr
;
1720 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1721 socklen_t space
= 0;
1723 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1724 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1726 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1727 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1728 target_cmsg_start
= target_cmsg
;
1730 return -TARGET_EFAULT
;
1732 while (cmsg
&& target_cmsg
) {
1733 void *data
= CMSG_DATA(cmsg
);
1734 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1736 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1737 int tgt_len
, tgt_space
;
1739 /* We never copy a half-header but may copy half-data;
1740 * this is Linux's behaviour in put_cmsg(). Note that
1741 * truncation here is a guest problem (which we report
1742 * to the guest via the CTRUNC bit), unlike truncation
1743 * in target_to_host_cmsg, which is a QEMU bug.
1745 if (msg_controllen
< sizeof(struct cmsghdr
)) {
1746 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1750 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1751 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1753 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1755 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1757 tgt_len
= TARGET_CMSG_LEN(len
);
1759 /* Payload types which need a different size of payload on
1760 * the target must adjust tgt_len here.
1762 switch (cmsg
->cmsg_level
) {
1764 switch (cmsg
->cmsg_type
) {
1766 tgt_len
= sizeof(struct target_timeval
);
1775 if (msg_controllen
< tgt_len
) {
1776 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1777 tgt_len
= msg_controllen
;
1780 /* We must now copy-and-convert len bytes of payload
1781 * into tgt_len bytes of destination space. Bear in mind
1782 * that in both source and destination we may be dealing
1783 * with a truncated value!
1785 switch (cmsg
->cmsg_level
) {
1787 switch (cmsg
->cmsg_type
) {
1790 int *fd
= (int *)data
;
1791 int *target_fd
= (int *)target_data
;
1792 int i
, numfds
= tgt_len
/ sizeof(int);
1794 for (i
= 0; i
< numfds
; i
++) {
1795 __put_user(fd
[i
], target_fd
+ i
);
1801 struct timeval
*tv
= (struct timeval
*)data
;
1802 struct target_timeval
*target_tv
=
1803 (struct target_timeval
*)target_data
;
1805 if (len
!= sizeof(struct timeval
) ||
1806 tgt_len
!= sizeof(struct target_timeval
)) {
1810 /* copy struct timeval to target */
1811 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1812 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1815 case SCM_CREDENTIALS
:
1817 struct ucred
*cred
= (struct ucred
*)data
;
1818 struct target_ucred
*target_cred
=
1819 (struct target_ucred
*)target_data
;
1821 __put_user(cred
->pid
, &target_cred
->pid
);
1822 __put_user(cred
->uid
, &target_cred
->uid
);
1823 __put_user(cred
->gid
, &target_cred
->gid
);
1833 gemu_log("Unsupported ancillary data: %d/%d\n",
1834 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1835 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1836 if (tgt_len
> len
) {
1837 memset(target_data
+ len
, 0, tgt_len
- len
);
1841 target_cmsg
->cmsg_len
= tswapal(tgt_len
);
1842 tgt_space
= TARGET_CMSG_SPACE(len
);
1843 if (msg_controllen
< tgt_space
) {
1844 tgt_space
= msg_controllen
;
1846 msg_controllen
-= tgt_space
;
1848 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1849 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1852 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1854 target_msgh
->msg_controllen
= tswapal(space
);
1858 static void tswap_nlmsghdr(struct nlmsghdr
*nlh
)
1860 nlh
->nlmsg_len
= tswap32(nlh
->nlmsg_len
);
1861 nlh
->nlmsg_type
= tswap16(nlh
->nlmsg_type
);
1862 nlh
->nlmsg_flags
= tswap16(nlh
->nlmsg_flags
);
1863 nlh
->nlmsg_seq
= tswap32(nlh
->nlmsg_seq
);
1864 nlh
->nlmsg_pid
= tswap32(nlh
->nlmsg_pid
);
1867 static abi_long
host_to_target_for_each_nlmsg(struct nlmsghdr
*nlh
,
1869 abi_long (*host_to_target_nlmsg
)
1870 (struct nlmsghdr
*))
1875 while (len
> sizeof(struct nlmsghdr
)) {
1877 nlmsg_len
= nlh
->nlmsg_len
;
1878 if (nlmsg_len
< sizeof(struct nlmsghdr
) ||
1883 switch (nlh
->nlmsg_type
) {
1885 tswap_nlmsghdr(nlh
);
1891 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
1892 e
->error
= tswap32(e
->error
);
1893 tswap_nlmsghdr(&e
->msg
);
1894 tswap_nlmsghdr(nlh
);
1898 ret
= host_to_target_nlmsg(nlh
);
1900 tswap_nlmsghdr(nlh
);
1905 tswap_nlmsghdr(nlh
);
1906 len
-= NLMSG_ALIGN(nlmsg_len
);
1907 nlh
= (struct nlmsghdr
*)(((char*)nlh
) + NLMSG_ALIGN(nlmsg_len
));
1912 static abi_long
target_to_host_for_each_nlmsg(struct nlmsghdr
*nlh
,
1914 abi_long (*target_to_host_nlmsg
)
1915 (struct nlmsghdr
*))
1919 while (len
> sizeof(struct nlmsghdr
)) {
1920 if (tswap32(nlh
->nlmsg_len
) < sizeof(struct nlmsghdr
) ||
1921 tswap32(nlh
->nlmsg_len
) > len
) {
1924 tswap_nlmsghdr(nlh
);
1925 switch (nlh
->nlmsg_type
) {
1932 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
1933 e
->error
= tswap32(e
->error
);
1934 tswap_nlmsghdr(&e
->msg
);
1938 ret
= target_to_host_nlmsg(nlh
);
1943 len
-= NLMSG_ALIGN(nlh
->nlmsg_len
);
1944 nlh
= (struct nlmsghdr
*)(((char *)nlh
) + NLMSG_ALIGN(nlh
->nlmsg_len
));
1949 #ifdef CONFIG_RTNETLINK
1950 static abi_long
host_to_target_for_each_nlattr(struct nlattr
*nlattr
,
1951 size_t len
, void *context
,
1952 abi_long (*host_to_target_nlattr
)
1956 unsigned short nla_len
;
1959 while (len
> sizeof(struct nlattr
)) {
1960 nla_len
= nlattr
->nla_len
;
1961 if (nla_len
< sizeof(struct nlattr
) ||
1965 ret
= host_to_target_nlattr(nlattr
, context
);
1966 nlattr
->nla_len
= tswap16(nlattr
->nla_len
);
1967 nlattr
->nla_type
= tswap16(nlattr
->nla_type
);
1971 len
-= NLA_ALIGN(nla_len
);
1972 nlattr
= (struct nlattr
*)(((char *)nlattr
) + NLA_ALIGN(nla_len
));
1977 static abi_long
host_to_target_for_each_rtattr(struct rtattr
*rtattr
,
1979 abi_long (*host_to_target_rtattr
)
1982 unsigned short rta_len
;
1985 while (len
> sizeof(struct rtattr
)) {
1986 rta_len
= rtattr
->rta_len
;
1987 if (rta_len
< sizeof(struct rtattr
) ||
1991 ret
= host_to_target_rtattr(rtattr
);
1992 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
1993 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
1997 len
-= RTA_ALIGN(rta_len
);
1998 rtattr
= (struct rtattr
*)(((char *)rtattr
) + RTA_ALIGN(rta_len
));
2003 #define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
2005 static abi_long
host_to_target_data_bridge_nlattr(struct nlattr
*nlattr
,
2012 switch (nlattr
->nla_type
) {
2014 case QEMU_IFLA_BR_FDB_FLUSH
:
2017 case QEMU_IFLA_BR_GROUP_ADDR
:
2020 case QEMU_IFLA_BR_VLAN_FILTERING
:
2021 case QEMU_IFLA_BR_TOPOLOGY_CHANGE
:
2022 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED
:
2023 case QEMU_IFLA_BR_MCAST_ROUTER
:
2024 case QEMU_IFLA_BR_MCAST_SNOOPING
:
2025 case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR
:
2026 case QEMU_IFLA_BR_MCAST_QUERIER
:
2027 case QEMU_IFLA_BR_NF_CALL_IPTABLES
:
2028 case QEMU_IFLA_BR_NF_CALL_IP6TABLES
:
2029 case QEMU_IFLA_BR_NF_CALL_ARPTABLES
:
2032 case QEMU_IFLA_BR_PRIORITY
:
2033 case QEMU_IFLA_BR_VLAN_PROTOCOL
:
2034 case QEMU_IFLA_BR_GROUP_FWD_MASK
:
2035 case QEMU_IFLA_BR_ROOT_PORT
:
2036 case QEMU_IFLA_BR_VLAN_DEFAULT_PVID
:
2037 u16
= NLA_DATA(nlattr
);
2038 *u16
= tswap16(*u16
);
2041 case QEMU_IFLA_BR_FORWARD_DELAY
:
2042 case QEMU_IFLA_BR_HELLO_TIME
:
2043 case QEMU_IFLA_BR_MAX_AGE
:
2044 case QEMU_IFLA_BR_AGEING_TIME
:
2045 case QEMU_IFLA_BR_STP_STATE
:
2046 case QEMU_IFLA_BR_ROOT_PATH_COST
:
2047 case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY
:
2048 case QEMU_IFLA_BR_MCAST_HASH_MAX
:
2049 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT
:
2050 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT
:
2051 u32
= NLA_DATA(nlattr
);
2052 *u32
= tswap32(*u32
);
2055 case QEMU_IFLA_BR_HELLO_TIMER
:
2056 case QEMU_IFLA_BR_TCN_TIMER
:
2057 case QEMU_IFLA_BR_GC_TIMER
:
2058 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER
:
2059 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL
:
2060 case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL
:
2061 case QEMU_IFLA_BR_MCAST_QUERIER_INTVL
:
2062 case QEMU_IFLA_BR_MCAST_QUERY_INTVL
:
2063 case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
:
2064 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL
:
2065 u64
= NLA_DATA(nlattr
);
2066 *u64
= tswap64(*u64
);
2068 /* ifla_bridge_id: uin8_t[] */
2069 case QEMU_IFLA_BR_ROOT_ID
:
2070 case QEMU_IFLA_BR_BRIDGE_ID
:
2073 gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr
->nla_type
);
2079 static abi_long
host_to_target_slave_data_bridge_nlattr(struct nlattr
*nlattr
,
2086 switch (nlattr
->nla_type
) {
2088 case QEMU_IFLA_BRPORT_STATE
:
2089 case QEMU_IFLA_BRPORT_MODE
:
2090 case QEMU_IFLA_BRPORT_GUARD
:
2091 case QEMU_IFLA_BRPORT_PROTECT
:
2092 case QEMU_IFLA_BRPORT_FAST_LEAVE
:
2093 case QEMU_IFLA_BRPORT_LEARNING
:
2094 case QEMU_IFLA_BRPORT_UNICAST_FLOOD
:
2095 case QEMU_IFLA_BRPORT_PROXYARP
:
2096 case QEMU_IFLA_BRPORT_LEARNING_SYNC
:
2097 case QEMU_IFLA_BRPORT_PROXYARP_WIFI
:
2098 case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
:
2099 case QEMU_IFLA_BRPORT_CONFIG_PENDING
:
2100 case QEMU_IFLA_BRPORT_MULTICAST_ROUTER
:
2103 case QEMU_IFLA_BRPORT_PRIORITY
:
2104 case QEMU_IFLA_BRPORT_DESIGNATED_PORT
:
2105 case QEMU_IFLA_BRPORT_DESIGNATED_COST
:
2106 case QEMU_IFLA_BRPORT_ID
:
2107 case QEMU_IFLA_BRPORT_NO
:
2108 u16
= NLA_DATA(nlattr
);
2109 *u16
= tswap16(*u16
);
2112 case QEMU_IFLA_BRPORT_COST
:
2113 u32
= NLA_DATA(nlattr
);
2114 *u32
= tswap32(*u32
);
2117 case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER
:
2118 case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER
:
2119 case QEMU_IFLA_BRPORT_HOLD_TIMER
:
2120 u64
= NLA_DATA(nlattr
);
2121 *u64
= tswap64(*u64
);
2123 /* ifla_bridge_id: uint8_t[] */
2124 case QEMU_IFLA_BRPORT_ROOT_ID
:
2125 case QEMU_IFLA_BRPORT_BRIDGE_ID
:
2128 gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr
->nla_type
);
2134 struct linkinfo_context
{
2141 static abi_long
host_to_target_data_linkinfo_nlattr(struct nlattr
*nlattr
,
2144 struct linkinfo_context
*li_context
= context
;
2146 switch (nlattr
->nla_type
) {
2148 case QEMU_IFLA_INFO_KIND
:
2149 li_context
->name
= NLA_DATA(nlattr
);
2150 li_context
->len
= nlattr
->nla_len
- NLA_HDRLEN
;
2152 case QEMU_IFLA_INFO_SLAVE_KIND
:
2153 li_context
->slave_name
= NLA_DATA(nlattr
);
2154 li_context
->slave_len
= nlattr
->nla_len
- NLA_HDRLEN
;
2157 case QEMU_IFLA_INFO_XSTATS
:
2158 /* FIXME: only used by CAN */
2161 case QEMU_IFLA_INFO_DATA
:
2162 if (strncmp(li_context
->name
, "bridge",
2163 li_context
->len
) == 0) {
2164 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
2167 host_to_target_data_bridge_nlattr
);
2169 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context
->name
);
2172 case QEMU_IFLA_INFO_SLAVE_DATA
:
2173 if (strncmp(li_context
->slave_name
, "bridge",
2174 li_context
->slave_len
) == 0) {
2175 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
2178 host_to_target_slave_data_bridge_nlattr
);
2180 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2181 li_context
->slave_name
);
2185 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr
->nla_type
);
2192 static abi_long
host_to_target_data_inet_nlattr(struct nlattr
*nlattr
,
2198 switch (nlattr
->nla_type
) {
2199 case QEMU_IFLA_INET_CONF
:
2200 u32
= NLA_DATA(nlattr
);
2201 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u32
);
2203 u32
[i
] = tswap32(u32
[i
]);
2207 gemu_log("Unknown host AF_INET type: %d\n", nlattr
->nla_type
);
2212 static abi_long
host_to_target_data_inet6_nlattr(struct nlattr
*nlattr
,
2217 struct ifla_cacheinfo
*ci
;
2220 switch (nlattr
->nla_type
) {
2222 case QEMU_IFLA_INET6_TOKEN
:
2225 case QEMU_IFLA_INET6_ADDR_GEN_MODE
:
2228 case QEMU_IFLA_INET6_FLAGS
:
2229 u32
= NLA_DATA(nlattr
);
2230 *u32
= tswap32(*u32
);
2233 case QEMU_IFLA_INET6_CONF
:
2234 u32
= NLA_DATA(nlattr
);
2235 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u32
);
2237 u32
[i
] = tswap32(u32
[i
]);
2240 /* ifla_cacheinfo */
2241 case QEMU_IFLA_INET6_CACHEINFO
:
2242 ci
= NLA_DATA(nlattr
);
2243 ci
->max_reasm_len
= tswap32(ci
->max_reasm_len
);
2244 ci
->tstamp
= tswap32(ci
->tstamp
);
2245 ci
->reachable_time
= tswap32(ci
->reachable_time
);
2246 ci
->retrans_time
= tswap32(ci
->retrans_time
);
2249 case QEMU_IFLA_INET6_STATS
:
2250 case QEMU_IFLA_INET6_ICMP6STATS
:
2251 u64
= NLA_DATA(nlattr
);
2252 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u64
);
2254 u64
[i
] = tswap64(u64
[i
]);
2258 gemu_log("Unknown host AF_INET6 type: %d\n", nlattr
->nla_type
);
2263 static abi_long
host_to_target_data_spec_nlattr(struct nlattr
*nlattr
,
2266 switch (nlattr
->nla_type
) {
2268 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2270 host_to_target_data_inet_nlattr
);
2272 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2274 host_to_target_data_inet6_nlattr
);
2276 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr
->nla_type
);
2282 static abi_long
host_to_target_data_link_rtattr(struct rtattr
*rtattr
)
2285 struct rtnl_link_stats
*st
;
2286 struct rtnl_link_stats64
*st64
;
2287 struct rtnl_link_ifmap
*map
;
2288 struct linkinfo_context li_context
;
2290 switch (rtattr
->rta_type
) {
2292 case QEMU_IFLA_ADDRESS
:
2293 case QEMU_IFLA_BROADCAST
:
2295 case QEMU_IFLA_IFNAME
:
2296 case QEMU_IFLA_QDISC
:
2299 case QEMU_IFLA_OPERSTATE
:
2300 case QEMU_IFLA_LINKMODE
:
2301 case QEMU_IFLA_CARRIER
:
2302 case QEMU_IFLA_PROTO_DOWN
:
2306 case QEMU_IFLA_LINK
:
2307 case QEMU_IFLA_WEIGHT
:
2308 case QEMU_IFLA_TXQLEN
:
2309 case QEMU_IFLA_CARRIER_CHANGES
:
2310 case QEMU_IFLA_NUM_RX_QUEUES
:
2311 case QEMU_IFLA_NUM_TX_QUEUES
:
2312 case QEMU_IFLA_PROMISCUITY
:
2313 case QEMU_IFLA_EXT_MASK
:
2314 case QEMU_IFLA_LINK_NETNSID
:
2315 case QEMU_IFLA_GROUP
:
2316 case QEMU_IFLA_MASTER
:
2317 case QEMU_IFLA_NUM_VF
:
2318 u32
= RTA_DATA(rtattr
);
2319 *u32
= tswap32(*u32
);
2321 /* struct rtnl_link_stats */
2322 case QEMU_IFLA_STATS
:
2323 st
= RTA_DATA(rtattr
);
2324 st
->rx_packets
= tswap32(st
->rx_packets
);
2325 st
->tx_packets
= tswap32(st
->tx_packets
);
2326 st
->rx_bytes
= tswap32(st
->rx_bytes
);
2327 st
->tx_bytes
= tswap32(st
->tx_bytes
);
2328 st
->rx_errors
= tswap32(st
->rx_errors
);
2329 st
->tx_errors
= tswap32(st
->tx_errors
);
2330 st
->rx_dropped
= tswap32(st
->rx_dropped
);
2331 st
->tx_dropped
= tswap32(st
->tx_dropped
);
2332 st
->multicast
= tswap32(st
->multicast
);
2333 st
->collisions
= tswap32(st
->collisions
);
2335 /* detailed rx_errors: */
2336 st
->rx_length_errors
= tswap32(st
->rx_length_errors
);
2337 st
->rx_over_errors
= tswap32(st
->rx_over_errors
);
2338 st
->rx_crc_errors
= tswap32(st
->rx_crc_errors
);
2339 st
->rx_frame_errors
= tswap32(st
->rx_frame_errors
);
2340 st
->rx_fifo_errors
= tswap32(st
->rx_fifo_errors
);
2341 st
->rx_missed_errors
= tswap32(st
->rx_missed_errors
);
2343 /* detailed tx_errors */
2344 st
->tx_aborted_errors
= tswap32(st
->tx_aborted_errors
);
2345 st
->tx_carrier_errors
= tswap32(st
->tx_carrier_errors
);
2346 st
->tx_fifo_errors
= tswap32(st
->tx_fifo_errors
);
2347 st
->tx_heartbeat_errors
= tswap32(st
->tx_heartbeat_errors
);
2348 st
->tx_window_errors
= tswap32(st
->tx_window_errors
);
2351 st
->rx_compressed
= tswap32(st
->rx_compressed
);
2352 st
->tx_compressed
= tswap32(st
->tx_compressed
);
2354 /* struct rtnl_link_stats64 */
2355 case QEMU_IFLA_STATS64
:
2356 st64
= RTA_DATA(rtattr
);
2357 st64
->rx_packets
= tswap64(st64
->rx_packets
);
2358 st64
->tx_packets
= tswap64(st64
->tx_packets
);
2359 st64
->rx_bytes
= tswap64(st64
->rx_bytes
);
2360 st64
->tx_bytes
= tswap64(st64
->tx_bytes
);
2361 st64
->rx_errors
= tswap64(st64
->rx_errors
);
2362 st64
->tx_errors
= tswap64(st64
->tx_errors
);
2363 st64
->rx_dropped
= tswap64(st64
->rx_dropped
);
2364 st64
->tx_dropped
= tswap64(st64
->tx_dropped
);
2365 st64
->multicast
= tswap64(st64
->multicast
);
2366 st64
->collisions
= tswap64(st64
->collisions
);
2368 /* detailed rx_errors: */
2369 st64
->rx_length_errors
= tswap64(st64
->rx_length_errors
);
2370 st64
->rx_over_errors
= tswap64(st64
->rx_over_errors
);
2371 st64
->rx_crc_errors
= tswap64(st64
->rx_crc_errors
);
2372 st64
->rx_frame_errors
= tswap64(st64
->rx_frame_errors
);
2373 st64
->rx_fifo_errors
= tswap64(st64
->rx_fifo_errors
);
2374 st64
->rx_missed_errors
= tswap64(st64
->rx_missed_errors
);
2376 /* detailed tx_errors */
2377 st64
->tx_aborted_errors
= tswap64(st64
->tx_aborted_errors
);
2378 st64
->tx_carrier_errors
= tswap64(st64
->tx_carrier_errors
);
2379 st64
->tx_fifo_errors
= tswap64(st64
->tx_fifo_errors
);
2380 st64
->tx_heartbeat_errors
= tswap64(st64
->tx_heartbeat_errors
);
2381 st64
->tx_window_errors
= tswap64(st64
->tx_window_errors
);
2384 st64
->rx_compressed
= tswap64(st64
->rx_compressed
);
2385 st64
->tx_compressed
= tswap64(st64
->tx_compressed
);
2387 /* struct rtnl_link_ifmap */
2389 map
= RTA_DATA(rtattr
);
2390 map
->mem_start
= tswap64(map
->mem_start
);
2391 map
->mem_end
= tswap64(map
->mem_end
);
2392 map
->base_addr
= tswap64(map
->base_addr
);
2393 map
->irq
= tswap16(map
->irq
);
2396 case QEMU_IFLA_LINKINFO
:
2397 memset(&li_context
, 0, sizeof(li_context
));
2398 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2400 host_to_target_data_linkinfo_nlattr
);
2401 case QEMU_IFLA_AF_SPEC
:
2402 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2404 host_to_target_data_spec_nlattr
);
2406 gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr
->rta_type
);
2412 static abi_long
host_to_target_data_addr_rtattr(struct rtattr
*rtattr
)
2415 struct ifa_cacheinfo
*ci
;
2417 switch (rtattr
->rta_type
) {
2418 /* binary: depends on family type */
2428 u32
= RTA_DATA(rtattr
);
2429 *u32
= tswap32(*u32
);
2431 /* struct ifa_cacheinfo */
2433 ci
= RTA_DATA(rtattr
);
2434 ci
->ifa_prefered
= tswap32(ci
->ifa_prefered
);
2435 ci
->ifa_valid
= tswap32(ci
->ifa_valid
);
2436 ci
->cstamp
= tswap32(ci
->cstamp
);
2437 ci
->tstamp
= tswap32(ci
->tstamp
);
2440 gemu_log("Unknown host IFA type: %d\n", rtattr
->rta_type
);
2446 static abi_long
host_to_target_data_route_rtattr(struct rtattr
*rtattr
)
2449 switch (rtattr
->rta_type
) {
2450 /* binary: depends on family type */
2459 u32
= RTA_DATA(rtattr
);
2460 *u32
= tswap32(*u32
);
2463 gemu_log("Unknown host RTA type: %d\n", rtattr
->rta_type
);
2469 static abi_long
host_to_target_link_rtattr(struct rtattr
*rtattr
,
2470 uint32_t rtattr_len
)
2472 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2473 host_to_target_data_link_rtattr
);
2476 static abi_long
host_to_target_addr_rtattr(struct rtattr
*rtattr
,
2477 uint32_t rtattr_len
)
2479 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2480 host_to_target_data_addr_rtattr
);
2483 static abi_long
host_to_target_route_rtattr(struct rtattr
*rtattr
,
2484 uint32_t rtattr_len
)
2486 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2487 host_to_target_data_route_rtattr
);
2490 static abi_long
host_to_target_data_route(struct nlmsghdr
*nlh
)
2493 struct ifinfomsg
*ifi
;
2494 struct ifaddrmsg
*ifa
;
2497 nlmsg_len
= nlh
->nlmsg_len
;
2498 switch (nlh
->nlmsg_type
) {
2502 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2503 ifi
= NLMSG_DATA(nlh
);
2504 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2505 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2506 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2507 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2508 host_to_target_link_rtattr(IFLA_RTA(ifi
),
2509 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifi
)));
2515 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2516 ifa
= NLMSG_DATA(nlh
);
2517 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2518 host_to_target_addr_rtattr(IFA_RTA(ifa
),
2519 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifa
)));
2525 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2526 rtm
= NLMSG_DATA(nlh
);
2527 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2528 host_to_target_route_rtattr(RTM_RTA(rtm
),
2529 nlmsg_len
- NLMSG_LENGTH(sizeof(*rtm
)));
2533 return -TARGET_EINVAL
;
2538 static inline abi_long
host_to_target_nlmsg_route(struct nlmsghdr
*nlh
,
2541 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_route
);
2544 static abi_long
target_to_host_for_each_rtattr(struct rtattr
*rtattr
,
2546 abi_long (*target_to_host_rtattr
)
2551 while (len
>= sizeof(struct rtattr
)) {
2552 if (tswap16(rtattr
->rta_len
) < sizeof(struct rtattr
) ||
2553 tswap16(rtattr
->rta_len
) > len
) {
2556 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
2557 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
2558 ret
= target_to_host_rtattr(rtattr
);
2562 len
-= RTA_ALIGN(rtattr
->rta_len
);
2563 rtattr
= (struct rtattr
*)(((char *)rtattr
) +
2564 RTA_ALIGN(rtattr
->rta_len
));
2569 static abi_long
target_to_host_data_link_rtattr(struct rtattr
*rtattr
)
2571 switch (rtattr
->rta_type
) {
2573 gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr
->rta_type
);
2579 static abi_long
target_to_host_data_addr_rtattr(struct rtattr
*rtattr
)
2581 switch (rtattr
->rta_type
) {
2582 /* binary: depends on family type */
2587 gemu_log("Unknown target IFA type: %d\n", rtattr
->rta_type
);
2593 static abi_long
target_to_host_data_route_rtattr(struct rtattr
*rtattr
)
2596 switch (rtattr
->rta_type
) {
2597 /* binary: depends on family type */
2605 u32
= RTA_DATA(rtattr
);
2606 *u32
= tswap32(*u32
);
2609 gemu_log("Unknown target RTA type: %d\n", rtattr
->rta_type
);
2615 static void target_to_host_link_rtattr(struct rtattr
*rtattr
,
2616 uint32_t rtattr_len
)
2618 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2619 target_to_host_data_link_rtattr
);
2622 static void target_to_host_addr_rtattr(struct rtattr
*rtattr
,
2623 uint32_t rtattr_len
)
2625 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2626 target_to_host_data_addr_rtattr
);
2629 static void target_to_host_route_rtattr(struct rtattr
*rtattr
,
2630 uint32_t rtattr_len
)
2632 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2633 target_to_host_data_route_rtattr
);
2636 static abi_long
target_to_host_data_route(struct nlmsghdr
*nlh
)
2638 struct ifinfomsg
*ifi
;
2639 struct ifaddrmsg
*ifa
;
2642 switch (nlh
->nlmsg_type
) {
2647 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2648 ifi
= NLMSG_DATA(nlh
);
2649 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2650 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2651 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2652 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2653 target_to_host_link_rtattr(IFLA_RTA(ifi
), nlh
->nlmsg_len
-
2654 NLMSG_LENGTH(sizeof(*ifi
)));
2660 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2661 ifa
= NLMSG_DATA(nlh
);
2662 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2663 target_to_host_addr_rtattr(IFA_RTA(ifa
), nlh
->nlmsg_len
-
2664 NLMSG_LENGTH(sizeof(*ifa
)));
2671 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2672 rtm
= NLMSG_DATA(nlh
);
2673 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2674 target_to_host_route_rtattr(RTM_RTA(rtm
), nlh
->nlmsg_len
-
2675 NLMSG_LENGTH(sizeof(*rtm
)));
2679 return -TARGET_EOPNOTSUPP
;
2684 static abi_long
target_to_host_nlmsg_route(struct nlmsghdr
*nlh
, size_t len
)
2686 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_route
);
2688 #endif /* CONFIG_RTNETLINK */
2690 static abi_long
host_to_target_data_audit(struct nlmsghdr
*nlh
)
2692 switch (nlh
->nlmsg_type
) {
2694 gemu_log("Unknown host audit message type %d\n",
2696 return -TARGET_EINVAL
;
2701 static inline abi_long
host_to_target_nlmsg_audit(struct nlmsghdr
*nlh
,
2704 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_audit
);
2707 static abi_long
target_to_host_data_audit(struct nlmsghdr
*nlh
)
2709 switch (nlh
->nlmsg_type
) {
2711 case AUDIT_FIRST_USER_MSG
... AUDIT_LAST_USER_MSG
:
2712 case AUDIT_FIRST_USER_MSG2
... AUDIT_LAST_USER_MSG2
:
2715 gemu_log("Unknown target audit message type %d\n",
2717 return -TARGET_EINVAL
;
2723 static abi_long
target_to_host_nlmsg_audit(struct nlmsghdr
*nlh
, size_t len
)
2725 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_audit
);
2728 /* do_setsockopt() Must return target values and target errnos. */
2729 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
2730 abi_ulong optval_addr
, socklen_t optlen
)
2734 struct ip_mreqn
*ip_mreq
;
2735 struct ip_mreq_source
*ip_mreq_source
;
2739 /* TCP options all take an 'int' value. */
2740 if (optlen
< sizeof(uint32_t))
2741 return -TARGET_EINVAL
;
2743 if (get_user_u32(val
, optval_addr
))
2744 return -TARGET_EFAULT
;
2745 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2752 case IP_ROUTER_ALERT
:
2756 case IP_MTU_DISCOVER
:
2762 case IP_MULTICAST_TTL
:
2763 case IP_MULTICAST_LOOP
:
2765 if (optlen
>= sizeof(uint32_t)) {
2766 if (get_user_u32(val
, optval_addr
))
2767 return -TARGET_EFAULT
;
2768 } else if (optlen
>= 1) {
2769 if (get_user_u8(val
, optval_addr
))
2770 return -TARGET_EFAULT
;
2772 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2774 case IP_ADD_MEMBERSHIP
:
2775 case IP_DROP_MEMBERSHIP
:
2776 if (optlen
< sizeof (struct target_ip_mreq
) ||
2777 optlen
> sizeof (struct target_ip_mreqn
))
2778 return -TARGET_EINVAL
;
2780 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
2781 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
2782 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
2785 case IP_BLOCK_SOURCE
:
2786 case IP_UNBLOCK_SOURCE
:
2787 case IP_ADD_SOURCE_MEMBERSHIP
:
2788 case IP_DROP_SOURCE_MEMBERSHIP
:
2789 if (optlen
!= sizeof (struct target_ip_mreq_source
))
2790 return -TARGET_EINVAL
;
2792 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2793 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2794 unlock_user (ip_mreq_source
, optval_addr
, 0);
2803 case IPV6_MTU_DISCOVER
:
2806 case IPV6_RECVPKTINFO
:
2808 if (optlen
< sizeof(uint32_t)) {
2809 return -TARGET_EINVAL
;
2811 if (get_user_u32(val
, optval_addr
)) {
2812 return -TARGET_EFAULT
;
2814 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2815 &val
, sizeof(val
)));
2824 /* struct icmp_filter takes an u32 value */
2825 if (optlen
< sizeof(uint32_t)) {
2826 return -TARGET_EINVAL
;
2829 if (get_user_u32(val
, optval_addr
)) {
2830 return -TARGET_EFAULT
;
2832 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2833 &val
, sizeof(val
)));
2840 case TARGET_SOL_SOCKET
:
2842 case TARGET_SO_RCVTIMEO
:
2846 optname
= SO_RCVTIMEO
;
2849 if (optlen
!= sizeof(struct target_timeval
)) {
2850 return -TARGET_EINVAL
;
2853 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2854 return -TARGET_EFAULT
;
2857 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2861 case TARGET_SO_SNDTIMEO
:
2862 optname
= SO_SNDTIMEO
;
2864 case TARGET_SO_ATTACH_FILTER
:
2866 struct target_sock_fprog
*tfprog
;
2867 struct target_sock_filter
*tfilter
;
2868 struct sock_fprog fprog
;
2869 struct sock_filter
*filter
;
2872 if (optlen
!= sizeof(*tfprog
)) {
2873 return -TARGET_EINVAL
;
2875 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2876 return -TARGET_EFAULT
;
2878 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2879 tswapal(tfprog
->filter
), 0)) {
2880 unlock_user_struct(tfprog
, optval_addr
, 1);
2881 return -TARGET_EFAULT
;
2884 fprog
.len
= tswap16(tfprog
->len
);
2885 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2886 if (filter
== NULL
) {
2887 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2888 unlock_user_struct(tfprog
, optval_addr
, 1);
2889 return -TARGET_ENOMEM
;
2891 for (i
= 0; i
< fprog
.len
; i
++) {
2892 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2893 filter
[i
].jt
= tfilter
[i
].jt
;
2894 filter
[i
].jf
= tfilter
[i
].jf
;
2895 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2897 fprog
.filter
= filter
;
2899 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2900 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2903 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2904 unlock_user_struct(tfprog
, optval_addr
, 1);
2907 case TARGET_SO_BINDTODEVICE
:
2909 char *dev_ifname
, *addr_ifname
;
2911 if (optlen
> IFNAMSIZ
- 1) {
2912 optlen
= IFNAMSIZ
- 1;
2914 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2916 return -TARGET_EFAULT
;
2918 optname
= SO_BINDTODEVICE
;
2919 addr_ifname
= alloca(IFNAMSIZ
);
2920 memcpy(addr_ifname
, dev_ifname
, optlen
);
2921 addr_ifname
[optlen
] = 0;
2922 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2923 addr_ifname
, optlen
));
2924 unlock_user (dev_ifname
, optval_addr
, 0);
2927 /* Options with 'int' argument. */
2928 case TARGET_SO_DEBUG
:
2931 case TARGET_SO_REUSEADDR
:
2932 optname
= SO_REUSEADDR
;
2934 case TARGET_SO_TYPE
:
2937 case TARGET_SO_ERROR
:
2940 case TARGET_SO_DONTROUTE
:
2941 optname
= SO_DONTROUTE
;
2943 case TARGET_SO_BROADCAST
:
2944 optname
= SO_BROADCAST
;
2946 case TARGET_SO_SNDBUF
:
2947 optname
= SO_SNDBUF
;
2949 case TARGET_SO_SNDBUFFORCE
:
2950 optname
= SO_SNDBUFFORCE
;
2952 case TARGET_SO_RCVBUF
:
2953 optname
= SO_RCVBUF
;
2955 case TARGET_SO_RCVBUFFORCE
:
2956 optname
= SO_RCVBUFFORCE
;
2958 case TARGET_SO_KEEPALIVE
:
2959 optname
= SO_KEEPALIVE
;
2961 case TARGET_SO_OOBINLINE
:
2962 optname
= SO_OOBINLINE
;
2964 case TARGET_SO_NO_CHECK
:
2965 optname
= SO_NO_CHECK
;
2967 case TARGET_SO_PRIORITY
:
2968 optname
= SO_PRIORITY
;
2971 case TARGET_SO_BSDCOMPAT
:
2972 optname
= SO_BSDCOMPAT
;
2975 case TARGET_SO_PASSCRED
:
2976 optname
= SO_PASSCRED
;
2978 case TARGET_SO_PASSSEC
:
2979 optname
= SO_PASSSEC
;
2981 case TARGET_SO_TIMESTAMP
:
2982 optname
= SO_TIMESTAMP
;
2984 case TARGET_SO_RCVLOWAT
:
2985 optname
= SO_RCVLOWAT
;
2991 if (optlen
< sizeof(uint32_t))
2992 return -TARGET_EINVAL
;
2994 if (get_user_u32(val
, optval_addr
))
2995 return -TARGET_EFAULT
;
2996 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
3000 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
3001 ret
= -TARGET_ENOPROTOOPT
;
3006 /* do_getsockopt() Must return target values and target errnos. */
3007 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
3008 abi_ulong optval_addr
, abi_ulong optlen
)
3015 case TARGET_SOL_SOCKET
:
3018 /* These don't just return a single integer */
3019 case TARGET_SO_LINGER
:
3020 case TARGET_SO_RCVTIMEO
:
3021 case TARGET_SO_SNDTIMEO
:
3022 case TARGET_SO_PEERNAME
:
3024 case TARGET_SO_PEERCRED
: {
3027 struct target_ucred
*tcr
;
3029 if (get_user_u32(len
, optlen
)) {
3030 return -TARGET_EFAULT
;
3033 return -TARGET_EINVAL
;
3037 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
3045 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
3046 return -TARGET_EFAULT
;
3048 __put_user(cr
.pid
, &tcr
->pid
);
3049 __put_user(cr
.uid
, &tcr
->uid
);
3050 __put_user(cr
.gid
, &tcr
->gid
);
3051 unlock_user_struct(tcr
, optval_addr
, 1);
3052 if (put_user_u32(len
, optlen
)) {
3053 return -TARGET_EFAULT
;
3057 /* Options with 'int' argument. */
3058 case TARGET_SO_DEBUG
:
3061 case TARGET_SO_REUSEADDR
:
3062 optname
= SO_REUSEADDR
;
3064 case TARGET_SO_TYPE
:
3067 case TARGET_SO_ERROR
:
3070 case TARGET_SO_DONTROUTE
:
3071 optname
= SO_DONTROUTE
;
3073 case TARGET_SO_BROADCAST
:
3074 optname
= SO_BROADCAST
;
3076 case TARGET_SO_SNDBUF
:
3077 optname
= SO_SNDBUF
;
3079 case TARGET_SO_RCVBUF
:
3080 optname
= SO_RCVBUF
;
3082 case TARGET_SO_KEEPALIVE
:
3083 optname
= SO_KEEPALIVE
;
3085 case TARGET_SO_OOBINLINE
:
3086 optname
= SO_OOBINLINE
;
3088 case TARGET_SO_NO_CHECK
:
3089 optname
= SO_NO_CHECK
;
3091 case TARGET_SO_PRIORITY
:
3092 optname
= SO_PRIORITY
;
3095 case TARGET_SO_BSDCOMPAT
:
3096 optname
= SO_BSDCOMPAT
;
3099 case TARGET_SO_PASSCRED
:
3100 optname
= SO_PASSCRED
;
3102 case TARGET_SO_TIMESTAMP
:
3103 optname
= SO_TIMESTAMP
;
3105 case TARGET_SO_RCVLOWAT
:
3106 optname
= SO_RCVLOWAT
;
3108 case TARGET_SO_ACCEPTCONN
:
3109 optname
= SO_ACCEPTCONN
;
3116 /* TCP options all take an 'int' value. */
3118 if (get_user_u32(len
, optlen
))
3119 return -TARGET_EFAULT
;
3121 return -TARGET_EINVAL
;
3123 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3126 if (optname
== SO_TYPE
) {
3127 val
= host_to_target_sock_type(val
);
3132 if (put_user_u32(val
, optval_addr
))
3133 return -TARGET_EFAULT
;
3135 if (put_user_u8(val
, optval_addr
))
3136 return -TARGET_EFAULT
;
3138 if (put_user_u32(len
, optlen
))
3139 return -TARGET_EFAULT
;
3146 case IP_ROUTER_ALERT
:
3150 case IP_MTU_DISCOVER
:
3156 case IP_MULTICAST_TTL
:
3157 case IP_MULTICAST_LOOP
:
3158 if (get_user_u32(len
, optlen
))
3159 return -TARGET_EFAULT
;
3161 return -TARGET_EINVAL
;
3163 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3166 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
3168 if (put_user_u32(len
, optlen
)
3169 || put_user_u8(val
, optval_addr
))
3170 return -TARGET_EFAULT
;
3172 if (len
> sizeof(int))
3174 if (put_user_u32(len
, optlen
)
3175 || put_user_u32(val
, optval_addr
))
3176 return -TARGET_EFAULT
;
3180 ret
= -TARGET_ENOPROTOOPT
;
3186 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
3188 ret
= -TARGET_EOPNOTSUPP
;
3194 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
3195 abi_ulong count
, int copy
)
3197 struct target_iovec
*target_vec
;
3199 abi_ulong total_len
, max_len
;
3202 bool bad_address
= false;
3208 if (count
> IOV_MAX
) {
3213 vec
= g_try_new0(struct iovec
, count
);
3219 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3220 count
* sizeof(struct target_iovec
), 1);
3221 if (target_vec
== NULL
) {
3226 /* ??? If host page size > target page size, this will result in a
3227 value larger than what we can actually support. */
3228 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
3231 for (i
= 0; i
< count
; i
++) {
3232 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3233 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3238 } else if (len
== 0) {
3239 /* Zero length pointer is ignored. */
3240 vec
[i
].iov_base
= 0;
3242 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
3243 /* If the first buffer pointer is bad, this is a fault. But
3244 * subsequent bad buffers will result in a partial write; this
3245 * is realized by filling the vector with null pointers and
3247 if (!vec
[i
].iov_base
) {
3258 if (len
> max_len
- total_len
) {
3259 len
= max_len
- total_len
;
3262 vec
[i
].iov_len
= len
;
3266 unlock_user(target_vec
, target_addr
, 0);
3271 if (tswapal(target_vec
[i
].iov_len
) > 0) {
3272 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
3275 unlock_user(target_vec
, target_addr
, 0);
3282 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
3283 abi_ulong count
, int copy
)
3285 struct target_iovec
*target_vec
;
3288 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3289 count
* sizeof(struct target_iovec
), 1);
3291 for (i
= 0; i
< count
; i
++) {
3292 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3293 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3297 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
3299 unlock_user(target_vec
, target_addr
, 0);
3305 static inline int target_to_host_sock_type(int *type
)
3308 int target_type
= *type
;
3310 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
3311 case TARGET_SOCK_DGRAM
:
3312 host_type
= SOCK_DGRAM
;
3314 case TARGET_SOCK_STREAM
:
3315 host_type
= SOCK_STREAM
;
3318 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
3321 if (target_type
& TARGET_SOCK_CLOEXEC
) {
3322 #if defined(SOCK_CLOEXEC)
3323 host_type
|= SOCK_CLOEXEC
;
3325 return -TARGET_EINVAL
;
3328 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3329 #if defined(SOCK_NONBLOCK)
3330 host_type
|= SOCK_NONBLOCK
;
3331 #elif !defined(O_NONBLOCK)
3332 return -TARGET_EINVAL
;
3339 /* Try to emulate socket type flags after socket creation. */
3340 static int sock_flags_fixup(int fd
, int target_type
)
3342 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3343 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3344 int flags
= fcntl(fd
, F_GETFL
);
3345 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
3347 return -TARGET_EINVAL
;
3354 static abi_long
packet_target_to_host_sockaddr(void *host_addr
,
3355 abi_ulong target_addr
,
3358 struct sockaddr
*addr
= host_addr
;
3359 struct target_sockaddr
*target_saddr
;
3361 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
3362 if (!target_saddr
) {
3363 return -TARGET_EFAULT
;
3366 memcpy(addr
, target_saddr
, len
);
3367 addr
->sa_family
= tswap16(target_saddr
->sa_family
);
3368 /* spkt_protocol is big-endian */
3370 unlock_user(target_saddr
, target_addr
, 0);
3374 static TargetFdTrans target_packet_trans
= {
3375 .target_to_host_addr
= packet_target_to_host_sockaddr
,
3378 #ifdef CONFIG_RTNETLINK
3379 static abi_long
netlink_route_target_to_host(void *buf
, size_t len
)
3383 ret
= target_to_host_nlmsg_route(buf
, len
);
3391 static abi_long
netlink_route_host_to_target(void *buf
, size_t len
)
3395 ret
= host_to_target_nlmsg_route(buf
, len
);
3403 static TargetFdTrans target_netlink_route_trans
= {
3404 .target_to_host_data
= netlink_route_target_to_host
,
3405 .host_to_target_data
= netlink_route_host_to_target
,
3407 #endif /* CONFIG_RTNETLINK */
3409 static abi_long
netlink_audit_target_to_host(void *buf
, size_t len
)
3413 ret
= target_to_host_nlmsg_audit(buf
, len
);
3421 static abi_long
netlink_audit_host_to_target(void *buf
, size_t len
)
3425 ret
= host_to_target_nlmsg_audit(buf
, len
);
3433 static TargetFdTrans target_netlink_audit_trans
= {
3434 .target_to_host_data
= netlink_audit_target_to_host
,
3435 .host_to_target_data
= netlink_audit_host_to_target
,
3438 /* do_socket() Must return target values and target errnos. */
3439 static abi_long
do_socket(int domain
, int type
, int protocol
)
3441 int target_type
= type
;
3444 ret
= target_to_host_sock_type(&type
);
3449 if (domain
== PF_NETLINK
&& !(
3450 #ifdef CONFIG_RTNETLINK
3451 protocol
== NETLINK_ROUTE
||
3453 protocol
== NETLINK_KOBJECT_UEVENT
||
3454 protocol
== NETLINK_AUDIT
)) {
3455 return -EPFNOSUPPORT
;
3458 if (domain
== AF_PACKET
||
3459 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
3460 protocol
= tswap16(protocol
);
3463 ret
= get_errno(socket(domain
, type
, protocol
));
3465 ret
= sock_flags_fixup(ret
, target_type
);
3466 if (type
== SOCK_PACKET
) {
3467 /* Manage an obsolete case :
3468 * if socket type is SOCK_PACKET, bind by name
3470 fd_trans_register(ret
, &target_packet_trans
);
3471 } else if (domain
== PF_NETLINK
) {
3473 #ifdef CONFIG_RTNETLINK
3475 fd_trans_register(ret
, &target_netlink_route_trans
);
3478 case NETLINK_KOBJECT_UEVENT
:
3479 /* nothing to do: messages are strings */
3482 fd_trans_register(ret
, &target_netlink_audit_trans
);
3485 g_assert_not_reached();
3492 /* do_bind() Must return target values and target errnos. */
3493 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
3499 if ((int)addrlen
< 0) {
3500 return -TARGET_EINVAL
;
3503 addr
= alloca(addrlen
+1);
3505 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3509 return get_errno(bind(sockfd
, addr
, addrlen
));
3512 /* do_connect() Must return target values and target errnos. */
3513 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
3519 if ((int)addrlen
< 0) {
3520 return -TARGET_EINVAL
;
3523 addr
= alloca(addrlen
+1);
3525 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3529 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
3532 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3533 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
3534 int flags
, int send
)
3540 abi_ulong target_vec
;
3542 if (msgp
->msg_name
) {
3543 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
3544 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
3545 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
3546 tswapal(msgp
->msg_name
),
3548 if (ret
== -TARGET_EFAULT
) {
3549 /* For connected sockets msg_name and msg_namelen must
3550 * be ignored, so returning EFAULT immediately is wrong.
3551 * Instead, pass a bad msg_name to the host kernel, and
3552 * let it decide whether to return EFAULT or not.
3554 msg
.msg_name
= (void *)-1;
3559 msg
.msg_name
= NULL
;
3560 msg
.msg_namelen
= 0;
3562 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3563 msg
.msg_control
= alloca(msg
.msg_controllen
);
3564 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3566 count
= tswapal(msgp
->msg_iovlen
);
3567 target_vec
= tswapal(msgp
->msg_iov
);
3569 if (count
> IOV_MAX
) {
3570 /* sendrcvmsg returns a different errno for this condition than
3571 * readv/writev, so we must catch it here before lock_iovec() does.
3573 ret
= -TARGET_EMSGSIZE
;
3577 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3578 target_vec
, count
, send
);
3580 ret
= -host_to_target_errno(errno
);
3583 msg
.msg_iovlen
= count
;
3587 if (fd_trans_target_to_host_data(fd
)) {
3590 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3591 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3592 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3593 msg
.msg_iov
->iov_len
);
3595 msg
.msg_iov
->iov_base
= host_msg
;
3596 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3600 ret
= target_to_host_cmsg(&msg
, msgp
);
3602 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3606 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3607 if (!is_error(ret
)) {
3609 if (fd_trans_host_to_target_data(fd
)) {
3610 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3613 ret
= host_to_target_cmsg(msgp
, &msg
);
3615 if (!is_error(ret
)) {
3616 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3617 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
3618 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3619 msg
.msg_name
, msg
.msg_namelen
);
3631 unlock_iovec(vec
, target_vec
, count
, !send
);
3636 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3637 int flags
, int send
)
3640 struct target_msghdr
*msgp
;
3642 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3646 return -TARGET_EFAULT
;
3648 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3649 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3653 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3654 * so it might not have this *mmsg-specific flag either.
3656 #ifndef MSG_WAITFORONE
3657 #define MSG_WAITFORONE 0x10000
3660 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3661 unsigned int vlen
, unsigned int flags
,
3664 struct target_mmsghdr
*mmsgp
;
3668 if (vlen
> UIO_MAXIOV
) {
3672 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3674 return -TARGET_EFAULT
;
3677 for (i
= 0; i
< vlen
; i
++) {
3678 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3679 if (is_error(ret
)) {
3682 mmsgp
[i
].msg_len
= tswap32(ret
);
3683 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3684 if (flags
& MSG_WAITFORONE
) {
3685 flags
|= MSG_DONTWAIT
;
3689 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3691 /* Return number of datagrams sent if we sent any at all;
3692 * otherwise return the error.
3700 /* do_accept4() Must return target values and target errnos. */
3701 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3702 abi_ulong target_addrlen_addr
, int flags
)
3709 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3711 if (target_addr
== 0) {
3712 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3715 /* linux returns EINVAL if addrlen pointer is invalid */
3716 if (get_user_u32(addrlen
, target_addrlen_addr
))
3717 return -TARGET_EINVAL
;
3719 if ((int)addrlen
< 0) {
3720 return -TARGET_EINVAL
;
3723 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3724 return -TARGET_EINVAL
;
3726 addr
= alloca(addrlen
);
3728 ret
= get_errno(safe_accept4(fd
, addr
, &addrlen
, host_flags
));
3729 if (!is_error(ret
)) {
3730 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3731 if (put_user_u32(addrlen
, target_addrlen_addr
))
3732 ret
= -TARGET_EFAULT
;
3737 /* do_getpeername() Must return target values and target errnos. */
3738 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3739 abi_ulong target_addrlen_addr
)
3745 if (get_user_u32(addrlen
, target_addrlen_addr
))
3746 return -TARGET_EFAULT
;
3748 if ((int)addrlen
< 0) {
3749 return -TARGET_EINVAL
;
3752 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3753 return -TARGET_EFAULT
;
3755 addr
= alloca(addrlen
);
3757 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
3758 if (!is_error(ret
)) {
3759 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3760 if (put_user_u32(addrlen
, target_addrlen_addr
))
3761 ret
= -TARGET_EFAULT
;
3766 /* do_getsockname() Must return target values and target errnos. */
3767 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3768 abi_ulong target_addrlen_addr
)
3774 if (get_user_u32(addrlen
, target_addrlen_addr
))
3775 return -TARGET_EFAULT
;
3777 if ((int)addrlen
< 0) {
3778 return -TARGET_EINVAL
;
3781 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3782 return -TARGET_EFAULT
;
3784 addr
= alloca(addrlen
);
3786 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
3787 if (!is_error(ret
)) {
3788 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3789 if (put_user_u32(addrlen
, target_addrlen_addr
))
3790 ret
= -TARGET_EFAULT
;
3795 /* do_socketpair() Must return target values and target errnos. */
3796 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3797 abi_ulong target_tab_addr
)
3802 target_to_host_sock_type(&type
);
3804 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3805 if (!is_error(ret
)) {
3806 if (put_user_s32(tab
[0], target_tab_addr
)
3807 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3808 ret
= -TARGET_EFAULT
;
3813 /* do_sendto() Must return target values and target errnos. */
3814 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3815 abi_ulong target_addr
, socklen_t addrlen
)
3819 void *copy_msg
= NULL
;
3822 if ((int)addrlen
< 0) {
3823 return -TARGET_EINVAL
;
3826 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3828 return -TARGET_EFAULT
;
3829 if (fd_trans_target_to_host_data(fd
)) {
3830 copy_msg
= host_msg
;
3831 host_msg
= g_malloc(len
);
3832 memcpy(host_msg
, copy_msg
, len
);
3833 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3839 addr
= alloca(addrlen
+1);
3840 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3844 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3846 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3851 host_msg
= copy_msg
;
3853 unlock_user(host_msg
, msg
, 0);
3857 /* do_recvfrom() Must return target values and target errnos. */
3858 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3859 abi_ulong target_addr
,
3860 abi_ulong target_addrlen
)
3867 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3869 return -TARGET_EFAULT
;
3871 if (get_user_u32(addrlen
, target_addrlen
)) {
3872 ret
= -TARGET_EFAULT
;
3875 if ((int)addrlen
< 0) {
3876 ret
= -TARGET_EINVAL
;
3879 addr
= alloca(addrlen
);
3880 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
3883 addr
= NULL
; /* To keep compiler quiet. */
3884 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
3886 if (!is_error(ret
)) {
3887 if (fd_trans_host_to_target_data(fd
)) {
3888 ret
= fd_trans_host_to_target_data(fd
)(host_msg
, ret
);
3891 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3892 if (put_user_u32(addrlen
, target_addrlen
)) {
3893 ret
= -TARGET_EFAULT
;
3897 unlock_user(host_msg
, msg
, len
);
3900 unlock_user(host_msg
, msg
, 0);
3905 #ifdef TARGET_NR_socketcall
3906 /* do_socketcall() must return target values and target errnos. */
3907 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
3909 static const unsigned nargs
[] = { /* number of arguments per operation */
3910 [TARGET_SYS_SOCKET
] = 3, /* domain, type, protocol */
3911 [TARGET_SYS_BIND
] = 3, /* fd, addr, addrlen */
3912 [TARGET_SYS_CONNECT
] = 3, /* fd, addr, addrlen */
3913 [TARGET_SYS_LISTEN
] = 2, /* fd, backlog */
3914 [TARGET_SYS_ACCEPT
] = 3, /* fd, addr, addrlen */
3915 [TARGET_SYS_GETSOCKNAME
] = 3, /* fd, addr, addrlen */
3916 [TARGET_SYS_GETPEERNAME
] = 3, /* fd, addr, addrlen */
3917 [TARGET_SYS_SOCKETPAIR
] = 4, /* domain, type, protocol, tab */
3918 [TARGET_SYS_SEND
] = 4, /* fd, msg, len, flags */
3919 [TARGET_SYS_RECV
] = 4, /* fd, msg, len, flags */
3920 [TARGET_SYS_SENDTO
] = 6, /* fd, msg, len, flags, addr, addrlen */
3921 [TARGET_SYS_RECVFROM
] = 6, /* fd, msg, len, flags, addr, addrlen */
3922 [TARGET_SYS_SHUTDOWN
] = 2, /* fd, how */
3923 [TARGET_SYS_SETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
3924 [TARGET_SYS_GETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
3925 [TARGET_SYS_SENDMSG
] = 3, /* fd, msg, flags */
3926 [TARGET_SYS_RECVMSG
] = 3, /* fd, msg, flags */
3927 [TARGET_SYS_ACCEPT4
] = 4, /* fd, addr, addrlen, flags */
3928 [TARGET_SYS_RECVMMSG
] = 4, /* fd, msgvec, vlen, flags */
3929 [TARGET_SYS_SENDMMSG
] = 4, /* fd, msgvec, vlen, flags */
3931 abi_long a
[6]; /* max 6 args */
3934 /* check the range of the first argument num */
3935 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3936 if (num
< 1 || num
> TARGET_SYS_SENDMMSG
) {
3937 return -TARGET_EINVAL
;
3939 /* ensure we have space for args */
3940 if (nargs
[num
] > ARRAY_SIZE(a
)) {
3941 return -TARGET_EINVAL
;
3943 /* collect the arguments in a[] according to nargs[] */
3944 for (i
= 0; i
< nargs
[num
]; ++i
) {
3945 if (get_user_ual(a
[i
], vptr
+ i
* sizeof(abi_long
)) != 0) {
3946 return -TARGET_EFAULT
;
3949 /* now when we have the args, invoke the appropriate underlying function */
3951 case TARGET_SYS_SOCKET
: /* domain, type, protocol */
3952 return do_socket(a
[0], a
[1], a
[2]);
3953 case TARGET_SYS_BIND
: /* sockfd, addr, addrlen */
3954 return do_bind(a
[0], a
[1], a
[2]);
3955 case TARGET_SYS_CONNECT
: /* sockfd, addr, addrlen */
3956 return do_connect(a
[0], a
[1], a
[2]);
3957 case TARGET_SYS_LISTEN
: /* sockfd, backlog */
3958 return get_errno(listen(a
[0], a
[1]));
3959 case TARGET_SYS_ACCEPT
: /* sockfd, addr, addrlen */
3960 return do_accept4(a
[0], a
[1], a
[2], 0);
3961 case TARGET_SYS_GETSOCKNAME
: /* sockfd, addr, addrlen */
3962 return do_getsockname(a
[0], a
[1], a
[2]);
3963 case TARGET_SYS_GETPEERNAME
: /* sockfd, addr, addrlen */
3964 return do_getpeername(a
[0], a
[1], a
[2]);
3965 case TARGET_SYS_SOCKETPAIR
: /* domain, type, protocol, tab */
3966 return do_socketpair(a
[0], a
[1], a
[2], a
[3]);
3967 case TARGET_SYS_SEND
: /* sockfd, msg, len, flags */
3968 return do_sendto(a
[0], a
[1], a
[2], a
[3], 0, 0);
3969 case TARGET_SYS_RECV
: /* sockfd, msg, len, flags */
3970 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], 0, 0);
3971 case TARGET_SYS_SENDTO
: /* sockfd, msg, len, flags, addr, addrlen */
3972 return do_sendto(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3973 case TARGET_SYS_RECVFROM
: /* sockfd, msg, len, flags, addr, addrlen */
3974 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3975 case TARGET_SYS_SHUTDOWN
: /* sockfd, how */
3976 return get_errno(shutdown(a
[0], a
[1]));
3977 case TARGET_SYS_SETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
3978 return do_setsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3979 case TARGET_SYS_GETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
3980 return do_getsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3981 case TARGET_SYS_SENDMSG
: /* sockfd, msg, flags */
3982 return do_sendrecvmsg(a
[0], a
[1], a
[2], 1);
3983 case TARGET_SYS_RECVMSG
: /* sockfd, msg, flags */
3984 return do_sendrecvmsg(a
[0], a
[1], a
[2], 0);
3985 case TARGET_SYS_ACCEPT4
: /* sockfd, addr, addrlen, flags */
3986 return do_accept4(a
[0], a
[1], a
[2], a
[3]);
3987 case TARGET_SYS_RECVMMSG
: /* sockfd, msgvec, vlen, flags */
3988 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 0);
3989 case TARGET_SYS_SENDMMSG
: /* sockfd, msgvec, vlen, flags */
3990 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 1);
3992 gemu_log("Unsupported socketcall: %d\n", num
);
3993 return -TARGET_EINVAL
;
3998 #define N_SHM_REGIONS 32
4000 static struct shm_region
{
4004 } shm_regions
[N_SHM_REGIONS
];
4006 #ifndef TARGET_SEMID64_DS
4007 /* asm-generic version of this struct */
4008 struct target_semid64_ds
4010 struct target_ipc_perm sem_perm
;
4011 abi_ulong sem_otime
;
4012 #if TARGET_ABI_BITS == 32
4013 abi_ulong __unused1
;
4015 abi_ulong sem_ctime
;
4016 #if TARGET_ABI_BITS == 32
4017 abi_ulong __unused2
;
4019 abi_ulong sem_nsems
;
4020 abi_ulong __unused3
;
4021 abi_ulong __unused4
;
4025 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
4026 abi_ulong target_addr
)
4028 struct target_ipc_perm
*target_ip
;
4029 struct target_semid64_ds
*target_sd
;
4031 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4032 return -TARGET_EFAULT
;
4033 target_ip
= &(target_sd
->sem_perm
);
4034 host_ip
->__key
= tswap32(target_ip
->__key
);
4035 host_ip
->uid
= tswap32(target_ip
->uid
);
4036 host_ip
->gid
= tswap32(target_ip
->gid
);
4037 host_ip
->cuid
= tswap32(target_ip
->cuid
);
4038 host_ip
->cgid
= tswap32(target_ip
->cgid
);
4039 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4040 host_ip
->mode
= tswap32(target_ip
->mode
);
4042 host_ip
->mode
= tswap16(target_ip
->mode
);
4044 #if defined(TARGET_PPC)
4045 host_ip
->__seq
= tswap32(target_ip
->__seq
);
4047 host_ip
->__seq
= tswap16(target_ip
->__seq
);
4049 unlock_user_struct(target_sd
, target_addr
, 0);
4053 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
4054 struct ipc_perm
*host_ip
)
4056 struct target_ipc_perm
*target_ip
;
4057 struct target_semid64_ds
*target_sd
;
4059 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4060 return -TARGET_EFAULT
;
4061 target_ip
= &(target_sd
->sem_perm
);
4062 target_ip
->__key
= tswap32(host_ip
->__key
);
4063 target_ip
->uid
= tswap32(host_ip
->uid
);
4064 target_ip
->gid
= tswap32(host_ip
->gid
);
4065 target_ip
->cuid
= tswap32(host_ip
->cuid
);
4066 target_ip
->cgid
= tswap32(host_ip
->cgid
);
4067 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4068 target_ip
->mode
= tswap32(host_ip
->mode
);
4070 target_ip
->mode
= tswap16(host_ip
->mode
);
4072 #if defined(TARGET_PPC)
4073 target_ip
->__seq
= tswap32(host_ip
->__seq
);
4075 target_ip
->__seq
= tswap16(host_ip
->__seq
);
4077 unlock_user_struct(target_sd
, target_addr
, 1);
4081 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
4082 abi_ulong target_addr
)
4084 struct target_semid64_ds
*target_sd
;
4086 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4087 return -TARGET_EFAULT
;
4088 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
4089 return -TARGET_EFAULT
;
4090 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
4091 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
4092 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
4093 unlock_user_struct(target_sd
, target_addr
, 0);
4097 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
4098 struct semid_ds
*host_sd
)
4100 struct target_semid64_ds
*target_sd
;
4102 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4103 return -TARGET_EFAULT
;
4104 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
4105 return -TARGET_EFAULT
;
4106 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
4107 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
4108 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
4109 unlock_user_struct(target_sd
, target_addr
, 1);
4113 struct target_seminfo
{
4126 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
4127 struct seminfo
*host_seminfo
)
4129 struct target_seminfo
*target_seminfo
;
4130 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
4131 return -TARGET_EFAULT
;
4132 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
4133 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
4134 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
4135 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
4136 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
4137 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
4138 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
4139 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
4140 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
4141 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
4142 unlock_user_struct(target_seminfo
, target_addr
, 1);
4148 struct semid_ds
*buf
;
4149 unsigned short *array
;
4150 struct seminfo
*__buf
;
4153 union target_semun
{
4160 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
4161 abi_ulong target_addr
)
4164 unsigned short *array
;
4166 struct semid_ds semid_ds
;
4169 semun
.buf
= &semid_ds
;
4171 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4173 return get_errno(ret
);
4175 nsems
= semid_ds
.sem_nsems
;
4177 *host_array
= g_try_new(unsigned short, nsems
);
4179 return -TARGET_ENOMEM
;
4181 array
= lock_user(VERIFY_READ
, target_addr
,
4182 nsems
*sizeof(unsigned short), 1);
4184 g_free(*host_array
);
4185 return -TARGET_EFAULT
;
4188 for(i
=0; i
<nsems
; i
++) {
4189 __get_user((*host_array
)[i
], &array
[i
]);
4191 unlock_user(array
, target_addr
, 0);
4196 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
4197 unsigned short **host_array
)
4200 unsigned short *array
;
4202 struct semid_ds semid_ds
;
4205 semun
.buf
= &semid_ds
;
4207 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4209 return get_errno(ret
);
4211 nsems
= semid_ds
.sem_nsems
;
4213 array
= lock_user(VERIFY_WRITE
, target_addr
,
4214 nsems
*sizeof(unsigned short), 0);
4216 return -TARGET_EFAULT
;
4218 for(i
=0; i
<nsems
; i
++) {
4219 __put_user((*host_array
)[i
], &array
[i
]);
4221 g_free(*host_array
);
4222 unlock_user(array
, target_addr
, 1);
4227 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
4228 abi_ulong target_arg
)
4230 union target_semun target_su
= { .buf
= target_arg
};
4232 struct semid_ds dsarg
;
4233 unsigned short *array
= NULL
;
4234 struct seminfo seminfo
;
4235 abi_long ret
= -TARGET_EINVAL
;
4242 /* In 64 bit cross-endian situations, we will erroneously pick up
4243 * the wrong half of the union for the "val" element. To rectify
4244 * this, the entire 8-byte structure is byteswapped, followed by
4245 * a swap of the 4 byte val field. In other cases, the data is
4246 * already in proper host byte order. */
4247 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
4248 target_su
.buf
= tswapal(target_su
.buf
);
4249 arg
.val
= tswap32(target_su
.val
);
4251 arg
.val
= target_su
.val
;
4253 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4257 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
4261 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4262 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
4269 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
4273 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4274 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
4280 arg
.__buf
= &seminfo
;
4281 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4282 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
4290 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
4297 struct target_sembuf
{
4298 unsigned short sem_num
;
4303 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
4304 abi_ulong target_addr
,
4307 struct target_sembuf
*target_sembuf
;
4310 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
4311 nsops
*sizeof(struct target_sembuf
), 1);
4313 return -TARGET_EFAULT
;
4315 for(i
=0; i
<nsops
; i
++) {
4316 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
4317 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
4318 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
4321 unlock_user(target_sembuf
, target_addr
, 0);
4326 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
4328 struct sembuf sops
[nsops
];
4330 if (target_to_host_sembuf(sops
, ptr
, nsops
))
4331 return -TARGET_EFAULT
;
4333 return get_errno(safe_semtimedop(semid
, sops
, nsops
, NULL
));
4336 struct target_msqid_ds
4338 struct target_ipc_perm msg_perm
;
4339 abi_ulong msg_stime
;
4340 #if TARGET_ABI_BITS == 32
4341 abi_ulong __unused1
;
4343 abi_ulong msg_rtime
;
4344 #if TARGET_ABI_BITS == 32
4345 abi_ulong __unused2
;
4347 abi_ulong msg_ctime
;
4348 #if TARGET_ABI_BITS == 32
4349 abi_ulong __unused3
;
4351 abi_ulong __msg_cbytes
;
4353 abi_ulong msg_qbytes
;
4354 abi_ulong msg_lspid
;
4355 abi_ulong msg_lrpid
;
4356 abi_ulong __unused4
;
4357 abi_ulong __unused5
;
4360 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
4361 abi_ulong target_addr
)
4363 struct target_msqid_ds
*target_md
;
4365 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
4366 return -TARGET_EFAULT
;
4367 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
4368 return -TARGET_EFAULT
;
4369 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
4370 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
4371 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
4372 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
4373 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
4374 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
4375 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
4376 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
4377 unlock_user_struct(target_md
, target_addr
, 0);
4381 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
4382 struct msqid_ds
*host_md
)
4384 struct target_msqid_ds
*target_md
;
4386 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
4387 return -TARGET_EFAULT
;
4388 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
4389 return -TARGET_EFAULT
;
4390 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
4391 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
4392 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
4393 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
4394 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
4395 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
4396 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
4397 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
4398 unlock_user_struct(target_md
, target_addr
, 1);
4402 struct target_msginfo
{
4410 unsigned short int msgseg
;
4413 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
4414 struct msginfo
*host_msginfo
)
4416 struct target_msginfo
*target_msginfo
;
4417 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
4418 return -TARGET_EFAULT
;
4419 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
4420 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
4421 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
4422 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
4423 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
4424 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
4425 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
4426 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
4427 unlock_user_struct(target_msginfo
, target_addr
, 1);
4431 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
4433 struct msqid_ds dsarg
;
4434 struct msginfo msginfo
;
4435 abi_long ret
= -TARGET_EINVAL
;
4443 if (target_to_host_msqid_ds(&dsarg
,ptr
))
4444 return -TARGET_EFAULT
;
4445 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
4446 if (host_to_target_msqid_ds(ptr
,&dsarg
))
4447 return -TARGET_EFAULT
;
4450 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
4454 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
4455 if (host_to_target_msginfo(ptr
, &msginfo
))
4456 return -TARGET_EFAULT
;
4463 struct target_msgbuf
{
4468 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
4469 ssize_t msgsz
, int msgflg
)
4471 struct target_msgbuf
*target_mb
;
4472 struct msgbuf
*host_mb
;
4476 return -TARGET_EINVAL
;
4479 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
4480 return -TARGET_EFAULT
;
4481 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4483 unlock_user_struct(target_mb
, msgp
, 0);
4484 return -TARGET_ENOMEM
;
4486 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
4487 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
4488 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
4490 unlock_user_struct(target_mb
, msgp
, 0);
4495 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
4496 ssize_t msgsz
, abi_long msgtyp
,
4499 struct target_msgbuf
*target_mb
;
4501 struct msgbuf
*host_mb
;
4505 return -TARGET_EINVAL
;
4508 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
4509 return -TARGET_EFAULT
;
4511 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4513 ret
= -TARGET_ENOMEM
;
4516 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
4519 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
4520 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
4521 if (!target_mtext
) {
4522 ret
= -TARGET_EFAULT
;
4525 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
4526 unlock_user(target_mtext
, target_mtext_addr
, ret
);
4529 target_mb
->mtype
= tswapal(host_mb
->mtype
);
4533 unlock_user_struct(target_mb
, msgp
, 1);
4538 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
4539 abi_ulong target_addr
)
4541 struct target_shmid_ds
*target_sd
;
4543 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4544 return -TARGET_EFAULT
;
4545 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
4546 return -TARGET_EFAULT
;
4547 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4548 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4549 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4550 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4551 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4552 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4553 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4554 unlock_user_struct(target_sd
, target_addr
, 0);
4558 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
4559 struct shmid_ds
*host_sd
)
4561 struct target_shmid_ds
*target_sd
;
4563 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4564 return -TARGET_EFAULT
;
4565 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
4566 return -TARGET_EFAULT
;
4567 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4568 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4569 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4570 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4571 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4572 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4573 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4574 unlock_user_struct(target_sd
, target_addr
, 1);
4578 struct target_shminfo
{
4586 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
4587 struct shminfo
*host_shminfo
)
4589 struct target_shminfo
*target_shminfo
;
4590 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
4591 return -TARGET_EFAULT
;
4592 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
4593 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
4594 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
4595 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
4596 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
4597 unlock_user_struct(target_shminfo
, target_addr
, 1);
4601 struct target_shm_info
{
4606 abi_ulong swap_attempts
;
4607 abi_ulong swap_successes
;
4610 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
4611 struct shm_info
*host_shm_info
)
4613 struct target_shm_info
*target_shm_info
;
4614 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
4615 return -TARGET_EFAULT
;
4616 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
4617 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
4618 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
4619 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
4620 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
4621 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
4622 unlock_user_struct(target_shm_info
, target_addr
, 1);
4626 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
4628 struct shmid_ds dsarg
;
4629 struct shminfo shminfo
;
4630 struct shm_info shm_info
;
4631 abi_long ret
= -TARGET_EINVAL
;
4639 if (target_to_host_shmid_ds(&dsarg
, buf
))
4640 return -TARGET_EFAULT
;
4641 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
4642 if (host_to_target_shmid_ds(buf
, &dsarg
))
4643 return -TARGET_EFAULT
;
4646 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
4647 if (host_to_target_shminfo(buf
, &shminfo
))
4648 return -TARGET_EFAULT
;
4651 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
4652 if (host_to_target_shm_info(buf
, &shm_info
))
4653 return -TARGET_EFAULT
;
4658 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
4665 #ifndef TARGET_FORCE_SHMLBA
4666 /* For most architectures, SHMLBA is the same as the page size;
4667 * some architectures have larger values, in which case they should
4668 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4669 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4670 * and defining its own value for SHMLBA.
4672 * The kernel also permits SHMLBA to be set by the architecture to a
4673 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4674 * this means that addresses are rounded to the large size if
4675 * SHM_RND is set but addresses not aligned to that size are not rejected
4676 * as long as they are at least page-aligned. Since the only architecture
4677 * which uses this is ia64 this code doesn't provide for that oddity.
4679 static inline abi_ulong
target_shmlba(CPUArchState
*cpu_env
)
4681 return TARGET_PAGE_SIZE
;
4685 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
4686 int shmid
, abi_ulong shmaddr
, int shmflg
)
4690 struct shmid_ds shm_info
;
4694 /* find out the length of the shared memory segment */
4695 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4696 if (is_error(ret
)) {
4697 /* can't get length, bail out */
4701 shmlba
= target_shmlba(cpu_env
);
4703 if (shmaddr
& (shmlba
- 1)) {
4704 if (shmflg
& SHM_RND
) {
4705 shmaddr
&= ~(shmlba
- 1);
4707 return -TARGET_EINVAL
;
4714 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
4716 abi_ulong mmap_start
;
4718 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
4720 if (mmap_start
== -1) {
4722 host_raddr
= (void *)-1;
4724 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
4727 if (host_raddr
== (void *)-1) {
4729 return get_errno((long)host_raddr
);
4731 raddr
=h2g((unsigned long)host_raddr
);
4733 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
4734 PAGE_VALID
| PAGE_READ
|
4735 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
4737 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4738 if (!shm_regions
[i
].in_use
) {
4739 shm_regions
[i
].in_use
= true;
4740 shm_regions
[i
].start
= raddr
;
4741 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4751 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4755 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4756 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4757 shm_regions
[i
].in_use
= false;
4758 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
4763 return get_errno(shmdt(g2h(shmaddr
)));
4766 #ifdef TARGET_NR_ipc
4767 /* ??? This only works with linear mappings. */
4768 /* do_ipc() must return target values and target errnos. */
4769 static abi_long
do_ipc(CPUArchState
*cpu_env
,
4770 unsigned int call
, abi_long first
,
4771 abi_long second
, abi_long third
,
4772 abi_long ptr
, abi_long fifth
)
4777 version
= call
>> 16;
4782 ret
= do_semop(first
, ptr
, second
);
4786 ret
= get_errno(semget(first
, second
, third
));
4789 case IPCOP_semctl
: {
4790 /* The semun argument to semctl is passed by value, so dereference the
4793 get_user_ual(atptr
, ptr
);
4794 ret
= do_semctl(first
, second
, third
, atptr
);
4799 ret
= get_errno(msgget(first
, second
));
4803 ret
= do_msgsnd(first
, ptr
, second
, third
);
4807 ret
= do_msgctl(first
, second
, ptr
);
4814 struct target_ipc_kludge
{
4819 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
4820 ret
= -TARGET_EFAULT
;
4824 ret
= do_msgrcv(first
, tswapal(tmp
->msgp
), second
, tswapal(tmp
->msgtyp
), third
);
4826 unlock_user_struct(tmp
, ptr
, 0);
4830 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
4839 raddr
= do_shmat(cpu_env
, first
, ptr
, second
);
4840 if (is_error(raddr
))
4841 return get_errno(raddr
);
4842 if (put_user_ual(raddr
, third
))
4843 return -TARGET_EFAULT
;
4847 ret
= -TARGET_EINVAL
;
4852 ret
= do_shmdt(ptr
);
4856 /* IPC_* flag values are the same on all linux platforms */
4857 ret
= get_errno(shmget(first
, second
, third
));
4860 /* IPC_* and SHM_* command values are the same on all linux platforms */
4862 ret
= do_shmctl(first
, second
, ptr
);
4865 gemu_log("Unsupported ipc call: %d (version %d)\n", call
, version
);
4866 ret
= -TARGET_ENOSYS
;
4873 /* kernel structure types definitions */
4875 #define STRUCT(name, ...) STRUCT_ ## name,
4876 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4878 #include "syscall_types.h"
4882 #undef STRUCT_SPECIAL
4884 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4885 #define STRUCT_SPECIAL(name)
4886 #include "syscall_types.h"
4888 #undef STRUCT_SPECIAL
4890 typedef struct IOCTLEntry IOCTLEntry
;
4892 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4893 int fd
, int cmd
, abi_long arg
);
4897 unsigned int host_cmd
;
4900 do_ioctl_fn
*do_ioctl
;
4901 const argtype arg_type
[5];
4904 #define IOC_R 0x0001
4905 #define IOC_W 0x0002
4906 #define IOC_RW (IOC_R | IOC_W)
4908 #define MAX_STRUCT_SIZE 4096
4910 #ifdef CONFIG_FIEMAP
4911 /* So fiemap access checks don't overflow on 32 bit systems.
4912 * This is very slightly smaller than the limit imposed by
4913 * the underlying kernel.
4915 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4916 / sizeof(struct fiemap_extent))
4918 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4919 int fd
, int cmd
, abi_long arg
)
4921 /* The parameter for this ioctl is a struct fiemap followed
4922 * by an array of struct fiemap_extent whose size is set
4923 * in fiemap->fm_extent_count. The array is filled in by the
4926 int target_size_in
, target_size_out
;
4928 const argtype
*arg_type
= ie
->arg_type
;
4929 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
4932 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
4936 assert(arg_type
[0] == TYPE_PTR
);
4937 assert(ie
->access
== IOC_RW
);
4939 target_size_in
= thunk_type_size(arg_type
, 0);
4940 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
4942 return -TARGET_EFAULT
;
4944 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4945 unlock_user(argptr
, arg
, 0);
4946 fm
= (struct fiemap
*)buf_temp
;
4947 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
4948 return -TARGET_EINVAL
;
4951 outbufsz
= sizeof (*fm
) +
4952 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
4954 if (outbufsz
> MAX_STRUCT_SIZE
) {
4955 /* We can't fit all the extents into the fixed size buffer.
4956 * Allocate one that is large enough and use it instead.
4958 fm
= g_try_malloc(outbufsz
);
4960 return -TARGET_ENOMEM
;
4962 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
4965 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, fm
));
4966 if (!is_error(ret
)) {
4967 target_size_out
= target_size_in
;
4968 /* An extent_count of 0 means we were only counting the extents
4969 * so there are no structs to copy
4971 if (fm
->fm_extent_count
!= 0) {
4972 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
4974 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
4976 ret
= -TARGET_EFAULT
;
4978 /* Convert the struct fiemap */
4979 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
4980 if (fm
->fm_extent_count
!= 0) {
4981 p
= argptr
+ target_size_in
;
4982 /* ...and then all the struct fiemap_extents */
4983 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
4984 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
4989 unlock_user(argptr
, arg
, target_size_out
);
4999 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5000 int fd
, int cmd
, abi_long arg
)
5002 const argtype
*arg_type
= ie
->arg_type
;
5006 struct ifconf
*host_ifconf
;
5008 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
5009 int target_ifreq_size
;
5014 abi_long target_ifc_buf
;
5018 assert(arg_type
[0] == TYPE_PTR
);
5019 assert(ie
->access
== IOC_RW
);
5022 target_size
= thunk_type_size(arg_type
, 0);
5024 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5026 return -TARGET_EFAULT
;
5027 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5028 unlock_user(argptr
, arg
, 0);
5030 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
5031 target_ifc_len
= host_ifconf
->ifc_len
;
5032 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
5034 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
5035 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
5036 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
5038 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
5039 if (outbufsz
> MAX_STRUCT_SIZE
) {
5040 /* We can't fit all the extents into the fixed size buffer.
5041 * Allocate one that is large enough and use it instead.
5043 host_ifconf
= malloc(outbufsz
);
5045 return -TARGET_ENOMEM
;
5047 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
5050 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
5052 host_ifconf
->ifc_len
= host_ifc_len
;
5053 host_ifconf
->ifc_buf
= host_ifc_buf
;
5055 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
5056 if (!is_error(ret
)) {
5057 /* convert host ifc_len to target ifc_len */
5059 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
5060 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
5061 host_ifconf
->ifc_len
= target_ifc_len
;
5063 /* restore target ifc_buf */
5065 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
5067 /* copy struct ifconf to target user */
5069 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5071 return -TARGET_EFAULT
;
5072 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
5073 unlock_user(argptr
, arg
, target_size
);
5075 /* copy ifreq[] to target user */
5077 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
5078 for (i
= 0; i
< nb_ifreq
; i
++) {
5079 thunk_convert(argptr
+ i
* target_ifreq_size
,
5080 host_ifc_buf
+ i
* sizeof(struct ifreq
),
5081 ifreq_arg_type
, THUNK_TARGET
);
5083 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
5093 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5094 int cmd
, abi_long arg
)
5097 struct dm_ioctl
*host_dm
;
5098 abi_long guest_data
;
5099 uint32_t guest_data_size
;
5101 const argtype
*arg_type
= ie
->arg_type
;
5103 void *big_buf
= NULL
;
5107 target_size
= thunk_type_size(arg_type
, 0);
5108 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5110 ret
= -TARGET_EFAULT
;
5113 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5114 unlock_user(argptr
, arg
, 0);
5116 /* buf_temp is too small, so fetch things into a bigger buffer */
5117 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
5118 memcpy(big_buf
, buf_temp
, target_size
);
5122 guest_data
= arg
+ host_dm
->data_start
;
5123 if ((guest_data
- arg
) < 0) {
5124 ret
= -TARGET_EINVAL
;
5127 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5128 host_data
= (char*)host_dm
+ host_dm
->data_start
;
5130 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
5132 ret
= -TARGET_EFAULT
;
5136 switch (ie
->host_cmd
) {
5138 case DM_LIST_DEVICES
:
5141 case DM_DEV_SUSPEND
:
5144 case DM_TABLE_STATUS
:
5145 case DM_TABLE_CLEAR
:
5147 case DM_LIST_VERSIONS
:
5151 case DM_DEV_SET_GEOMETRY
:
5152 /* data contains only strings */
5153 memcpy(host_data
, argptr
, guest_data_size
);
5156 memcpy(host_data
, argptr
, guest_data_size
);
5157 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
5161 void *gspec
= argptr
;
5162 void *cur_data
= host_data
;
5163 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5164 int spec_size
= thunk_type_size(arg_type
, 0);
5167 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5168 struct dm_target_spec
*spec
= cur_data
;
5172 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
5173 slen
= strlen((char*)gspec
+ spec_size
) + 1;
5175 spec
->next
= sizeof(*spec
) + slen
;
5176 strcpy((char*)&spec
[1], gspec
+ spec_size
);
5178 cur_data
+= spec
->next
;
5183 ret
= -TARGET_EINVAL
;
5184 unlock_user(argptr
, guest_data
, 0);
5187 unlock_user(argptr
, guest_data
, 0);
5189 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5190 if (!is_error(ret
)) {
5191 guest_data
= arg
+ host_dm
->data_start
;
5192 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5193 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
5194 switch (ie
->host_cmd
) {
5199 case DM_DEV_SUSPEND
:
5202 case DM_TABLE_CLEAR
:
5204 case DM_DEV_SET_GEOMETRY
:
5205 /* no return data */
5207 case DM_LIST_DEVICES
:
5209 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
5210 uint32_t remaining_data
= guest_data_size
;
5211 void *cur_data
= argptr
;
5212 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
5213 int nl_size
= 12; /* can't use thunk_size due to alignment */
5216 uint32_t next
= nl
->next
;
5218 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
5220 if (remaining_data
< nl
->next
) {
5221 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5224 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
5225 strcpy(cur_data
+ nl_size
, nl
->name
);
5226 cur_data
+= nl
->next
;
5227 remaining_data
-= nl
->next
;
5231 nl
= (void*)nl
+ next
;
5236 case DM_TABLE_STATUS
:
5238 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
5239 void *cur_data
= argptr
;
5240 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5241 int spec_size
= thunk_type_size(arg_type
, 0);
5244 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5245 uint32_t next
= spec
->next
;
5246 int slen
= strlen((char*)&spec
[1]) + 1;
5247 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
5248 if (guest_data_size
< spec
->next
) {
5249 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5252 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
5253 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
5254 cur_data
= argptr
+ spec
->next
;
5255 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
5261 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
5262 int count
= *(uint32_t*)hdata
;
5263 uint64_t *hdev
= hdata
+ 8;
5264 uint64_t *gdev
= argptr
+ 8;
5267 *(uint32_t*)argptr
= tswap32(count
);
5268 for (i
= 0; i
< count
; i
++) {
5269 *gdev
= tswap64(*hdev
);
5275 case DM_LIST_VERSIONS
:
5277 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
5278 uint32_t remaining_data
= guest_data_size
;
5279 void *cur_data
= argptr
;
5280 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
5281 int vers_size
= thunk_type_size(arg_type
, 0);
5284 uint32_t next
= vers
->next
;
5286 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
5288 if (remaining_data
< vers
->next
) {
5289 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5292 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
5293 strcpy(cur_data
+ vers_size
, vers
->name
);
5294 cur_data
+= vers
->next
;
5295 remaining_data
-= vers
->next
;
5299 vers
= (void*)vers
+ next
;
5304 unlock_user(argptr
, guest_data
, 0);
5305 ret
= -TARGET_EINVAL
;
5308 unlock_user(argptr
, guest_data
, guest_data_size
);
5310 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5312 ret
= -TARGET_EFAULT
;
5315 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5316 unlock_user(argptr
, arg
, target_size
);
/* Handle BLKPG block-device partition ioctls.  The guest's struct
 * blkpg_ioctl_arg contains a pointer to a payload (struct blkpg_partition
 * for the ops we support); both levels must be converted to host layout
 * and the inner pointer swizzled to a host-side copy before the ioctl.
 */
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    struct blkpg_ioctl_arg *host_blkpg = (void *)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
5376 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5377 int fd
, int cmd
, abi_long arg
)
5379 const argtype
*arg_type
= ie
->arg_type
;
5380 const StructEntry
*se
;
5381 const argtype
*field_types
;
5382 const int *dst_offsets
, *src_offsets
;
5385 abi_ulong
*target_rt_dev_ptr
;
5386 unsigned long *host_rt_dev_ptr
;
5390 assert(ie
->access
== IOC_W
);
5391 assert(*arg_type
== TYPE_PTR
);
5393 assert(*arg_type
== TYPE_STRUCT
);
5394 target_size
= thunk_type_size(arg_type
, 0);
5395 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5397 return -TARGET_EFAULT
;
5400 assert(*arg_type
== (int)STRUCT_rtentry
);
5401 se
= struct_entries
+ *arg_type
++;
5402 assert(se
->convert
[0] == NULL
);
5403 /* convert struct here to be able to catch rt_dev string */
5404 field_types
= se
->field_types
;
5405 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
5406 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
5407 for (i
= 0; i
< se
->nb_fields
; i
++) {
5408 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
5409 assert(*field_types
== TYPE_PTRVOID
);
5410 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
5411 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
5412 if (*target_rt_dev_ptr
!= 0) {
5413 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
5414 tswapal(*target_rt_dev_ptr
));
5415 if (!*host_rt_dev_ptr
) {
5416 unlock_user(argptr
, arg
, 0);
5417 return -TARGET_EFAULT
;
5420 *host_rt_dev_ptr
= 0;
5425 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
5426 argptr
+ src_offsets
[i
],
5427 field_types
, THUNK_HOST
);
5429 unlock_user(argptr
, arg
, 0);
5431 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5432 if (*host_rt_dev_ptr
!= 0) {
5433 unlock_user((void *)*host_rt_dev_ptr
,
5434 *target_rt_dev_ptr
, 0);
5439 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5440 int fd
, int cmd
, abi_long arg
)
5442 int sig
= target_to_host_signal(arg
);
5443 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
/* Table of all supported ioctls, generated from ioctls.h.  Each entry maps
 * a target command number to the host command, an access mode, an optional
 * special-case handler, and the thunk argument types.  The table is
 * terminated by a zeroed sentinel entry, which do_ioctl() relies on.
 */
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
#include "ioctls.h"
    { 0, 0, },
};
/* ??? Implement proper locking for ioctls.  */
/* do_ioctl() Must return target values and target errnos. */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* Linear search of the (sentinel-terminated) ioctl table. */
    ie = ioctl_entries;
    for (;;) {
        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
#if defined(DEBUG)
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
#endif
    /* Entries with a special-case handler bypass the generic thunking. */
    if (ie->do_ioctl) {
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    }

    switch (arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
        /* scalar argument: pass through unchanged */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch (ie->access) {
        case IOC_R:
            /* host fills buf_temp; convert result back to the guest */
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* convert guest struct into buf_temp, then call */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* convert in, call, convert back out */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
/* termios c_iflag translation: target mask/bits <-> host mask/bits.
 * Terminated by an all-zero sentinel, required by the bitmask helpers. */
static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
    { 0, 0, 0, 0 }
};
/* termios c_oflag translation.  Multi-bit delay fields (NLDLY, CRDLY,
 * TABDLY, ...) need one entry per value within the field's mask. */
static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
    { 0, 0, 0, 0 }
};
/* termios c_cflag translation.  CBAUD and CSIZE are multi-bit fields,
 * hence one entry per baud rate / character size value. */
static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
    { 0, 0, 0, 0 }
};
/* termios c_lflag translation: all single-bit flags. */
static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
    { 0, 0, 0, 0 }
};
/* Convert a guest struct target_termios (in guest byte order) into a
 * host struct termios: translate the four flag words through the
 * bitmask tables and remap the control-character array index by index.
 */
static void target_to_host_termios(void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    /* c_cc index values differ between target and host ABIs, so each
     * control character is copied individually; unmapped slots stay 0. */
    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
/* Inverse of target_to_host_termios: convert a host struct termios into
 * the guest layout and byte order.
 */
static void host_to_target_termios(void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    /* remap control characters one by one (indices differ per ABI) */
    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
/* Thunk descriptor for termios: conversion is done by the two custom
 * functions above rather than by generic field thunking. */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios),
               __alignof__(struct host_termios) },
};
/* mmap() flags translation between target and host values.
 * Terminated by a zero sentinel like the termios tables. */
static bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
      MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
      MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
      MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
      MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
      MAP_NORESERVE, MAP_NORESERVE },
    { 0, 0, 0, 0 }
};
5732 #if defined(TARGET_I386)
5734 /* NOTE: there is really one LDT for all the threads */
5735 static uint8_t *ldt_table
;
/* modify_ldt(func=0): copy the emulated LDT (ldt_table) out to the guest
 * buffer, truncating to the caller-supplied byte count.  Returns the
 * number of bytes copied, or -TARGET_EFAULT on a bad guest pointer.
 */
static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
{
    int size;
    void *p;

    /* No LDT allocated yet: nothing to read. */
    if (!ldt_table)
        return 0;
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
    if (!p)
        return -TARGET_EFAULT;
    /* ??? Should this be byteswapped?  */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
    return size;
}
5756 /* XXX: add locking support */
5757 static abi_long
write_ldt(CPUX86State
*env
,
5758 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
5760 struct target_modify_ldt_ldt_s ldt_info
;
5761 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5762 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5763 int seg_not_present
, useable
, lm
;
5764 uint32_t *lp
, entry_1
, entry_2
;
5766 if (bytecount
!= sizeof(ldt_info
))
5767 return -TARGET_EINVAL
;
5768 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
5769 return -TARGET_EFAULT
;
5770 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5771 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5772 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5773 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5774 unlock_user_struct(target_ldt_info
, ptr
, 0);
5776 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
5777 return -TARGET_EINVAL
;
5778 seg_32bit
= ldt_info
.flags
& 1;
5779 contents
= (ldt_info
.flags
>> 1) & 3;
5780 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5781 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5782 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5783 useable
= (ldt_info
.flags
>> 6) & 1;
5787 lm
= (ldt_info
.flags
>> 7) & 1;
5789 if (contents
== 3) {
5791 return -TARGET_EINVAL
;
5792 if (seg_not_present
== 0)
5793 return -TARGET_EINVAL
;
5795 /* allocate the LDT */
5797 env
->ldt
.base
= target_mmap(0,
5798 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
5799 PROT_READ
|PROT_WRITE
,
5800 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
5801 if (env
->ldt
.base
== -1)
5802 return -TARGET_ENOMEM
;
5803 memset(g2h(env
->ldt
.base
), 0,
5804 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
5805 env
->ldt
.limit
= 0xffff;
5806 ldt_table
= g2h(env
->ldt
.base
);
5809 /* NOTE: same code as Linux kernel */
5810 /* Allow LDTs to be cleared by the user. */
5811 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5814 read_exec_only
== 1 &&
5816 limit_in_pages
== 0 &&
5817 seg_not_present
== 1 &&
5825 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5826 (ldt_info
.limit
& 0x0ffff);
5827 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5828 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5829 (ldt_info
.limit
& 0xf0000) |
5830 ((read_exec_only
^ 1) << 9) |
5832 ((seg_not_present
^ 1) << 15) |
5834 (limit_in_pages
<< 23) |
5838 entry_2
|= (useable
<< 20);
5840 /* Install the new entry ... */
5842 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
5843 lp
[0] = tswap32(entry_1
);
5844 lp
[1] = tswap32(entry_2
);
/* specific and weird i386 syscalls */
/* modify_ldt(2) dispatcher: func 0 reads the LDT, func 1 writes an entry
 * using the legacy (old-mode) flag semantics, func 0x11 writes with the
 * modern semantics.  Anything else is unsupported.
 */
static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
                              unsigned long bytecount)
{
    abi_long ret;

    switch (func) {
    case 0:
        ret = read_ldt(ptr, bytecount);
        break;
    case 1:
        ret = write_ldt(env, ptr, bytecount, 1);
        break;
    case 0x11:
        ret = write_ldt(env, ptr, bytecount, 0);
        break;
    default:
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
5871 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5872 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5874 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5875 struct target_modify_ldt_ldt_s ldt_info
;
5876 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5877 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5878 int seg_not_present
, useable
, lm
;
5879 uint32_t *lp
, entry_1
, entry_2
;
5882 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5883 if (!target_ldt_info
)
5884 return -TARGET_EFAULT
;
5885 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5886 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5887 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5888 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5889 if (ldt_info
.entry_number
== -1) {
5890 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
5891 if (gdt_table
[i
] == 0) {
5892 ldt_info
.entry_number
= i
;
5893 target_ldt_info
->entry_number
= tswap32(i
);
5898 unlock_user_struct(target_ldt_info
, ptr
, 1);
5900 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
5901 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
5902 return -TARGET_EINVAL
;
5903 seg_32bit
= ldt_info
.flags
& 1;
5904 contents
= (ldt_info
.flags
>> 1) & 3;
5905 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5906 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5907 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5908 useable
= (ldt_info
.flags
>> 6) & 1;
5912 lm
= (ldt_info
.flags
>> 7) & 1;
5915 if (contents
== 3) {
5916 if (seg_not_present
== 0)
5917 return -TARGET_EINVAL
;
5920 /* NOTE: same code as Linux kernel */
5921 /* Allow LDTs to be cleared by the user. */
5922 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5923 if ((contents
== 0 &&
5924 read_exec_only
== 1 &&
5926 limit_in_pages
== 0 &&
5927 seg_not_present
== 1 &&
5935 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5936 (ldt_info
.limit
& 0x0ffff);
5937 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5938 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5939 (ldt_info
.limit
& 0xf0000) |
5940 ((read_exec_only
^ 1) << 9) |
5942 ((seg_not_present
^ 1) << 15) |
5944 (limit_in_pages
<< 23) |
5949 /* Install the new entry ... */
5951 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
5952 lp
[0] = tswap32(entry_1
);
5953 lp
[1] = tswap32(entry_2
);
/* get_thread_area(2): decode the GDT TLS descriptor selected by the
 * guest's entry_number back into the modify_ldt_ldt_s user format
 * (base/limit/flag bits), writing the result into the guest struct.
 */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    /* Only the TLS slots of the GDT may be inspected. */
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Unpack descriptor bits (inverse of the encoding in
     * do_set_thread_area). */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
6003 #endif /* TARGET_I386 && TARGET_ABI32 */
6005 #ifndef TARGET_ABI32
/* x86-64 arch_prctl(2): get/set the FS or GS segment base for the guest.
 * SET loads a null selector and stores the base directly in the CPU
 * state; GET writes the current base to the guest address.
 */
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    abi_long ret = 0;
    abi_ulong val;
    int idx;

    switch (code) {
    case TARGET_ARCH_SET_GS:
    case TARGET_ARCH_SET_FS:
        if (code == TARGET_ARCH_SET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        cpu_x86_load_seg(env, idx, 0);
        env->segs[idx].base = addr;
        break;
    case TARGET_ARCH_GET_GS:
    case TARGET_ARCH_GET_FS:
        if (code == TARGET_ARCH_GET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        val = env->segs[idx].base;
        if (put_user(val, addr, abi_ulong))
            ret = -TARGET_EFAULT;
        break;
    default:
        ret = -TARGET_EINVAL;
        break;
    }
    return ret;
}
6040 #endif /* defined(TARGET_I386) */
6042 #define NEW_STACK_SIZE 0x40000
6045 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
6048 pthread_mutex_t mutex
;
6049 pthread_cond_t cond
;
6052 abi_ulong child_tidptr
;
6053 abi_ulong parent_tidptr
;
/* Entry point for a new guest thread created by do_fork(CLONE_VM).
 * Publishes the child's tid, hands back readiness via the info mutex/
 * condvar, waits for the parent to finish TLS setup (clone_lock), then
 * enters the CPU emulation loop and never returns.
 */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    env = info->env;
    cpu = ENV_GET_CPU(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = gettid();
    cpu->host_tid = info->tid;
    task_settid(ts);
    /* Report our tid wherever the clone flags asked for it. */
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
6090 /* do_fork() Must return host values and target errnos (unlike most
6091 do_*() functions). */
6092 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
6093 abi_ulong parent_tidptr
, target_ulong newtls
,
6094 abi_ulong child_tidptr
)
6096 CPUState
*cpu
= ENV_GET_CPU(env
);
6100 CPUArchState
*new_env
;
6103 flags
&= ~CLONE_IGNORED_FLAGS
;
6105 /* Emulate vfork() with fork() */
6106 if (flags
& CLONE_VFORK
)
6107 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
6109 if (flags
& CLONE_VM
) {
6110 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
6111 new_thread_info info
;
6112 pthread_attr_t attr
;
6114 if (((flags
& CLONE_THREAD_FLAGS
) != CLONE_THREAD_FLAGS
) ||
6115 (flags
& CLONE_INVALID_THREAD_FLAGS
)) {
6116 return -TARGET_EINVAL
;
6119 ts
= g_new0(TaskState
, 1);
6120 init_task_state(ts
);
6121 /* we create a new CPU instance. */
6122 new_env
= cpu_copy(env
);
6123 /* Init regs that differ from the parent. */
6124 cpu_clone_regs(new_env
, newsp
);
6125 new_cpu
= ENV_GET_CPU(new_env
);
6126 new_cpu
->opaque
= ts
;
6127 ts
->bprm
= parent_ts
->bprm
;
6128 ts
->info
= parent_ts
->info
;
6129 ts
->signal_mask
= parent_ts
->signal_mask
;
6131 if (flags
& CLONE_CHILD_CLEARTID
) {
6132 ts
->child_tidptr
= child_tidptr
;
6135 if (flags
& CLONE_SETTLS
) {
6136 cpu_set_tls (new_env
, newtls
);
6139 /* Grab a mutex so that thread setup appears atomic. */
6140 pthread_mutex_lock(&clone_lock
);
6142 memset(&info
, 0, sizeof(info
));
6143 pthread_mutex_init(&info
.mutex
, NULL
);
6144 pthread_mutex_lock(&info
.mutex
);
6145 pthread_cond_init(&info
.cond
, NULL
);
6147 if (flags
& CLONE_CHILD_SETTID
) {
6148 info
.child_tidptr
= child_tidptr
;
6150 if (flags
& CLONE_PARENT_SETTID
) {
6151 info
.parent_tidptr
= parent_tidptr
;
6154 ret
= pthread_attr_init(&attr
);
6155 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
6156 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
6157 /* It is not safe to deliver signals until the child has finished
6158 initializing, so temporarily block all signals. */
6159 sigfillset(&sigmask
);
6160 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
6162 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
6163 /* TODO: Free new CPU state if thread creation failed. */
6165 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
6166 pthread_attr_destroy(&attr
);
6168 /* Wait for the child to initialize. */
6169 pthread_cond_wait(&info
.cond
, &info
.mutex
);
6174 pthread_mutex_unlock(&info
.mutex
);
6175 pthread_cond_destroy(&info
.cond
);
6176 pthread_mutex_destroy(&info
.mutex
);
6177 pthread_mutex_unlock(&clone_lock
);
6179 /* if no CLONE_VM, we consider it is a fork */
6180 if (flags
& CLONE_INVALID_FORK_FLAGS
) {
6181 return -TARGET_EINVAL
;
6184 /* We can't support custom termination signals */
6185 if ((flags
& CSIGNAL
) != TARGET_SIGCHLD
) {
6186 return -TARGET_EINVAL
;
6189 if (block_signals()) {
6190 return -TARGET_ERESTARTSYS
;
6196 /* Child Process. */
6198 cpu_clone_regs(env
, newsp
);
6200 /* There is a race condition here. The parent process could
6201 theoretically read the TID in the child process before the child
6202 tid is set. This would require using either ptrace
6203 (not implemented) or having *_tidptr to point at a shared memory
6204 mapping. We can't repeat the spinlock hack used above because
6205 the child process gets its own copy of the lock. */
6206 if (flags
& CLONE_CHILD_SETTID
)
6207 put_user_u32(gettid(), child_tidptr
);
6208 if (flags
& CLONE_PARENT_SETTID
)
6209 put_user_u32(gettid(), parent_tidptr
);
6210 ts
= (TaskState
*)cpu
->opaque
;
6211 if (flags
& CLONE_SETTLS
)
6212 cpu_set_tls (env
, newtls
);
6213 if (flags
& CLONE_CHILD_CLEARTID
)
6214 ts
->child_tidptr
= child_tidptr
;
6222 /* warning : doesn't handle linux specific flags... */
6223 static int target_to_host_fcntl_cmd(int cmd
)
6226 case TARGET_F_DUPFD
:
6227 case TARGET_F_GETFD
:
6228 case TARGET_F_SETFD
:
6229 case TARGET_F_GETFL
:
6230 case TARGET_F_SETFL
:
6232 case TARGET_F_GETLK
:
6234 case TARGET_F_SETLK
:
6236 case TARGET_F_SETLKW
:
6238 case TARGET_F_GETOWN
:
6240 case TARGET_F_SETOWN
:
6242 case TARGET_F_GETSIG
:
6244 case TARGET_F_SETSIG
:
6246 #if TARGET_ABI_BITS == 32
6247 case TARGET_F_GETLK64
:
6249 case TARGET_F_SETLK64
:
6251 case TARGET_F_SETLKW64
:
6254 case TARGET_F_SETLEASE
:
6256 case TARGET_F_GETLEASE
:
6258 #ifdef F_DUPFD_CLOEXEC
6259 case TARGET_F_DUPFD_CLOEXEC
:
6260 return F_DUPFD_CLOEXEC
;
6262 case TARGET_F_NOTIFY
:
6265 case TARGET_F_GETOWN_EX
:
6269 case TARGET_F_SETOWN_EX
:
6273 case TARGET_F_SETPIPE_SZ
:
6274 return F_SETPIPE_SZ
;
6275 case TARGET_F_GETPIPE_SZ
:
6276 return F_GETPIPE_SZ
;
6279 return -TARGET_EINVAL
;
6281 return -TARGET_EINVAL
;
6284 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
6285 static const bitmask_transtbl flock_tbl
[] = {
6286 TRANSTBL_CONVERT(F_RDLCK
),
6287 TRANSTBL_CONVERT(F_WRLCK
),
6288 TRANSTBL_CONVERT(F_UNLCK
),
6289 TRANSTBL_CONVERT(F_EXLCK
),
6290 TRANSTBL_CONVERT(F_SHLCK
),
6294 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
6295 abi_ulong target_flock_addr
)
6297 struct target_flock
*target_fl
;
6300 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6301 return -TARGET_EFAULT
;
6304 __get_user(l_type
, &target_fl
->l_type
);
6305 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6306 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6307 __get_user(fl
->l_start
, &target_fl
->l_start
);
6308 __get_user(fl
->l_len
, &target_fl
->l_len
);
6309 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6310 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6314 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
6315 const struct flock64
*fl
)
6317 struct target_flock
*target_fl
;
6320 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6321 return -TARGET_EFAULT
;
6324 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6325 __put_user(l_type
, &target_fl
->l_type
);
6326 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6327 __put_user(fl
->l_start
, &target_fl
->l_start
);
6328 __put_user(fl
->l_len
, &target_fl
->l_len
);
6329 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6330 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6334 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
6335 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
6337 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6338 static inline abi_long
copy_from_user_eabi_flock64(struct flock64
*fl
,
6339 abi_ulong target_flock_addr
)
6341 struct target_eabi_flock64
*target_fl
;
6344 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6345 return -TARGET_EFAULT
;
6348 __get_user(l_type
, &target_fl
->l_type
);
6349 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6350 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6351 __get_user(fl
->l_start
, &target_fl
->l_start
);
6352 __get_user(fl
->l_len
, &target_fl
->l_len
);
6353 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6354 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6358 static inline abi_long
copy_to_user_eabi_flock64(abi_ulong target_flock_addr
,
6359 const struct flock64
*fl
)
6361 struct target_eabi_flock64
*target_fl
;
6364 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6365 return -TARGET_EFAULT
;
6368 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6369 __put_user(l_type
, &target_fl
->l_type
);
6370 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6371 __put_user(fl
->l_start
, &target_fl
->l_start
);
6372 __put_user(fl
->l_len
, &target_fl
->l_len
);
6373 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6374 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6379 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
6380 abi_ulong target_flock_addr
)
6382 struct target_flock64
*target_fl
;
6385 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6386 return -TARGET_EFAULT
;
6389 __get_user(l_type
, &target_fl
->l_type
);
6390 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6391 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6392 __get_user(fl
->l_start
, &target_fl
->l_start
);
6393 __get_user(fl
->l_len
, &target_fl
->l_len
);
6394 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6395 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6399 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
6400 const struct flock64
*fl
)
6402 struct target_flock64
*target_fl
;
6405 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6406 return -TARGET_EFAULT
;
6409 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6410 __put_user(l_type
, &target_fl
->l_type
);
6411 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6412 __put_user(fl
->l_start
, &target_fl
->l_start
);
6413 __put_user(fl
->l_len
, &target_fl
->l_len
);
6414 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6415 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6419 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
6421 struct flock64 fl64
;
6423 struct f_owner_ex fox
;
6424 struct target_f_owner_ex
*target_fox
;
6427 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
6429 if (host_cmd
== -TARGET_EINVAL
)
6433 case TARGET_F_GETLK
:
6434 ret
= copy_from_user_flock(&fl64
, arg
);
6438 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6440 ret
= copy_to_user_flock(arg
, &fl64
);
6444 case TARGET_F_SETLK
:
6445 case TARGET_F_SETLKW
:
6446 ret
= copy_from_user_flock(&fl64
, arg
);
6450 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6453 case TARGET_F_GETLK64
:
6454 ret
= copy_from_user_flock64(&fl64
, arg
);
6458 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6460 ret
= copy_to_user_flock64(arg
, &fl64
);
6463 case TARGET_F_SETLK64
:
6464 case TARGET_F_SETLKW64
:
6465 ret
= copy_from_user_flock64(&fl64
, arg
);
6469 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6472 case TARGET_F_GETFL
:
6473 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6475 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
6479 case TARGET_F_SETFL
:
6480 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
6481 target_to_host_bitmask(arg
,
6486 case TARGET_F_GETOWN_EX
:
6487 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6489 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
6490 return -TARGET_EFAULT
;
6491 target_fox
->type
= tswap32(fox
.type
);
6492 target_fox
->pid
= tswap32(fox
.pid
);
6493 unlock_user_struct(target_fox
, arg
, 1);
6499 case TARGET_F_SETOWN_EX
:
6500 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
6501 return -TARGET_EFAULT
;
6502 fox
.type
= tswap32(target_fox
->type
);
6503 fox
.pid
= tswap32(target_fox
->pid
);
6504 unlock_user_struct(target_fox
, arg
, 0);
6505 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6509 case TARGET_F_SETOWN
:
6510 case TARGET_F_GETOWN
:
6511 case TARGET_F_SETSIG
:
6512 case TARGET_F_GETSIG
:
6513 case TARGET_F_SETLEASE
:
6514 case TARGET_F_GETLEASE
:
6515 case TARGET_F_SETPIPE_SZ
:
6516 case TARGET_F_GETPIPE_SZ
:
6517 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6521 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
6529 static inline int high2lowuid(int uid
)
6537 static inline int high2lowgid(int gid
)
6545 static inline int low2highuid(int uid
)
6547 if ((int16_t)uid
== -1)
6553 static inline int low2highgid(int gid
)
6555 if ((int16_t)gid
== -1)
6560 static inline int tswapid(int id
)
6565 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6567 #else /* !USE_UID16 */
6568 static inline int high2lowuid(int uid
)
6572 static inline int high2lowgid(int gid
)
6576 static inline int low2highuid(int uid
)
6580 static inline int low2highgid(int gid
)
6584 static inline int tswapid(int id
)
6589 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6591 #endif /* USE_UID16 */
6593 /* We must do direct syscalls for setting UID/GID, because we want to
6594 * implement the Linux system call semantics of "change only for this thread",
6595 * not the libc/POSIX semantics of "change for all threads in process".
6596 * (See http://ewontfix.com/17/ for more details.)
6597 * We use the 32-bit version of the syscalls if present; if it is not
6598 * then either the host architecture supports 32-bit UIDs natively with
6599 * the standard syscall, or the 16-bit UID is the best we can do.
6601 #ifdef __NR_setuid32
6602 #define __NR_sys_setuid __NR_setuid32
6604 #define __NR_sys_setuid __NR_setuid
6606 #ifdef __NR_setgid32
6607 #define __NR_sys_setgid __NR_setgid32
6609 #define __NR_sys_setgid __NR_setgid
6611 #ifdef __NR_setresuid32
6612 #define __NR_sys_setresuid __NR_setresuid32
6614 #define __NR_sys_setresuid __NR_setresuid
6616 #ifdef __NR_setresgid32
6617 #define __NR_sys_setresgid __NR_setresgid32
6619 #define __NR_sys_setresgid __NR_setresgid
6622 _syscall1(int, sys_setuid
, uid_t
, uid
)
6623 _syscall1(int, sys_setgid
, gid_t
, gid
)
6624 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
6625 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
6627 void syscall_init(void)
6630 const argtype
*arg_type
;
6634 thunk_init(STRUCT_MAX
);
6636 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6637 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6638 #include "syscall_types.h"
6640 #undef STRUCT_SPECIAL
6642 /* Build target_to_host_errno_table[] table from
6643 * host_to_target_errno_table[]. */
6644 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
6645 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
6648 /* we patch the ioctl size if necessary. We rely on the fact that
6649 no ioctl has all the bits at '1' in the size field */
6651 while (ie
->target_cmd
!= 0) {
6652 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
6653 TARGET_IOC_SIZEMASK
) {
6654 arg_type
= ie
->arg_type
;
6655 if (arg_type
[0] != TYPE_PTR
) {
6656 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
6661 size
= thunk_type_size(arg_type
, 0);
6662 ie
->target_cmd
= (ie
->target_cmd
&
6663 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
6664 (size
<< TARGET_IOC_SIZESHIFT
);
6667 /* automatic consistency check if same arch */
6668 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6669 (defined(__x86_64__) && defined(TARGET_X86_64))
6670 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
6671 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6672 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#if TARGET_ABI_BITS == 32
/* Reassemble a 64-bit offset passed in two 32-bit registers; the word
   order depends on the target's endianness.  */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABI: the offset already fits in one register; word1 is unused.  */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* truncate64: on targets that pass 64-bit values in aligned register
   pairs, skip the alignment-padding argument before reassembling.  */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair alignment handling as truncate64.  */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
6723 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
6724 abi_ulong target_addr
)
6726 struct target_timespec
*target_ts
;
6728 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
6729 return -TARGET_EFAULT
;
6730 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6731 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6732 unlock_user_struct(target_ts
, target_addr
, 0);
6736 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
6737 struct timespec
*host_ts
)
6739 struct target_timespec
*target_ts
;
6741 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
6742 return -TARGET_EFAULT
;
6743 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6744 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6745 unlock_user_struct(target_ts
, target_addr
, 1);
6749 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
6750 abi_ulong target_addr
)
6752 struct target_itimerspec
*target_itspec
;
6754 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
6755 return -TARGET_EFAULT
;
6758 host_itspec
->it_interval
.tv_sec
=
6759 tswapal(target_itspec
->it_interval
.tv_sec
);
6760 host_itspec
->it_interval
.tv_nsec
=
6761 tswapal(target_itspec
->it_interval
.tv_nsec
);
6762 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
6763 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
6765 unlock_user_struct(target_itspec
, target_addr
, 1);
6769 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
6770 struct itimerspec
*host_its
)
6772 struct target_itimerspec
*target_itspec
;
6774 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
6775 return -TARGET_EFAULT
;
6778 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
6779 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
6781 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
6782 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
6784 unlock_user_struct(target_itspec
, target_addr
, 0);
6788 static inline abi_long
target_to_host_timex(struct timex
*host_tx
,
6789 abi_long target_addr
)
6791 struct target_timex
*target_tx
;
6793 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
6794 return -TARGET_EFAULT
;
6797 __get_user(host_tx
->modes
, &target_tx
->modes
);
6798 __get_user(host_tx
->offset
, &target_tx
->offset
);
6799 __get_user(host_tx
->freq
, &target_tx
->freq
);
6800 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6801 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
6802 __get_user(host_tx
->status
, &target_tx
->status
);
6803 __get_user(host_tx
->constant
, &target_tx
->constant
);
6804 __get_user(host_tx
->precision
, &target_tx
->precision
);
6805 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6806 __get_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6807 __get_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6808 __get_user(host_tx
->tick
, &target_tx
->tick
);
6809 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6810 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
6811 __get_user(host_tx
->shift
, &target_tx
->shift
);
6812 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
6813 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
6814 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
6815 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6816 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6817 __get_user(host_tx
->tai
, &target_tx
->tai
);
6819 unlock_user_struct(target_tx
, target_addr
, 0);
6823 static inline abi_long
host_to_target_timex(abi_long target_addr
,
6824 struct timex
*host_tx
)
6826 struct target_timex
*target_tx
;
6828 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
6829 return -TARGET_EFAULT
;
6832 __put_user(host_tx
->modes
, &target_tx
->modes
);
6833 __put_user(host_tx
->offset
, &target_tx
->offset
);
6834 __put_user(host_tx
->freq
, &target_tx
->freq
);
6835 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6836 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
6837 __put_user(host_tx
->status
, &target_tx
->status
);
6838 __put_user(host_tx
->constant
, &target_tx
->constant
);
6839 __put_user(host_tx
->precision
, &target_tx
->precision
);
6840 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6841 __put_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6842 __put_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6843 __put_user(host_tx
->tick
, &target_tx
->tick
);
6844 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6845 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
6846 __put_user(host_tx
->shift
, &target_tx
->shift
);
6847 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
6848 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
6849 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
6850 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6851 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6852 __put_user(host_tx
->tai
, &target_tx
->tai
);
6854 unlock_user_struct(target_tx
, target_addr
, 1);
6859 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
6860 abi_ulong target_addr
)
6862 struct target_sigevent
*target_sevp
;
6864 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
6865 return -TARGET_EFAULT
;
6868 /* This union is awkward on 64 bit systems because it has a 32 bit
6869 * integer and a pointer in it; we follow the conversion approach
6870 * used for handling sigval types in signal.c so the guest should get
6871 * the correct value back even if we did a 64 bit byteswap and it's
6872 * using the 32 bit integer.
6874 host_sevp
->sigev_value
.sival_ptr
=
6875 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
6876 host_sevp
->sigev_signo
=
6877 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
6878 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
6879 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
6881 unlock_user_struct(target_sevp
, target_addr
, 1);
#if defined(TARGET_NR_mlockall)
/* Translate the guest mlockall() flag bits to the host MCL_* bits.  */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }

    return result;
}
#endif
6900 static inline abi_long
host_to_target_stat64(void *cpu_env
,
6901 abi_ulong target_addr
,
6902 struct stat
*host_st
)
6904 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6905 if (((CPUARMState
*)cpu_env
)->eabi
) {
6906 struct target_eabi_stat64
*target_st
;
6908 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
6909 return -TARGET_EFAULT
;
6910 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
6911 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
6912 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
6913 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6914 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
6916 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
6917 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
6918 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
6919 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
6920 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
6921 __put_user(host_st
->st_size
, &target_st
->st_size
);
6922 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
6923 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
6924 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
6925 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
6926 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
6927 unlock_user_struct(target_st
, target_addr
, 1);
6931 #if defined(TARGET_HAS_STRUCT_STAT64)
6932 struct target_stat64
*target_st
;
6934 struct target_stat
*target_st
;
6937 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
6938 return -TARGET_EFAULT
;
6939 memset(target_st
, 0, sizeof(*target_st
));
6940 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
6941 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
6942 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6943 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
6945 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
6946 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
6947 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
6948 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
6949 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
6950 /* XXX: better use of kernel struct */
6951 __put_user(host_st
->st_size
, &target_st
->st_size
);
6952 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
6953 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
6954 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
6955 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
6956 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
6957 unlock_user_struct(target_st
, target_addr
, 1);
6963 /* ??? Using host futex calls even when target atomic operations
6964 are not really atomic probably breaks things. However implementing
6965 futexes locally would make futexes shared between multiple processes
6966 tricky. However they're probably useless because guest atomic
6967 operations won't work either. */
6968 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
6969 target_ulong uaddr2
, int val3
)
6971 struct timespec ts
, *pts
;
6974 /* ??? We assume FUTEX_* constants are the same on both host
6976 #ifdef FUTEX_CMD_MASK
6977 base_op
= op
& FUTEX_CMD_MASK
;
6983 case FUTEX_WAIT_BITSET
:
6986 target_to_host_timespec(pts
, timeout
);
6990 return get_errno(safe_futex(g2h(uaddr
), op
, tswap32(val
),
6993 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6995 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6997 case FUTEX_CMP_REQUEUE
:
6999 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7000 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7001 But the prototype takes a `struct timespec *'; insert casts
7002 to satisfy the compiler. We do not need to tswap TIMEOUT
7003 since it's not compared to guest memory. */
7004 pts
= (struct timespec
*)(uintptr_t) timeout
;
7005 return get_errno(safe_futex(g2h(uaddr
), op
, val
, pts
,
7007 (base_op
== FUTEX_CMP_REQUEUE
7011 return -TARGET_ENOSYS
;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate name_to_handle_at(2): the guest supplies a file_handle buffer
   whose handle_bytes field gives its capacity; fill it from the host
   result with the 32-bit fields byteswapped.  */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate open_by_handle_at(2): duplicate the guest's file_handle,
   fix up the byteswapped fields, and translate the open flags.  */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
7102 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
/* signalfd siginfo conversion */

/* Convert one host signalfd_siginfo (info) into the guest's byte order
   (tinfo).  The only caller passes the same buffer for both, which is
   why the original's reads from the destination happened to work; all
   reads now go through the source pointer as the signature promises.  */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    /* Bug fix: test the *source* fields, not the destination's (possibly
     * uninitialized) ones.  */
    if (info->ssi_signo == SIGBUS &&
        (info->ssi_code == BUS_MCEERR_AR ||
         info->ssi_code == BUS_MCEERR_AO)) {
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    /* Bug fix: swap the source's errno, not the destination's.  */
    tinfo->ssi_errno = tswap32(info->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
7144 static abi_long
host_to_target_data_signalfd(void *buf
, size_t len
)
7148 for (i
= 0; i
< len
; i
+= sizeof(struct signalfd_siginfo
)) {
7149 host_to_target_signalfd_siginfo(buf
+ i
, buf
+ i
);
7155 static TargetFdTrans target_signalfd_trans
= {
7156 .host_to_target_data
= host_to_target_data_signalfd
,
7159 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
7162 target_sigset_t
*target_mask
;
7166 if (flags
& ~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
)) {
7167 return -TARGET_EINVAL
;
7169 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
7170 return -TARGET_EFAULT
;
7173 target_to_host_sigset(&host_mask
, target_mask
);
7175 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
7177 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
7179 fd_trans_register(ret
, &target_signalfd_trans
);
7182 unlock_user_struct(target_mask
, mask
, 0);
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
/* Emulate /proc/self/cmdline: copy the host file but drop the first
   NUL-terminated string (the qemu binary path) so the guest sees its
   own argv[0] first.  */
static int open_self_cmdline(void *cpu_env, int fd)
{
    int fd_orig = -1;
    bool word_skipped = false;

    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;
    }

    while (true) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            int e = errno;
            fd_orig = close(fd_orig);
            errno = e;
            return -1;
        } else if (nb_read == 0) {
            break;
        }

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command. */
            cp_buf = memchr(buf, 0, nb_read);
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;
            }
        }

        if (word_skipped) {
            if (write(fd, cp_buf, nb_read) != nb_read) {
                int e = errno;
                close(fd_orig);
                errno = e;
                return -1;
            }
        }
    }

    return close(fd_orig);
}
7252 static int open_self_maps(void *cpu_env
, int fd
)
7254 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7255 TaskState
*ts
= cpu
->opaque
;
7261 fp
= fopen("/proc/self/maps", "r");
7266 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7267 int fields
, dev_maj
, dev_min
, inode
;
7268 uint64_t min
, max
, offset
;
7269 char flag_r
, flag_w
, flag_x
, flag_p
;
7270 char path
[512] = "";
7271 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
7272 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
7273 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
7275 if ((fields
< 10) || (fields
> 11)) {
7278 if (h2g_valid(min
)) {
7279 int flags
= page_get_flags(h2g(min
));
7280 max
= h2g_valid(max
- 1) ? max
: (uintptr_t)g2h(GUEST_ADDR_MAX
);
7281 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
7284 if (h2g(min
) == ts
->info
->stack_limit
) {
7285 pstrcpy(path
, sizeof(path
), " [stack]");
7287 dprintf(fd
, TARGET_ABI_FMT_lx
"-" TARGET_ABI_FMT_lx
7288 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
7289 h2g(min
), h2g(max
- 1) + 1, flag_r
, flag_w
,
7290 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
7291 path
[0] ? " " : "", path
);
7301 static int open_self_stat(void *cpu_env
, int fd
)
7303 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7304 TaskState
*ts
= cpu
->opaque
;
7305 abi_ulong start_stack
= ts
->info
->start_stack
;
7308 for (i
= 0; i
< 44; i
++) {
7316 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
7317 } else if (i
== 1) {
7319 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
7320 } else if (i
== 27) {
7323 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
7325 /* for the rest, there is MasterCard */
7326 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
7330 if (write(fd
, buf
, len
) != len
) {
7338 static int open_self_auxv(void *cpu_env
, int fd
)
7340 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7341 TaskState
*ts
= cpu
->opaque
;
7342 abi_ulong auxv
= ts
->info
->saved_auxv
;
7343 abi_ulong len
= ts
->info
->auxv_len
;
7347 * Auxiliary vector is stored in target process stack.
7348 * read in whole auxv vector and copy it to file
7350 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
7354 r
= write(fd
, ptr
, len
);
7361 lseek(fd
, 0, SEEK_SET
);
7362 unlock_user(ptr
, auxv
, len
);
/* Decide whether filename names /proc/<self>/<entry> for the current
 * process -- either via the literal "self" link or via the numeric pid
 * of this process.  Returns 1 on a match, 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}
7392 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-path comparator used by the fake_open table for absolute /proc
 * entries (e.g. "/proc/net/route").  Returns 1 iff the strings match.
 */
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}
/* Emulate /proc/net/route for the guest on cross-endian configurations:
 * copy the host's routing table into fd, byte-swapping the address
 * fields (dest/gateway/mask) into guest endianness.
 * Returns 0 on success, -1 if the host file cannot be opened.
 *
 * Fixes over the previous version:
 *  - the interface name is scanned with a bounded "%15s" instead of an
 *    unbounded "%s", so an oversized name can no longer overflow the
 *    16-byte iface buffer;
 *  - the sscanf field count is checked so malformed lines are skipped
 *    instead of emitting uninitialized values.
 */
static int open_net_route(void *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */

    read = getline(&line, &len, fp);
    if (read != -1) {
        dprintf(fd, "%s", line);
    }

    /* read routes */

    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        int fields;

        fields = sscanf(line,
                        "%15s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                        iface, &dest, &gw, &flags, &refcnt, &use, &metric,
                        &mask, &mtu, &window, &irtt);
        if (fields != 11) {
            /* malformed line: skip rather than print garbage */
            continue;
        }
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
7436 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
7439 const char *filename
;
7440 int (*fill
)(void *cpu_env
, int fd
);
7441 int (*cmp
)(const char *s1
, const char *s2
);
7443 const struct fake_open
*fake_open
;
7444 static const struct fake_open fakes
[] = {
7445 { "maps", open_self_maps
, is_proc_myself
},
7446 { "stat", open_self_stat
, is_proc_myself
},
7447 { "auxv", open_self_auxv
, is_proc_myself
},
7448 { "cmdline", open_self_cmdline
, is_proc_myself
},
7449 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7450 { "/proc/net/route", open_net_route
, is_proc
},
7452 { NULL
, NULL
, NULL
}
7455 if (is_proc_myself(pathname
, "exe")) {
7456 int execfd
= qemu_getauxval(AT_EXECFD
);
7457 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
7460 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
7461 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
7466 if (fake_open
->filename
) {
7468 char filename
[PATH_MAX
];
7471 /* create temporary file to map stat to */
7472 tmpdir
= getenv("TMPDIR");
7475 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
7476 fd
= mkstemp(filename
);
7482 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
7488 lseek(fd
, 0, SEEK_SET
);
7493 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
7496 #define TIMER_MAGIC 0x0caf0000
7497 #define TIMER_MAGIC_MASK 0xffff0000
7499 /* Convert QEMU provided timer ID back to internal 16bit index format */
7500 static target_timer_t
get_timer_id(abi_long arg
)
7502 target_timer_t timerid
= arg
;
7504 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
7505 return -TARGET_EINVAL
;
7510 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
7511 return -TARGET_EINVAL
;
7517 /* do_syscall() should always have a single exit point at the end so
7518 that actions, such as logging of syscall results, can be performed.
7519 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7520 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
7521 abi_long arg2
, abi_long arg3
, abi_long arg4
,
7522 abi_long arg5
, abi_long arg6
, abi_long arg7
,
7525 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
7531 #if defined(DEBUG_ERESTARTSYS)
7532 /* Debug-only code for exercising the syscall-restart code paths
7533 * in the per-architecture cpu main loops: restart every syscall
7534 * the guest makes once before letting it through.
7541 return -TARGET_ERESTARTSYS
;
7547 gemu_log("syscall %d", num
);
7549 trace_guest_user_syscall(cpu
, num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
7551 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7554 case TARGET_NR_exit
:
7555 /* In old applications this may be used to implement _exit(2).
7556 However in threaded applictions it is used for thread termination,
7557 and _exit_group is used for application termination.
7558 Do thread termination if we have more then one thread. */
7560 if (block_signals()) {
7561 ret
= -TARGET_ERESTARTSYS
;
7567 if (CPU_NEXT(first_cpu
)) {
7570 /* Remove the CPU from the list. */
7571 QTAILQ_REMOVE(&cpus
, cpu
, node
);
7576 if (ts
->child_tidptr
) {
7577 put_user_u32(0, ts
->child_tidptr
);
7578 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
7582 object_unref(OBJECT(cpu
));
7584 rcu_unregister_thread();
7592 gdb_exit(cpu_env
, arg1
);
7594 ret
= 0; /* avoid warning */
7596 case TARGET_NR_read
:
7600 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7602 ret
= get_errno(safe_read(arg1
, p
, arg3
));
7604 fd_trans_host_to_target_data(arg1
)) {
7605 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
7607 unlock_user(p
, arg2
, ret
);
7610 case TARGET_NR_write
:
7611 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
7613 ret
= get_errno(safe_write(arg1
, p
, arg3
));
7614 unlock_user(p
, arg2
, 0);
7616 #ifdef TARGET_NR_open
7617 case TARGET_NR_open
:
7618 if (!(p
= lock_user_string(arg1
)))
7620 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
7621 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
7623 fd_trans_unregister(ret
);
7624 unlock_user(p
, arg1
, 0);
7627 case TARGET_NR_openat
:
7628 if (!(p
= lock_user_string(arg2
)))
7630 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
7631 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
7633 fd_trans_unregister(ret
);
7634 unlock_user(p
, arg2
, 0);
7636 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7637 case TARGET_NR_name_to_handle_at
:
7638 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
7641 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7642 case TARGET_NR_open_by_handle_at
:
7643 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
7644 fd_trans_unregister(ret
);
7647 case TARGET_NR_close
:
7648 fd_trans_unregister(arg1
);
7649 ret
= get_errno(close(arg1
));
7654 #ifdef TARGET_NR_fork
7655 case TARGET_NR_fork
:
7656 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
7659 #ifdef TARGET_NR_waitpid
7660 case TARGET_NR_waitpid
:
7663 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
7664 if (!is_error(ret
) && arg2
&& ret
7665 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
7670 #ifdef TARGET_NR_waitid
7671 case TARGET_NR_waitid
:
7675 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
7676 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
7677 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
7679 host_to_target_siginfo(p
, &info
);
7680 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
7685 #ifdef TARGET_NR_creat /* not on alpha */
7686 case TARGET_NR_creat
:
7687 if (!(p
= lock_user_string(arg1
)))
7689 ret
= get_errno(creat(p
, arg2
));
7690 fd_trans_unregister(ret
);
7691 unlock_user(p
, arg1
, 0);
7694 #ifdef TARGET_NR_link
7695 case TARGET_NR_link
:
7698 p
= lock_user_string(arg1
);
7699 p2
= lock_user_string(arg2
);
7701 ret
= -TARGET_EFAULT
;
7703 ret
= get_errno(link(p
, p2
));
7704 unlock_user(p2
, arg2
, 0);
7705 unlock_user(p
, arg1
, 0);
7709 #if defined(TARGET_NR_linkat)
7710 case TARGET_NR_linkat
:
7715 p
= lock_user_string(arg2
);
7716 p2
= lock_user_string(arg4
);
7718 ret
= -TARGET_EFAULT
;
7720 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
7721 unlock_user(p
, arg2
, 0);
7722 unlock_user(p2
, arg4
, 0);
7726 #ifdef TARGET_NR_unlink
7727 case TARGET_NR_unlink
:
7728 if (!(p
= lock_user_string(arg1
)))
7730 ret
= get_errno(unlink(p
));
7731 unlock_user(p
, arg1
, 0);
7734 #if defined(TARGET_NR_unlinkat)
7735 case TARGET_NR_unlinkat
:
7736 if (!(p
= lock_user_string(arg2
)))
7738 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
7739 unlock_user(p
, arg2
, 0);
7742 case TARGET_NR_execve
:
7744 char **argp
, **envp
;
7747 abi_ulong guest_argp
;
7748 abi_ulong guest_envp
;
7755 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
7756 if (get_user_ual(addr
, gp
))
7764 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
7765 if (get_user_ual(addr
, gp
))
7772 argp
= alloca((argc
+ 1) * sizeof(void *));
7773 envp
= alloca((envc
+ 1) * sizeof(void *));
7775 for (gp
= guest_argp
, q
= argp
; gp
;
7776 gp
+= sizeof(abi_ulong
), q
++) {
7777 if (get_user_ual(addr
, gp
))
7781 if (!(*q
= lock_user_string(addr
)))
7783 total_size
+= strlen(*q
) + 1;
7787 for (gp
= guest_envp
, q
= envp
; gp
;
7788 gp
+= sizeof(abi_ulong
), q
++) {
7789 if (get_user_ual(addr
, gp
))
7793 if (!(*q
= lock_user_string(addr
)))
7795 total_size
+= strlen(*q
) + 1;
7799 if (!(p
= lock_user_string(arg1
)))
7801 /* Although execve() is not an interruptible syscall it is
7802 * a special case where we must use the safe_syscall wrapper:
7803 * if we allow a signal to happen before we make the host
7804 * syscall then we will 'lose' it, because at the point of
7805 * execve the process leaves QEMU's control. So we use the
7806 * safe syscall wrapper to ensure that we either take the
7807 * signal as a guest signal, or else it does not happen
7808 * before the execve completes and makes it the other
7809 * program's problem.
7811 ret
= get_errno(safe_execve(p
, argp
, envp
));
7812 unlock_user(p
, arg1
, 0);
7817 ret
= -TARGET_EFAULT
;
7820 for (gp
= guest_argp
, q
= argp
; *q
;
7821 gp
+= sizeof(abi_ulong
), q
++) {
7822 if (get_user_ual(addr
, gp
)
7825 unlock_user(*q
, addr
, 0);
7827 for (gp
= guest_envp
, q
= envp
; *q
;
7828 gp
+= sizeof(abi_ulong
), q
++) {
7829 if (get_user_ual(addr
, gp
)
7832 unlock_user(*q
, addr
, 0);
7836 case TARGET_NR_chdir
:
7837 if (!(p
= lock_user_string(arg1
)))
7839 ret
= get_errno(chdir(p
));
7840 unlock_user(p
, arg1
, 0);
7842 #ifdef TARGET_NR_time
7843 case TARGET_NR_time
:
7846 ret
= get_errno(time(&host_time
));
7849 && put_user_sal(host_time
, arg1
))
7854 #ifdef TARGET_NR_mknod
7855 case TARGET_NR_mknod
:
7856 if (!(p
= lock_user_string(arg1
)))
7858 ret
= get_errno(mknod(p
, arg2
, arg3
));
7859 unlock_user(p
, arg1
, 0);
7862 #if defined(TARGET_NR_mknodat)
7863 case TARGET_NR_mknodat
:
7864 if (!(p
= lock_user_string(arg2
)))
7866 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
7867 unlock_user(p
, arg2
, 0);
7870 #ifdef TARGET_NR_chmod
7871 case TARGET_NR_chmod
:
7872 if (!(p
= lock_user_string(arg1
)))
7874 ret
= get_errno(chmod(p
, arg2
));
7875 unlock_user(p
, arg1
, 0);
7878 #ifdef TARGET_NR_break
7879 case TARGET_NR_break
:
7882 #ifdef TARGET_NR_oldstat
7883 case TARGET_NR_oldstat
:
7886 case TARGET_NR_lseek
:
7887 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
7889 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7890 /* Alpha specific */
7891 case TARGET_NR_getxpid
:
7892 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
7893 ret
= get_errno(getpid());
7896 #ifdef TARGET_NR_getpid
7897 case TARGET_NR_getpid
:
7898 ret
= get_errno(getpid());
7901 case TARGET_NR_mount
:
7903 /* need to look at the data field */
7907 p
= lock_user_string(arg1
);
7915 p2
= lock_user_string(arg2
);
7918 unlock_user(p
, arg1
, 0);
7924 p3
= lock_user_string(arg3
);
7927 unlock_user(p
, arg1
, 0);
7929 unlock_user(p2
, arg2
, 0);
7936 /* FIXME - arg5 should be locked, but it isn't clear how to
7937 * do that since it's not guaranteed to be a NULL-terminated
7941 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
7943 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
7945 ret
= get_errno(ret
);
7948 unlock_user(p
, arg1
, 0);
7950 unlock_user(p2
, arg2
, 0);
7952 unlock_user(p3
, arg3
, 0);
7956 #ifdef TARGET_NR_umount
7957 case TARGET_NR_umount
:
7958 if (!(p
= lock_user_string(arg1
)))
7960 ret
= get_errno(umount(p
));
7961 unlock_user(p
, arg1
, 0);
7964 #ifdef TARGET_NR_stime /* not on alpha */
7965 case TARGET_NR_stime
:
7968 if (get_user_sal(host_time
, arg1
))
7970 ret
= get_errno(stime(&host_time
));
7974 case TARGET_NR_ptrace
:
7976 #ifdef TARGET_NR_alarm /* not on alpha */
7977 case TARGET_NR_alarm
:
7981 #ifdef TARGET_NR_oldfstat
7982 case TARGET_NR_oldfstat
:
7985 #ifdef TARGET_NR_pause /* not on alpha */
7986 case TARGET_NR_pause
:
7987 if (!block_signals()) {
7988 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
7990 ret
= -TARGET_EINTR
;
7993 #ifdef TARGET_NR_utime
7994 case TARGET_NR_utime
:
7996 struct utimbuf tbuf
, *host_tbuf
;
7997 struct target_utimbuf
*target_tbuf
;
7999 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
8001 tbuf
.actime
= tswapal(target_tbuf
->actime
);
8002 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
8003 unlock_user_struct(target_tbuf
, arg2
, 0);
8008 if (!(p
= lock_user_string(arg1
)))
8010 ret
= get_errno(utime(p
, host_tbuf
));
8011 unlock_user(p
, arg1
, 0);
8015 #ifdef TARGET_NR_utimes
8016 case TARGET_NR_utimes
:
8018 struct timeval
*tvp
, tv
[2];
8020 if (copy_from_user_timeval(&tv
[0], arg2
)
8021 || copy_from_user_timeval(&tv
[1],
8022 arg2
+ sizeof(struct target_timeval
)))
8028 if (!(p
= lock_user_string(arg1
)))
8030 ret
= get_errno(utimes(p
, tvp
));
8031 unlock_user(p
, arg1
, 0);
8035 #if defined(TARGET_NR_futimesat)
8036 case TARGET_NR_futimesat
:
8038 struct timeval
*tvp
, tv
[2];
8040 if (copy_from_user_timeval(&tv
[0], arg3
)
8041 || copy_from_user_timeval(&tv
[1],
8042 arg3
+ sizeof(struct target_timeval
)))
8048 if (!(p
= lock_user_string(arg2
)))
8050 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
8051 unlock_user(p
, arg2
, 0);
8055 #ifdef TARGET_NR_stty
8056 case TARGET_NR_stty
:
8059 #ifdef TARGET_NR_gtty
8060 case TARGET_NR_gtty
:
8063 #ifdef TARGET_NR_access
8064 case TARGET_NR_access
:
8065 if (!(p
= lock_user_string(arg1
)))
8067 ret
= get_errno(access(path(p
), arg2
));
8068 unlock_user(p
, arg1
, 0);
8071 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8072 case TARGET_NR_faccessat
:
8073 if (!(p
= lock_user_string(arg2
)))
8075 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
8076 unlock_user(p
, arg2
, 0);
8079 #ifdef TARGET_NR_nice /* not on alpha */
8080 case TARGET_NR_nice
:
8081 ret
= get_errno(nice(arg1
));
8084 #ifdef TARGET_NR_ftime
8085 case TARGET_NR_ftime
:
8088 case TARGET_NR_sync
:
8092 case TARGET_NR_kill
:
8093 ret
= get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
8095 #ifdef TARGET_NR_rename
8096 case TARGET_NR_rename
:
8099 p
= lock_user_string(arg1
);
8100 p2
= lock_user_string(arg2
);
8102 ret
= -TARGET_EFAULT
;
8104 ret
= get_errno(rename(p
, p2
));
8105 unlock_user(p2
, arg2
, 0);
8106 unlock_user(p
, arg1
, 0);
8110 #if defined(TARGET_NR_renameat)
8111 case TARGET_NR_renameat
:
8114 p
= lock_user_string(arg2
);
8115 p2
= lock_user_string(arg4
);
8117 ret
= -TARGET_EFAULT
;
8119 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
8120 unlock_user(p2
, arg4
, 0);
8121 unlock_user(p
, arg2
, 0);
8125 #ifdef TARGET_NR_mkdir
8126 case TARGET_NR_mkdir
:
8127 if (!(p
= lock_user_string(arg1
)))
8129 ret
= get_errno(mkdir(p
, arg2
));
8130 unlock_user(p
, arg1
, 0);
8133 #if defined(TARGET_NR_mkdirat)
8134 case TARGET_NR_mkdirat
:
8135 if (!(p
= lock_user_string(arg2
)))
8137 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
8138 unlock_user(p
, arg2
, 0);
8141 #ifdef TARGET_NR_rmdir
8142 case TARGET_NR_rmdir
:
8143 if (!(p
= lock_user_string(arg1
)))
8145 ret
= get_errno(rmdir(p
));
8146 unlock_user(p
, arg1
, 0);
8150 ret
= get_errno(dup(arg1
));
8152 fd_trans_dup(arg1
, ret
);
8155 #ifdef TARGET_NR_pipe
8156 case TARGET_NR_pipe
:
8157 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
8160 #ifdef TARGET_NR_pipe2
8161 case TARGET_NR_pipe2
:
8162 ret
= do_pipe(cpu_env
, arg1
,
8163 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
8166 case TARGET_NR_times
:
8168 struct target_tms
*tmsp
;
8170 ret
= get_errno(times(&tms
));
8172 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
8175 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
8176 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
8177 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
8178 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
8181 ret
= host_to_target_clock_t(ret
);
8184 #ifdef TARGET_NR_prof
8185 case TARGET_NR_prof
:
8188 #ifdef TARGET_NR_signal
8189 case TARGET_NR_signal
:
8192 case TARGET_NR_acct
:
8194 ret
= get_errno(acct(NULL
));
8196 if (!(p
= lock_user_string(arg1
)))
8198 ret
= get_errno(acct(path(p
)));
8199 unlock_user(p
, arg1
, 0);
8202 #ifdef TARGET_NR_umount2
8203 case TARGET_NR_umount2
:
8204 if (!(p
= lock_user_string(arg1
)))
8206 ret
= get_errno(umount2(p
, arg2
));
8207 unlock_user(p
, arg1
, 0);
8210 #ifdef TARGET_NR_lock
8211 case TARGET_NR_lock
:
8214 case TARGET_NR_ioctl
:
8215 ret
= do_ioctl(arg1
, arg2
, arg3
);
8217 case TARGET_NR_fcntl
:
8218 ret
= do_fcntl(arg1
, arg2
, arg3
);
8220 #ifdef TARGET_NR_mpx
8224 case TARGET_NR_setpgid
:
8225 ret
= get_errno(setpgid(arg1
, arg2
));
8227 #ifdef TARGET_NR_ulimit
8228 case TARGET_NR_ulimit
:
8231 #ifdef TARGET_NR_oldolduname
8232 case TARGET_NR_oldolduname
:
8235 case TARGET_NR_umask
:
8236 ret
= get_errno(umask(arg1
));
8238 case TARGET_NR_chroot
:
8239 if (!(p
= lock_user_string(arg1
)))
8241 ret
= get_errno(chroot(p
));
8242 unlock_user(p
, arg1
, 0);
8244 #ifdef TARGET_NR_ustat
8245 case TARGET_NR_ustat
:
8248 #ifdef TARGET_NR_dup2
8249 case TARGET_NR_dup2
:
8250 ret
= get_errno(dup2(arg1
, arg2
));
8252 fd_trans_dup(arg1
, arg2
);
8256 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8257 case TARGET_NR_dup3
:
8258 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
8260 fd_trans_dup(arg1
, arg2
);
8264 #ifdef TARGET_NR_getppid /* not on alpha */
8265 case TARGET_NR_getppid
:
8266 ret
= get_errno(getppid());
8269 #ifdef TARGET_NR_getpgrp
8270 case TARGET_NR_getpgrp
:
8271 ret
= get_errno(getpgrp());
8274 case TARGET_NR_setsid
:
8275 ret
= get_errno(setsid());
8277 #ifdef TARGET_NR_sigaction
8278 case TARGET_NR_sigaction
:
8280 #if defined(TARGET_ALPHA)
8281 struct target_sigaction act
, oact
, *pact
= 0;
8282 struct target_old_sigaction
*old_act
;
8284 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8286 act
._sa_handler
= old_act
->_sa_handler
;
8287 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8288 act
.sa_flags
= old_act
->sa_flags
;
8289 act
.sa_restorer
= 0;
8290 unlock_user_struct(old_act
, arg2
, 0);
8293 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8294 if (!is_error(ret
) && arg3
) {
8295 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8297 old_act
->_sa_handler
= oact
._sa_handler
;
8298 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8299 old_act
->sa_flags
= oact
.sa_flags
;
8300 unlock_user_struct(old_act
, arg3
, 1);
8302 #elif defined(TARGET_MIPS)
8303 struct target_sigaction act
, oact
, *pact
, *old_act
;
8306 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8308 act
._sa_handler
= old_act
->_sa_handler
;
8309 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
8310 act
.sa_flags
= old_act
->sa_flags
;
8311 unlock_user_struct(old_act
, arg2
, 0);
8317 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8319 if (!is_error(ret
) && arg3
) {
8320 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8322 old_act
->_sa_handler
= oact
._sa_handler
;
8323 old_act
->sa_flags
= oact
.sa_flags
;
8324 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
8325 old_act
->sa_mask
.sig
[1] = 0;
8326 old_act
->sa_mask
.sig
[2] = 0;
8327 old_act
->sa_mask
.sig
[3] = 0;
8328 unlock_user_struct(old_act
, arg3
, 1);
8331 struct target_old_sigaction
*old_act
;
8332 struct target_sigaction act
, oact
, *pact
;
8334 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8336 act
._sa_handler
= old_act
->_sa_handler
;
8337 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8338 act
.sa_flags
= old_act
->sa_flags
;
8339 act
.sa_restorer
= old_act
->sa_restorer
;
8340 unlock_user_struct(old_act
, arg2
, 0);
8345 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8346 if (!is_error(ret
) && arg3
) {
8347 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8349 old_act
->_sa_handler
= oact
._sa_handler
;
8350 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8351 old_act
->sa_flags
= oact
.sa_flags
;
8352 old_act
->sa_restorer
= oact
.sa_restorer
;
8353 unlock_user_struct(old_act
, arg3
, 1);
8359 case TARGET_NR_rt_sigaction
:
8361 #if defined(TARGET_ALPHA)
8362 struct target_sigaction act
, oact
, *pact
= 0;
8363 struct target_rt_sigaction
*rt_act
;
8365 if (arg4
!= sizeof(target_sigset_t
)) {
8366 ret
= -TARGET_EINVAL
;
8370 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
8372 act
._sa_handler
= rt_act
->_sa_handler
;
8373 act
.sa_mask
= rt_act
->sa_mask
;
8374 act
.sa_flags
= rt_act
->sa_flags
;
8375 act
.sa_restorer
= arg5
;
8376 unlock_user_struct(rt_act
, arg2
, 0);
8379 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8380 if (!is_error(ret
) && arg3
) {
8381 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
8383 rt_act
->_sa_handler
= oact
._sa_handler
;
8384 rt_act
->sa_mask
= oact
.sa_mask
;
8385 rt_act
->sa_flags
= oact
.sa_flags
;
8386 unlock_user_struct(rt_act
, arg3
, 1);
8389 struct target_sigaction
*act
;
8390 struct target_sigaction
*oact
;
8392 if (arg4
!= sizeof(target_sigset_t
)) {
8393 ret
= -TARGET_EINVAL
;
8397 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
8402 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
8403 ret
= -TARGET_EFAULT
;
8404 goto rt_sigaction_fail
;
8408 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
8411 unlock_user_struct(act
, arg2
, 0);
8413 unlock_user_struct(oact
, arg3
, 1);
8417 #ifdef TARGET_NR_sgetmask /* not on alpha */
8418 case TARGET_NR_sgetmask
:
8421 abi_ulong target_set
;
8422 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8424 host_to_target_old_sigset(&target_set
, &cur_set
);
8430 #ifdef TARGET_NR_ssetmask /* not on alpha */
8431 case TARGET_NR_ssetmask
:
8433 sigset_t set
, oset
, cur_set
;
8434 abi_ulong target_set
= arg1
;
8435 /* We only have one word of the new mask so we must read
8436 * the rest of it with do_sigprocmask() and OR in this word.
8437 * We are guaranteed that a do_sigprocmask() that only queries
8438 * the signal mask will not fail.
8440 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8442 target_to_host_old_sigset(&set
, &target_set
);
8443 sigorset(&set
, &set
, &cur_set
);
8444 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
8446 host_to_target_old_sigset(&target_set
, &oset
);
8452 #ifdef TARGET_NR_sigprocmask
8453 case TARGET_NR_sigprocmask
:
8455 #if defined(TARGET_ALPHA)
8456 sigset_t set
, oldset
;
8461 case TARGET_SIG_BLOCK
:
8464 case TARGET_SIG_UNBLOCK
:
8467 case TARGET_SIG_SETMASK
:
8471 ret
= -TARGET_EINVAL
;
8475 target_to_host_old_sigset(&set
, &mask
);
8477 ret
= do_sigprocmask(how
, &set
, &oldset
);
8478 if (!is_error(ret
)) {
8479 host_to_target_old_sigset(&mask
, &oldset
);
8481 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
8484 sigset_t set
, oldset
, *set_ptr
;
8489 case TARGET_SIG_BLOCK
:
8492 case TARGET_SIG_UNBLOCK
:
8495 case TARGET_SIG_SETMASK
:
8499 ret
= -TARGET_EINVAL
;
8502 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8504 target_to_host_old_sigset(&set
, p
);
8505 unlock_user(p
, arg2
, 0);
8511 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8512 if (!is_error(ret
) && arg3
) {
8513 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8515 host_to_target_old_sigset(p
, &oldset
);
8516 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8522 case TARGET_NR_rt_sigprocmask
:
8525 sigset_t set
, oldset
, *set_ptr
;
8527 if (arg4
!= sizeof(target_sigset_t
)) {
8528 ret
= -TARGET_EINVAL
;
8534 case TARGET_SIG_BLOCK
:
8537 case TARGET_SIG_UNBLOCK
:
8540 case TARGET_SIG_SETMASK
:
8544 ret
= -TARGET_EINVAL
;
8547 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8549 target_to_host_sigset(&set
, p
);
8550 unlock_user(p
, arg2
, 0);
8556 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8557 if (!is_error(ret
) && arg3
) {
8558 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8560 host_to_target_sigset(p
, &oldset
);
8561 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8565 #ifdef TARGET_NR_sigpending
8566 case TARGET_NR_sigpending
:
8569 ret
= get_errno(sigpending(&set
));
8570 if (!is_error(ret
)) {
8571 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8573 host_to_target_old_sigset(p
, &set
);
8574 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8579 case TARGET_NR_rt_sigpending
:
8583 /* Yes, this check is >, not != like most. We follow the kernel's
8584 * logic and it does it like this because it implements
8585 * NR_sigpending through the same code path, and in that case
8586 * the old_sigset_t is smaller in size.
8588 if (arg2
> sizeof(target_sigset_t
)) {
8589 ret
= -TARGET_EINVAL
;
8593 ret
= get_errno(sigpending(&set
));
8594 if (!is_error(ret
)) {
8595 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8597 host_to_target_sigset(p
, &set
);
8598 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8602 #ifdef TARGET_NR_sigsuspend
8603 case TARGET_NR_sigsuspend
:
8605 TaskState
*ts
= cpu
->opaque
;
8606 #if defined(TARGET_ALPHA)
8607 abi_ulong mask
= arg1
;
8608 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
8610 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8612 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
8613 unlock_user(p
, arg1
, 0);
8615 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8617 if (ret
!= -TARGET_ERESTARTSYS
) {
8618 ts
->in_sigsuspend
= 1;
8623 case TARGET_NR_rt_sigsuspend
:
8625 TaskState
*ts
= cpu
->opaque
;
8627 if (arg2
!= sizeof(target_sigset_t
)) {
8628 ret
= -TARGET_EINVAL
;
8631 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8633 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
8634 unlock_user(p
, arg1
, 0);
8635 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8637 if (ret
!= -TARGET_ERESTARTSYS
) {
8638 ts
->in_sigsuspend
= 1;
8642 case TARGET_NR_rt_sigtimedwait
:
8645 struct timespec uts
, *puts
;
8648 if (arg4
!= sizeof(target_sigset_t
)) {
8649 ret
= -TARGET_EINVAL
;
8653 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8655 target_to_host_sigset(&set
, p
);
8656 unlock_user(p
, arg1
, 0);
8659 target_to_host_timespec(puts
, arg3
);
8663 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
8665 if (!is_error(ret
)) {
8667 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
8672 host_to_target_siginfo(p
, &uinfo
);
8673 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
8675 ret
= host_to_target_signal(ret
);
8679 case TARGET_NR_rt_sigqueueinfo
:
8683 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
8687 target_to_host_siginfo(&uinfo
, p
);
8688 unlock_user(p
, arg1
, 0);
8689 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
8692 #ifdef TARGET_NR_sigreturn
8693 case TARGET_NR_sigreturn
:
8694 if (block_signals()) {
8695 ret
= -TARGET_ERESTARTSYS
;
8697 ret
= do_sigreturn(cpu_env
);
8701 case TARGET_NR_rt_sigreturn
:
8702 if (block_signals()) {
8703 ret
= -TARGET_ERESTARTSYS
;
8705 ret
= do_rt_sigreturn(cpu_env
);
8708 case TARGET_NR_sethostname
:
8709 if (!(p
= lock_user_string(arg1
)))
8711 ret
= get_errno(sethostname(p
, arg2
));
8712 unlock_user(p
, arg1
, 0);
8714 case TARGET_NR_setrlimit
:
8716 int resource
= target_to_host_resource(arg1
);
8717 struct target_rlimit
*target_rlim
;
8719 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
8721 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
8722 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
8723 unlock_user_struct(target_rlim
, arg2
, 0);
8724 ret
= get_errno(setrlimit(resource
, &rlim
));
8727 case TARGET_NR_getrlimit
:
8729 int resource
= target_to_host_resource(arg1
);
8730 struct target_rlimit
*target_rlim
;
8733 ret
= get_errno(getrlimit(resource
, &rlim
));
8734 if (!is_error(ret
)) {
8735 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
8737 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
8738 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
8739 unlock_user_struct(target_rlim
, arg2
, 1);
8743 case TARGET_NR_getrusage
:
8745 struct rusage rusage
;
8746 ret
= get_errno(getrusage(arg1
, &rusage
));
8747 if (!is_error(ret
)) {
8748 ret
= host_to_target_rusage(arg2
, &rusage
);
8752 case TARGET_NR_gettimeofday
:
8755 ret
= get_errno(gettimeofday(&tv
, NULL
));
8756 if (!is_error(ret
)) {
8757 if (copy_to_user_timeval(arg1
, &tv
))
8762 case TARGET_NR_settimeofday
:
8764 struct timeval tv
, *ptv
= NULL
;
8765 struct timezone tz
, *ptz
= NULL
;
8768 if (copy_from_user_timeval(&tv
, arg1
)) {
8775 if (copy_from_user_timezone(&tz
, arg2
)) {
8781 ret
= get_errno(settimeofday(ptv
, ptz
));
8784 #if defined(TARGET_NR_select)
8785 case TARGET_NR_select
:
8786 #if defined(TARGET_WANT_NI_OLD_SELECT)
8787 /* some architectures used to have old_select here
8788 * but now ENOSYS it.
8790 ret
= -TARGET_ENOSYS
;
8791 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8792 ret
= do_old_select(arg1
);
8794 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
8798 #ifdef TARGET_NR_pselect6
8799 case TARGET_NR_pselect6
:
8801 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
8802 fd_set rfds
, wfds
, efds
;
8803 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
8804 struct timespec ts
, *ts_ptr
;
8807 * The 6th arg is actually two args smashed together,
8808 * so we cannot use the C library.
8816 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
8817 target_sigset_t
*target_sigset
;
8825 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
8829 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
8833 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
8839 * This takes a timespec, and not a timeval, so we cannot
8840 * use the do_select() helper ...
8843 if (target_to_host_timespec(&ts
, ts_addr
)) {
8851 /* Extract the two packed args for the sigset */
8854 sig
.size
= SIGSET_T_SIZE
;
8856 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
8860 arg_sigset
= tswapal(arg7
[0]);
8861 arg_sigsize
= tswapal(arg7
[1]);
8862 unlock_user(arg7
, arg6
, 0);
8866 if (arg_sigsize
!= sizeof(*target_sigset
)) {
8867 /* Like the kernel, we enforce correct size sigsets */
8868 ret
= -TARGET_EINVAL
;
8871 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
8872 sizeof(*target_sigset
), 1);
8873 if (!target_sigset
) {
8876 target_to_host_sigset(&set
, target_sigset
);
8877 unlock_user(target_sigset
, arg_sigset
, 0);
8885 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
8888 if (!is_error(ret
)) {
8889 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
8891 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
8893 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
8896 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
8902 #ifdef TARGET_NR_symlink
8903 case TARGET_NR_symlink
:
8906 p
= lock_user_string(arg1
);
8907 p2
= lock_user_string(arg2
);
8909 ret
= -TARGET_EFAULT
;
8911 ret
= get_errno(symlink(p
, p2
));
8912 unlock_user(p2
, arg2
, 0);
8913 unlock_user(p
, arg1
, 0);
8917 #if defined(TARGET_NR_symlinkat)
8918 case TARGET_NR_symlinkat
:
8921 p
= lock_user_string(arg1
);
8922 p2
= lock_user_string(arg3
);
8924 ret
= -TARGET_EFAULT
;
8926 ret
= get_errno(symlinkat(p
, arg2
, p2
));
8927 unlock_user(p2
, arg3
, 0);
8928 unlock_user(p
, arg1
, 0);
8932 #ifdef TARGET_NR_oldlstat
8933 case TARGET_NR_oldlstat
:
8936 #ifdef TARGET_NR_readlink
8937 case TARGET_NR_readlink
:
8940 p
= lock_user_string(arg1
);
8941 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8943 ret
= -TARGET_EFAULT
;
8945 /* Short circuit this for the magic exe check. */
8946 ret
= -TARGET_EINVAL
;
8947 } else if (is_proc_myself((const char *)p
, "exe")) {
8948 char real
[PATH_MAX
], *temp
;
8949 temp
= realpath(exec_path
, real
);
8950 /* Return value is # of bytes that we wrote to the buffer. */
8952 ret
= get_errno(-1);
8954 /* Don't worry about sign mismatch as earlier mapping
8955 * logic would have thrown a bad address error. */
8956 ret
= MIN(strlen(real
), arg3
);
8957 /* We cannot NUL terminate the string. */
8958 memcpy(p2
, real
, ret
);
8961 ret
= get_errno(readlink(path(p
), p2
, arg3
));
8963 unlock_user(p2
, arg2
, ret
);
8964 unlock_user(p
, arg1
, 0);
8968 #if defined(TARGET_NR_readlinkat)
8969 case TARGET_NR_readlinkat
:
8972 p
= lock_user_string(arg2
);
8973 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8975 ret
= -TARGET_EFAULT
;
8976 } else if (is_proc_myself((const char *)p
, "exe")) {
8977 char real
[PATH_MAX
], *temp
;
8978 temp
= realpath(exec_path
, real
);
8979 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
8980 snprintf((char *)p2
, arg4
, "%s", real
);
8982 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
8984 unlock_user(p2
, arg3
, ret
);
8985 unlock_user(p
, arg2
, 0);
8989 #ifdef TARGET_NR_uselib
8990 case TARGET_NR_uselib
:
8993 #ifdef TARGET_NR_swapon
8994 case TARGET_NR_swapon
:
8995 if (!(p
= lock_user_string(arg1
)))
8997 ret
= get_errno(swapon(p
, arg2
));
8998 unlock_user(p
, arg1
, 0);
9001 case TARGET_NR_reboot
:
9002 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
9003 /* arg4 must be ignored in all other cases */
9004 p
= lock_user_string(arg4
);
9008 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
9009 unlock_user(p
, arg4
, 0);
9011 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
9014 #ifdef TARGET_NR_readdir
9015 case TARGET_NR_readdir
:
9018 #ifdef TARGET_NR_mmap
9019 case TARGET_NR_mmap
:
9020 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9021 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9022 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9023 || defined(TARGET_S390X)
9026 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
9027 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
9035 unlock_user(v
, arg1
, 0);
9036 ret
= get_errno(target_mmap(v1
, v2
, v3
,
9037 target_to_host_bitmask(v4
, mmap_flags_tbl
),
9041 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9042 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9048 #ifdef TARGET_NR_mmap2
9049 case TARGET_NR_mmap2
:
9051 #define MMAP_SHIFT 12
9053 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9054 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9056 arg6
<< MMAP_SHIFT
));
9059 case TARGET_NR_munmap
:
9060 ret
= get_errno(target_munmap(arg1
, arg2
));
9062 case TARGET_NR_mprotect
:
9064 TaskState
*ts
= cpu
->opaque
;
9065 /* Special hack to detect libc making the stack executable. */
9066 if ((arg3
& PROT_GROWSDOWN
)
9067 && arg1
>= ts
->info
->stack_limit
9068 && arg1
<= ts
->info
->start_stack
) {
9069 arg3
&= ~PROT_GROWSDOWN
;
9070 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
9071 arg1
= ts
->info
->stack_limit
;
9074 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
9076 #ifdef TARGET_NR_mremap
9077 case TARGET_NR_mremap
:
9078 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
9081 /* ??? msync/mlock/munlock are broken for softmmu. */
9082 #ifdef TARGET_NR_msync
9083 case TARGET_NR_msync
:
9084 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
9087 #ifdef TARGET_NR_mlock
9088 case TARGET_NR_mlock
:
9089 ret
= get_errno(mlock(g2h(arg1
), arg2
));
9092 #ifdef TARGET_NR_munlock
9093 case TARGET_NR_munlock
:
9094 ret
= get_errno(munlock(g2h(arg1
), arg2
));
9097 #ifdef TARGET_NR_mlockall
9098 case TARGET_NR_mlockall
:
9099 ret
= get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
9102 #ifdef TARGET_NR_munlockall
9103 case TARGET_NR_munlockall
:
9104 ret
= get_errno(munlockall());
9107 case TARGET_NR_truncate
:
9108 if (!(p
= lock_user_string(arg1
)))
9110 ret
= get_errno(truncate(p
, arg2
));
9111 unlock_user(p
, arg1
, 0);
9113 case TARGET_NR_ftruncate
:
9114 ret
= get_errno(ftruncate(arg1
, arg2
));
9116 case TARGET_NR_fchmod
:
9117 ret
= get_errno(fchmod(arg1
, arg2
));
9119 #if defined(TARGET_NR_fchmodat)
9120 case TARGET_NR_fchmodat
:
9121 if (!(p
= lock_user_string(arg2
)))
9123 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
9124 unlock_user(p
, arg2
, 0);
9127 case TARGET_NR_getpriority
:
9128 /* Note that negative values are valid for getpriority, so we must
9129 differentiate based on errno settings. */
9131 ret
= getpriority(arg1
, arg2
);
9132 if (ret
== -1 && errno
!= 0) {
9133 ret
= -host_to_target_errno(errno
);
9137 /* Return value is the unbiased priority. Signal no error. */
9138 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
9140 /* Return value is a biased priority to avoid negative numbers. */
9144 case TARGET_NR_setpriority
:
9145 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
9147 #ifdef TARGET_NR_profil
9148 case TARGET_NR_profil
:
9151 case TARGET_NR_statfs
:
9152 if (!(p
= lock_user_string(arg1
)))
9154 ret
= get_errno(statfs(path(p
), &stfs
));
9155 unlock_user(p
, arg1
, 0);
9157 if (!is_error(ret
)) {
9158 struct target_statfs
*target_stfs
;
9160 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
9162 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9163 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9164 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9165 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9166 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9167 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9168 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9169 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9170 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9171 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9172 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9173 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9174 unlock_user_struct(target_stfs
, arg2
, 1);
9177 case TARGET_NR_fstatfs
:
9178 ret
= get_errno(fstatfs(arg1
, &stfs
));
9179 goto convert_statfs
;
9180 #ifdef TARGET_NR_statfs64
9181 case TARGET_NR_statfs64
:
9182 if (!(p
= lock_user_string(arg1
)))
9184 ret
= get_errno(statfs(path(p
), &stfs
));
9185 unlock_user(p
, arg1
, 0);
9187 if (!is_error(ret
)) {
9188 struct target_statfs64
*target_stfs
;
9190 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
9192 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9193 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9194 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9195 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9196 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9197 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9198 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9199 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9200 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9201 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9202 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9203 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9204 unlock_user_struct(target_stfs
, arg3
, 1);
9207 case TARGET_NR_fstatfs64
:
9208 ret
= get_errno(fstatfs(arg1
, &stfs
));
9209 goto convert_statfs64
;
9211 #ifdef TARGET_NR_ioperm
9212 case TARGET_NR_ioperm
:
9215 #ifdef TARGET_NR_socketcall
9216 case TARGET_NR_socketcall
:
9217 ret
= do_socketcall(arg1
, arg2
);
9220 #ifdef TARGET_NR_accept
9221 case TARGET_NR_accept
:
9222 ret
= do_accept4(arg1
, arg2
, arg3
, 0);
9225 #ifdef TARGET_NR_accept4
9226 case TARGET_NR_accept4
:
9227 ret
= do_accept4(arg1
, arg2
, arg3
, arg4
);
9230 #ifdef TARGET_NR_bind
9231 case TARGET_NR_bind
:
9232 ret
= do_bind(arg1
, arg2
, arg3
);
9235 #ifdef TARGET_NR_connect
9236 case TARGET_NR_connect
:
9237 ret
= do_connect(arg1
, arg2
, arg3
);
9240 #ifdef TARGET_NR_getpeername
9241 case TARGET_NR_getpeername
:
9242 ret
= do_getpeername(arg1
, arg2
, arg3
);
9245 #ifdef TARGET_NR_getsockname
9246 case TARGET_NR_getsockname
:
9247 ret
= do_getsockname(arg1
, arg2
, arg3
);
9250 #ifdef TARGET_NR_getsockopt
9251 case TARGET_NR_getsockopt
:
9252 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
9255 #ifdef TARGET_NR_listen
9256 case TARGET_NR_listen
:
9257 ret
= get_errno(listen(arg1
, arg2
));
9260 #ifdef TARGET_NR_recv
9261 case TARGET_NR_recv
:
9262 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
9265 #ifdef TARGET_NR_recvfrom
9266 case TARGET_NR_recvfrom
:
9267 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9270 #ifdef TARGET_NR_recvmsg
9271 case TARGET_NR_recvmsg
:
9272 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
9275 #ifdef TARGET_NR_send
9276 case TARGET_NR_send
:
9277 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
9280 #ifdef TARGET_NR_sendmsg
9281 case TARGET_NR_sendmsg
:
9282 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
9285 #ifdef TARGET_NR_sendmmsg
9286 case TARGET_NR_sendmmsg
:
9287 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
9289 case TARGET_NR_recvmmsg
:
9290 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
9293 #ifdef TARGET_NR_sendto
9294 case TARGET_NR_sendto
:
9295 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9298 #ifdef TARGET_NR_shutdown
9299 case TARGET_NR_shutdown
:
9300 ret
= get_errno(shutdown(arg1
, arg2
));
9303 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9304 case TARGET_NR_getrandom
:
9305 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
9309 ret
= get_errno(getrandom(p
, arg2
, arg3
));
9310 unlock_user(p
, arg1
, ret
);
9313 #ifdef TARGET_NR_socket
9314 case TARGET_NR_socket
:
9315 ret
= do_socket(arg1
, arg2
, arg3
);
9316 fd_trans_unregister(ret
);
9319 #ifdef TARGET_NR_socketpair
9320 case TARGET_NR_socketpair
:
9321 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
9324 #ifdef TARGET_NR_setsockopt
9325 case TARGET_NR_setsockopt
:
9326 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
9329 #if defined(TARGET_NR_syslog)
9330 case TARGET_NR_syslog
:
9335 case TARGET_SYSLOG_ACTION_CLOSE
: /* Close log */
9336 case TARGET_SYSLOG_ACTION_OPEN
: /* Open log */
9337 case TARGET_SYSLOG_ACTION_CLEAR
: /* Clear ring buffer */
9338 case TARGET_SYSLOG_ACTION_CONSOLE_OFF
: /* Disable logging */
9339 case TARGET_SYSLOG_ACTION_CONSOLE_ON
: /* Enable logging */
9340 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL
: /* Set messages level */
9341 case TARGET_SYSLOG_ACTION_SIZE_UNREAD
: /* Number of chars */
9342 case TARGET_SYSLOG_ACTION_SIZE_BUFFER
: /* Size of the buffer */
9344 ret
= get_errno(sys_syslog((int)arg1
, NULL
, (int)arg3
));
9347 case TARGET_SYSLOG_ACTION_READ
: /* Read from log */
9348 case TARGET_SYSLOG_ACTION_READ_CLEAR
: /* Read/clear msgs */
9349 case TARGET_SYSLOG_ACTION_READ_ALL
: /* Read last messages */
9351 ret
= -TARGET_EINVAL
;
9359 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9361 ret
= -TARGET_EFAULT
;
9364 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
9365 unlock_user(p
, arg2
, arg3
);
9375 case TARGET_NR_setitimer
:
9377 struct itimerval value
, ovalue
, *pvalue
;
9381 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
9382 || copy_from_user_timeval(&pvalue
->it_value
,
9383 arg2
+ sizeof(struct target_timeval
)))
9388 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
9389 if (!is_error(ret
) && arg3
) {
9390 if (copy_to_user_timeval(arg3
,
9391 &ovalue
.it_interval
)
9392 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
9398 case TARGET_NR_getitimer
:
9400 struct itimerval value
;
9402 ret
= get_errno(getitimer(arg1
, &value
));
9403 if (!is_error(ret
) && arg2
) {
9404 if (copy_to_user_timeval(arg2
,
9406 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
9412 #ifdef TARGET_NR_stat
9413 case TARGET_NR_stat
:
9414 if (!(p
= lock_user_string(arg1
)))
9416 ret
= get_errno(stat(path(p
), &st
));
9417 unlock_user(p
, arg1
, 0);
9420 #ifdef TARGET_NR_lstat
9421 case TARGET_NR_lstat
:
9422 if (!(p
= lock_user_string(arg1
)))
9424 ret
= get_errno(lstat(path(p
), &st
));
9425 unlock_user(p
, arg1
, 0);
9428 case TARGET_NR_fstat
:
9430 ret
= get_errno(fstat(arg1
, &st
));
9431 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9434 if (!is_error(ret
)) {
9435 struct target_stat
*target_st
;
9437 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
9439 memset(target_st
, 0, sizeof(*target_st
));
9440 __put_user(st
.st_dev
, &target_st
->st_dev
);
9441 __put_user(st
.st_ino
, &target_st
->st_ino
);
9442 __put_user(st
.st_mode
, &target_st
->st_mode
);
9443 __put_user(st
.st_uid
, &target_st
->st_uid
);
9444 __put_user(st
.st_gid
, &target_st
->st_gid
);
9445 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
9446 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
9447 __put_user(st
.st_size
, &target_st
->st_size
);
9448 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
9449 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
9450 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
9451 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
9452 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
9453 unlock_user_struct(target_st
, arg2
, 1);
9457 #ifdef TARGET_NR_olduname
9458 case TARGET_NR_olduname
:
9461 #ifdef TARGET_NR_iopl
9462 case TARGET_NR_iopl
:
9465 case TARGET_NR_vhangup
:
9466 ret
= get_errno(vhangup());
9468 #ifdef TARGET_NR_idle
9469 case TARGET_NR_idle
:
9472 #ifdef TARGET_NR_syscall
9473 case TARGET_NR_syscall
:
9474 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
9475 arg6
, arg7
, arg8
, 0);
9478 case TARGET_NR_wait4
:
9481 abi_long status_ptr
= arg2
;
9482 struct rusage rusage
, *rusage_ptr
;
9483 abi_ulong target_rusage
= arg4
;
9484 abi_long rusage_err
;
9486 rusage_ptr
= &rusage
;
9489 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
9490 if (!is_error(ret
)) {
9491 if (status_ptr
&& ret
) {
9492 status
= host_to_target_waitstatus(status
);
9493 if (put_user_s32(status
, status_ptr
))
9496 if (target_rusage
) {
9497 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
9505 #ifdef TARGET_NR_swapoff
9506 case TARGET_NR_swapoff
:
9507 if (!(p
= lock_user_string(arg1
)))
9509 ret
= get_errno(swapoff(p
));
9510 unlock_user(p
, arg1
, 0);
9513 case TARGET_NR_sysinfo
:
9515 struct target_sysinfo
*target_value
;
9516 struct sysinfo value
;
9517 ret
= get_errno(sysinfo(&value
));
9518 if (!is_error(ret
) && arg1
)
9520 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
9522 __put_user(value
.uptime
, &target_value
->uptime
);
9523 __put_user(value
.loads
[0], &target_value
->loads
[0]);
9524 __put_user(value
.loads
[1], &target_value
->loads
[1]);
9525 __put_user(value
.loads
[2], &target_value
->loads
[2]);
9526 __put_user(value
.totalram
, &target_value
->totalram
);
9527 __put_user(value
.freeram
, &target_value
->freeram
);
9528 __put_user(value
.sharedram
, &target_value
->sharedram
);
9529 __put_user(value
.bufferram
, &target_value
->bufferram
);
9530 __put_user(value
.totalswap
, &target_value
->totalswap
);
9531 __put_user(value
.freeswap
, &target_value
->freeswap
);
9532 __put_user(value
.procs
, &target_value
->procs
);
9533 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
9534 __put_user(value
.freehigh
, &target_value
->freehigh
);
9535 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
9536 unlock_user_struct(target_value
, arg1
, 1);
9540 #ifdef TARGET_NR_ipc
9542 ret
= do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9545 #ifdef TARGET_NR_semget
9546 case TARGET_NR_semget
:
9547 ret
= get_errno(semget(arg1
, arg2
, arg3
));
9550 #ifdef TARGET_NR_semop
9551 case TARGET_NR_semop
:
9552 ret
= do_semop(arg1
, arg2
, arg3
);
9555 #ifdef TARGET_NR_semctl
9556 case TARGET_NR_semctl
:
9557 ret
= do_semctl(arg1
, arg2
, arg3
, arg4
);
9560 #ifdef TARGET_NR_msgctl
9561 case TARGET_NR_msgctl
:
9562 ret
= do_msgctl(arg1
, arg2
, arg3
);
9565 #ifdef TARGET_NR_msgget
9566 case TARGET_NR_msgget
:
9567 ret
= get_errno(msgget(arg1
, arg2
));
9570 #ifdef TARGET_NR_msgrcv
9571 case TARGET_NR_msgrcv
:
9572 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
9575 #ifdef TARGET_NR_msgsnd
9576 case TARGET_NR_msgsnd
:
9577 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
9580 #ifdef TARGET_NR_shmget
9581 case TARGET_NR_shmget
:
9582 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
9585 #ifdef TARGET_NR_shmctl
9586 case TARGET_NR_shmctl
:
9587 ret
= do_shmctl(arg1
, arg2
, arg3
);
9590 #ifdef TARGET_NR_shmat
9591 case TARGET_NR_shmat
:
9592 ret
= do_shmat(cpu_env
, arg1
, arg2
, arg3
);
9595 #ifdef TARGET_NR_shmdt
9596 case TARGET_NR_shmdt
:
9597 ret
= do_shmdt(arg1
);
9600 case TARGET_NR_fsync
:
9601 ret
= get_errno(fsync(arg1
));
9603 case TARGET_NR_clone
:
9604 /* Linux manages to have three different orderings for its
9605 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9606 * match the kernel's CONFIG_CLONE_* settings.
9607 * Microblaze is further special in that it uses a sixth
9608 * implicit argument to clone for the TLS pointer.
9610 #if defined(TARGET_MICROBLAZE)
9611 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
9612 #elif defined(TARGET_CLONE_BACKWARDS)
9613 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
9614 #elif defined(TARGET_CLONE_BACKWARDS2)
9615 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
9617 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
9620 #ifdef __NR_exit_group
9621 /* new thread calls */
9622 case TARGET_NR_exit_group
:
9626 gdb_exit(cpu_env
, arg1
);
9627 ret
= get_errno(exit_group(arg1
));
9630 case TARGET_NR_setdomainname
:
9631 if (!(p
= lock_user_string(arg1
)))
9633 ret
= get_errno(setdomainname(p
, arg2
));
9634 unlock_user(p
, arg1
, 0);
9636 case TARGET_NR_uname
:
9637 /* no need to transcode because we use the linux syscall */
9639 struct new_utsname
* buf
;
9641 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
9643 ret
= get_errno(sys_uname(buf
));
9644 if (!is_error(ret
)) {
9645 /* Overwrite the native machine name with whatever is being
9647 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
9648 /* Allow the user to override the reported release. */
9649 if (qemu_uname_release
&& *qemu_uname_release
) {
9650 g_strlcpy(buf
->release
, qemu_uname_release
,
9651 sizeof(buf
->release
));
9654 unlock_user_struct(buf
, arg1
, 1);
9658 case TARGET_NR_modify_ldt
:
9659 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
9661 #if !defined(TARGET_X86_64)
9662 case TARGET_NR_vm86old
:
9664 case TARGET_NR_vm86
:
9665 ret
= do_vm86(cpu_env
, arg1
, arg2
);
9669 case TARGET_NR_adjtimex
:
9671 struct timex host_buf
;
9673 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
9676 ret
= get_errno(adjtimex(&host_buf
));
9677 if (!is_error(ret
)) {
9678 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
9684 #ifdef TARGET_NR_create_module
9685 case TARGET_NR_create_module
:
9687 case TARGET_NR_init_module
:
9688 case TARGET_NR_delete_module
:
9689 #ifdef TARGET_NR_get_kernel_syms
9690 case TARGET_NR_get_kernel_syms
:
9693 case TARGET_NR_quotactl
:
9695 case TARGET_NR_getpgid
:
9696 ret
= get_errno(getpgid(arg1
));
9698 case TARGET_NR_fchdir
:
9699 ret
= get_errno(fchdir(arg1
));
9701 #ifdef TARGET_NR_bdflush /* not on x86_64 */
9702 case TARGET_NR_bdflush
:
9705 #ifdef TARGET_NR_sysfs
9706 case TARGET_NR_sysfs
:
9709 case TARGET_NR_personality
:
9710 ret
= get_errno(personality(arg1
));
9712 #ifdef TARGET_NR_afs_syscall
9713 case TARGET_NR_afs_syscall
:
9716 #ifdef TARGET_NR__llseek /* Not on alpha */
9717 case TARGET_NR__llseek
:
9720 #if !defined(__NR_llseek)
9721 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
9723 ret
= get_errno(res
);
9728 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
9730 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
9736 #ifdef TARGET_NR_getdents
9737 case TARGET_NR_getdents
:
9738 #ifdef __NR_getdents
9739 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9741 struct target_dirent
*target_dirp
;
9742 struct linux_dirent
*dirp
;
9743 abi_long count
= arg3
;
9745 dirp
= g_try_malloc(count
);
9747 ret
= -TARGET_ENOMEM
;
9751 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9752 if (!is_error(ret
)) {
9753 struct linux_dirent
*de
;
9754 struct target_dirent
*tde
;
9756 int reclen
, treclen
;
9757 int count1
, tnamelen
;
9761 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9765 reclen
= de
->d_reclen
;
9766 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
9767 assert(tnamelen
>= 0);
9768 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
9769 assert(count1
+ treclen
<= count
);
9770 tde
->d_reclen
= tswap16(treclen
);
9771 tde
->d_ino
= tswapal(de
->d_ino
);
9772 tde
->d_off
= tswapal(de
->d_off
);
9773 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
9774 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9776 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9780 unlock_user(target_dirp
, arg2
, ret
);
9786 struct linux_dirent
*dirp
;
9787 abi_long count
= arg3
;
9789 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9791 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9792 if (!is_error(ret
)) {
9793 struct linux_dirent
*de
;
9798 reclen
= de
->d_reclen
;
9801 de
->d_reclen
= tswap16(reclen
);
9802 tswapls(&de
->d_ino
);
9803 tswapls(&de
->d_off
);
9804 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9808 unlock_user(dirp
, arg2
, ret
);
9812 /* Implement getdents in terms of getdents64 */
9814 struct linux_dirent64
*dirp
;
9815 abi_long count
= arg3
;
9817 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
9821 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9822 if (!is_error(ret
)) {
9823 /* Convert the dirent64 structs to target dirent. We do this
9824 * in-place, since we can guarantee that a target_dirent is no
9825 * larger than a dirent64; however this means we have to be
9826 * careful to read everything before writing in the new format.
9828 struct linux_dirent64
*de
;
9829 struct target_dirent
*tde
;
9834 tde
= (struct target_dirent
*)dirp
;
9836 int namelen
, treclen
;
9837 int reclen
= de
->d_reclen
;
9838 uint64_t ino
= de
->d_ino
;
9839 int64_t off
= de
->d_off
;
9840 uint8_t type
= de
->d_type
;
9842 namelen
= strlen(de
->d_name
);
9843 treclen
= offsetof(struct target_dirent
, d_name
)
9845 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
9847 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
9848 tde
->d_ino
= tswapal(ino
);
9849 tde
->d_off
= tswapal(off
);
9850 tde
->d_reclen
= tswap16(treclen
);
9851 /* The target_dirent type is in what was formerly a padding
9852 * byte at the end of the structure:
9854 *(((char *)tde
) + treclen
- 1) = type
;
9856 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9857 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9863 unlock_user(dirp
, arg2
, ret
);
9867 #endif /* TARGET_NR_getdents */
9868 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9869 case TARGET_NR_getdents64
:
9871 struct linux_dirent64
*dirp
;
9872 abi_long count
= arg3
;
9873 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9875 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9876 if (!is_error(ret
)) {
9877 struct linux_dirent64
*de
;
9882 reclen
= de
->d_reclen
;
9885 de
->d_reclen
= tswap16(reclen
);
9886 tswap64s((uint64_t *)&de
->d_ino
);
9887 tswap64s((uint64_t *)&de
->d_off
);
9888 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9892 unlock_user(dirp
, arg2
, ret
);
9895 #endif /* TARGET_NR_getdents64 */
9896 #if defined(TARGET_NR__newselect)
9897 case TARGET_NR__newselect
:
9898 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9901 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9902 # ifdef TARGET_NR_poll
9903 case TARGET_NR_poll
:
9905 # ifdef TARGET_NR_ppoll
9906 case TARGET_NR_ppoll
:
9909 struct target_pollfd
*target_pfd
;
9910 unsigned int nfds
= arg2
;
9917 if (nfds
> (INT_MAX
/ sizeof(struct target_pollfd
))) {
9918 ret
= -TARGET_EINVAL
;
9922 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
9923 sizeof(struct target_pollfd
) * nfds
, 1);
9928 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
9929 for (i
= 0; i
< nfds
; i
++) {
9930 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
9931 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
9936 # ifdef TARGET_NR_ppoll
9937 case TARGET_NR_ppoll
:
9939 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
9940 target_sigset_t
*target_set
;
9941 sigset_t _set
, *set
= &_set
;
9944 if (target_to_host_timespec(timeout_ts
, arg3
)) {
9945 unlock_user(target_pfd
, arg1
, 0);
9953 if (arg5
!= sizeof(target_sigset_t
)) {
9954 unlock_user(target_pfd
, arg1
, 0);
9955 ret
= -TARGET_EINVAL
;
9959 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
9961 unlock_user(target_pfd
, arg1
, 0);
9964 target_to_host_sigset(set
, target_set
);
9969 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
9970 set
, SIGSET_T_SIZE
));
9972 if (!is_error(ret
) && arg3
) {
9973 host_to_target_timespec(arg3
, timeout_ts
);
9976 unlock_user(target_set
, arg4
, 0);
9981 # ifdef TARGET_NR_poll
9982 case TARGET_NR_poll
:
9984 struct timespec ts
, *pts
;
9987 /* Convert ms to secs, ns */
9988 ts
.tv_sec
= arg3
/ 1000;
9989 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
9992 /* -ve poll() timeout means "infinite" */
9995 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
10000 g_assert_not_reached();
10003 if (!is_error(ret
)) {
10004 for(i
= 0; i
< nfds
; i
++) {
10005 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
10008 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
10012 case TARGET_NR_flock
:
10013 /* NOTE: the flock constant seems to be the same for every
10015 ret
= get_errno(safe_flock(arg1
, arg2
));
10017 case TARGET_NR_readv
:
10019 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10021 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
10022 unlock_iovec(vec
, arg2
, arg3
, 1);
10024 ret
= -host_to_target_errno(errno
);
10028 case TARGET_NR_writev
:
10030 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10032 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
10033 unlock_iovec(vec
, arg2
, arg3
, 0);
10035 ret
= -host_to_target_errno(errno
);
10039 case TARGET_NR_getsid
:
10040 ret
= get_errno(getsid(arg1
));
10042 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10043 case TARGET_NR_fdatasync
:
10044 ret
= get_errno(fdatasync(arg1
));
10047 #ifdef TARGET_NR__sysctl
10048 case TARGET_NR__sysctl
:
10049 /* We don't implement this, but ENOTDIR is always a safe
10051 ret
= -TARGET_ENOTDIR
;
10054 case TARGET_NR_sched_getaffinity
:
10056 unsigned int mask_size
;
10057 unsigned long *mask
;
10060 * sched_getaffinity needs multiples of ulong, so need to take
10061 * care of mismatches between target ulong and host ulong sizes.
10063 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10064 ret
= -TARGET_EINVAL
;
10067 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10069 mask
= alloca(mask_size
);
10070 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
10072 if (!is_error(ret
)) {
10074 /* More data returned than the caller's buffer will fit.
10075 * This only happens if sizeof(abi_long) < sizeof(long)
10076 * and the caller passed us a buffer holding an odd number
10077 * of abi_longs. If the host kernel is actually using the
10078 * extra 4 bytes then fail EINVAL; otherwise we can just
10079 * ignore them and only copy the interesting part.
10081 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
10082 if (numcpus
> arg2
* 8) {
10083 ret
= -TARGET_EINVAL
;
10089 if (copy_to_user(arg3
, mask
, ret
)) {
10095 case TARGET_NR_sched_setaffinity
:
10097 unsigned int mask_size
;
10098 unsigned long *mask
;
10101 * sched_setaffinity needs multiples of ulong, so need to take
10102 * care of mismatches between target ulong and host ulong sizes.
10104 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10105 ret
= -TARGET_EINVAL
;
10108 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10110 mask
= alloca(mask_size
);
10111 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
10114 memcpy(mask
, p
, arg2
);
10115 unlock_user_struct(p
, arg2
, 0);
10117 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
10120 case TARGET_NR_sched_setparam
:
10122 struct sched_param
*target_schp
;
10123 struct sched_param schp
;
10126 return -TARGET_EINVAL
;
10128 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
10130 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10131 unlock_user_struct(target_schp
, arg2
, 0);
10132 ret
= get_errno(sched_setparam(arg1
, &schp
));
10135 case TARGET_NR_sched_getparam
:
10137 struct sched_param
*target_schp
;
10138 struct sched_param schp
;
10141 return -TARGET_EINVAL
;
10143 ret
= get_errno(sched_getparam(arg1
, &schp
));
10144 if (!is_error(ret
)) {
10145 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
10147 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
10148 unlock_user_struct(target_schp
, arg2
, 1);
10152 case TARGET_NR_sched_setscheduler
:
10154 struct sched_param
*target_schp
;
10155 struct sched_param schp
;
10157 return -TARGET_EINVAL
;
10159 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
10161 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10162 unlock_user_struct(target_schp
, arg3
, 0);
10163 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
10166 case TARGET_NR_sched_getscheduler
:
10167 ret
= get_errno(sched_getscheduler(arg1
));
10169 case TARGET_NR_sched_yield
:
10170 ret
= get_errno(sched_yield());
10172 case TARGET_NR_sched_get_priority_max
:
10173 ret
= get_errno(sched_get_priority_max(arg1
));
10175 case TARGET_NR_sched_get_priority_min
:
10176 ret
= get_errno(sched_get_priority_min(arg1
));
10178 case TARGET_NR_sched_rr_get_interval
:
10180 struct timespec ts
;
10181 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10182 if (!is_error(ret
)) {
10183 ret
= host_to_target_timespec(arg2
, &ts
);
10187 case TARGET_NR_nanosleep
:
10189 struct timespec req
, rem
;
10190 target_to_host_timespec(&req
, arg1
);
10191 ret
= get_errno(safe_nanosleep(&req
, &rem
));
10192 if (is_error(ret
) && arg2
) {
10193 host_to_target_timespec(arg2
, &rem
);
10197 #ifdef TARGET_NR_query_module
10198 case TARGET_NR_query_module
:
10199 goto unimplemented
;
10201 #ifdef TARGET_NR_nfsservctl
10202 case TARGET_NR_nfsservctl
:
10203 goto unimplemented
;
10205 case TARGET_NR_prctl
:
10207 case PR_GET_PDEATHSIG
:
10210 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
10211 if (!is_error(ret
) && arg2
10212 && put_user_ual(deathsig
, arg2
)) {
10220 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
10224 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10225 arg3
, arg4
, arg5
));
10226 unlock_user(name
, arg2
, 16);
10231 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
10235 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10236 arg3
, arg4
, arg5
));
10237 unlock_user(name
, arg2
, 0);
10242 /* Most prctl options have no pointer arguments */
10243 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
10247 #ifdef TARGET_NR_arch_prctl
10248 case TARGET_NR_arch_prctl
:
10249 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10250 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
10253 goto unimplemented
;
10256 #ifdef TARGET_NR_pread64
10257 case TARGET_NR_pread64
:
10258 if (regpairs_aligned(cpu_env
)) {
10262 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
10264 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10265 unlock_user(p
, arg2
, ret
);
10267 case TARGET_NR_pwrite64
:
10268 if (regpairs_aligned(cpu_env
)) {
10272 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
10274 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10275 unlock_user(p
, arg2
, 0);
10278 case TARGET_NR_getcwd
:
10279 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
10281 ret
= get_errno(sys_getcwd1(p
, arg2
));
10282 unlock_user(p
, arg1
, ret
);
10284 case TARGET_NR_capget
:
10285 case TARGET_NR_capset
:
10287 struct target_user_cap_header
*target_header
;
10288 struct target_user_cap_data
*target_data
= NULL
;
10289 struct __user_cap_header_struct header
;
10290 struct __user_cap_data_struct data
[2];
10291 struct __user_cap_data_struct
*dataptr
= NULL
;
10292 int i
, target_datalen
;
10293 int data_items
= 1;
10295 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
10298 header
.version
= tswap32(target_header
->version
);
10299 header
.pid
= tswap32(target_header
->pid
);
10301 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
10302 /* Version 2 and up takes pointer to two user_data structs */
10306 target_datalen
= sizeof(*target_data
) * data_items
;
10309 if (num
== TARGET_NR_capget
) {
10310 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
10312 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
10314 if (!target_data
) {
10315 unlock_user_struct(target_header
, arg1
, 0);
10319 if (num
== TARGET_NR_capset
) {
10320 for (i
= 0; i
< data_items
; i
++) {
10321 data
[i
].effective
= tswap32(target_data
[i
].effective
);
10322 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
10323 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
10330 if (num
== TARGET_NR_capget
) {
10331 ret
= get_errno(capget(&header
, dataptr
));
10333 ret
= get_errno(capset(&header
, dataptr
));
10336 /* The kernel always updates version for both capget and capset */
10337 target_header
->version
= tswap32(header
.version
);
10338 unlock_user_struct(target_header
, arg1
, 1);
10341 if (num
== TARGET_NR_capget
) {
10342 for (i
= 0; i
< data_items
; i
++) {
10343 target_data
[i
].effective
= tswap32(data
[i
].effective
);
10344 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
10345 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
10347 unlock_user(target_data
, arg2
, target_datalen
);
10349 unlock_user(target_data
, arg2
, 0);
10354 case TARGET_NR_sigaltstack
:
10355 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
10358 #ifdef CONFIG_SENDFILE
10359 case TARGET_NR_sendfile
:
10361 off_t
*offp
= NULL
;
10364 ret
= get_user_sal(off
, arg3
);
10365 if (is_error(ret
)) {
10370 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10371 if (!is_error(ret
) && arg3
) {
10372 abi_long ret2
= put_user_sal(off
, arg3
);
10373 if (is_error(ret2
)) {
10379 #ifdef TARGET_NR_sendfile64
10380 case TARGET_NR_sendfile64
:
10382 off_t
*offp
= NULL
;
10385 ret
= get_user_s64(off
, arg3
);
10386 if (is_error(ret
)) {
10391 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10392 if (!is_error(ret
) && arg3
) {
10393 abi_long ret2
= put_user_s64(off
, arg3
);
10394 if (is_error(ret2
)) {
10402 case TARGET_NR_sendfile
:
10403 #ifdef TARGET_NR_sendfile64
10404 case TARGET_NR_sendfile64
:
10406 goto unimplemented
;
10409 #ifdef TARGET_NR_getpmsg
10410 case TARGET_NR_getpmsg
:
10411 goto unimplemented
;
10413 #ifdef TARGET_NR_putpmsg
10414 case TARGET_NR_putpmsg
:
10415 goto unimplemented
;
10417 #ifdef TARGET_NR_vfork
10418 case TARGET_NR_vfork
:
10419 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
10423 #ifdef TARGET_NR_ugetrlimit
10424 case TARGET_NR_ugetrlimit
:
10426 struct rlimit rlim
;
10427 int resource
= target_to_host_resource(arg1
);
10428 ret
= get_errno(getrlimit(resource
, &rlim
));
10429 if (!is_error(ret
)) {
10430 struct target_rlimit
*target_rlim
;
10431 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
10433 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
10434 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
10435 unlock_user_struct(target_rlim
, arg2
, 1);
10440 #ifdef TARGET_NR_truncate64
10441 case TARGET_NR_truncate64
:
10442 if (!(p
= lock_user_string(arg1
)))
10444 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
10445 unlock_user(p
, arg1
, 0);
10448 #ifdef TARGET_NR_ftruncate64
10449 case TARGET_NR_ftruncate64
:
10450 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
10453 #ifdef TARGET_NR_stat64
10454 case TARGET_NR_stat64
:
10455 if (!(p
= lock_user_string(arg1
)))
10457 ret
= get_errno(stat(path(p
), &st
));
10458 unlock_user(p
, arg1
, 0);
10459 if (!is_error(ret
))
10460 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10463 #ifdef TARGET_NR_lstat64
10464 case TARGET_NR_lstat64
:
10465 if (!(p
= lock_user_string(arg1
)))
10467 ret
= get_errno(lstat(path(p
), &st
));
10468 unlock_user(p
, arg1
, 0);
10469 if (!is_error(ret
))
10470 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10473 #ifdef TARGET_NR_fstat64
10474 case TARGET_NR_fstat64
:
10475 ret
= get_errno(fstat(arg1
, &st
));
10476 if (!is_error(ret
))
10477 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10480 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10481 #ifdef TARGET_NR_fstatat64
10482 case TARGET_NR_fstatat64
:
10484 #ifdef TARGET_NR_newfstatat
10485 case TARGET_NR_newfstatat
:
10487 if (!(p
= lock_user_string(arg2
)))
10489 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
10490 if (!is_error(ret
))
10491 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
10494 #ifdef TARGET_NR_lchown
10495 case TARGET_NR_lchown
:
10496 if (!(p
= lock_user_string(arg1
)))
10498 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10499 unlock_user(p
, arg1
, 0);
10502 #ifdef TARGET_NR_getuid
10503 case TARGET_NR_getuid
:
10504 ret
= get_errno(high2lowuid(getuid()));
10507 #ifdef TARGET_NR_getgid
10508 case TARGET_NR_getgid
:
10509 ret
= get_errno(high2lowgid(getgid()));
10512 #ifdef TARGET_NR_geteuid
10513 case TARGET_NR_geteuid
:
10514 ret
= get_errno(high2lowuid(geteuid()));
10517 #ifdef TARGET_NR_getegid
10518 case TARGET_NR_getegid
:
10519 ret
= get_errno(high2lowgid(getegid()));
10522 case TARGET_NR_setreuid
:
10523 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
10525 case TARGET_NR_setregid
:
10526 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
10528 case TARGET_NR_getgroups
:
10530 int gidsetsize
= arg1
;
10531 target_id
*target_grouplist
;
10535 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10536 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10537 if (gidsetsize
== 0)
10539 if (!is_error(ret
)) {
10540 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
10541 if (!target_grouplist
)
10543 for(i
= 0;i
< ret
; i
++)
10544 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
10545 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
10549 case TARGET_NR_setgroups
:
10551 int gidsetsize
= arg1
;
10552 target_id
*target_grouplist
;
10553 gid_t
*grouplist
= NULL
;
10556 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10557 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
10558 if (!target_grouplist
) {
10559 ret
= -TARGET_EFAULT
;
10562 for (i
= 0; i
< gidsetsize
; i
++) {
10563 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
10565 unlock_user(target_grouplist
, arg2
, 0);
10567 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
10570 case TARGET_NR_fchown
:
10571 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
10573 #if defined(TARGET_NR_fchownat)
10574 case TARGET_NR_fchownat
:
10575 if (!(p
= lock_user_string(arg2
)))
10577 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
10578 low2highgid(arg4
), arg5
));
10579 unlock_user(p
, arg2
, 0);
10582 #ifdef TARGET_NR_setresuid
10583 case TARGET_NR_setresuid
:
10584 ret
= get_errno(sys_setresuid(low2highuid(arg1
),
10586 low2highuid(arg3
)));
10589 #ifdef TARGET_NR_getresuid
10590 case TARGET_NR_getresuid
:
10592 uid_t ruid
, euid
, suid
;
10593 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10594 if (!is_error(ret
)) {
10595 if (put_user_id(high2lowuid(ruid
), arg1
)
10596 || put_user_id(high2lowuid(euid
), arg2
)
10597 || put_user_id(high2lowuid(suid
), arg3
))
10603 #ifdef TARGET_NR_getresgid
10604 case TARGET_NR_setresgid
:
10605 ret
= get_errno(sys_setresgid(low2highgid(arg1
),
10607 low2highgid(arg3
)));
10610 #ifdef TARGET_NR_getresgid
10611 case TARGET_NR_getresgid
:
10613 gid_t rgid
, egid
, sgid
;
10614 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10615 if (!is_error(ret
)) {
10616 if (put_user_id(high2lowgid(rgid
), arg1
)
10617 || put_user_id(high2lowgid(egid
), arg2
)
10618 || put_user_id(high2lowgid(sgid
), arg3
))
10624 #ifdef TARGET_NR_chown
10625 case TARGET_NR_chown
:
10626 if (!(p
= lock_user_string(arg1
)))
10628 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10629 unlock_user(p
, arg1
, 0);
10632 case TARGET_NR_setuid
:
10633 ret
= get_errno(sys_setuid(low2highuid(arg1
)));
10635 case TARGET_NR_setgid
:
10636 ret
= get_errno(sys_setgid(low2highgid(arg1
)));
10638 case TARGET_NR_setfsuid
:
10639 ret
= get_errno(setfsuid(arg1
));
10641 case TARGET_NR_setfsgid
:
10642 ret
= get_errno(setfsgid(arg1
));
10645 #ifdef TARGET_NR_lchown32
10646 case TARGET_NR_lchown32
:
10647 if (!(p
= lock_user_string(arg1
)))
10649 ret
= get_errno(lchown(p
, arg2
, arg3
));
10650 unlock_user(p
, arg1
, 0);
10653 #ifdef TARGET_NR_getuid32
10654 case TARGET_NR_getuid32
:
10655 ret
= get_errno(getuid());
10659 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10660 /* Alpha specific */
10661 case TARGET_NR_getxuid
:
10665 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
10667 ret
= get_errno(getuid());
10670 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10671 /* Alpha specific */
10672 case TARGET_NR_getxgid
:
10676 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
10678 ret
= get_errno(getgid());
10681 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10682 /* Alpha specific */
10683 case TARGET_NR_osf_getsysinfo
:
10684 ret
= -TARGET_EOPNOTSUPP
;
10686 case TARGET_GSI_IEEE_FP_CONTROL
:
10688 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
10690 /* Copied from linux ieee_fpcr_to_swcr. */
10691 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
10692 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
10693 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
10694 | SWCR_TRAP_ENABLE_DZE
10695 | SWCR_TRAP_ENABLE_OVF
);
10696 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
10697 | SWCR_TRAP_ENABLE_INE
);
10698 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
10699 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
10701 if (put_user_u64 (swcr
, arg2
))
10707 /* case GSI_IEEE_STATE_AT_SIGNAL:
10708 -- Not implemented in linux kernel.
10710 -- Retrieves current unaligned access state; not much used.
10711 case GSI_PROC_TYPE:
10712 -- Retrieves implver information; surely not used.
10713 case GSI_GET_HWRPB:
10714 -- Grabs a copy of the HWRPB; surely not used.
10719 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10720 /* Alpha specific */
10721 case TARGET_NR_osf_setsysinfo
:
10722 ret
= -TARGET_EOPNOTSUPP
;
10724 case TARGET_SSI_IEEE_FP_CONTROL
:
10726 uint64_t swcr
, fpcr
, orig_fpcr
;
10728 if (get_user_u64 (swcr
, arg2
)) {
10731 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10732 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
10734 /* Copied from linux ieee_swcr_to_fpcr. */
10735 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
10736 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
10737 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
10738 | SWCR_TRAP_ENABLE_DZE
10739 | SWCR_TRAP_ENABLE_OVF
)) << 48;
10740 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
10741 | SWCR_TRAP_ENABLE_INE
)) << 57;
10742 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
10743 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
10745 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10750 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
10752 uint64_t exc
, fpcr
, orig_fpcr
;
10755 if (get_user_u64(exc
, arg2
)) {
10759 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10761 /* We only add to the exception status here. */
10762 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
10764 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10767 /* Old exceptions are not signaled. */
10768 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
10770 /* If any exceptions set by this call,
10771 and are unmasked, send a signal. */
10773 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
10774 si_code
= TARGET_FPE_FLTRES
;
10776 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
10777 si_code
= TARGET_FPE_FLTUND
;
10779 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
10780 si_code
= TARGET_FPE_FLTOVF
;
10782 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
10783 si_code
= TARGET_FPE_FLTDIV
;
10785 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
10786 si_code
= TARGET_FPE_FLTINV
;
10788 if (si_code
!= 0) {
10789 target_siginfo_t info
;
10790 info
.si_signo
= SIGFPE
;
10792 info
.si_code
= si_code
;
10793 info
._sifields
._sigfault
._addr
10794 = ((CPUArchState
*)cpu_env
)->pc
;
10795 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
10796 QEMU_SI_FAULT
, &info
);
10801 /* case SSI_NVPAIRS:
10802 -- Used with SSIN_UACPROC to enable unaligned accesses.
10803 case SSI_IEEE_STATE_AT_SIGNAL:
10804 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10805 -- Not implemented in linux kernel
10810 #ifdef TARGET_NR_osf_sigprocmask
10811 /* Alpha specific. */
10812 case TARGET_NR_osf_sigprocmask
:
10816 sigset_t set
, oldset
;
10819 case TARGET_SIG_BLOCK
:
10822 case TARGET_SIG_UNBLOCK
:
10825 case TARGET_SIG_SETMASK
:
10829 ret
= -TARGET_EINVAL
;
10833 target_to_host_old_sigset(&set
, &mask
);
10834 ret
= do_sigprocmask(how
, &set
, &oldset
);
10836 host_to_target_old_sigset(&mask
, &oldset
);
10843 #ifdef TARGET_NR_getgid32
10844 case TARGET_NR_getgid32
:
10845 ret
= get_errno(getgid());
10848 #ifdef TARGET_NR_geteuid32
10849 case TARGET_NR_geteuid32
:
10850 ret
= get_errno(geteuid());
10853 #ifdef TARGET_NR_getegid32
10854 case TARGET_NR_getegid32
:
10855 ret
= get_errno(getegid());
10858 #ifdef TARGET_NR_setreuid32
10859 case TARGET_NR_setreuid32
:
10860 ret
= get_errno(setreuid(arg1
, arg2
));
10863 #ifdef TARGET_NR_setregid32
10864 case TARGET_NR_setregid32
:
10865 ret
= get_errno(setregid(arg1
, arg2
));
10868 #ifdef TARGET_NR_getgroups32
10869 case TARGET_NR_getgroups32
:
10871 int gidsetsize
= arg1
;
10872 uint32_t *target_grouplist
;
10876 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10877 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10878 if (gidsetsize
== 0)
10880 if (!is_error(ret
)) {
10881 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
10882 if (!target_grouplist
) {
10883 ret
= -TARGET_EFAULT
;
10886 for(i
= 0;i
< ret
; i
++)
10887 target_grouplist
[i
] = tswap32(grouplist
[i
]);
10888 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
10893 #ifdef TARGET_NR_setgroups32
10894 case TARGET_NR_setgroups32
:
10896 int gidsetsize
= arg1
;
10897 uint32_t *target_grouplist
;
10901 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10902 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
10903 if (!target_grouplist
) {
10904 ret
= -TARGET_EFAULT
;
10907 for(i
= 0;i
< gidsetsize
; i
++)
10908 grouplist
[i
] = tswap32(target_grouplist
[i
]);
10909 unlock_user(target_grouplist
, arg2
, 0);
10910 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
10914 #ifdef TARGET_NR_fchown32
10915 case TARGET_NR_fchown32
:
10916 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
10919 #ifdef TARGET_NR_setresuid32
10920 case TARGET_NR_setresuid32
:
10921 ret
= get_errno(sys_setresuid(arg1
, arg2
, arg3
));
10924 #ifdef TARGET_NR_getresuid32
10925 case TARGET_NR_getresuid32
:
10927 uid_t ruid
, euid
, suid
;
10928 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10929 if (!is_error(ret
)) {
10930 if (put_user_u32(ruid
, arg1
)
10931 || put_user_u32(euid
, arg2
)
10932 || put_user_u32(suid
, arg3
))
10938 #ifdef TARGET_NR_setresgid32
10939 case TARGET_NR_setresgid32
:
10940 ret
= get_errno(sys_setresgid(arg1
, arg2
, arg3
));
10943 #ifdef TARGET_NR_getresgid32
10944 case TARGET_NR_getresgid32
:
10946 gid_t rgid
, egid
, sgid
;
10947 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10948 if (!is_error(ret
)) {
10949 if (put_user_u32(rgid
, arg1
)
10950 || put_user_u32(egid
, arg2
)
10951 || put_user_u32(sgid
, arg3
))
10957 #ifdef TARGET_NR_chown32
10958 case TARGET_NR_chown32
:
10959 if (!(p
= lock_user_string(arg1
)))
10961 ret
= get_errno(chown(p
, arg2
, arg3
));
10962 unlock_user(p
, arg1
, 0);
10965 #ifdef TARGET_NR_setuid32
10966 case TARGET_NR_setuid32
:
10967 ret
= get_errno(sys_setuid(arg1
));
10970 #ifdef TARGET_NR_setgid32
10971 case TARGET_NR_setgid32
:
10972 ret
= get_errno(sys_setgid(arg1
));
10975 #ifdef TARGET_NR_setfsuid32
10976 case TARGET_NR_setfsuid32
:
10977 ret
= get_errno(setfsuid(arg1
));
10980 #ifdef TARGET_NR_setfsgid32
10981 case TARGET_NR_setfsgid32
:
10982 ret
= get_errno(setfsgid(arg1
));
10986 case TARGET_NR_pivot_root
:
10987 goto unimplemented
;
10988 #ifdef TARGET_NR_mincore
10989 case TARGET_NR_mincore
:
10992 ret
= -TARGET_EFAULT
;
10993 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
10995 if (!(p
= lock_user_string(arg3
)))
10997 ret
= get_errno(mincore(a
, arg2
, p
));
10998 unlock_user(p
, arg3
, ret
);
11000 unlock_user(a
, arg1
, 0);
11004 #ifdef TARGET_NR_arm_fadvise64_64
11005 case TARGET_NR_arm_fadvise64_64
:
11006 /* arm_fadvise64_64 looks like fadvise64_64 but
11007 * with different argument order: fd, advice, offset, len
11008 * rather than the usual fd, offset, len, advice.
11009 * Note that offset and len are both 64-bit so appear as
11010 * pairs of 32-bit registers.
11012 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
11013 target_offset64(arg5
, arg6
), arg2
);
11014 ret
= -host_to_target_errno(ret
);
11018 #if TARGET_ABI_BITS == 32
11020 #ifdef TARGET_NR_fadvise64_64
11021 case TARGET_NR_fadvise64_64
:
11022 /* 6 args: fd, offset (high, low), len (high, low), advice */
11023 if (regpairs_aligned(cpu_env
)) {
11024 /* offset is in (3,4), len in (5,6) and advice in 7 */
11031 ret
= -host_to_target_errno(posix_fadvise(arg1
,
11032 target_offset64(arg2
, arg3
),
11033 target_offset64(arg4
, arg5
),
11038 #ifdef TARGET_NR_fadvise64
11039 case TARGET_NR_fadvise64
:
11040 /* 5 args: fd, offset (high, low), len, advice */
11041 if (regpairs_aligned(cpu_env
)) {
11042 /* offset is in (3,4), len in 5 and advice in 6 */
11048 ret
= -host_to_target_errno(posix_fadvise(arg1
,
11049 target_offset64(arg2
, arg3
),
11054 #else /* not a 32-bit ABI */
11055 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11056 #ifdef TARGET_NR_fadvise64_64
11057 case TARGET_NR_fadvise64_64
:
11059 #ifdef TARGET_NR_fadvise64
11060 case TARGET_NR_fadvise64
:
11062 #ifdef TARGET_S390X
11064 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
11065 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
11066 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
11067 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
11071 ret
= -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
11074 #endif /* end of 64-bit ABI fadvise handling */
11076 #ifdef TARGET_NR_madvise
11077 case TARGET_NR_madvise
:
11078 /* A straight passthrough may not be safe because qemu sometimes
11079 turns private file-backed mappings into anonymous mappings.
11080 This will break MADV_DONTNEED.
11081 This is a hint, so ignoring and returning success is ok. */
11082 ret
= get_errno(0);
11085 #if TARGET_ABI_BITS == 32
11086 case TARGET_NR_fcntl64
:
11090 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
11091 to_flock64_fn
*copyto
= copy_to_user_flock64
;
11094 if (((CPUARMState
*)cpu_env
)->eabi
) {
11095 copyfrom
= copy_from_user_eabi_flock64
;
11096 copyto
= copy_to_user_eabi_flock64
;
11100 cmd
= target_to_host_fcntl_cmd(arg2
);
11101 if (cmd
== -TARGET_EINVAL
) {
11107 case TARGET_F_GETLK64
:
11108 ret
= copyfrom(&fl
, arg3
);
11112 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
11114 ret
= copyto(arg3
, &fl
);
11118 case TARGET_F_SETLK64
:
11119 case TARGET_F_SETLKW64
:
11120 ret
= copyfrom(&fl
, arg3
);
11124 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11127 ret
= do_fcntl(arg1
, arg2
, arg3
);
11133 #ifdef TARGET_NR_cacheflush
11134 case TARGET_NR_cacheflush
:
11135 /* self-modifying code is handled automatically, so nothing needed */
11139 #ifdef TARGET_NR_security
11140 case TARGET_NR_security
:
11141 goto unimplemented
;
11143 #ifdef TARGET_NR_getpagesize
11144 case TARGET_NR_getpagesize
:
11145 ret
= TARGET_PAGE_SIZE
;
11148 case TARGET_NR_gettid
:
11149 ret
= get_errno(gettid());
11151 #ifdef TARGET_NR_readahead
11152 case TARGET_NR_readahead
:
11153 #if TARGET_ABI_BITS == 32
11154 if (regpairs_aligned(cpu_env
)) {
11159 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
11161 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
11166 #ifdef TARGET_NR_setxattr
11167 case TARGET_NR_listxattr
:
11168 case TARGET_NR_llistxattr
:
11172 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11174 ret
= -TARGET_EFAULT
;
11178 p
= lock_user_string(arg1
);
11180 if (num
== TARGET_NR_listxattr
) {
11181 ret
= get_errno(listxattr(p
, b
, arg3
));
11183 ret
= get_errno(llistxattr(p
, b
, arg3
));
11186 ret
= -TARGET_EFAULT
;
11188 unlock_user(p
, arg1
, 0);
11189 unlock_user(b
, arg2
, arg3
);
11192 case TARGET_NR_flistxattr
:
11196 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11198 ret
= -TARGET_EFAULT
;
11202 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
11203 unlock_user(b
, arg2
, arg3
);
11206 case TARGET_NR_setxattr
:
11207 case TARGET_NR_lsetxattr
:
11209 void *p
, *n
, *v
= 0;
11211 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11213 ret
= -TARGET_EFAULT
;
11217 p
= lock_user_string(arg1
);
11218 n
= lock_user_string(arg2
);
11220 if (num
== TARGET_NR_setxattr
) {
11221 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
11223 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
11226 ret
= -TARGET_EFAULT
;
11228 unlock_user(p
, arg1
, 0);
11229 unlock_user(n
, arg2
, 0);
11230 unlock_user(v
, arg3
, 0);
11233 case TARGET_NR_fsetxattr
:
11237 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11239 ret
= -TARGET_EFAULT
;
11243 n
= lock_user_string(arg2
);
11245 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
11247 ret
= -TARGET_EFAULT
;
11249 unlock_user(n
, arg2
, 0);
11250 unlock_user(v
, arg3
, 0);
11253 case TARGET_NR_getxattr
:
11254 case TARGET_NR_lgetxattr
:
11256 void *p
, *n
, *v
= 0;
11258 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11260 ret
= -TARGET_EFAULT
;
11264 p
= lock_user_string(arg1
);
11265 n
= lock_user_string(arg2
);
11267 if (num
== TARGET_NR_getxattr
) {
11268 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
11270 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
11273 ret
= -TARGET_EFAULT
;
11275 unlock_user(p
, arg1
, 0);
11276 unlock_user(n
, arg2
, 0);
11277 unlock_user(v
, arg3
, arg4
);
11280 case TARGET_NR_fgetxattr
:
11284 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11286 ret
= -TARGET_EFAULT
;
11290 n
= lock_user_string(arg2
);
11292 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
11294 ret
= -TARGET_EFAULT
;
11296 unlock_user(n
, arg2
, 0);
11297 unlock_user(v
, arg3
, arg4
);
11300 case TARGET_NR_removexattr
:
11301 case TARGET_NR_lremovexattr
:
11304 p
= lock_user_string(arg1
);
11305 n
= lock_user_string(arg2
);
11307 if (num
== TARGET_NR_removexattr
) {
11308 ret
= get_errno(removexattr(p
, n
));
11310 ret
= get_errno(lremovexattr(p
, n
));
11313 ret
= -TARGET_EFAULT
;
11315 unlock_user(p
, arg1
, 0);
11316 unlock_user(n
, arg2
, 0);
11319 case TARGET_NR_fremovexattr
:
11322 n
= lock_user_string(arg2
);
11324 ret
= get_errno(fremovexattr(arg1
, n
));
11326 ret
= -TARGET_EFAULT
;
11328 unlock_user(n
, arg2
, 0);
11332 #endif /* CONFIG_ATTR */
11333 #ifdef TARGET_NR_set_thread_area
11334 case TARGET_NR_set_thread_area
:
11335 #if defined(TARGET_MIPS)
11336 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
11339 #elif defined(TARGET_CRIS)
11341 ret
= -TARGET_EINVAL
;
11343 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
11347 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11348 ret
= do_set_thread_area(cpu_env
, arg1
);
11350 #elif defined(TARGET_M68K)
11352 TaskState
*ts
= cpu
->opaque
;
11353 ts
->tp_value
= arg1
;
11358 goto unimplemented_nowarn
;
11361 #ifdef TARGET_NR_get_thread_area
11362 case TARGET_NR_get_thread_area
:
11363 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11364 ret
= do_get_thread_area(cpu_env
, arg1
);
11366 #elif defined(TARGET_M68K)
11368 TaskState
*ts
= cpu
->opaque
;
11369 ret
= ts
->tp_value
;
11373 goto unimplemented_nowarn
;
11376 #ifdef TARGET_NR_getdomainname
11377 case TARGET_NR_getdomainname
:
11378 goto unimplemented_nowarn
;
11381 #ifdef TARGET_NR_clock_gettime
11382 case TARGET_NR_clock_gettime
:
11384 struct timespec ts
;
11385 ret
= get_errno(clock_gettime(arg1
, &ts
));
11386 if (!is_error(ret
)) {
11387 host_to_target_timespec(arg2
, &ts
);
11392 #ifdef TARGET_NR_clock_getres
11393 case TARGET_NR_clock_getres
:
11395 struct timespec ts
;
11396 ret
= get_errno(clock_getres(arg1
, &ts
));
11397 if (!is_error(ret
)) {
11398 host_to_target_timespec(arg2
, &ts
);
11403 #ifdef TARGET_NR_clock_nanosleep
11404 case TARGET_NR_clock_nanosleep
:
11406 struct timespec ts
;
11407 target_to_host_timespec(&ts
, arg3
);
11408 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
11409 &ts
, arg4
? &ts
: NULL
));
11411 host_to_target_timespec(arg4
, &ts
);
11413 #if defined(TARGET_PPC)
11414 /* clock_nanosleep is odd in that it returns positive errno values.
11415 * On PPC, CR0 bit 3 should be set in such a situation. */
11416 if (ret
&& ret
!= -TARGET_ERESTARTSYS
) {
11417 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
11424 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11425 case TARGET_NR_set_tid_address
:
11426 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
11430 case TARGET_NR_tkill
:
11431 ret
= get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
11434 case TARGET_NR_tgkill
:
11435 ret
= get_errno(safe_tgkill((int)arg1
, (int)arg2
,
11436 target_to_host_signal(arg3
)));
11439 #ifdef TARGET_NR_set_robust_list
11440 case TARGET_NR_set_robust_list
:
11441 case TARGET_NR_get_robust_list
:
11442 /* The ABI for supporting robust futexes has userspace pass
11443 * the kernel a pointer to a linked list which is updated by
11444 * userspace after the syscall; the list is walked by the kernel
11445 * when the thread exits. Since the linked list in QEMU guest
11446 * memory isn't a valid linked list for the host and we have
11447 * no way to reliably intercept the thread-death event, we can't
11448 * support these. Silently return ENOSYS so that guest userspace
11449 * falls back to a non-robust futex implementation (which should
11450 * be OK except in the corner case of the guest crashing while
11451 * holding a mutex that is shared with another process via
11454 goto unimplemented_nowarn
;
11457 #if defined(TARGET_NR_utimensat)
11458 case TARGET_NR_utimensat
:
11460 struct timespec
*tsp
, ts
[2];
11464 target_to_host_timespec(ts
, arg3
);
11465 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
11469 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
11471 if (!(p
= lock_user_string(arg2
))) {
11472 ret
= -TARGET_EFAULT
;
11475 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
11476 unlock_user(p
, arg2
, 0);
11481 case TARGET_NR_futex
:
11482 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11484 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11485 case TARGET_NR_inotify_init
:
11486 ret
= get_errno(sys_inotify_init());
11489 #ifdef CONFIG_INOTIFY1
11490 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11491 case TARGET_NR_inotify_init1
:
11492 ret
= get_errno(sys_inotify_init1(arg1
));
11496 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11497 case TARGET_NR_inotify_add_watch
:
11498 p
= lock_user_string(arg2
);
11499 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
11500 unlock_user(p
, arg2
, 0);
11503 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11504 case TARGET_NR_inotify_rm_watch
:
11505 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
11509 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11510 case TARGET_NR_mq_open
:
11512 struct mq_attr posix_mq_attr
;
11515 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
11516 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
11519 p
= lock_user_string(arg1
- 1);
11523 ret
= get_errno(mq_open(p
, host_flags
, arg3
, &posix_mq_attr
));
11524 unlock_user (p
, arg1
, 0);
11528 case TARGET_NR_mq_unlink
:
11529 p
= lock_user_string(arg1
- 1);
11531 ret
= -TARGET_EFAULT
;
11534 ret
= get_errno(mq_unlink(p
));
11535 unlock_user (p
, arg1
, 0);
11538 case TARGET_NR_mq_timedsend
:
11540 struct timespec ts
;
11542 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11544 target_to_host_timespec(&ts
, arg5
);
11545 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
11546 host_to_target_timespec(arg5
, &ts
);
11548 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
11550 unlock_user (p
, arg2
, arg3
);
11554 case TARGET_NR_mq_timedreceive
:
11556 struct timespec ts
;
11559 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11561 target_to_host_timespec(&ts
, arg5
);
11562 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11564 host_to_target_timespec(arg5
, &ts
);
11566 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11569 unlock_user (p
, arg2
, arg3
);
11571 put_user_u32(prio
, arg4
);
11575 /* Not implemented for now... */
11576 /* case TARGET_NR_mq_notify: */
11579 case TARGET_NR_mq_getsetattr
:
11581 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
11584 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
11585 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
11588 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
11589 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
11596 #ifdef CONFIG_SPLICE
11597 #ifdef TARGET_NR_tee
11598 case TARGET_NR_tee
:
11600 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
11604 #ifdef TARGET_NR_splice
11605 case TARGET_NR_splice
:
11607 loff_t loff_in
, loff_out
;
11608 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
11610 if (get_user_u64(loff_in
, arg2
)) {
11613 ploff_in
= &loff_in
;
11616 if (get_user_u64(loff_out
, arg4
)) {
11619 ploff_out
= &loff_out
;
11621 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
11623 if (put_user_u64(loff_in
, arg2
)) {
11628 if (put_user_u64(loff_out
, arg4
)) {
11635 #ifdef TARGET_NR_vmsplice
11636 case TARGET_NR_vmsplice
:
11638 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
11640 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
11641 unlock_iovec(vec
, arg2
, arg3
, 0);
11643 ret
= -host_to_target_errno(errno
);
11648 #endif /* CONFIG_SPLICE */
11649 #ifdef CONFIG_EVENTFD
11650 #if defined(TARGET_NR_eventfd)
11651 case TARGET_NR_eventfd
:
11652 ret
= get_errno(eventfd(arg1
, 0));
11653 fd_trans_unregister(ret
);
11656 #if defined(TARGET_NR_eventfd2)
11657 case TARGET_NR_eventfd2
:
11659 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
11660 if (arg2
& TARGET_O_NONBLOCK
) {
11661 host_flags
|= O_NONBLOCK
;
11663 if (arg2
& TARGET_O_CLOEXEC
) {
11664 host_flags
|= O_CLOEXEC
;
11666 ret
= get_errno(eventfd(arg1
, host_flags
));
11667 fd_trans_unregister(ret
);
11671 #endif /* CONFIG_EVENTFD */
11672 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11673 case TARGET_NR_fallocate
:
11674 #if TARGET_ABI_BITS == 32
11675 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
11676 target_offset64(arg5
, arg6
)));
11678 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
11682 #if defined(CONFIG_SYNC_FILE_RANGE)
11683 #if defined(TARGET_NR_sync_file_range)
11684 case TARGET_NR_sync_file_range
:
11685 #if TARGET_ABI_BITS == 32
11686 #if defined(TARGET_MIPS)
11687 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11688 target_offset64(arg5
, arg6
), arg7
));
11690 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
11691 target_offset64(arg4
, arg5
), arg6
));
11692 #endif /* !TARGET_MIPS */
11694 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
11698 #if defined(TARGET_NR_sync_file_range2)
11699 case TARGET_NR_sync_file_range2
:
11700 /* This is like sync_file_range but the arguments are reordered */
11701 #if TARGET_ABI_BITS == 32
11702 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11703 target_offset64(arg5
, arg6
), arg2
));
11705 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
11710 #if defined(TARGET_NR_signalfd4)
11711 case TARGET_NR_signalfd4
:
11712 ret
= do_signalfd4(arg1
, arg2
, arg4
);
11715 #if defined(TARGET_NR_signalfd)
11716 case TARGET_NR_signalfd
:
11717 ret
= do_signalfd4(arg1
, arg2
, 0);
11720 #if defined(CONFIG_EPOLL)
11721 #if defined(TARGET_NR_epoll_create)
11722 case TARGET_NR_epoll_create
:
11723 ret
= get_errno(epoll_create(arg1
));
11726 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11727 case TARGET_NR_epoll_create1
:
11728 ret
= get_errno(epoll_create1(arg1
));
11731 #if defined(TARGET_NR_epoll_ctl)
11732 case TARGET_NR_epoll_ctl
:
11734 struct epoll_event ep
;
11735 struct epoll_event
*epp
= 0;
11737 struct target_epoll_event
*target_ep
;
11738 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
11741 ep
.events
= tswap32(target_ep
->events
);
11742 /* The epoll_data_t union is just opaque data to the kernel,
11743 * so we transfer all 64 bits across and need not worry what
11744 * actual data type it is.
11746 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
11747 unlock_user_struct(target_ep
, arg4
, 0);
11750 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
11755 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11756 #if defined(TARGET_NR_epoll_wait)
11757 case TARGET_NR_epoll_wait
:
11759 #if defined(TARGET_NR_epoll_pwait)
11760 case TARGET_NR_epoll_pwait
:
11763 struct target_epoll_event
*target_ep
;
11764 struct epoll_event
*ep
;
11766 int maxevents
= arg3
;
11767 int timeout
= arg4
;
11769 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
11770 ret
= -TARGET_EINVAL
;
11774 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
11775 maxevents
* sizeof(struct target_epoll_event
), 1);
11780 ep
= alloca(maxevents
* sizeof(struct epoll_event
));
11783 #if defined(TARGET_NR_epoll_pwait)
11784 case TARGET_NR_epoll_pwait
:
11786 target_sigset_t
*target_set
;
11787 sigset_t _set
, *set
= &_set
;
11790 if (arg6
!= sizeof(target_sigset_t
)) {
11791 ret
= -TARGET_EINVAL
;
11795 target_set
= lock_user(VERIFY_READ
, arg5
,
11796 sizeof(target_sigset_t
), 1);
11798 unlock_user(target_ep
, arg2
, 0);
11801 target_to_host_sigset(set
, target_set
);
11802 unlock_user(target_set
, arg5
, 0);
11807 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
11808 set
, SIGSET_T_SIZE
));
11812 #if defined(TARGET_NR_epoll_wait)
11813 case TARGET_NR_epoll_wait
:
11814 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
11819 ret
= -TARGET_ENOSYS
;
11821 if (!is_error(ret
)) {
11823 for (i
= 0; i
< ret
; i
++) {
11824 target_ep
[i
].events
= tswap32(ep
[i
].events
);
11825 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
11828 unlock_user(target_ep
, arg2
, ret
* sizeof(struct target_epoll_event
));
11833 #ifdef TARGET_NR_prlimit64
11834 case TARGET_NR_prlimit64
:
11836 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11837 struct target_rlimit64
*target_rnew
, *target_rold
;
11838 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
11839 int resource
= target_to_host_resource(arg2
);
11841 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
11844 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
11845 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
11846 unlock_user_struct(target_rnew
, arg3
, 0);
11850 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
11851 if (!is_error(ret
) && arg4
) {
11852 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
11855 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
11856 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
11857 unlock_user_struct(target_rold
, arg4
, 1);
11862 #ifdef TARGET_NR_gethostname
11863 case TARGET_NR_gethostname
:
11865 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
11867 ret
= get_errno(gethostname(name
, arg2
));
11868 unlock_user(name
, arg1
, arg2
);
11870 ret
= -TARGET_EFAULT
;
11875 #ifdef TARGET_NR_atomic_cmpxchg_32
11876 case TARGET_NR_atomic_cmpxchg_32
:
11878 /* should use start_exclusive from main.c */
11879 abi_ulong mem_value
;
11880 if (get_user_u32(mem_value
, arg6
)) {
11881 target_siginfo_t info
;
11882 info
.si_signo
= SIGSEGV
;
11884 info
.si_code
= TARGET_SEGV_MAPERR
;
11885 info
._sifields
._sigfault
._addr
= arg6
;
11886 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
11887 QEMU_SI_FAULT
, &info
);
11891 if (mem_value
== arg2
)
11892 put_user_u32(arg1
, arg6
);
11897 #ifdef TARGET_NR_atomic_barrier
11898 case TARGET_NR_atomic_barrier
:
11900 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
11906 #ifdef TARGET_NR_timer_create
11907 case TARGET_NR_timer_create
:
11909 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11911 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
11914 int timer_index
= next_free_host_timer();
11916 if (timer_index
< 0) {
11917 ret
= -TARGET_EAGAIN
;
11919 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
11922 phost_sevp
= &host_sevp
;
11923 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
11929 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
11933 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
11942 #ifdef TARGET_NR_timer_settime
11943 case TARGET_NR_timer_settime
:
11945 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11946 * struct itimerspec * old_value */
11947 target_timer_t timerid
= get_timer_id(arg1
);
11951 } else if (arg3
== 0) {
11952 ret
= -TARGET_EINVAL
;
11954 timer_t htimer
= g_posix_timers
[timerid
];
11955 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
11957 target_to_host_itimerspec(&hspec_new
, arg3
);
11959 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
11960 host_to_target_itimerspec(arg2
, &hspec_old
);
11966 #ifdef TARGET_NR_timer_gettime
11967 case TARGET_NR_timer_gettime
:
11969 /* args: timer_t timerid, struct itimerspec *curr_value */
11970 target_timer_t timerid
= get_timer_id(arg1
);
11974 } else if (!arg2
) {
11975 ret
= -TARGET_EFAULT
;
11977 timer_t htimer
= g_posix_timers
[timerid
];
11978 struct itimerspec hspec
;
11979 ret
= get_errno(timer_gettime(htimer
, &hspec
));
11981 if (host_to_target_itimerspec(arg2
, &hspec
)) {
11982 ret
= -TARGET_EFAULT
;
11989 #ifdef TARGET_NR_timer_getoverrun
11990 case TARGET_NR_timer_getoverrun
:
11992 /* args: timer_t timerid */
11993 target_timer_t timerid
= get_timer_id(arg1
);
11998 timer_t htimer
= g_posix_timers
[timerid
];
11999 ret
= get_errno(timer_getoverrun(htimer
));
12001 fd_trans_unregister(ret
);
12006 #ifdef TARGET_NR_timer_delete
12007 case TARGET_NR_timer_delete
:
12009 /* args: timer_t timerid */
12010 target_timer_t timerid
= get_timer_id(arg1
);
12015 timer_t htimer
= g_posix_timers
[timerid
];
12016 ret
= get_errno(timer_delete(htimer
));
12017 g_posix_timers
[timerid
] = 0;
12023 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12024 case TARGET_NR_timerfd_create
:
12025 ret
= get_errno(timerfd_create(arg1
,
12026 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
12030 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12031 case TARGET_NR_timerfd_gettime
:
12033 struct itimerspec its_curr
;
12035 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
12037 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
12044 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12045 case TARGET_NR_timerfd_settime
:
12047 struct itimerspec its_new
, its_old
, *p_new
;
12050 if (target_to_host_itimerspec(&its_new
, arg3
)) {
12058 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
12060 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
12067 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12068 case TARGET_NR_ioprio_get
:
12069 ret
= get_errno(ioprio_get(arg1
, arg2
));
12073 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12074 case TARGET_NR_ioprio_set
:
12075 ret
= get_errno(ioprio_set(arg1
, arg2
, arg3
));
12079 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12080 case TARGET_NR_setns
:
12081 ret
= get_errno(setns(arg1
, arg2
));
12084 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12085 case TARGET_NR_unshare
:
12086 ret
= get_errno(unshare(arg1
));
12089 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12090 case TARGET_NR_kcmp
:
12091 ret
= get_errno(kcmp(arg1
, arg2
, arg3
, arg4
, arg5
));
12097 gemu_log("qemu: Unsupported syscall: %d\n", num
);
12098 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
12099 unimplemented_nowarn
:
12101 ret
= -TARGET_ENOSYS
;
12106 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
12109 print_syscall_ret(num
, ret
);
12110 trace_guest_user_syscall_ret(cpu
, num
, ret
);
12113 ret
= -TARGET_EFAULT
;