4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
36 #include <linux/capability.h>
38 #include <sys/timex.h>
40 int __clone2(int (*fn
)(void *), void *child_stack_base
,
41 size_t stack_size
, int flags
, void *arg
, ...);
43 #include <sys/socket.h>
47 #include <sys/times.h>
50 #include <sys/statfs.h>
53 #include <sys/sysinfo.h>
54 #include <sys/signalfd.h>
55 //#include <sys/user.h>
56 #include <netinet/ip.h>
57 #include <netinet/tcp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include "qemu-common.h"
62 #include <sys/timerfd.h>
68 #include <sys/eventfd.h>
71 #include <sys/epoll.h>
74 #include "qemu/xattr.h"
76 #ifdef CONFIG_SENDFILE
77 #include <sys/sendfile.h>
80 #define termios host_termios
81 #define winsize host_winsize
82 #define termio host_termio
83 #define sgttyb host_sgttyb /* same as target */
84 #define tchars host_tchars /* same as target */
85 #define ltchars host_ltchars /* same as target */
87 #include <linux/termios.h>
88 #include <linux/unistd.h>
89 #include <linux/cdrom.h>
90 #include <linux/hdreg.h>
91 #include <linux/soundcard.h>
93 #include <linux/mtio.h>
95 #if defined(CONFIG_FIEMAP)
96 #include <linux/fiemap.h>
100 #include <linux/dm-ioctl.h>
101 #include <linux/reboot.h>
102 #include <linux/route.h>
103 #include <linux/filter.h>
104 #include <linux/blkpg.h>
105 #include <netpacket/packet.h>
106 #include <linux/netlink.h>
107 #ifdef CONFIG_RTNETLINK
108 #include <linux/rtnetlink.h>
109 #include <linux/if_bridge.h>
111 #include <linux/audit.h>
112 #include "linux_loop.h"
118 #define CLONE_IO 0x80000000 /* Clone io context */
121 /* We can't directly call the host clone syscall, because this will
122 * badly confuse libc (breaking mutexes, for example). So we must
123 * divide clone flags into:
124 * * flag combinations that look like pthread_create()
125 * * flag combinations that look like fork()
126 * * flags we can implement within QEMU itself
127 * * flags we can't support and will return an error for
129 /* For thread creation, all these flags must be present; for
130 * fork, none must be present.
132 #define CLONE_THREAD_FLAGS \
133 (CLONE_VM | CLONE_FS | CLONE_FILES | \
134 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
136 /* These flags are ignored:
137 * CLONE_DETACHED is now ignored by the kernel;
138 * CLONE_IO is just an optimisation hint to the I/O scheduler
140 #define CLONE_IGNORED_FLAGS \
141 (CLONE_DETACHED | CLONE_IO)
143 /* Flags for fork which we can implement within QEMU itself */
144 #define CLONE_OPTIONAL_FORK_FLAGS \
145 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
146 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
148 /* Flags for thread creation which we can implement within QEMU itself */
149 #define CLONE_OPTIONAL_THREAD_FLAGS \
150 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
151 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
153 #define CLONE_INVALID_FORK_FLAGS \
154 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
156 #define CLONE_INVALID_THREAD_FLAGS \
157 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
158 CLONE_IGNORED_FLAGS))
160 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
161 * have almost all been allocated. We cannot support any of
162 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
163 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
164 * The checks against the invalid thread masks above will catch these.
165 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
169 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
170 * once. This exercises the codepaths for restart.
172 //#define DEBUG_ERESTARTSYS
174 //#include <linux/msdos_fs.h>
175 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
176 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
186 #define _syscall0(type,name) \
187 static type name (void) \
189 return syscall(__NR_##name); \
192 #define _syscall1(type,name,type1,arg1) \
193 static type name (type1 arg1) \
195 return syscall(__NR_##name, arg1); \
198 #define _syscall2(type,name,type1,arg1,type2,arg2) \
199 static type name (type1 arg1,type2 arg2) \
201 return syscall(__NR_##name, arg1, arg2); \
204 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
205 static type name (type1 arg1,type2 arg2,type3 arg3) \
207 return syscall(__NR_##name, arg1, arg2, arg3); \
210 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
211 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
213 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
216 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
218 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
220 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
224 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
225 type5,arg5,type6,arg6) \
226 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
229 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
233 #define __NR_sys_uname __NR_uname
234 #define __NR_sys_getcwd1 __NR_getcwd
235 #define __NR_sys_getdents __NR_getdents
236 #define __NR_sys_getdents64 __NR_getdents64
237 #define __NR_sys_getpriority __NR_getpriority
238 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
239 #define __NR_sys_syslog __NR_syslog
240 #define __NR_sys_futex __NR_futex
241 #define __NR_sys_inotify_init __NR_inotify_init
242 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
243 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
245 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
247 #define __NR__llseek __NR_lseek
250 /* Newer kernel ports have llseek() instead of _llseek() */
251 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
252 #define TARGET_NR__llseek TARGET_NR_llseek
256 _syscall0(int, gettid
)
258 /* This is a replacement for the host gettid() and must return a host
260 static int gettid(void) {
264 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
265 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
267 #if !defined(__NR_getdents) || \
268 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
269 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
271 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
272 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
273 loff_t
*, res
, uint
, wh
);
275 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
276 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
277 #ifdef __NR_exit_group
278 _syscall1(int,exit_group
,int,error_code
)
280 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
281 _syscall1(int,set_tid_address
,int *,tidptr
)
283 #if defined(TARGET_NR_futex) && defined(__NR_futex)
284 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
285 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
287 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
288 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
289 unsigned long *, user_mask_ptr
);
290 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
291 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
292 unsigned long *, user_mask_ptr
);
293 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
295 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
296 struct __user_cap_data_struct
*, data
);
297 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
298 struct __user_cap_data_struct
*, data
);
299 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
300 _syscall2(int, ioprio_get
, int, which
, int, who
)
302 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
303 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
305 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
306 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
309 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
310 _syscall5(int, kcmp
, pid_t
, pid1
, pid_t
, pid2
, int, type
,
311 unsigned long, idx1
, unsigned long, idx2
)
314 static bitmask_transtbl fcntl_flags_tbl
[] = {
315 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
316 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
317 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
318 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
319 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
320 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
321 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
322 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
323 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
324 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
325 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
326 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
327 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
328 #if defined(O_DIRECT)
329 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
331 #if defined(O_NOATIME)
332 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
334 #if defined(O_CLOEXEC)
335 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
338 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
340 /* Don't terminate the list prematurely on 64-bit host+guest. */
341 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
342 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
349 QEMU_IFLA_BR_FORWARD_DELAY
,
350 QEMU_IFLA_BR_HELLO_TIME
,
351 QEMU_IFLA_BR_MAX_AGE
,
352 QEMU_IFLA_BR_AGEING_TIME
,
353 QEMU_IFLA_BR_STP_STATE
,
354 QEMU_IFLA_BR_PRIORITY
,
355 QEMU_IFLA_BR_VLAN_FILTERING
,
356 QEMU_IFLA_BR_VLAN_PROTOCOL
,
357 QEMU_IFLA_BR_GROUP_FWD_MASK
,
358 QEMU_IFLA_BR_ROOT_ID
,
359 QEMU_IFLA_BR_BRIDGE_ID
,
360 QEMU_IFLA_BR_ROOT_PORT
,
361 QEMU_IFLA_BR_ROOT_PATH_COST
,
362 QEMU_IFLA_BR_TOPOLOGY_CHANGE
,
363 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED
,
364 QEMU_IFLA_BR_HELLO_TIMER
,
365 QEMU_IFLA_BR_TCN_TIMER
,
366 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER
,
367 QEMU_IFLA_BR_GC_TIMER
,
368 QEMU_IFLA_BR_GROUP_ADDR
,
369 QEMU_IFLA_BR_FDB_FLUSH
,
370 QEMU_IFLA_BR_MCAST_ROUTER
,
371 QEMU_IFLA_BR_MCAST_SNOOPING
,
372 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR
,
373 QEMU_IFLA_BR_MCAST_QUERIER
,
374 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY
,
375 QEMU_IFLA_BR_MCAST_HASH_MAX
,
376 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT
,
377 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT
,
378 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL
,
379 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL
,
380 QEMU_IFLA_BR_MCAST_QUERIER_INTVL
,
381 QEMU_IFLA_BR_MCAST_QUERY_INTVL
,
382 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
,
383 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL
,
384 QEMU_IFLA_BR_NF_CALL_IPTABLES
,
385 QEMU_IFLA_BR_NF_CALL_IP6TABLES
,
386 QEMU_IFLA_BR_NF_CALL_ARPTABLES
,
387 QEMU_IFLA_BR_VLAN_DEFAULT_PVID
,
389 QEMU_IFLA_BR_VLAN_STATS_ENABLED
,
390 QEMU_IFLA_BR_MCAST_STATS_ENABLED
,
414 QEMU_IFLA_NET_NS_PID
,
417 QEMU_IFLA_VFINFO_LIST
,
425 QEMU_IFLA_PROMISCUITY
,
426 QEMU_IFLA_NUM_TX_QUEUES
,
427 QEMU_IFLA_NUM_RX_QUEUES
,
429 QEMU_IFLA_PHYS_PORT_ID
,
430 QEMU_IFLA_CARRIER_CHANGES
,
431 QEMU_IFLA_PHYS_SWITCH_ID
,
432 QEMU_IFLA_LINK_NETNSID
,
433 QEMU_IFLA_PHYS_PORT_NAME
,
434 QEMU_IFLA_PROTO_DOWN
,
435 QEMU_IFLA_GSO_MAX_SEGS
,
436 QEMU_IFLA_GSO_MAX_SIZE
,
443 QEMU_IFLA_BRPORT_UNSPEC
,
444 QEMU_IFLA_BRPORT_STATE
,
445 QEMU_IFLA_BRPORT_PRIORITY
,
446 QEMU_IFLA_BRPORT_COST
,
447 QEMU_IFLA_BRPORT_MODE
,
448 QEMU_IFLA_BRPORT_GUARD
,
449 QEMU_IFLA_BRPORT_PROTECT
,
450 QEMU_IFLA_BRPORT_FAST_LEAVE
,
451 QEMU_IFLA_BRPORT_LEARNING
,
452 QEMU_IFLA_BRPORT_UNICAST_FLOOD
,
453 QEMU_IFLA_BRPORT_PROXYARP
,
454 QEMU_IFLA_BRPORT_LEARNING_SYNC
,
455 QEMU_IFLA_BRPORT_PROXYARP_WIFI
,
456 QEMU_IFLA_BRPORT_ROOT_ID
,
457 QEMU_IFLA_BRPORT_BRIDGE_ID
,
458 QEMU_IFLA_BRPORT_DESIGNATED_PORT
,
459 QEMU_IFLA_BRPORT_DESIGNATED_COST
,
462 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
,
463 QEMU_IFLA_BRPORT_CONFIG_PENDING
,
464 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER
,
465 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER
,
466 QEMU_IFLA_BRPORT_HOLD_TIMER
,
467 QEMU_IFLA_BRPORT_FLUSH
,
468 QEMU_IFLA_BRPORT_MULTICAST_ROUTER
,
469 QEMU_IFLA_BRPORT_PAD
,
470 QEMU___IFLA_BRPORT_MAX
474 QEMU_IFLA_INFO_UNSPEC
,
477 QEMU_IFLA_INFO_XSTATS
,
478 QEMU_IFLA_INFO_SLAVE_KIND
,
479 QEMU_IFLA_INFO_SLAVE_DATA
,
480 QEMU___IFLA_INFO_MAX
,
484 QEMU_IFLA_INET_UNSPEC
,
486 QEMU___IFLA_INET_MAX
,
490 QEMU_IFLA_INET6_UNSPEC
,
491 QEMU_IFLA_INET6_FLAGS
,
492 QEMU_IFLA_INET6_CONF
,
493 QEMU_IFLA_INET6_STATS
,
494 QEMU_IFLA_INET6_MCAST
,
495 QEMU_IFLA_INET6_CACHEINFO
,
496 QEMU_IFLA_INET6_ICMP6STATS
,
497 QEMU_IFLA_INET6_TOKEN
,
498 QEMU_IFLA_INET6_ADDR_GEN_MODE
,
499 QEMU___IFLA_INET6_MAX
502 typedef abi_long (*TargetFdDataFunc
)(void *, size_t);
503 typedef abi_long (*TargetFdAddrFunc
)(void *, abi_ulong
, socklen_t
);
504 typedef struct TargetFdTrans
{
505 TargetFdDataFunc host_to_target_data
;
506 TargetFdDataFunc target_to_host_data
;
507 TargetFdAddrFunc target_to_host_addr
;
510 static TargetFdTrans
**target_fd_trans
;
512 static unsigned int target_fd_max
;
514 static TargetFdDataFunc
fd_trans_target_to_host_data(int fd
)
516 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
517 return target_fd_trans
[fd
]->target_to_host_data
;
522 static TargetFdDataFunc
fd_trans_host_to_target_data(int fd
)
524 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
525 return target_fd_trans
[fd
]->host_to_target_data
;
530 static TargetFdAddrFunc
fd_trans_target_to_host_addr(int fd
)
532 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
533 return target_fd_trans
[fd
]->target_to_host_addr
;
538 static void fd_trans_register(int fd
, TargetFdTrans
*trans
)
542 if (fd
>= target_fd_max
) {
543 oldmax
= target_fd_max
;
544 target_fd_max
= ((fd
>> 6) + 1) << 6; /* by slice of 64 entries */
545 target_fd_trans
= g_renew(TargetFdTrans
*,
546 target_fd_trans
, target_fd_max
);
547 memset((void *)(target_fd_trans
+ oldmax
), 0,
548 (target_fd_max
- oldmax
) * sizeof(TargetFdTrans
*));
550 target_fd_trans
[fd
] = trans
;
553 static void fd_trans_unregister(int fd
)
555 if (fd
>= 0 && fd
< target_fd_max
) {
556 target_fd_trans
[fd
] = NULL
;
560 static void fd_trans_dup(int oldfd
, int newfd
)
562 fd_trans_unregister(newfd
);
563 if (oldfd
< target_fd_max
&& target_fd_trans
[oldfd
]) {
564 fd_trans_register(newfd
, target_fd_trans
[oldfd
]);
/* getcwd() wrapper with kernel-style semantics: on success return the
 * length of the path INCLUDING the trailing NUL (as the getcwd syscall
 * does), on failure return -1 with errno set by getcwd(). */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return -1;
    }
    return strlen(buf) + 1;
}
577 #ifdef TARGET_NR_utimensat
578 #if defined(__NR_utimensat)
579 #define __NR_sys_utimensat __NR_utimensat
580 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
581 const struct timespec
*,tsp
,int,flags
)
583 static int sys_utimensat(int dirfd
, const char *pathname
,
584 const struct timespec times
[2], int flags
)
590 #endif /* TARGET_NR_utimensat */
592 #ifdef CONFIG_INOTIFY
593 #include <sys/inotify.h>
595 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper so the syscall dispatch code has a uniform sys_* name;
 * returns the new inotify fd or -1 with errno set. */
static int sys_inotify_init(void)
{
    return (inotify_init());
}
601 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Thin wrapper over inotify_add_watch(2); returns the watch
 * descriptor or -1 with errno set. */
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return (inotify_add_watch(fd, pathname, mask));
}
607 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Thin wrapper over inotify_rm_watch(2); returns 0 on success or -1
 * with errno set. */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return (inotify_rm_watch(fd, wd));
}
613 #ifdef CONFIG_INOTIFY1
614 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Thin wrapper over inotify_init1(2), the flags-taking variant;
 * returns the new inotify fd or -1 with errno set. */
static int sys_inotify_init1(int flags)
{
    return (inotify_init1(flags));
}
622 /* Userspace can usually survive runtime without inotify */
623 #undef TARGET_NR_inotify_init
624 #undef TARGET_NR_inotify_init1
625 #undef TARGET_NR_inotify_add_watch
626 #undef TARGET_NR_inotify_rm_watch
627 #endif /* CONFIG_INOTIFY */
629 #if defined(TARGET_NR_prlimit64)
630 #ifndef __NR_prlimit64
631 # define __NR_prlimit64 -1
633 #define __NR_sys_prlimit64 __NR_prlimit64
634 /* The glibc rlimit structure may not be that used by the underlying syscall */
635 struct host_rlimit64
{
639 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
640 const struct host_rlimit64
*, new_limit
,
641 struct host_rlimit64
*, old_limit
)
645 #if defined(TARGET_NR_timer_create)
646 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
647 static timer_t g_posix_timers
[32] = { 0, } ;
649 static inline int next_free_host_timer(void)
652 /* FIXME: Does finding the next free slot require a lock? */
653 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
654 if (g_posix_timers
[k
] == 0) {
655 g_posix_timers
[k
] = (timer_t
) 1;
663 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
665 static inline int regpairs_aligned(void *cpu_env
) {
666 return ((((CPUARMState
*)cpu_env
)->eabi
) == 1) ;
668 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
669 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
670 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
671 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
672 * of registers which translates to the same as ARM/MIPS, because we start with
674 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
676 static inline int regpairs_aligned(void *cpu_env
) { return 0; }
679 #define ERRNO_TABLE_SIZE 1200
681 /* target_to_host_errno_table[] is initialized from
682 * host_to_target_errno_table[] in syscall_init(). */
683 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
687 * This list is the union of errno values overridden in asm-<arch>/errno.h
688 * minus the errnos that are not actually generic to all archs.
690 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
691 [EAGAIN
] = TARGET_EAGAIN
,
692 [EIDRM
] = TARGET_EIDRM
,
693 [ECHRNG
] = TARGET_ECHRNG
,
694 [EL2NSYNC
] = TARGET_EL2NSYNC
,
695 [EL3HLT
] = TARGET_EL3HLT
,
696 [EL3RST
] = TARGET_EL3RST
,
697 [ELNRNG
] = TARGET_ELNRNG
,
698 [EUNATCH
] = TARGET_EUNATCH
,
699 [ENOCSI
] = TARGET_ENOCSI
,
700 [EL2HLT
] = TARGET_EL2HLT
,
701 [EDEADLK
] = TARGET_EDEADLK
,
702 [ENOLCK
] = TARGET_ENOLCK
,
703 [EBADE
] = TARGET_EBADE
,
704 [EBADR
] = TARGET_EBADR
,
705 [EXFULL
] = TARGET_EXFULL
,
706 [ENOANO
] = TARGET_ENOANO
,
707 [EBADRQC
] = TARGET_EBADRQC
,
708 [EBADSLT
] = TARGET_EBADSLT
,
709 [EBFONT
] = TARGET_EBFONT
,
710 [ENOSTR
] = TARGET_ENOSTR
,
711 [ENODATA
] = TARGET_ENODATA
,
712 [ETIME
] = TARGET_ETIME
,
713 [ENOSR
] = TARGET_ENOSR
,
714 [ENONET
] = TARGET_ENONET
,
715 [ENOPKG
] = TARGET_ENOPKG
,
716 [EREMOTE
] = TARGET_EREMOTE
,
717 [ENOLINK
] = TARGET_ENOLINK
,
718 [EADV
] = TARGET_EADV
,
719 [ESRMNT
] = TARGET_ESRMNT
,
720 [ECOMM
] = TARGET_ECOMM
,
721 [EPROTO
] = TARGET_EPROTO
,
722 [EDOTDOT
] = TARGET_EDOTDOT
,
723 [EMULTIHOP
] = TARGET_EMULTIHOP
,
724 [EBADMSG
] = TARGET_EBADMSG
,
725 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
726 [EOVERFLOW
] = TARGET_EOVERFLOW
,
727 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
728 [EBADFD
] = TARGET_EBADFD
,
729 [EREMCHG
] = TARGET_EREMCHG
,
730 [ELIBACC
] = TARGET_ELIBACC
,
731 [ELIBBAD
] = TARGET_ELIBBAD
,
732 [ELIBSCN
] = TARGET_ELIBSCN
,
733 [ELIBMAX
] = TARGET_ELIBMAX
,
734 [ELIBEXEC
] = TARGET_ELIBEXEC
,
735 [EILSEQ
] = TARGET_EILSEQ
,
736 [ENOSYS
] = TARGET_ENOSYS
,
737 [ELOOP
] = TARGET_ELOOP
,
738 [ERESTART
] = TARGET_ERESTART
,
739 [ESTRPIPE
] = TARGET_ESTRPIPE
,
740 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
741 [EUSERS
] = TARGET_EUSERS
,
742 [ENOTSOCK
] = TARGET_ENOTSOCK
,
743 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
744 [EMSGSIZE
] = TARGET_EMSGSIZE
,
745 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
746 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
747 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
748 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
749 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
750 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
751 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
752 [EADDRINUSE
] = TARGET_EADDRINUSE
,
753 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
754 [ENETDOWN
] = TARGET_ENETDOWN
,
755 [ENETUNREACH
] = TARGET_ENETUNREACH
,
756 [ENETRESET
] = TARGET_ENETRESET
,
757 [ECONNABORTED
] = TARGET_ECONNABORTED
,
758 [ECONNRESET
] = TARGET_ECONNRESET
,
759 [ENOBUFS
] = TARGET_ENOBUFS
,
760 [EISCONN
] = TARGET_EISCONN
,
761 [ENOTCONN
] = TARGET_ENOTCONN
,
762 [EUCLEAN
] = TARGET_EUCLEAN
,
763 [ENOTNAM
] = TARGET_ENOTNAM
,
764 [ENAVAIL
] = TARGET_ENAVAIL
,
765 [EISNAM
] = TARGET_EISNAM
,
766 [EREMOTEIO
] = TARGET_EREMOTEIO
,
767 [EDQUOT
] = TARGET_EDQUOT
,
768 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
769 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
770 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
771 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
772 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
773 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
774 [EALREADY
] = TARGET_EALREADY
,
775 [EINPROGRESS
] = TARGET_EINPROGRESS
,
776 [ESTALE
] = TARGET_ESTALE
,
777 [ECANCELED
] = TARGET_ECANCELED
,
778 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
779 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
781 [ENOKEY
] = TARGET_ENOKEY
,
784 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
787 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
790 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
793 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
795 #ifdef ENOTRECOVERABLE
796 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
799 [ENOMSG
] = TARGET_ENOMSG
,
803 static inline int host_to_target_errno(int err
)
805 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
806 host_to_target_errno_table
[err
]) {
807 return host_to_target_errno_table
[err
];
812 static inline int target_to_host_errno(int err
)
814 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
815 target_to_host_errno_table
[err
]) {
816 return target_to_host_errno_table
[err
];
821 static inline abi_long
get_errno(abi_long ret
)
824 return -host_to_target_errno(errno
);
829 static inline int is_error(abi_long ret
)
831 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
834 const char *target_strerror(int err
)
836 if (err
== TARGET_ERESTARTSYS
) {
837 return "To be restarted";
839 if (err
== TARGET_QEMU_ESIGRETURN
) {
840 return "Successful exit from sigreturn";
843 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
846 return strerror(target_to_host_errno(err
));
849 #define safe_syscall0(type, name) \
850 static type safe_##name(void) \
852 return safe_syscall(__NR_##name); \
855 #define safe_syscall1(type, name, type1, arg1) \
856 static type safe_##name(type1 arg1) \
858 return safe_syscall(__NR_##name, arg1); \
861 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
862 static type safe_##name(type1 arg1, type2 arg2) \
864 return safe_syscall(__NR_##name, arg1, arg2); \
867 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
868 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
870 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
873 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
875 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
877 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
880 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
881 type4, arg4, type5, arg5) \
882 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
885 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
888 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
889 type4, arg4, type5, arg5, type6, arg6) \
890 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
891 type5 arg5, type6 arg6) \
893 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
896 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
897 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
898 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
899 int, flags
, mode_t
, mode
)
900 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
901 struct rusage
*, rusage
)
902 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
903 int, options
, struct rusage
*, rusage
)
904 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
905 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
906 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
907 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
908 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
910 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
911 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
913 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
914 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
915 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
916 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
917 safe_syscall2(int, tkill
, int, tid
, int, sig
)
918 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
919 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
920 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
921 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
923 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
924 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
925 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
926 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
927 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
928 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
929 safe_syscall2(int, flock
, int, fd
, int, operation
)
930 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
931 const struct timespec
*, uts
, size_t, sigsetsize
)
932 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
934 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
935 struct timespec
*, rem
)
936 #ifdef TARGET_NR_clock_nanosleep
937 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
938 const struct timespec
*, req
, struct timespec
*, rem
)
941 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
943 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
944 long, msgtype
, int, flags
)
945 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
946 unsigned, nsops
, const struct timespec
*, timeout
)
948 /* This host kernel architecture uses a single ipc syscall; fake up
949 * wrappers for the sub-operations to hide this implementation detail.
950 * Annoyingly we can't include linux/ipc.h to get the constant definitions
951 * for the call parameter because some structs in there conflict with the
952 * sys/ipc.h ones. So we just define them here, and rely on them being
953 * the same for all host architectures.
955 #define Q_SEMTIMEDOP 4
958 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
960 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
961 void *, ptr
, long, fifth
)
962 static int safe_msgsnd(int msgid
, const void *msgp
, size_t sz
, int flags
)
964 return safe_ipc(Q_IPCCALL(0, Q_MSGSND
), msgid
, sz
, flags
, (void *)msgp
, 0);
966 static int safe_msgrcv(int msgid
, void *msgp
, size_t sz
, long type
, int flags
)
968 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV
), msgid
, sz
, flags
, msgp
, type
);
970 static int safe_semtimedop(int semid
, struct sembuf
*tsops
, unsigned nsops
,
971 const struct timespec
*timeout
)
973 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP
), semid
, nsops
, 0, tsops
,
977 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
978 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
979 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
980 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
981 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
983 /* We do ioctl like this rather than via safe_syscall3 to preserve the
984 * "third argument might be integer or pointer or not present" behaviour of
987 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
988 /* Similarly for fcntl. Note that callers must always:
989 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
990 * use the flock64 struct rather than unsuffixed flock
991 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
994 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
996 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
999 static inline int host_to_target_sock_type(int host_type
)
1003 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
1005 target_type
= TARGET_SOCK_DGRAM
;
1008 target_type
= TARGET_SOCK_STREAM
;
1011 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
1015 #if defined(SOCK_CLOEXEC)
1016 if (host_type
& SOCK_CLOEXEC
) {
1017 target_type
|= TARGET_SOCK_CLOEXEC
;
1021 #if defined(SOCK_NONBLOCK)
1022 if (host_type
& SOCK_NONBLOCK
) {
1023 target_type
|= TARGET_SOCK_NONBLOCK
;
1030 static abi_ulong target_brk
;
1031 static abi_ulong target_original_brk
;
1032 static abi_ulong brk_page
;
1034 void target_set_brk(abi_ulong new_brk
)
1036 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
1037 brk_page
= HOST_PAGE_ALIGN(target_brk
);
1040 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
1041 #define DEBUGF_BRK(message, args...)
1043 /* do_brk() must return target values and target errnos. */
1044 abi_long
do_brk(abi_ulong new_brk
)
1046 abi_long mapped_addr
;
1047 abi_ulong new_alloc_size
;
1049 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
1052 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
1055 if (new_brk
< target_original_brk
) {
1056 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
1061 /* If the new brk is less than the highest page reserved to the
1062 * target heap allocation, set it and we're almost done... */
1063 if (new_brk
<= brk_page
) {
1064 /* Heap contents are initialized to zero, as for anonymous
1066 if (new_brk
> target_brk
) {
1067 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
1069 target_brk
= new_brk
;
1070 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
1074 /* We need to allocate more memory after the brk... Note that
1075 * we don't use MAP_FIXED because that will map over the top of
1076 * any existing mapping (like the one with the host libc or qemu
1077 * itself); instead we treat "mapped but at wrong address" as
1078 * a failure and unmap again.
1080 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
1081 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
1082 PROT_READ
|PROT_WRITE
,
1083 MAP_ANON
|MAP_PRIVATE
, 0, 0));
1085 if (mapped_addr
== brk_page
) {
1086 /* Heap contents are initialized to zero, as for anonymous
1087 * mapped pages. Technically the new pages are already
1088 * initialized to zero since they *are* anonymous mapped
1089 * pages, however we have to take care with the contents that
1090 * come from the remaining part of the previous page: it may
1091 * contains garbage data due to a previous heap usage (grown
1092 * then shrunken). */
1093 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
1095 target_brk
= new_brk
;
1096 brk_page
= HOST_PAGE_ALIGN(target_brk
);
1097 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
1100 } else if (mapped_addr
!= -1) {
1101 /* Mapped but at wrong address, meaning there wasn't actually
1102 * enough space for this brk.
1104 target_munmap(mapped_addr
, new_alloc_size
);
1106 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
1109 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
1112 #if defined(TARGET_ALPHA)
1113 /* We (partially) emulate OSF/1 on Alpha, which requires we
1114 return a proper errno, not an unchanged brk value. */
1115 return -TARGET_ENOMEM
;
1117 /* For everything else, return the previous break. */
1121 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
1122 abi_ulong target_fds_addr
,
1126 abi_ulong b
, *target_fds
;
1128 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1129 if (!(target_fds
= lock_user(VERIFY_READ
,
1131 sizeof(abi_ulong
) * nw
,
1133 return -TARGET_EFAULT
;
1137 for (i
= 0; i
< nw
; i
++) {
1138 /* grab the abi_ulong */
1139 __get_user(b
, &target_fds
[i
]);
1140 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1141 /* check the bit inside the abi_ulong */
1148 unlock_user(target_fds
, target_fds_addr
, 0);
1153 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
1154 abi_ulong target_fds_addr
,
1157 if (target_fds_addr
) {
1158 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
1159 return -TARGET_EFAULT
;
1167 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
1173 abi_ulong
*target_fds
;
1175 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1176 if (!(target_fds
= lock_user(VERIFY_WRITE
,
1178 sizeof(abi_ulong
) * nw
,
1180 return -TARGET_EFAULT
;
1183 for (i
= 0; i
< nw
; i
++) {
1185 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1186 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
1189 __put_user(v
, &target_fds
[i
]);
1192 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
/* Host clock tick rate used to rescale clock_t values for the guest.
 * Alpha hosts historically run at 1024 Hz; everything else at 100 Hz. */
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif
1203 static inline abi_long
host_to_target_clock_t(long ticks
)
1205 #if HOST_HZ == TARGET_HZ
1208 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1212 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1213 const struct rusage
*rusage
)
1215 struct target_rusage
*target_rusage
;
1217 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1218 return -TARGET_EFAULT
;
1219 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1220 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1221 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1222 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1223 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1224 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1225 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1226 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1227 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1228 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1229 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1230 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1231 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1232 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1233 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1234 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1235 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1236 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1237 unlock_user_struct(target_rusage
, target_addr
, 1);
1242 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1244 abi_ulong target_rlim_swap
;
1247 target_rlim_swap
= tswapal(target_rlim
);
1248 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1249 return RLIM_INFINITY
;
1251 result
= target_rlim_swap
;
1252 if (target_rlim_swap
!= (rlim_t
)result
)
1253 return RLIM_INFINITY
;
1258 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1260 abi_ulong target_rlim_swap
;
1263 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1264 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1266 target_rlim_swap
= rlim
;
1267 result
= tswapal(target_rlim_swap
);
1272 static inline int target_to_host_resource(int code
)
1275 case TARGET_RLIMIT_AS
:
1277 case TARGET_RLIMIT_CORE
:
1279 case TARGET_RLIMIT_CPU
:
1281 case TARGET_RLIMIT_DATA
:
1283 case TARGET_RLIMIT_FSIZE
:
1284 return RLIMIT_FSIZE
;
1285 case TARGET_RLIMIT_LOCKS
:
1286 return RLIMIT_LOCKS
;
1287 case TARGET_RLIMIT_MEMLOCK
:
1288 return RLIMIT_MEMLOCK
;
1289 case TARGET_RLIMIT_MSGQUEUE
:
1290 return RLIMIT_MSGQUEUE
;
1291 case TARGET_RLIMIT_NICE
:
1293 case TARGET_RLIMIT_NOFILE
:
1294 return RLIMIT_NOFILE
;
1295 case TARGET_RLIMIT_NPROC
:
1296 return RLIMIT_NPROC
;
1297 case TARGET_RLIMIT_RSS
:
1299 case TARGET_RLIMIT_RTPRIO
:
1300 return RLIMIT_RTPRIO
;
1301 case TARGET_RLIMIT_SIGPENDING
:
1302 return RLIMIT_SIGPENDING
;
1303 case TARGET_RLIMIT_STACK
:
1304 return RLIMIT_STACK
;
1310 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1311 abi_ulong target_tv_addr
)
1313 struct target_timeval
*target_tv
;
1315 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
1316 return -TARGET_EFAULT
;
1318 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1319 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1321 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1326 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1327 const struct timeval
*tv
)
1329 struct target_timeval
*target_tv
;
1331 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
1332 return -TARGET_EFAULT
;
1334 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1335 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1337 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1342 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1343 abi_ulong target_tz_addr
)
1345 struct target_timezone
*target_tz
;
1347 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1348 return -TARGET_EFAULT
;
1351 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1352 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1354 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1359 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1362 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1363 abi_ulong target_mq_attr_addr
)
1365 struct target_mq_attr
*target_mq_attr
;
1367 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1368 target_mq_attr_addr
, 1))
1369 return -TARGET_EFAULT
;
1371 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1372 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1373 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1374 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1376 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1381 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1382 const struct mq_attr
*attr
)
1384 struct target_mq_attr
*target_mq_attr
;
1386 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1387 target_mq_attr_addr
, 0))
1388 return -TARGET_EFAULT
;
1390 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1391 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1392 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1393 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1395 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1401 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1402 /* do_select() must return target values and target errnos. */
1403 static abi_long
do_select(int n
,
1404 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1405 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1407 fd_set rfds
, wfds
, efds
;
1408 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1410 struct timespec ts
, *ts_ptr
;
1413 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1417 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1421 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1426 if (target_tv_addr
) {
1427 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1428 return -TARGET_EFAULT
;
1429 ts
.tv_sec
= tv
.tv_sec
;
1430 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1436 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1439 if (!is_error(ret
)) {
1440 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1441 return -TARGET_EFAULT
;
1442 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1443 return -TARGET_EFAULT
;
1444 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1445 return -TARGET_EFAULT
;
1447 if (target_tv_addr
) {
1448 tv
.tv_sec
= ts
.tv_sec
;
1449 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1450 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1451 return -TARGET_EFAULT
;
1459 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1460 static abi_long
do_old_select(abi_ulong arg1
)
1462 struct target_sel_arg_struct
*sel
;
1463 abi_ulong inp
, outp
, exp
, tvp
;
1466 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1)) {
1467 return -TARGET_EFAULT
;
1470 nsel
= tswapal(sel
->n
);
1471 inp
= tswapal(sel
->inp
);
1472 outp
= tswapal(sel
->outp
);
1473 exp
= tswapal(sel
->exp
);
1474 tvp
= tswapal(sel
->tvp
);
1476 unlock_user_struct(sel
, arg1
, 0);
1478 return do_select(nsel
, inp
, outp
, exp
, tvp
);
1483 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1486 return pipe2(host_pipe
, flags
);
1492 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1493 int flags
, int is_pipe2
)
1497 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1500 return get_errno(ret
);
1502 /* Several targets have special calling conventions for the original
1503 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1505 #if defined(TARGET_ALPHA)
1506 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1507 return host_pipe
[0];
1508 #elif defined(TARGET_MIPS)
1509 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1510 return host_pipe
[0];
1511 #elif defined(TARGET_SH4)
1512 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1513 return host_pipe
[0];
1514 #elif defined(TARGET_SPARC)
1515 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1516 return host_pipe
[0];
1520 if (put_user_s32(host_pipe
[0], pipedes
)
1521 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1522 return -TARGET_EFAULT
;
1523 return get_errno(ret
);
1526 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1527 abi_ulong target_addr
,
1530 struct target_ip_mreqn
*target_smreqn
;
1532 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1534 return -TARGET_EFAULT
;
1535 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1536 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1537 if (len
== sizeof(struct target_ip_mreqn
))
1538 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1539 unlock_user(target_smreqn
, target_addr
, 0);
1544 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1545 abi_ulong target_addr
,
1548 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1549 sa_family_t sa_family
;
1550 struct target_sockaddr
*target_saddr
;
1552 if (fd_trans_target_to_host_addr(fd
)) {
1553 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1556 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1558 return -TARGET_EFAULT
;
1560 sa_family
= tswap16(target_saddr
->sa_family
);
1562 /* Oops. The caller might send a incomplete sun_path; sun_path
1563 * must be terminated by \0 (see the manual page), but
1564 * unfortunately it is quite common to specify sockaddr_un
1565 * length as "strlen(x->sun_path)" while it should be
1566 * "strlen(...) + 1". We'll fix that here if needed.
1567 * Linux kernel has a similar feature.
1570 if (sa_family
== AF_UNIX
) {
1571 if (len
< unix_maxlen
&& len
> 0) {
1572 char *cp
= (char*)target_saddr
;
1574 if ( cp
[len
-1] && !cp
[len
] )
1577 if (len
> unix_maxlen
)
1581 memcpy(addr
, target_saddr
, len
);
1582 addr
->sa_family
= sa_family
;
1583 if (sa_family
== AF_NETLINK
) {
1584 struct sockaddr_nl
*nladdr
;
1586 nladdr
= (struct sockaddr_nl
*)addr
;
1587 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1588 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1589 } else if (sa_family
== AF_PACKET
) {
1590 struct target_sockaddr_ll
*lladdr
;
1592 lladdr
= (struct target_sockaddr_ll
*)addr
;
1593 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1594 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1596 unlock_user(target_saddr
, target_addr
, 0);
1601 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1602 struct sockaddr
*addr
,
1605 struct target_sockaddr
*target_saddr
;
1611 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1613 return -TARGET_EFAULT
;
1614 memcpy(target_saddr
, addr
, len
);
1615 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1616 sizeof(target_saddr
->sa_family
)) {
1617 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1619 if (addr
->sa_family
== AF_NETLINK
&& len
>= sizeof(struct sockaddr_nl
)) {
1620 struct sockaddr_nl
*target_nl
= (struct sockaddr_nl
*)target_saddr
;
1621 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1622 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1623 } else if (addr
->sa_family
== AF_PACKET
) {
1624 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1625 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1626 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1628 unlock_user(target_saddr
, target_addr
, len
);
1633 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1634 struct target_msghdr
*target_msgh
)
1636 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1637 abi_long msg_controllen
;
1638 abi_ulong target_cmsg_addr
;
1639 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1640 socklen_t space
= 0;
1642 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1643 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1645 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1646 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1647 target_cmsg_start
= target_cmsg
;
1649 return -TARGET_EFAULT
;
1651 while (cmsg
&& target_cmsg
) {
1652 void *data
= CMSG_DATA(cmsg
);
1653 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1655 int len
= tswapal(target_cmsg
->cmsg_len
)
1656 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1658 space
+= CMSG_SPACE(len
);
1659 if (space
> msgh
->msg_controllen
) {
1660 space
-= CMSG_SPACE(len
);
1661 /* This is a QEMU bug, since we allocated the payload
1662 * area ourselves (unlike overflow in host-to-target
1663 * conversion, which is just the guest giving us a buffer
1664 * that's too small). It can't happen for the payload types
1665 * we currently support; if it becomes an issue in future
1666 * we would need to improve our allocation strategy to
1667 * something more intelligent than "twice the size of the
1668 * target buffer we're reading from".
1670 gemu_log("Host cmsg overflow\n");
1674 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1675 cmsg
->cmsg_level
= SOL_SOCKET
;
1677 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1679 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1680 cmsg
->cmsg_len
= CMSG_LEN(len
);
1682 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1683 int *fd
= (int *)data
;
1684 int *target_fd
= (int *)target_data
;
1685 int i
, numfds
= len
/ sizeof(int);
1687 for (i
= 0; i
< numfds
; i
++) {
1688 __get_user(fd
[i
], target_fd
+ i
);
1690 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1691 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1692 struct ucred
*cred
= (struct ucred
*)data
;
1693 struct target_ucred
*target_cred
=
1694 (struct target_ucred
*)target_data
;
1696 __get_user(cred
->pid
, &target_cred
->pid
);
1697 __get_user(cred
->uid
, &target_cred
->uid
);
1698 __get_user(cred
->gid
, &target_cred
->gid
);
1700 gemu_log("Unsupported ancillary data: %d/%d\n",
1701 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1702 memcpy(data
, target_data
, len
);
1705 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1706 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1709 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1711 msgh
->msg_controllen
= space
;
1715 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1716 struct msghdr
*msgh
)
1718 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1719 abi_long msg_controllen
;
1720 abi_ulong target_cmsg_addr
;
1721 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1722 socklen_t space
= 0;
1724 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1725 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1727 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1728 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1729 target_cmsg_start
= target_cmsg
;
1731 return -TARGET_EFAULT
;
1733 while (cmsg
&& target_cmsg
) {
1734 void *data
= CMSG_DATA(cmsg
);
1735 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1737 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1738 int tgt_len
, tgt_space
;
1740 /* We never copy a half-header but may copy half-data;
1741 * this is Linux's behaviour in put_cmsg(). Note that
1742 * truncation here is a guest problem (which we report
1743 * to the guest via the CTRUNC bit), unlike truncation
1744 * in target_to_host_cmsg, which is a QEMU bug.
1746 if (msg_controllen
< sizeof(struct cmsghdr
)) {
1747 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1751 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1752 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1754 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1756 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1758 tgt_len
= TARGET_CMSG_LEN(len
);
1760 /* Payload types which need a different size of payload on
1761 * the target must adjust tgt_len here.
1763 switch (cmsg
->cmsg_level
) {
1765 switch (cmsg
->cmsg_type
) {
1767 tgt_len
= sizeof(struct target_timeval
);
1776 if (msg_controllen
< tgt_len
) {
1777 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1778 tgt_len
= msg_controllen
;
1781 /* We must now copy-and-convert len bytes of payload
1782 * into tgt_len bytes of destination space. Bear in mind
1783 * that in both source and destination we may be dealing
1784 * with a truncated value!
1786 switch (cmsg
->cmsg_level
) {
1788 switch (cmsg
->cmsg_type
) {
1791 int *fd
= (int *)data
;
1792 int *target_fd
= (int *)target_data
;
1793 int i
, numfds
= tgt_len
/ sizeof(int);
1795 for (i
= 0; i
< numfds
; i
++) {
1796 __put_user(fd
[i
], target_fd
+ i
);
1802 struct timeval
*tv
= (struct timeval
*)data
;
1803 struct target_timeval
*target_tv
=
1804 (struct target_timeval
*)target_data
;
1806 if (len
!= sizeof(struct timeval
) ||
1807 tgt_len
!= sizeof(struct target_timeval
)) {
1811 /* copy struct timeval to target */
1812 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1813 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1816 case SCM_CREDENTIALS
:
1818 struct ucred
*cred
= (struct ucred
*)data
;
1819 struct target_ucred
*target_cred
=
1820 (struct target_ucred
*)target_data
;
1822 __put_user(cred
->pid
, &target_cred
->pid
);
1823 __put_user(cred
->uid
, &target_cred
->uid
);
1824 __put_user(cred
->gid
, &target_cred
->gid
);
1834 gemu_log("Unsupported ancillary data: %d/%d\n",
1835 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1836 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1837 if (tgt_len
> len
) {
1838 memset(target_data
+ len
, 0, tgt_len
- len
);
1842 target_cmsg
->cmsg_len
= tswapal(tgt_len
);
1843 tgt_space
= TARGET_CMSG_SPACE(len
);
1844 if (msg_controllen
< tgt_space
) {
1845 tgt_space
= msg_controllen
;
1847 msg_controllen
-= tgt_space
;
1849 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1850 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1853 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1855 target_msgh
->msg_controllen
= tswapal(space
);
1859 static void tswap_nlmsghdr(struct nlmsghdr
*nlh
)
1861 nlh
->nlmsg_len
= tswap32(nlh
->nlmsg_len
);
1862 nlh
->nlmsg_type
= tswap16(nlh
->nlmsg_type
);
1863 nlh
->nlmsg_flags
= tswap16(nlh
->nlmsg_flags
);
1864 nlh
->nlmsg_seq
= tswap32(nlh
->nlmsg_seq
);
1865 nlh
->nlmsg_pid
= tswap32(nlh
->nlmsg_pid
);
1868 static abi_long
host_to_target_for_each_nlmsg(struct nlmsghdr
*nlh
,
1870 abi_long (*host_to_target_nlmsg
)
1871 (struct nlmsghdr
*))
1876 while (len
> sizeof(struct nlmsghdr
)) {
1878 nlmsg_len
= nlh
->nlmsg_len
;
1879 if (nlmsg_len
< sizeof(struct nlmsghdr
) ||
1884 switch (nlh
->nlmsg_type
) {
1886 tswap_nlmsghdr(nlh
);
1892 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
1893 e
->error
= tswap32(e
->error
);
1894 tswap_nlmsghdr(&e
->msg
);
1895 tswap_nlmsghdr(nlh
);
1899 ret
= host_to_target_nlmsg(nlh
);
1901 tswap_nlmsghdr(nlh
);
1906 tswap_nlmsghdr(nlh
);
1907 len
-= NLMSG_ALIGN(nlmsg_len
);
1908 nlh
= (struct nlmsghdr
*)(((char*)nlh
) + NLMSG_ALIGN(nlmsg_len
));
1913 static abi_long
target_to_host_for_each_nlmsg(struct nlmsghdr
*nlh
,
1915 abi_long (*target_to_host_nlmsg
)
1916 (struct nlmsghdr
*))
1920 while (len
> sizeof(struct nlmsghdr
)) {
1921 if (tswap32(nlh
->nlmsg_len
) < sizeof(struct nlmsghdr
) ||
1922 tswap32(nlh
->nlmsg_len
) > len
) {
1925 tswap_nlmsghdr(nlh
);
1926 switch (nlh
->nlmsg_type
) {
1933 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
1934 e
->error
= tswap32(e
->error
);
1935 tswap_nlmsghdr(&e
->msg
);
1939 ret
= target_to_host_nlmsg(nlh
);
1944 len
-= NLMSG_ALIGN(nlh
->nlmsg_len
);
1945 nlh
= (struct nlmsghdr
*)(((char *)nlh
) + NLMSG_ALIGN(nlh
->nlmsg_len
));
1950 #ifdef CONFIG_RTNETLINK
1951 static abi_long
host_to_target_for_each_nlattr(struct nlattr
*nlattr
,
1952 size_t len
, void *context
,
1953 abi_long (*host_to_target_nlattr
)
1957 unsigned short nla_len
;
1960 while (len
> sizeof(struct nlattr
)) {
1961 nla_len
= nlattr
->nla_len
;
1962 if (nla_len
< sizeof(struct nlattr
) ||
1966 ret
= host_to_target_nlattr(nlattr
, context
);
1967 nlattr
->nla_len
= tswap16(nlattr
->nla_len
);
1968 nlattr
->nla_type
= tswap16(nlattr
->nla_type
);
1972 len
-= NLA_ALIGN(nla_len
);
1973 nlattr
= (struct nlattr
*)(((char *)nlattr
) + NLA_ALIGN(nla_len
));
1978 static abi_long
host_to_target_for_each_rtattr(struct rtattr
*rtattr
,
1980 abi_long (*host_to_target_rtattr
)
1983 unsigned short rta_len
;
1986 while (len
> sizeof(struct rtattr
)) {
1987 rta_len
= rtattr
->rta_len
;
1988 if (rta_len
< sizeof(struct rtattr
) ||
1992 ret
= host_to_target_rtattr(rtattr
);
1993 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
1994 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
1998 len
-= RTA_ALIGN(rta_len
);
1999 rtattr
= (struct rtattr
*)(((char *)rtattr
) + RTA_ALIGN(rta_len
));
2004 #define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
2006 static abi_long
host_to_target_data_bridge_nlattr(struct nlattr
*nlattr
,
2013 switch (nlattr
->nla_type
) {
2015 case QEMU_IFLA_BR_FDB_FLUSH
:
2018 case QEMU_IFLA_BR_GROUP_ADDR
:
2021 case QEMU_IFLA_BR_VLAN_FILTERING
:
2022 case QEMU_IFLA_BR_TOPOLOGY_CHANGE
:
2023 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED
:
2024 case QEMU_IFLA_BR_MCAST_ROUTER
:
2025 case QEMU_IFLA_BR_MCAST_SNOOPING
:
2026 case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR
:
2027 case QEMU_IFLA_BR_MCAST_QUERIER
:
2028 case QEMU_IFLA_BR_NF_CALL_IPTABLES
:
2029 case QEMU_IFLA_BR_NF_CALL_IP6TABLES
:
2030 case QEMU_IFLA_BR_NF_CALL_ARPTABLES
:
2033 case QEMU_IFLA_BR_PRIORITY
:
2034 case QEMU_IFLA_BR_VLAN_PROTOCOL
:
2035 case QEMU_IFLA_BR_GROUP_FWD_MASK
:
2036 case QEMU_IFLA_BR_ROOT_PORT
:
2037 case QEMU_IFLA_BR_VLAN_DEFAULT_PVID
:
2038 u16
= NLA_DATA(nlattr
);
2039 *u16
= tswap16(*u16
);
2042 case QEMU_IFLA_BR_FORWARD_DELAY
:
2043 case QEMU_IFLA_BR_HELLO_TIME
:
2044 case QEMU_IFLA_BR_MAX_AGE
:
2045 case QEMU_IFLA_BR_AGEING_TIME
:
2046 case QEMU_IFLA_BR_STP_STATE
:
2047 case QEMU_IFLA_BR_ROOT_PATH_COST
:
2048 case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY
:
2049 case QEMU_IFLA_BR_MCAST_HASH_MAX
:
2050 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT
:
2051 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT
:
2052 u32
= NLA_DATA(nlattr
);
2053 *u32
= tswap32(*u32
);
2056 case QEMU_IFLA_BR_HELLO_TIMER
:
2057 case QEMU_IFLA_BR_TCN_TIMER
:
2058 case QEMU_IFLA_BR_GC_TIMER
:
2059 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER
:
2060 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL
:
2061 case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL
:
2062 case QEMU_IFLA_BR_MCAST_QUERIER_INTVL
:
2063 case QEMU_IFLA_BR_MCAST_QUERY_INTVL
:
2064 case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
:
2065 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL
:
2066 u64
= NLA_DATA(nlattr
);
2067 *u64
= tswap64(*u64
);
2069 /* ifla_bridge_id: uin8_t[] */
2070 case QEMU_IFLA_BR_ROOT_ID
:
2071 case QEMU_IFLA_BR_BRIDGE_ID
:
2074 gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr
->nla_type
);
2080 static abi_long
host_to_target_slave_data_bridge_nlattr(struct nlattr
*nlattr
,
2087 switch (nlattr
->nla_type
) {
2089 case QEMU_IFLA_BRPORT_STATE
:
2090 case QEMU_IFLA_BRPORT_MODE
:
2091 case QEMU_IFLA_BRPORT_GUARD
:
2092 case QEMU_IFLA_BRPORT_PROTECT
:
2093 case QEMU_IFLA_BRPORT_FAST_LEAVE
:
2094 case QEMU_IFLA_BRPORT_LEARNING
:
2095 case QEMU_IFLA_BRPORT_UNICAST_FLOOD
:
2096 case QEMU_IFLA_BRPORT_PROXYARP
:
2097 case QEMU_IFLA_BRPORT_LEARNING_SYNC
:
2098 case QEMU_IFLA_BRPORT_PROXYARP_WIFI
:
2099 case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
:
2100 case QEMU_IFLA_BRPORT_CONFIG_PENDING
:
2101 case QEMU_IFLA_BRPORT_MULTICAST_ROUTER
:
2104 case QEMU_IFLA_BRPORT_PRIORITY
:
2105 case QEMU_IFLA_BRPORT_DESIGNATED_PORT
:
2106 case QEMU_IFLA_BRPORT_DESIGNATED_COST
:
2107 case QEMU_IFLA_BRPORT_ID
:
2108 case QEMU_IFLA_BRPORT_NO
:
2109 u16
= NLA_DATA(nlattr
);
2110 *u16
= tswap16(*u16
);
2113 case QEMU_IFLA_BRPORT_COST
:
2114 u32
= NLA_DATA(nlattr
);
2115 *u32
= tswap32(*u32
);
2118 case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER
:
2119 case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER
:
2120 case QEMU_IFLA_BRPORT_HOLD_TIMER
:
2121 u64
= NLA_DATA(nlattr
);
2122 *u64
= tswap64(*u64
);
2124 /* ifla_bridge_id: uint8_t[] */
2125 case QEMU_IFLA_BRPORT_ROOT_ID
:
2126 case QEMU_IFLA_BRPORT_BRIDGE_ID
:
2129 gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr
->nla_type
);
/* Context shared across IFLA_LINKINFO sub-attributes: records the link
 * "kind" strings so nested INFO_DATA can be dispatched to the right
 * converter.  NOTE(review): field set reconstructed from usage below —
 * confirm against the original header. */
struct linkinfo_context {
    int len;
    char *name;
    int slave_len;
    char *slave_name;
};
2142 static abi_long
host_to_target_data_linkinfo_nlattr(struct nlattr
*nlattr
,
2145 struct linkinfo_context
*li_context
= context
;
2147 switch (nlattr
->nla_type
) {
2149 case QEMU_IFLA_INFO_KIND
:
2150 li_context
->name
= NLA_DATA(nlattr
);
2151 li_context
->len
= nlattr
->nla_len
- NLA_HDRLEN
;
2153 case QEMU_IFLA_INFO_SLAVE_KIND
:
2154 li_context
->slave_name
= NLA_DATA(nlattr
);
2155 li_context
->slave_len
= nlattr
->nla_len
- NLA_HDRLEN
;
2158 case QEMU_IFLA_INFO_XSTATS
:
2159 /* FIXME: only used by CAN */
2162 case QEMU_IFLA_INFO_DATA
:
2163 if (strncmp(li_context
->name
, "bridge",
2164 li_context
->len
) == 0) {
2165 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
2168 host_to_target_data_bridge_nlattr
);
2170 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context
->name
);
2173 case QEMU_IFLA_INFO_SLAVE_DATA
:
2174 if (strncmp(li_context
->slave_name
, "bridge",
2175 li_context
->slave_len
) == 0) {
2176 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
2179 host_to_target_slave_data_bridge_nlattr
);
2181 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2182 li_context
->slave_name
);
2186 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr
->nla_type
);
2193 static abi_long
host_to_target_data_inet_nlattr(struct nlattr
*nlattr
,
2199 switch (nlattr
->nla_type
) {
2200 case QEMU_IFLA_INET_CONF
:
2201 u32
= NLA_DATA(nlattr
);
2202 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u32
);
2204 u32
[i
] = tswap32(u32
[i
]);
2208 gemu_log("Unknown host AF_INET type: %d\n", nlattr
->nla_type
);
2213 static abi_long
host_to_target_data_inet6_nlattr(struct nlattr
*nlattr
,
2218 struct ifla_cacheinfo
*ci
;
2221 switch (nlattr
->nla_type
) {
2223 case QEMU_IFLA_INET6_TOKEN
:
2226 case QEMU_IFLA_INET6_ADDR_GEN_MODE
:
2229 case QEMU_IFLA_INET6_FLAGS
:
2230 u32
= NLA_DATA(nlattr
);
2231 *u32
= tswap32(*u32
);
2234 case QEMU_IFLA_INET6_CONF
:
2235 u32
= NLA_DATA(nlattr
);
2236 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u32
);
2238 u32
[i
] = tswap32(u32
[i
]);
2241 /* ifla_cacheinfo */
2242 case QEMU_IFLA_INET6_CACHEINFO
:
2243 ci
= NLA_DATA(nlattr
);
2244 ci
->max_reasm_len
= tswap32(ci
->max_reasm_len
);
2245 ci
->tstamp
= tswap32(ci
->tstamp
);
2246 ci
->reachable_time
= tswap32(ci
->reachable_time
);
2247 ci
->retrans_time
= tswap32(ci
->retrans_time
);
2250 case QEMU_IFLA_INET6_STATS
:
2251 case QEMU_IFLA_INET6_ICMP6STATS
:
2252 u64
= NLA_DATA(nlattr
);
2253 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u64
);
2255 u64
[i
] = tswap64(u64
[i
]);
2259 gemu_log("Unknown host AF_INET6 type: %d\n", nlattr
->nla_type
);
2264 static abi_long
host_to_target_data_spec_nlattr(struct nlattr
*nlattr
,
2267 switch (nlattr
->nla_type
) {
2269 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2271 host_to_target_data_inet_nlattr
);
2273 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2275 host_to_target_data_inet6_nlattr
);
2277 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr
->nla_type
);
2283 static abi_long
host_to_target_data_link_rtattr(struct rtattr
*rtattr
)
2286 struct rtnl_link_stats
*st
;
2287 struct rtnl_link_stats64
*st64
;
2288 struct rtnl_link_ifmap
*map
;
2289 struct linkinfo_context li_context
;
2291 switch (rtattr
->rta_type
) {
2293 case QEMU_IFLA_ADDRESS
:
2294 case QEMU_IFLA_BROADCAST
:
2296 case QEMU_IFLA_IFNAME
:
2297 case QEMU_IFLA_QDISC
:
2300 case QEMU_IFLA_OPERSTATE
:
2301 case QEMU_IFLA_LINKMODE
:
2302 case QEMU_IFLA_CARRIER
:
2303 case QEMU_IFLA_PROTO_DOWN
:
2307 case QEMU_IFLA_LINK
:
2308 case QEMU_IFLA_WEIGHT
:
2309 case QEMU_IFLA_TXQLEN
:
2310 case QEMU_IFLA_CARRIER_CHANGES
:
2311 case QEMU_IFLA_NUM_RX_QUEUES
:
2312 case QEMU_IFLA_NUM_TX_QUEUES
:
2313 case QEMU_IFLA_PROMISCUITY
:
2314 case QEMU_IFLA_EXT_MASK
:
2315 case QEMU_IFLA_LINK_NETNSID
:
2316 case QEMU_IFLA_GROUP
:
2317 case QEMU_IFLA_MASTER
:
2318 case QEMU_IFLA_NUM_VF
:
2319 u32
= RTA_DATA(rtattr
);
2320 *u32
= tswap32(*u32
);
2322 /* struct rtnl_link_stats */
2323 case QEMU_IFLA_STATS
:
2324 st
= RTA_DATA(rtattr
);
2325 st
->rx_packets
= tswap32(st
->rx_packets
);
2326 st
->tx_packets
= tswap32(st
->tx_packets
);
2327 st
->rx_bytes
= tswap32(st
->rx_bytes
);
2328 st
->tx_bytes
= tswap32(st
->tx_bytes
);
2329 st
->rx_errors
= tswap32(st
->rx_errors
);
2330 st
->tx_errors
= tswap32(st
->tx_errors
);
2331 st
->rx_dropped
= tswap32(st
->rx_dropped
);
2332 st
->tx_dropped
= tswap32(st
->tx_dropped
);
2333 st
->multicast
= tswap32(st
->multicast
);
2334 st
->collisions
= tswap32(st
->collisions
);
2336 /* detailed rx_errors: */
2337 st
->rx_length_errors
= tswap32(st
->rx_length_errors
);
2338 st
->rx_over_errors
= tswap32(st
->rx_over_errors
);
2339 st
->rx_crc_errors
= tswap32(st
->rx_crc_errors
);
2340 st
->rx_frame_errors
= tswap32(st
->rx_frame_errors
);
2341 st
->rx_fifo_errors
= tswap32(st
->rx_fifo_errors
);
2342 st
->rx_missed_errors
= tswap32(st
->rx_missed_errors
);
2344 /* detailed tx_errors */
2345 st
->tx_aborted_errors
= tswap32(st
->tx_aborted_errors
);
2346 st
->tx_carrier_errors
= tswap32(st
->tx_carrier_errors
);
2347 st
->tx_fifo_errors
= tswap32(st
->tx_fifo_errors
);
2348 st
->tx_heartbeat_errors
= tswap32(st
->tx_heartbeat_errors
);
2349 st
->tx_window_errors
= tswap32(st
->tx_window_errors
);
2352 st
->rx_compressed
= tswap32(st
->rx_compressed
);
2353 st
->tx_compressed
= tswap32(st
->tx_compressed
);
2355 /* struct rtnl_link_stats64 */
2356 case QEMU_IFLA_STATS64
:
2357 st64
= RTA_DATA(rtattr
);
2358 st64
->rx_packets
= tswap64(st64
->rx_packets
);
2359 st64
->tx_packets
= tswap64(st64
->tx_packets
);
2360 st64
->rx_bytes
= tswap64(st64
->rx_bytes
);
2361 st64
->tx_bytes
= tswap64(st64
->tx_bytes
);
2362 st64
->rx_errors
= tswap64(st64
->rx_errors
);
2363 st64
->tx_errors
= tswap64(st64
->tx_errors
);
2364 st64
->rx_dropped
= tswap64(st64
->rx_dropped
);
2365 st64
->tx_dropped
= tswap64(st64
->tx_dropped
);
2366 st64
->multicast
= tswap64(st64
->multicast
);
2367 st64
->collisions
= tswap64(st64
->collisions
);
2369 /* detailed rx_errors: */
2370 st64
->rx_length_errors
= tswap64(st64
->rx_length_errors
);
2371 st64
->rx_over_errors
= tswap64(st64
->rx_over_errors
);
2372 st64
->rx_crc_errors
= tswap64(st64
->rx_crc_errors
);
2373 st64
->rx_frame_errors
= tswap64(st64
->rx_frame_errors
);
2374 st64
->rx_fifo_errors
= tswap64(st64
->rx_fifo_errors
);
2375 st64
->rx_missed_errors
= tswap64(st64
->rx_missed_errors
);
2377 /* detailed tx_errors */
2378 st64
->tx_aborted_errors
= tswap64(st64
->tx_aborted_errors
);
2379 st64
->tx_carrier_errors
= tswap64(st64
->tx_carrier_errors
);
2380 st64
->tx_fifo_errors
= tswap64(st64
->tx_fifo_errors
);
2381 st64
->tx_heartbeat_errors
= tswap64(st64
->tx_heartbeat_errors
);
2382 st64
->tx_window_errors
= tswap64(st64
->tx_window_errors
);
2385 st64
->rx_compressed
= tswap64(st64
->rx_compressed
);
2386 st64
->tx_compressed
= tswap64(st64
->tx_compressed
);
2388 /* struct rtnl_link_ifmap */
2390 map
= RTA_DATA(rtattr
);
2391 map
->mem_start
= tswap64(map
->mem_start
);
2392 map
->mem_end
= tswap64(map
->mem_end
);
2393 map
->base_addr
= tswap64(map
->base_addr
);
2394 map
->irq
= tswap16(map
->irq
);
2397 case QEMU_IFLA_LINKINFO
:
2398 memset(&li_context
, 0, sizeof(li_context
));
2399 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2401 host_to_target_data_linkinfo_nlattr
);
2402 case QEMU_IFLA_AF_SPEC
:
2403 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2405 host_to_target_data_spec_nlattr
);
2407 gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr
->rta_type
);
2413 static abi_long
host_to_target_data_addr_rtattr(struct rtattr
*rtattr
)
2416 struct ifa_cacheinfo
*ci
;
2418 switch (rtattr
->rta_type
) {
2419 /* binary: depends on family type */
2429 u32
= RTA_DATA(rtattr
);
2430 *u32
= tswap32(*u32
);
2432 /* struct ifa_cacheinfo */
2434 ci
= RTA_DATA(rtattr
);
2435 ci
->ifa_prefered
= tswap32(ci
->ifa_prefered
);
2436 ci
->ifa_valid
= tswap32(ci
->ifa_valid
);
2437 ci
->cstamp
= tswap32(ci
->cstamp
);
2438 ci
->tstamp
= tswap32(ci
->tstamp
);
2441 gemu_log("Unknown host IFA type: %d\n", rtattr
->rta_type
);
2447 static abi_long
host_to_target_data_route_rtattr(struct rtattr
*rtattr
)
2450 switch (rtattr
->rta_type
) {
2451 /* binary: depends on family type */
2460 u32
= RTA_DATA(rtattr
);
2461 *u32
= tswap32(*u32
);
2464 gemu_log("Unknown host RTA type: %d\n", rtattr
->rta_type
);
2470 static abi_long
host_to_target_link_rtattr(struct rtattr
*rtattr
,
2471 uint32_t rtattr_len
)
2473 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2474 host_to_target_data_link_rtattr
);
2477 static abi_long
host_to_target_addr_rtattr(struct rtattr
*rtattr
,
2478 uint32_t rtattr_len
)
2480 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2481 host_to_target_data_addr_rtattr
);
2484 static abi_long
host_to_target_route_rtattr(struct rtattr
*rtattr
,
2485 uint32_t rtattr_len
)
2487 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2488 host_to_target_data_route_rtattr
);
2491 static abi_long
host_to_target_data_route(struct nlmsghdr
*nlh
)
2494 struct ifinfomsg
*ifi
;
2495 struct ifaddrmsg
*ifa
;
2498 nlmsg_len
= nlh
->nlmsg_len
;
2499 switch (nlh
->nlmsg_type
) {
2503 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2504 ifi
= NLMSG_DATA(nlh
);
2505 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2506 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2507 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2508 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2509 host_to_target_link_rtattr(IFLA_RTA(ifi
),
2510 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifi
)));
2516 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2517 ifa
= NLMSG_DATA(nlh
);
2518 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2519 host_to_target_addr_rtattr(IFA_RTA(ifa
),
2520 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifa
)));
2526 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2527 rtm
= NLMSG_DATA(nlh
);
2528 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2529 host_to_target_route_rtattr(RTM_RTA(rtm
),
2530 nlmsg_len
- NLMSG_LENGTH(sizeof(*rtm
)));
2534 return -TARGET_EINVAL
;
2539 static inline abi_long
host_to_target_nlmsg_route(struct nlmsghdr
*nlh
,
2542 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_route
);
2545 static abi_long
target_to_host_for_each_rtattr(struct rtattr
*rtattr
,
2547 abi_long (*target_to_host_rtattr
)
2552 while (len
>= sizeof(struct rtattr
)) {
2553 if (tswap16(rtattr
->rta_len
) < sizeof(struct rtattr
) ||
2554 tswap16(rtattr
->rta_len
) > len
) {
2557 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
2558 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
2559 ret
= target_to_host_rtattr(rtattr
);
2563 len
-= RTA_ALIGN(rtattr
->rta_len
);
2564 rtattr
= (struct rtattr
*)(((char *)rtattr
) +
2565 RTA_ALIGN(rtattr
->rta_len
));
2570 static abi_long
target_to_host_data_link_rtattr(struct rtattr
*rtattr
)
2572 switch (rtattr
->rta_type
) {
2574 gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr
->rta_type
);
2580 static abi_long
target_to_host_data_addr_rtattr(struct rtattr
*rtattr
)
2582 switch (rtattr
->rta_type
) {
2583 /* binary: depends on family type */
2588 gemu_log("Unknown target IFA type: %d\n", rtattr
->rta_type
);
2594 static abi_long
target_to_host_data_route_rtattr(struct rtattr
*rtattr
)
2597 switch (rtattr
->rta_type
) {
2598 /* binary: depends on family type */
2606 u32
= RTA_DATA(rtattr
);
2607 *u32
= tswap32(*u32
);
2610 gemu_log("Unknown target RTA type: %d\n", rtattr
->rta_type
);
2616 static void target_to_host_link_rtattr(struct rtattr
*rtattr
,
2617 uint32_t rtattr_len
)
2619 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2620 target_to_host_data_link_rtattr
);
2623 static void target_to_host_addr_rtattr(struct rtattr
*rtattr
,
2624 uint32_t rtattr_len
)
2626 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2627 target_to_host_data_addr_rtattr
);
2630 static void target_to_host_route_rtattr(struct rtattr
*rtattr
,
2631 uint32_t rtattr_len
)
2633 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2634 target_to_host_data_route_rtattr
);
2637 static abi_long
target_to_host_data_route(struct nlmsghdr
*nlh
)
2639 struct ifinfomsg
*ifi
;
2640 struct ifaddrmsg
*ifa
;
2643 switch (nlh
->nlmsg_type
) {
2648 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2649 ifi
= NLMSG_DATA(nlh
);
2650 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2651 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2652 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2653 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2654 target_to_host_link_rtattr(IFLA_RTA(ifi
), nlh
->nlmsg_len
-
2655 NLMSG_LENGTH(sizeof(*ifi
)));
2661 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2662 ifa
= NLMSG_DATA(nlh
);
2663 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2664 target_to_host_addr_rtattr(IFA_RTA(ifa
), nlh
->nlmsg_len
-
2665 NLMSG_LENGTH(sizeof(*ifa
)));
2672 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2673 rtm
= NLMSG_DATA(nlh
);
2674 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2675 target_to_host_route_rtattr(RTM_RTA(rtm
), nlh
->nlmsg_len
-
2676 NLMSG_LENGTH(sizeof(*rtm
)));
2680 return -TARGET_EOPNOTSUPP
;
2685 static abi_long
target_to_host_nlmsg_route(struct nlmsghdr
*nlh
, size_t len
)
2687 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_route
);
2689 #endif /* CONFIG_RTNETLINK */
2691 static abi_long
host_to_target_data_audit(struct nlmsghdr
*nlh
)
2693 switch (nlh
->nlmsg_type
) {
2695 gemu_log("Unknown host audit message type %d\n",
2697 return -TARGET_EINVAL
;
2702 static inline abi_long
host_to_target_nlmsg_audit(struct nlmsghdr
*nlh
,
2705 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_audit
);
2708 static abi_long
target_to_host_data_audit(struct nlmsghdr
*nlh
)
2710 switch (nlh
->nlmsg_type
) {
2712 case AUDIT_FIRST_USER_MSG
... AUDIT_LAST_USER_MSG
:
2713 case AUDIT_FIRST_USER_MSG2
... AUDIT_LAST_USER_MSG2
:
2716 gemu_log("Unknown target audit message type %d\n",
2718 return -TARGET_EINVAL
;
2724 static abi_long
target_to_host_nlmsg_audit(struct nlmsghdr
*nlh
, size_t len
)
2726 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_audit
);
2729 /* do_setsockopt() Must return target values and target errnos. */
2730 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
2731 abi_ulong optval_addr
, socklen_t optlen
)
2735 struct ip_mreqn
*ip_mreq
;
2736 struct ip_mreq_source
*ip_mreq_source
;
2740 /* TCP options all take an 'int' value. */
2741 if (optlen
< sizeof(uint32_t))
2742 return -TARGET_EINVAL
;
2744 if (get_user_u32(val
, optval_addr
))
2745 return -TARGET_EFAULT
;
2746 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2753 case IP_ROUTER_ALERT
:
2757 case IP_MTU_DISCOVER
:
2763 case IP_MULTICAST_TTL
:
2764 case IP_MULTICAST_LOOP
:
2766 if (optlen
>= sizeof(uint32_t)) {
2767 if (get_user_u32(val
, optval_addr
))
2768 return -TARGET_EFAULT
;
2769 } else if (optlen
>= 1) {
2770 if (get_user_u8(val
, optval_addr
))
2771 return -TARGET_EFAULT
;
2773 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2775 case IP_ADD_MEMBERSHIP
:
2776 case IP_DROP_MEMBERSHIP
:
2777 if (optlen
< sizeof (struct target_ip_mreq
) ||
2778 optlen
> sizeof (struct target_ip_mreqn
))
2779 return -TARGET_EINVAL
;
2781 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
2782 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
2783 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
2786 case IP_BLOCK_SOURCE
:
2787 case IP_UNBLOCK_SOURCE
:
2788 case IP_ADD_SOURCE_MEMBERSHIP
:
2789 case IP_DROP_SOURCE_MEMBERSHIP
:
2790 if (optlen
!= sizeof (struct target_ip_mreq_source
))
2791 return -TARGET_EINVAL
;
2793 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2794 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2795 unlock_user (ip_mreq_source
, optval_addr
, 0);
2804 case IPV6_MTU_DISCOVER
:
2807 case IPV6_RECVPKTINFO
:
2809 if (optlen
< sizeof(uint32_t)) {
2810 return -TARGET_EINVAL
;
2812 if (get_user_u32(val
, optval_addr
)) {
2813 return -TARGET_EFAULT
;
2815 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2816 &val
, sizeof(val
)));
2825 /* struct icmp_filter takes an u32 value */
2826 if (optlen
< sizeof(uint32_t)) {
2827 return -TARGET_EINVAL
;
2830 if (get_user_u32(val
, optval_addr
)) {
2831 return -TARGET_EFAULT
;
2833 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2834 &val
, sizeof(val
)));
2841 case TARGET_SOL_SOCKET
:
2843 case TARGET_SO_RCVTIMEO
:
2847 optname
= SO_RCVTIMEO
;
2850 if (optlen
!= sizeof(struct target_timeval
)) {
2851 return -TARGET_EINVAL
;
2854 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2855 return -TARGET_EFAULT
;
2858 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2862 case TARGET_SO_SNDTIMEO
:
2863 optname
= SO_SNDTIMEO
;
2865 case TARGET_SO_ATTACH_FILTER
:
2867 struct target_sock_fprog
*tfprog
;
2868 struct target_sock_filter
*tfilter
;
2869 struct sock_fprog fprog
;
2870 struct sock_filter
*filter
;
2873 if (optlen
!= sizeof(*tfprog
)) {
2874 return -TARGET_EINVAL
;
2876 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2877 return -TARGET_EFAULT
;
2879 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2880 tswapal(tfprog
->filter
), 0)) {
2881 unlock_user_struct(tfprog
, optval_addr
, 1);
2882 return -TARGET_EFAULT
;
2885 fprog
.len
= tswap16(tfprog
->len
);
2886 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2887 if (filter
== NULL
) {
2888 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2889 unlock_user_struct(tfprog
, optval_addr
, 1);
2890 return -TARGET_ENOMEM
;
2892 for (i
= 0; i
< fprog
.len
; i
++) {
2893 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2894 filter
[i
].jt
= tfilter
[i
].jt
;
2895 filter
[i
].jf
= tfilter
[i
].jf
;
2896 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2898 fprog
.filter
= filter
;
2900 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2901 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2904 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2905 unlock_user_struct(tfprog
, optval_addr
, 1);
2908 case TARGET_SO_BINDTODEVICE
:
2910 char *dev_ifname
, *addr_ifname
;
2912 if (optlen
> IFNAMSIZ
- 1) {
2913 optlen
= IFNAMSIZ
- 1;
2915 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2917 return -TARGET_EFAULT
;
2919 optname
= SO_BINDTODEVICE
;
2920 addr_ifname
= alloca(IFNAMSIZ
);
2921 memcpy(addr_ifname
, dev_ifname
, optlen
);
2922 addr_ifname
[optlen
] = 0;
2923 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2924 addr_ifname
, optlen
));
2925 unlock_user (dev_ifname
, optval_addr
, 0);
2928 /* Options with 'int' argument. */
2929 case TARGET_SO_DEBUG
:
2932 case TARGET_SO_REUSEADDR
:
2933 optname
= SO_REUSEADDR
;
2935 case TARGET_SO_TYPE
:
2938 case TARGET_SO_ERROR
:
2941 case TARGET_SO_DONTROUTE
:
2942 optname
= SO_DONTROUTE
;
2944 case TARGET_SO_BROADCAST
:
2945 optname
= SO_BROADCAST
;
2947 case TARGET_SO_SNDBUF
:
2948 optname
= SO_SNDBUF
;
2950 case TARGET_SO_SNDBUFFORCE
:
2951 optname
= SO_SNDBUFFORCE
;
2953 case TARGET_SO_RCVBUF
:
2954 optname
= SO_RCVBUF
;
2956 case TARGET_SO_RCVBUFFORCE
:
2957 optname
= SO_RCVBUFFORCE
;
2959 case TARGET_SO_KEEPALIVE
:
2960 optname
= SO_KEEPALIVE
;
2962 case TARGET_SO_OOBINLINE
:
2963 optname
= SO_OOBINLINE
;
2965 case TARGET_SO_NO_CHECK
:
2966 optname
= SO_NO_CHECK
;
2968 case TARGET_SO_PRIORITY
:
2969 optname
= SO_PRIORITY
;
2972 case TARGET_SO_BSDCOMPAT
:
2973 optname
= SO_BSDCOMPAT
;
2976 case TARGET_SO_PASSCRED
:
2977 optname
= SO_PASSCRED
;
2979 case TARGET_SO_PASSSEC
:
2980 optname
= SO_PASSSEC
;
2982 case TARGET_SO_TIMESTAMP
:
2983 optname
= SO_TIMESTAMP
;
2985 case TARGET_SO_RCVLOWAT
:
2986 optname
= SO_RCVLOWAT
;
2992 if (optlen
< sizeof(uint32_t))
2993 return -TARGET_EINVAL
;
2995 if (get_user_u32(val
, optval_addr
))
2996 return -TARGET_EFAULT
;
2997 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
3001 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
3002 ret
= -TARGET_ENOPROTOOPT
;
3007 /* do_getsockopt() Must return target values and target errnos. */
3008 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
3009 abi_ulong optval_addr
, abi_ulong optlen
)
3016 case TARGET_SOL_SOCKET
:
3019 /* These don't just return a single integer */
3020 case TARGET_SO_LINGER
:
3021 case TARGET_SO_RCVTIMEO
:
3022 case TARGET_SO_SNDTIMEO
:
3023 case TARGET_SO_PEERNAME
:
3025 case TARGET_SO_PEERCRED
: {
3028 struct target_ucred
*tcr
;
3030 if (get_user_u32(len
, optlen
)) {
3031 return -TARGET_EFAULT
;
3034 return -TARGET_EINVAL
;
3038 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
3046 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
3047 return -TARGET_EFAULT
;
3049 __put_user(cr
.pid
, &tcr
->pid
);
3050 __put_user(cr
.uid
, &tcr
->uid
);
3051 __put_user(cr
.gid
, &tcr
->gid
);
3052 unlock_user_struct(tcr
, optval_addr
, 1);
3053 if (put_user_u32(len
, optlen
)) {
3054 return -TARGET_EFAULT
;
3058 /* Options with 'int' argument. */
3059 case TARGET_SO_DEBUG
:
3062 case TARGET_SO_REUSEADDR
:
3063 optname
= SO_REUSEADDR
;
3065 case TARGET_SO_TYPE
:
3068 case TARGET_SO_ERROR
:
3071 case TARGET_SO_DONTROUTE
:
3072 optname
= SO_DONTROUTE
;
3074 case TARGET_SO_BROADCAST
:
3075 optname
= SO_BROADCAST
;
3077 case TARGET_SO_SNDBUF
:
3078 optname
= SO_SNDBUF
;
3080 case TARGET_SO_RCVBUF
:
3081 optname
= SO_RCVBUF
;
3083 case TARGET_SO_KEEPALIVE
:
3084 optname
= SO_KEEPALIVE
;
3086 case TARGET_SO_OOBINLINE
:
3087 optname
= SO_OOBINLINE
;
3089 case TARGET_SO_NO_CHECK
:
3090 optname
= SO_NO_CHECK
;
3092 case TARGET_SO_PRIORITY
:
3093 optname
= SO_PRIORITY
;
3096 case TARGET_SO_BSDCOMPAT
:
3097 optname
= SO_BSDCOMPAT
;
3100 case TARGET_SO_PASSCRED
:
3101 optname
= SO_PASSCRED
;
3103 case TARGET_SO_TIMESTAMP
:
3104 optname
= SO_TIMESTAMP
;
3106 case TARGET_SO_RCVLOWAT
:
3107 optname
= SO_RCVLOWAT
;
3109 case TARGET_SO_ACCEPTCONN
:
3110 optname
= SO_ACCEPTCONN
;
3117 /* TCP options all take an 'int' value. */
3119 if (get_user_u32(len
, optlen
))
3120 return -TARGET_EFAULT
;
3122 return -TARGET_EINVAL
;
3124 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3127 if (optname
== SO_TYPE
) {
3128 val
= host_to_target_sock_type(val
);
3133 if (put_user_u32(val
, optval_addr
))
3134 return -TARGET_EFAULT
;
3136 if (put_user_u8(val
, optval_addr
))
3137 return -TARGET_EFAULT
;
3139 if (put_user_u32(len
, optlen
))
3140 return -TARGET_EFAULT
;
3147 case IP_ROUTER_ALERT
:
3151 case IP_MTU_DISCOVER
:
3157 case IP_MULTICAST_TTL
:
3158 case IP_MULTICAST_LOOP
:
3159 if (get_user_u32(len
, optlen
))
3160 return -TARGET_EFAULT
;
3162 return -TARGET_EINVAL
;
3164 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3167 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
3169 if (put_user_u32(len
, optlen
)
3170 || put_user_u8(val
, optval_addr
))
3171 return -TARGET_EFAULT
;
3173 if (len
> sizeof(int))
3175 if (put_user_u32(len
, optlen
)
3176 || put_user_u32(val
, optval_addr
))
3177 return -TARGET_EFAULT
;
3181 ret
= -TARGET_ENOPROTOOPT
;
3187 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
3189 ret
= -TARGET_EOPNOTSUPP
;
3195 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
3196 abi_ulong count
, int copy
)
3198 struct target_iovec
*target_vec
;
3200 abi_ulong total_len
, max_len
;
3203 bool bad_address
= false;
3209 if (count
> IOV_MAX
) {
3214 vec
= g_try_new0(struct iovec
, count
);
3220 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3221 count
* sizeof(struct target_iovec
), 1);
3222 if (target_vec
== NULL
) {
3227 /* ??? If host page size > target page size, this will result in a
3228 value larger than what we can actually support. */
3229 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
3232 for (i
= 0; i
< count
; i
++) {
3233 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3234 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3239 } else if (len
== 0) {
3240 /* Zero length pointer is ignored. */
3241 vec
[i
].iov_base
= 0;
3243 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
3244 /* If the first buffer pointer is bad, this is a fault. But
3245 * subsequent bad buffers will result in a partial write; this
3246 * is realized by filling the vector with null pointers and
3248 if (!vec
[i
].iov_base
) {
3259 if (len
> max_len
- total_len
) {
3260 len
= max_len
- total_len
;
3263 vec
[i
].iov_len
= len
;
3267 unlock_user(target_vec
, target_addr
, 0);
3272 if (tswapal(target_vec
[i
].iov_len
) > 0) {
3273 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
3276 unlock_user(target_vec
, target_addr
, 0);
3283 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
3284 abi_ulong count
, int copy
)
3286 struct target_iovec
*target_vec
;
3289 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3290 count
* sizeof(struct target_iovec
), 1);
3292 for (i
= 0; i
< count
; i
++) {
3293 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3294 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3298 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
3300 unlock_user(target_vec
, target_addr
, 0);
3306 static inline int target_to_host_sock_type(int *type
)
3309 int target_type
= *type
;
3311 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
3312 case TARGET_SOCK_DGRAM
:
3313 host_type
= SOCK_DGRAM
;
3315 case TARGET_SOCK_STREAM
:
3316 host_type
= SOCK_STREAM
;
3319 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
3322 if (target_type
& TARGET_SOCK_CLOEXEC
) {
3323 #if defined(SOCK_CLOEXEC)
3324 host_type
|= SOCK_CLOEXEC
;
3326 return -TARGET_EINVAL
;
3329 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3330 #if defined(SOCK_NONBLOCK)
3331 host_type
|= SOCK_NONBLOCK
;
3332 #elif !defined(O_NONBLOCK)
3333 return -TARGET_EINVAL
;
3340 /* Try to emulate socket type flags after socket creation. */
3341 static int sock_flags_fixup(int fd
, int target_type
)
3343 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3344 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3345 int flags
= fcntl(fd
, F_GETFL
);
3346 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
3348 return -TARGET_EINVAL
;
3355 static abi_long
packet_target_to_host_sockaddr(void *host_addr
,
3356 abi_ulong target_addr
,
3359 struct sockaddr
*addr
= host_addr
;
3360 struct target_sockaddr
*target_saddr
;
3362 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
3363 if (!target_saddr
) {
3364 return -TARGET_EFAULT
;
3367 memcpy(addr
, target_saddr
, len
);
3368 addr
->sa_family
= tswap16(target_saddr
->sa_family
);
3369 /* spkt_protocol is big-endian */
3371 unlock_user(target_saddr
, target_addr
, 0);
3375 static TargetFdTrans target_packet_trans
= {
3376 .target_to_host_addr
= packet_target_to_host_sockaddr
,
3379 #ifdef CONFIG_RTNETLINK
3380 static abi_long
netlink_route_target_to_host(void *buf
, size_t len
)
3384 ret
= target_to_host_nlmsg_route(buf
, len
);
3392 static abi_long
netlink_route_host_to_target(void *buf
, size_t len
)
3396 ret
= host_to_target_nlmsg_route(buf
, len
);
3404 static TargetFdTrans target_netlink_route_trans
= {
3405 .target_to_host_data
= netlink_route_target_to_host
,
3406 .host_to_target_data
= netlink_route_host_to_target
,
3408 #endif /* CONFIG_RTNETLINK */
3410 static abi_long
netlink_audit_target_to_host(void *buf
, size_t len
)
3414 ret
= target_to_host_nlmsg_audit(buf
, len
);
3422 static abi_long
netlink_audit_host_to_target(void *buf
, size_t len
)
3426 ret
= host_to_target_nlmsg_audit(buf
, len
);
3434 static TargetFdTrans target_netlink_audit_trans
= {
3435 .target_to_host_data
= netlink_audit_target_to_host
,
3436 .host_to_target_data
= netlink_audit_host_to_target
,
3439 /* do_socket() Must return target values and target errnos. */
3440 static abi_long
do_socket(int domain
, int type
, int protocol
)
3442 int target_type
= type
;
3445 ret
= target_to_host_sock_type(&type
);
3450 if (domain
== PF_NETLINK
&& !(
3451 #ifdef CONFIG_RTNETLINK
3452 protocol
== NETLINK_ROUTE
||
3454 protocol
== NETLINK_KOBJECT_UEVENT
||
3455 protocol
== NETLINK_AUDIT
)) {
3456 return -EPFNOSUPPORT
;
3459 if (domain
== AF_PACKET
||
3460 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
3461 protocol
= tswap16(protocol
);
3464 ret
= get_errno(socket(domain
, type
, protocol
));
3466 ret
= sock_flags_fixup(ret
, target_type
);
3467 if (type
== SOCK_PACKET
) {
3468 /* Manage an obsolete case :
3469 * if socket type is SOCK_PACKET, bind by name
3471 fd_trans_register(ret
, &target_packet_trans
);
3472 } else if (domain
== PF_NETLINK
) {
3474 #ifdef CONFIG_RTNETLINK
3476 fd_trans_register(ret
, &target_netlink_route_trans
);
3479 case NETLINK_KOBJECT_UEVENT
:
3480 /* nothing to do: messages are strings */
3483 fd_trans_register(ret
, &target_netlink_audit_trans
);
3486 g_assert_not_reached();
3493 /* do_bind() Must return target values and target errnos. */
3494 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
3500 if ((int)addrlen
< 0) {
3501 return -TARGET_EINVAL
;
3504 addr
= alloca(addrlen
+1);
3506 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3510 return get_errno(bind(sockfd
, addr
, addrlen
));
3513 /* do_connect() Must return target values and target errnos. */
3514 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
3520 if ((int)addrlen
< 0) {
3521 return -TARGET_EINVAL
;
3524 addr
= alloca(addrlen
+1);
3526 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3530 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
3533 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3534 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
3535 int flags
, int send
)
3541 abi_ulong target_vec
;
3543 if (msgp
->msg_name
) {
3544 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
3545 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
3546 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
3547 tswapal(msgp
->msg_name
),
3549 if (ret
== -TARGET_EFAULT
) {
3550 /* For connected sockets msg_name and msg_namelen must
3551 * be ignored, so returning EFAULT immediately is wrong.
3552 * Instead, pass a bad msg_name to the host kernel, and
3553 * let it decide whether to return EFAULT or not.
3555 msg
.msg_name
= (void *)-1;
3560 msg
.msg_name
= NULL
;
3561 msg
.msg_namelen
= 0;
3563 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3564 msg
.msg_control
= alloca(msg
.msg_controllen
);
3565 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3567 count
= tswapal(msgp
->msg_iovlen
);
3568 target_vec
= tswapal(msgp
->msg_iov
);
3570 if (count
> IOV_MAX
) {
3571 /* sendrcvmsg returns a different errno for this condition than
3572 * readv/writev, so we must catch it here before lock_iovec() does.
3574 ret
= -TARGET_EMSGSIZE
;
3578 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3579 target_vec
, count
, send
);
3581 ret
= -host_to_target_errno(errno
);
3584 msg
.msg_iovlen
= count
;
3588 if (fd_trans_target_to_host_data(fd
)) {
3591 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3592 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3593 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3594 msg
.msg_iov
->iov_len
);
3596 msg
.msg_iov
->iov_base
= host_msg
;
3597 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3601 ret
= target_to_host_cmsg(&msg
, msgp
);
3603 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3607 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3608 if (!is_error(ret
)) {
3610 if (fd_trans_host_to_target_data(fd
)) {
3611 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3614 ret
= host_to_target_cmsg(msgp
, &msg
);
3616 if (!is_error(ret
)) {
3617 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3618 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
3619 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3620 msg
.msg_name
, msg
.msg_namelen
);
3632 unlock_iovec(vec
, target_vec
, count
, !send
);
3637 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3638 int flags
, int send
)
3641 struct target_msghdr
*msgp
;
3643 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3647 return -TARGET_EFAULT
;
3649 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3650 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3654 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3655 * so it might not have this *mmsg-specific flag either.
3657 #ifndef MSG_WAITFORONE
3658 #define MSG_WAITFORONE 0x10000
3661 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3662 unsigned int vlen
, unsigned int flags
,
3665 struct target_mmsghdr
*mmsgp
;
3669 if (vlen
> UIO_MAXIOV
) {
3673 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3675 return -TARGET_EFAULT
;
3678 for (i
= 0; i
< vlen
; i
++) {
3679 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3680 if (is_error(ret
)) {
3683 mmsgp
[i
].msg_len
= tswap32(ret
);
3684 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3685 if (flags
& MSG_WAITFORONE
) {
3686 flags
|= MSG_DONTWAIT
;
3690 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3692 /* Return number of datagrams sent if we sent any at all;
3693 * otherwise return the error.
3701 /* do_accept4() Must return target values and target errnos. */
3702 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3703 abi_ulong target_addrlen_addr
, int flags
)
3710 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3712 if (target_addr
== 0) {
3713 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3716 /* linux returns EINVAL if addrlen pointer is invalid */
3717 if (get_user_u32(addrlen
, target_addrlen_addr
))
3718 return -TARGET_EINVAL
;
3720 if ((int)addrlen
< 0) {
3721 return -TARGET_EINVAL
;
3724 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3725 return -TARGET_EINVAL
;
3727 addr
= alloca(addrlen
);
3729 ret
= get_errno(safe_accept4(fd
, addr
, &addrlen
, host_flags
));
3730 if (!is_error(ret
)) {
3731 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3732 if (put_user_u32(addrlen
, target_addrlen_addr
))
3733 ret
= -TARGET_EFAULT
;
3738 /* do_getpeername() Must return target values and target errnos. */
3739 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3740 abi_ulong target_addrlen_addr
)
3746 if (get_user_u32(addrlen
, target_addrlen_addr
))
3747 return -TARGET_EFAULT
;
3749 if ((int)addrlen
< 0) {
3750 return -TARGET_EINVAL
;
3753 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3754 return -TARGET_EFAULT
;
3756 addr
= alloca(addrlen
);
3758 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
3759 if (!is_error(ret
)) {
3760 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3761 if (put_user_u32(addrlen
, target_addrlen_addr
))
3762 ret
= -TARGET_EFAULT
;
3767 /* do_getsockname() Must return target values and target errnos. */
3768 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3769 abi_ulong target_addrlen_addr
)
3775 if (get_user_u32(addrlen
, target_addrlen_addr
))
3776 return -TARGET_EFAULT
;
3778 if ((int)addrlen
< 0) {
3779 return -TARGET_EINVAL
;
3782 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3783 return -TARGET_EFAULT
;
3785 addr
= alloca(addrlen
);
3787 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
3788 if (!is_error(ret
)) {
3789 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3790 if (put_user_u32(addrlen
, target_addrlen_addr
))
3791 ret
= -TARGET_EFAULT
;
3796 /* do_socketpair() Must return target values and target errnos. */
3797 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3798 abi_ulong target_tab_addr
)
3803 target_to_host_sock_type(&type
);
3805 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3806 if (!is_error(ret
)) {
3807 if (put_user_s32(tab
[0], target_tab_addr
)
3808 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3809 ret
= -TARGET_EFAULT
;
3814 /* do_sendto() Must return target values and target errnos. */
3815 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3816 abi_ulong target_addr
, socklen_t addrlen
)
3820 void *copy_msg
= NULL
;
3823 if ((int)addrlen
< 0) {
3824 return -TARGET_EINVAL
;
3827 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3829 return -TARGET_EFAULT
;
3830 if (fd_trans_target_to_host_data(fd
)) {
3831 copy_msg
= host_msg
;
3832 host_msg
= g_malloc(len
);
3833 memcpy(host_msg
, copy_msg
, len
);
3834 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3840 addr
= alloca(addrlen
+1);
3841 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3845 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3847 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3852 host_msg
= copy_msg
;
3854 unlock_user(host_msg
, msg
, 0);
3858 /* do_recvfrom() Must return target values and target errnos. */
3859 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3860 abi_ulong target_addr
,
3861 abi_ulong target_addrlen
)
3868 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3870 return -TARGET_EFAULT
;
3872 if (get_user_u32(addrlen
, target_addrlen
)) {
3873 ret
= -TARGET_EFAULT
;
3876 if ((int)addrlen
< 0) {
3877 ret
= -TARGET_EINVAL
;
3880 addr
= alloca(addrlen
);
3881 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
3884 addr
= NULL
; /* To keep compiler quiet. */
3885 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
3887 if (!is_error(ret
)) {
3888 if (fd_trans_host_to_target_data(fd
)) {
3889 ret
= fd_trans_host_to_target_data(fd
)(host_msg
, ret
);
3892 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3893 if (put_user_u32(addrlen
, target_addrlen
)) {
3894 ret
= -TARGET_EFAULT
;
3898 unlock_user(host_msg
, msg
, len
);
3901 unlock_user(host_msg
, msg
, 0);
3906 #ifdef TARGET_NR_socketcall
3907 /* do_socketcall() must return target values and target errnos. */
3908 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
3910 static const unsigned nargs
[] = { /* number of arguments per operation */
3911 [TARGET_SYS_SOCKET
] = 3, /* domain, type, protocol */
3912 [TARGET_SYS_BIND
] = 3, /* fd, addr, addrlen */
3913 [TARGET_SYS_CONNECT
] = 3, /* fd, addr, addrlen */
3914 [TARGET_SYS_LISTEN
] = 2, /* fd, backlog */
3915 [TARGET_SYS_ACCEPT
] = 3, /* fd, addr, addrlen */
3916 [TARGET_SYS_GETSOCKNAME
] = 3, /* fd, addr, addrlen */
3917 [TARGET_SYS_GETPEERNAME
] = 3, /* fd, addr, addrlen */
3918 [TARGET_SYS_SOCKETPAIR
] = 4, /* domain, type, protocol, tab */
3919 [TARGET_SYS_SEND
] = 4, /* fd, msg, len, flags */
3920 [TARGET_SYS_RECV
] = 4, /* fd, msg, len, flags */
3921 [TARGET_SYS_SENDTO
] = 6, /* fd, msg, len, flags, addr, addrlen */
3922 [TARGET_SYS_RECVFROM
] = 6, /* fd, msg, len, flags, addr, addrlen */
3923 [TARGET_SYS_SHUTDOWN
] = 2, /* fd, how */
3924 [TARGET_SYS_SETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
3925 [TARGET_SYS_GETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
3926 [TARGET_SYS_SENDMSG
] = 3, /* fd, msg, flags */
3927 [TARGET_SYS_RECVMSG
] = 3, /* fd, msg, flags */
3928 [TARGET_SYS_ACCEPT4
] = 4, /* fd, addr, addrlen, flags */
3929 [TARGET_SYS_RECVMMSG
] = 4, /* fd, msgvec, vlen, flags */
3930 [TARGET_SYS_SENDMMSG
] = 4, /* fd, msgvec, vlen, flags */
3932 abi_long a
[6]; /* max 6 args */
3935 /* check the range of the first argument num */
3936 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3937 if (num
< 1 || num
> TARGET_SYS_SENDMMSG
) {
3938 return -TARGET_EINVAL
;
3940 /* ensure we have space for args */
3941 if (nargs
[num
] > ARRAY_SIZE(a
)) {
3942 return -TARGET_EINVAL
;
3944 /* collect the arguments in a[] according to nargs[] */
3945 for (i
= 0; i
< nargs
[num
]; ++i
) {
3946 if (get_user_ual(a
[i
], vptr
+ i
* sizeof(abi_long
)) != 0) {
3947 return -TARGET_EFAULT
;
3950 /* now when we have the args, invoke the appropriate underlying function */
3952 case TARGET_SYS_SOCKET
: /* domain, type, protocol */
3953 return do_socket(a
[0], a
[1], a
[2]);
3954 case TARGET_SYS_BIND
: /* sockfd, addr, addrlen */
3955 return do_bind(a
[0], a
[1], a
[2]);
3956 case TARGET_SYS_CONNECT
: /* sockfd, addr, addrlen */
3957 return do_connect(a
[0], a
[1], a
[2]);
3958 case TARGET_SYS_LISTEN
: /* sockfd, backlog */
3959 return get_errno(listen(a
[0], a
[1]));
3960 case TARGET_SYS_ACCEPT
: /* sockfd, addr, addrlen */
3961 return do_accept4(a
[0], a
[1], a
[2], 0);
3962 case TARGET_SYS_GETSOCKNAME
: /* sockfd, addr, addrlen */
3963 return do_getsockname(a
[0], a
[1], a
[2]);
3964 case TARGET_SYS_GETPEERNAME
: /* sockfd, addr, addrlen */
3965 return do_getpeername(a
[0], a
[1], a
[2]);
3966 case TARGET_SYS_SOCKETPAIR
: /* domain, type, protocol, tab */
3967 return do_socketpair(a
[0], a
[1], a
[2], a
[3]);
3968 case TARGET_SYS_SEND
: /* sockfd, msg, len, flags */
3969 return do_sendto(a
[0], a
[1], a
[2], a
[3], 0, 0);
3970 case TARGET_SYS_RECV
: /* sockfd, msg, len, flags */
3971 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], 0, 0);
3972 case TARGET_SYS_SENDTO
: /* sockfd, msg, len, flags, addr, addrlen */
3973 return do_sendto(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3974 case TARGET_SYS_RECVFROM
: /* sockfd, msg, len, flags, addr, addrlen */
3975 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
3976 case TARGET_SYS_SHUTDOWN
: /* sockfd, how */
3977 return get_errno(shutdown(a
[0], a
[1]));
3978 case TARGET_SYS_SETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
3979 return do_setsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3980 case TARGET_SYS_GETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
3981 return do_getsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
3982 case TARGET_SYS_SENDMSG
: /* sockfd, msg, flags */
3983 return do_sendrecvmsg(a
[0], a
[1], a
[2], 1);
3984 case TARGET_SYS_RECVMSG
: /* sockfd, msg, flags */
3985 return do_sendrecvmsg(a
[0], a
[1], a
[2], 0);
3986 case TARGET_SYS_ACCEPT4
: /* sockfd, addr, addrlen, flags */
3987 return do_accept4(a
[0], a
[1], a
[2], a
[3]);
3988 case TARGET_SYS_RECVMMSG
: /* sockfd, msgvec, vlen, flags */
3989 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 0);
3990 case TARGET_SYS_SENDMMSG
: /* sockfd, msgvec, vlen, flags */
3991 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 1);
3993 gemu_log("Unsupported socketcall: %d\n", num
);
3994 return -TARGET_EINVAL
;
3999 #define N_SHM_REGIONS 32
4001 static struct shm_region
{
4005 } shm_regions
[N_SHM_REGIONS
];
4007 #ifndef TARGET_SEMID64_DS
4008 /* asm-generic version of this struct */
4009 struct target_semid64_ds
4011 struct target_ipc_perm sem_perm
;
4012 abi_ulong sem_otime
;
4013 #if TARGET_ABI_BITS == 32
4014 abi_ulong __unused1
;
4016 abi_ulong sem_ctime
;
4017 #if TARGET_ABI_BITS == 32
4018 abi_ulong __unused2
;
4020 abi_ulong sem_nsems
;
4021 abi_ulong __unused3
;
4022 abi_ulong __unused4
;
4026 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
4027 abi_ulong target_addr
)
4029 struct target_ipc_perm
*target_ip
;
4030 struct target_semid64_ds
*target_sd
;
4032 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4033 return -TARGET_EFAULT
;
4034 target_ip
= &(target_sd
->sem_perm
);
4035 host_ip
->__key
= tswap32(target_ip
->__key
);
4036 host_ip
->uid
= tswap32(target_ip
->uid
);
4037 host_ip
->gid
= tswap32(target_ip
->gid
);
4038 host_ip
->cuid
= tswap32(target_ip
->cuid
);
4039 host_ip
->cgid
= tswap32(target_ip
->cgid
);
4040 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4041 host_ip
->mode
= tswap32(target_ip
->mode
);
4043 host_ip
->mode
= tswap16(target_ip
->mode
);
4045 #if defined(TARGET_PPC)
4046 host_ip
->__seq
= tswap32(target_ip
->__seq
);
4048 host_ip
->__seq
= tswap16(target_ip
->__seq
);
4050 unlock_user_struct(target_sd
, target_addr
, 0);
4054 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
4055 struct ipc_perm
*host_ip
)
4057 struct target_ipc_perm
*target_ip
;
4058 struct target_semid64_ds
*target_sd
;
4060 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4061 return -TARGET_EFAULT
;
4062 target_ip
= &(target_sd
->sem_perm
);
4063 target_ip
->__key
= tswap32(host_ip
->__key
);
4064 target_ip
->uid
= tswap32(host_ip
->uid
);
4065 target_ip
->gid
= tswap32(host_ip
->gid
);
4066 target_ip
->cuid
= tswap32(host_ip
->cuid
);
4067 target_ip
->cgid
= tswap32(host_ip
->cgid
);
4068 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4069 target_ip
->mode
= tswap32(host_ip
->mode
);
4071 target_ip
->mode
= tswap16(host_ip
->mode
);
4073 #if defined(TARGET_PPC)
4074 target_ip
->__seq
= tswap32(host_ip
->__seq
);
4076 target_ip
->__seq
= tswap16(host_ip
->__seq
);
4078 unlock_user_struct(target_sd
, target_addr
, 1);
4082 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
4083 abi_ulong target_addr
)
4085 struct target_semid64_ds
*target_sd
;
4087 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4088 return -TARGET_EFAULT
;
4089 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
4090 return -TARGET_EFAULT
;
4091 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
4092 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
4093 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
4094 unlock_user_struct(target_sd
, target_addr
, 0);
4098 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
4099 struct semid_ds
*host_sd
)
4101 struct target_semid64_ds
*target_sd
;
4103 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4104 return -TARGET_EFAULT
;
4105 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
4106 return -TARGET_EFAULT
;
4107 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
4108 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
4109 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
4110 unlock_user_struct(target_sd
, target_addr
, 1);
4114 struct target_seminfo
{
4127 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
4128 struct seminfo
*host_seminfo
)
4130 struct target_seminfo
*target_seminfo
;
4131 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
4132 return -TARGET_EFAULT
;
4133 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
4134 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
4135 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
4136 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
4137 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
4138 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
4139 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
4140 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
4141 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
4142 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
4143 unlock_user_struct(target_seminfo
, target_addr
, 1);
4149 struct semid_ds
*buf
;
4150 unsigned short *array
;
4151 struct seminfo
*__buf
;
4154 union target_semun
{
4161 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
4162 abi_ulong target_addr
)
4165 unsigned short *array
;
4167 struct semid_ds semid_ds
;
4170 semun
.buf
= &semid_ds
;
4172 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4174 return get_errno(ret
);
4176 nsems
= semid_ds
.sem_nsems
;
4178 *host_array
= g_try_new(unsigned short, nsems
);
4180 return -TARGET_ENOMEM
;
4182 array
= lock_user(VERIFY_READ
, target_addr
,
4183 nsems
*sizeof(unsigned short), 1);
4185 g_free(*host_array
);
4186 return -TARGET_EFAULT
;
4189 for(i
=0; i
<nsems
; i
++) {
4190 __get_user((*host_array
)[i
], &array
[i
]);
4192 unlock_user(array
, target_addr
, 0);
4197 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
4198 unsigned short **host_array
)
4201 unsigned short *array
;
4203 struct semid_ds semid_ds
;
4206 semun
.buf
= &semid_ds
;
4208 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4210 return get_errno(ret
);
4212 nsems
= semid_ds
.sem_nsems
;
4214 array
= lock_user(VERIFY_WRITE
, target_addr
,
4215 nsems
*sizeof(unsigned short), 0);
4217 return -TARGET_EFAULT
;
4219 for(i
=0; i
<nsems
; i
++) {
4220 __put_user((*host_array
)[i
], &array
[i
]);
4222 g_free(*host_array
);
4223 unlock_user(array
, target_addr
, 1);
4228 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
4229 abi_ulong target_arg
)
4231 union target_semun target_su
= { .buf
= target_arg
};
4233 struct semid_ds dsarg
;
4234 unsigned short *array
= NULL
;
4235 struct seminfo seminfo
;
4236 abi_long ret
= -TARGET_EINVAL
;
4243 /* In 64 bit cross-endian situations, we will erroneously pick up
4244 * the wrong half of the union for the "val" element. To rectify
4245 * this, the entire 8-byte structure is byteswapped, followed by
4246 * a swap of the 4 byte val field. In other cases, the data is
4247 * already in proper host byte order. */
4248 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
4249 target_su
.buf
= tswapal(target_su
.buf
);
4250 arg
.val
= tswap32(target_su
.val
);
4252 arg
.val
= target_su
.val
;
4254 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4258 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
4262 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4263 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
4270 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
4274 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4275 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
4281 arg
.__buf
= &seminfo
;
4282 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4283 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
4291 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
4298 struct target_sembuf
{
4299 unsigned short sem_num
;
4304 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
4305 abi_ulong target_addr
,
4308 struct target_sembuf
*target_sembuf
;
4311 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
4312 nsops
*sizeof(struct target_sembuf
), 1);
4314 return -TARGET_EFAULT
;
4316 for(i
=0; i
<nsops
; i
++) {
4317 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
4318 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
4319 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
4322 unlock_user(target_sembuf
, target_addr
, 0);
4327 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
4329 struct sembuf sops
[nsops
];
4331 if (target_to_host_sembuf(sops
, ptr
, nsops
))
4332 return -TARGET_EFAULT
;
4334 return get_errno(safe_semtimedop(semid
, sops
, nsops
, NULL
));
4337 struct target_msqid_ds
4339 struct target_ipc_perm msg_perm
;
4340 abi_ulong msg_stime
;
4341 #if TARGET_ABI_BITS == 32
4342 abi_ulong __unused1
;
4344 abi_ulong msg_rtime
;
4345 #if TARGET_ABI_BITS == 32
4346 abi_ulong __unused2
;
4348 abi_ulong msg_ctime
;
4349 #if TARGET_ABI_BITS == 32
4350 abi_ulong __unused3
;
4352 abi_ulong __msg_cbytes
;
4354 abi_ulong msg_qbytes
;
4355 abi_ulong msg_lspid
;
4356 abi_ulong msg_lrpid
;
4357 abi_ulong __unused4
;
4358 abi_ulong __unused5
;
4361 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
4362 abi_ulong target_addr
)
4364 struct target_msqid_ds
*target_md
;
4366 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
4367 return -TARGET_EFAULT
;
4368 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
4369 return -TARGET_EFAULT
;
4370 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
4371 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
4372 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
4373 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
4374 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
4375 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
4376 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
4377 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
4378 unlock_user_struct(target_md
, target_addr
, 0);
4382 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
4383 struct msqid_ds
*host_md
)
4385 struct target_msqid_ds
*target_md
;
4387 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
4388 return -TARGET_EFAULT
;
4389 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
4390 return -TARGET_EFAULT
;
4391 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
4392 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
4393 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
4394 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
4395 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
4396 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
4397 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
4398 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
4399 unlock_user_struct(target_md
, target_addr
, 1);
4403 struct target_msginfo
{
4411 unsigned short int msgseg
;
4414 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
4415 struct msginfo
*host_msginfo
)
4417 struct target_msginfo
*target_msginfo
;
4418 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
4419 return -TARGET_EFAULT
;
4420 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
4421 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
4422 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
4423 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
4424 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
4425 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
4426 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
4427 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
4428 unlock_user_struct(target_msginfo
, target_addr
, 1);
4432 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
4434 struct msqid_ds dsarg
;
4435 struct msginfo msginfo
;
4436 abi_long ret
= -TARGET_EINVAL
;
4444 if (target_to_host_msqid_ds(&dsarg
,ptr
))
4445 return -TARGET_EFAULT
;
4446 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
4447 if (host_to_target_msqid_ds(ptr
,&dsarg
))
4448 return -TARGET_EFAULT
;
4451 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
4455 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
4456 if (host_to_target_msginfo(ptr
, &msginfo
))
4457 return -TARGET_EFAULT
;
4464 struct target_msgbuf
{
4469 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
4470 ssize_t msgsz
, int msgflg
)
4472 struct target_msgbuf
*target_mb
;
4473 struct msgbuf
*host_mb
;
4477 return -TARGET_EINVAL
;
4480 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
4481 return -TARGET_EFAULT
;
4482 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4484 unlock_user_struct(target_mb
, msgp
, 0);
4485 return -TARGET_ENOMEM
;
4487 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
4488 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
4489 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
4491 unlock_user_struct(target_mb
, msgp
, 0);
4496 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
4497 ssize_t msgsz
, abi_long msgtyp
,
4500 struct target_msgbuf
*target_mb
;
4502 struct msgbuf
*host_mb
;
4506 return -TARGET_EINVAL
;
4509 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
4510 return -TARGET_EFAULT
;
4512 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4514 ret
= -TARGET_ENOMEM
;
4517 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
4520 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
4521 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
4522 if (!target_mtext
) {
4523 ret
= -TARGET_EFAULT
;
4526 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
4527 unlock_user(target_mtext
, target_mtext_addr
, ret
);
4530 target_mb
->mtype
= tswapal(host_mb
->mtype
);
4534 unlock_user_struct(target_mb
, msgp
, 1);
4539 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
4540 abi_ulong target_addr
)
4542 struct target_shmid_ds
*target_sd
;
4544 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4545 return -TARGET_EFAULT
;
4546 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
4547 return -TARGET_EFAULT
;
4548 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4549 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4550 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4551 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4552 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4553 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4554 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4555 unlock_user_struct(target_sd
, target_addr
, 0);
4559 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
4560 struct shmid_ds
*host_sd
)
4562 struct target_shmid_ds
*target_sd
;
4564 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4565 return -TARGET_EFAULT
;
4566 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
4567 return -TARGET_EFAULT
;
4568 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4569 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4570 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4571 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4572 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4573 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4574 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4575 unlock_user_struct(target_sd
, target_addr
, 1);
4579 struct target_shminfo
{
4587 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
4588 struct shminfo
*host_shminfo
)
4590 struct target_shminfo
*target_shminfo
;
4591 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
4592 return -TARGET_EFAULT
;
4593 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
4594 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
4595 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
4596 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
4597 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
4598 unlock_user_struct(target_shminfo
, target_addr
, 1);
4602 struct target_shm_info
{
4607 abi_ulong swap_attempts
;
4608 abi_ulong swap_successes
;
4611 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
4612 struct shm_info
*host_shm_info
)
4614 struct target_shm_info
*target_shm_info
;
4615 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
4616 return -TARGET_EFAULT
;
4617 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
4618 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
4619 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
4620 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
4621 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
4622 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
4623 unlock_user_struct(target_shm_info
, target_addr
, 1);
4627 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
4629 struct shmid_ds dsarg
;
4630 struct shminfo shminfo
;
4631 struct shm_info shm_info
;
4632 abi_long ret
= -TARGET_EINVAL
;
4640 if (target_to_host_shmid_ds(&dsarg
, buf
))
4641 return -TARGET_EFAULT
;
4642 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
4643 if (host_to_target_shmid_ds(buf
, &dsarg
))
4644 return -TARGET_EFAULT
;
4647 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
4648 if (host_to_target_shminfo(buf
, &shminfo
))
4649 return -TARGET_EFAULT
;
4652 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
4653 if (host_to_target_shm_info(buf
, &shm_info
))
4654 return -TARGET_EFAULT
;
4659 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
4666 #ifndef TARGET_FORCE_SHMLBA
4667 /* For most architectures, SHMLBA is the same as the page size;
4668 * some architectures have larger values, in which case they should
4669 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4670 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4671 * and defining its own value for SHMLBA.
4673 * The kernel also permits SHMLBA to be set by the architecture to a
4674 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4675 * this means that addresses are rounded to the large size if
4676 * SHM_RND is set but addresses not aligned to that size are not rejected
4677 * as long as they are at least page-aligned. Since the only architecture
4678 * which uses this is ia64 this code doesn't provide for that oddity.
4680 static inline abi_ulong
target_shmlba(CPUArchState
*cpu_env
)
4682 return TARGET_PAGE_SIZE
;
4686 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
4687 int shmid
, abi_ulong shmaddr
, int shmflg
)
4691 struct shmid_ds shm_info
;
4695 /* find out the length of the shared memory segment */
4696 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4697 if (is_error(ret
)) {
4698 /* can't get length, bail out */
4702 shmlba
= target_shmlba(cpu_env
);
4704 if (shmaddr
& (shmlba
- 1)) {
4705 if (shmflg
& SHM_RND
) {
4706 shmaddr
&= ~(shmlba
- 1);
4708 return -TARGET_EINVAL
;
4715 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
4717 abi_ulong mmap_start
;
4719 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
4721 if (mmap_start
== -1) {
4723 host_raddr
= (void *)-1;
4725 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
4728 if (host_raddr
== (void *)-1) {
4730 return get_errno((long)host_raddr
);
4732 raddr
=h2g((unsigned long)host_raddr
);
4734 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
4735 PAGE_VALID
| PAGE_READ
|
4736 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
4738 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4739 if (!shm_regions
[i
].in_use
) {
4740 shm_regions
[i
].in_use
= true;
4741 shm_regions
[i
].start
= raddr
;
4742 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4752 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4756 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4757 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4758 shm_regions
[i
].in_use
= false;
4759 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
4764 return get_errno(shmdt(g2h(shmaddr
)));
4767 #ifdef TARGET_NR_ipc
4768 /* ??? This only works with linear mappings. */
4769 /* do_ipc() must return target values and target errnos. */
4770 static abi_long
do_ipc(CPUArchState
*cpu_env
,
4771 unsigned int call
, abi_long first
,
4772 abi_long second
, abi_long third
,
4773 abi_long ptr
, abi_long fifth
)
4778 version
= call
>> 16;
4783 ret
= do_semop(first
, ptr
, second
);
4787 ret
= get_errno(semget(first
, second
, third
));
4790 case IPCOP_semctl
: {
4791 /* The semun argument to semctl is passed by value, so dereference the
4794 get_user_ual(atptr
, ptr
);
4795 ret
= do_semctl(first
, second
, third
, atptr
);
4800 ret
= get_errno(msgget(first
, second
));
4804 ret
= do_msgsnd(first
, ptr
, second
, third
);
4808 ret
= do_msgctl(first
, second
, ptr
);
4815 struct target_ipc_kludge
{
4820 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
4821 ret
= -TARGET_EFAULT
;
4825 ret
= do_msgrcv(first
, tswapal(tmp
->msgp
), second
, tswapal(tmp
->msgtyp
), third
);
4827 unlock_user_struct(tmp
, ptr
, 0);
4831 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
4840 raddr
= do_shmat(cpu_env
, first
, ptr
, second
);
4841 if (is_error(raddr
))
4842 return get_errno(raddr
);
4843 if (put_user_ual(raddr
, third
))
4844 return -TARGET_EFAULT
;
4848 ret
= -TARGET_EINVAL
;
4853 ret
= do_shmdt(ptr
);
4857 /* IPC_* flag values are the same on all linux platforms */
4858 ret
= get_errno(shmget(first
, second
, third
));
4861 /* IPC_* and SHM_* command values are the same on all linux platforms */
4863 ret
= do_shmctl(first
, second
, ptr
);
4866 gemu_log("Unsupported ipc call: %d (version %d)\n", call
, version
);
4867 ret
= -TARGET_ENOSYS
;
4874 /* kernel structure types definitions */
4876 #define STRUCT(name, ...) STRUCT_ ## name,
4877 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4879 #include "syscall_types.h"
4883 #undef STRUCT_SPECIAL
4885 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4886 #define STRUCT_SPECIAL(name)
4887 #include "syscall_types.h"
4889 #undef STRUCT_SPECIAL
4891 typedef struct IOCTLEntry IOCTLEntry
;
4893 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4894 int fd
, int cmd
, abi_long arg
);
4898 unsigned int host_cmd
;
4901 do_ioctl_fn
*do_ioctl
;
4902 const argtype arg_type
[5];
4905 #define IOC_R 0x0001
4906 #define IOC_W 0x0002
4907 #define IOC_RW (IOC_R | IOC_W)
4909 #define MAX_STRUCT_SIZE 4096
4911 #ifdef CONFIG_FIEMAP
4912 /* So fiemap access checks don't overflow on 32 bit systems.
4913 * This is very slightly smaller than the limit imposed by
4914 * the underlying kernel.
4916 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4917 / sizeof(struct fiemap_extent))
4919 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4920 int fd
, int cmd
, abi_long arg
)
4922 /* The parameter for this ioctl is a struct fiemap followed
4923 * by an array of struct fiemap_extent whose size is set
4924 * in fiemap->fm_extent_count. The array is filled in by the
4927 int target_size_in
, target_size_out
;
4929 const argtype
*arg_type
= ie
->arg_type
;
4930 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
4933 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
4937 assert(arg_type
[0] == TYPE_PTR
);
4938 assert(ie
->access
== IOC_RW
);
4940 target_size_in
= thunk_type_size(arg_type
, 0);
4941 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
4943 return -TARGET_EFAULT
;
4945 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4946 unlock_user(argptr
, arg
, 0);
4947 fm
= (struct fiemap
*)buf_temp
;
4948 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
4949 return -TARGET_EINVAL
;
4952 outbufsz
= sizeof (*fm
) +
4953 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
4955 if (outbufsz
> MAX_STRUCT_SIZE
) {
4956 /* We can't fit all the extents into the fixed size buffer.
4957 * Allocate one that is large enough and use it instead.
4959 fm
= g_try_malloc(outbufsz
);
4961 return -TARGET_ENOMEM
;
4963 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
4966 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, fm
));
4967 if (!is_error(ret
)) {
4968 target_size_out
= target_size_in
;
4969 /* An extent_count of 0 means we were only counting the extents
4970 * so there are no structs to copy
4972 if (fm
->fm_extent_count
!= 0) {
4973 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
4975 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
4977 ret
= -TARGET_EFAULT
;
4979 /* Convert the struct fiemap */
4980 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
4981 if (fm
->fm_extent_count
!= 0) {
4982 p
= argptr
+ target_size_in
;
4983 /* ...and then all the struct fiemap_extents */
4984 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
4985 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
4990 unlock_user(argptr
, arg
, target_size_out
);
5000 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5001 int fd
, int cmd
, abi_long arg
)
5003 const argtype
*arg_type
= ie
->arg_type
;
5007 struct ifconf
*host_ifconf
;
5009 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
5010 int target_ifreq_size
;
5015 abi_long target_ifc_buf
;
5019 assert(arg_type
[0] == TYPE_PTR
);
5020 assert(ie
->access
== IOC_RW
);
5023 target_size
= thunk_type_size(arg_type
, 0);
5025 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5027 return -TARGET_EFAULT
;
5028 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5029 unlock_user(argptr
, arg
, 0);
5031 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
5032 target_ifc_len
= host_ifconf
->ifc_len
;
5033 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
5035 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
5036 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
5037 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
5039 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
5040 if (outbufsz
> MAX_STRUCT_SIZE
) {
5041 /* We can't fit all the extents into the fixed size buffer.
5042 * Allocate one that is large enough and use it instead.
5044 host_ifconf
= malloc(outbufsz
);
5046 return -TARGET_ENOMEM
;
5048 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
5051 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
5053 host_ifconf
->ifc_len
= host_ifc_len
;
5054 host_ifconf
->ifc_buf
= host_ifc_buf
;
5056 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
5057 if (!is_error(ret
)) {
5058 /* convert host ifc_len to target ifc_len */
5060 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
5061 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
5062 host_ifconf
->ifc_len
= target_ifc_len
;
5064 /* restore target ifc_buf */
5066 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
5068 /* copy struct ifconf to target user */
5070 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5072 return -TARGET_EFAULT
;
5073 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
5074 unlock_user(argptr
, arg
, target_size
);
5076 /* copy ifreq[] to target user */
5078 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
5079 for (i
= 0; i
< nb_ifreq
; i
++) {
5080 thunk_convert(argptr
+ i
* target_ifreq_size
,
5081 host_ifc_buf
+ i
* sizeof(struct ifreq
),
5082 ifreq_arg_type
, THUNK_TARGET
);
5084 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
5094 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5095 int cmd
, abi_long arg
)
5098 struct dm_ioctl
*host_dm
;
5099 abi_long guest_data
;
5100 uint32_t guest_data_size
;
5102 const argtype
*arg_type
= ie
->arg_type
;
5104 void *big_buf
= NULL
;
5108 target_size
= thunk_type_size(arg_type
, 0);
5109 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5111 ret
= -TARGET_EFAULT
;
5114 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5115 unlock_user(argptr
, arg
, 0);
5117 /* buf_temp is too small, so fetch things into a bigger buffer */
5118 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
5119 memcpy(big_buf
, buf_temp
, target_size
);
5123 guest_data
= arg
+ host_dm
->data_start
;
5124 if ((guest_data
- arg
) < 0) {
5125 ret
= -TARGET_EINVAL
;
5128 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5129 host_data
= (char*)host_dm
+ host_dm
->data_start
;
5131 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
5133 ret
= -TARGET_EFAULT
;
5137 switch (ie
->host_cmd
) {
5139 case DM_LIST_DEVICES
:
5142 case DM_DEV_SUSPEND
:
5145 case DM_TABLE_STATUS
:
5146 case DM_TABLE_CLEAR
:
5148 case DM_LIST_VERSIONS
:
5152 case DM_DEV_SET_GEOMETRY
:
5153 /* data contains only strings */
5154 memcpy(host_data
, argptr
, guest_data_size
);
5157 memcpy(host_data
, argptr
, guest_data_size
);
5158 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
5162 void *gspec
= argptr
;
5163 void *cur_data
= host_data
;
5164 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5165 int spec_size
= thunk_type_size(arg_type
, 0);
5168 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5169 struct dm_target_spec
*spec
= cur_data
;
5173 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
5174 slen
= strlen((char*)gspec
+ spec_size
) + 1;
5176 spec
->next
= sizeof(*spec
) + slen
;
5177 strcpy((char*)&spec
[1], gspec
+ spec_size
);
5179 cur_data
+= spec
->next
;
5184 ret
= -TARGET_EINVAL
;
5185 unlock_user(argptr
, guest_data
, 0);
5188 unlock_user(argptr
, guest_data
, 0);
5190 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5191 if (!is_error(ret
)) {
5192 guest_data
= arg
+ host_dm
->data_start
;
5193 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5194 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
5195 switch (ie
->host_cmd
) {
5200 case DM_DEV_SUSPEND
:
5203 case DM_TABLE_CLEAR
:
5205 case DM_DEV_SET_GEOMETRY
:
5206 /* no return data */
5208 case DM_LIST_DEVICES
:
5210 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
5211 uint32_t remaining_data
= guest_data_size
;
5212 void *cur_data
= argptr
;
5213 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
5214 int nl_size
= 12; /* can't use thunk_size due to alignment */
5217 uint32_t next
= nl
->next
;
5219 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
5221 if (remaining_data
< nl
->next
) {
5222 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5225 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
5226 strcpy(cur_data
+ nl_size
, nl
->name
);
5227 cur_data
+= nl
->next
;
5228 remaining_data
-= nl
->next
;
5232 nl
= (void*)nl
+ next
;
5237 case DM_TABLE_STATUS
:
5239 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
5240 void *cur_data
= argptr
;
5241 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5242 int spec_size
= thunk_type_size(arg_type
, 0);
5245 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5246 uint32_t next
= spec
->next
;
5247 int slen
= strlen((char*)&spec
[1]) + 1;
5248 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
5249 if (guest_data_size
< spec
->next
) {
5250 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5253 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
5254 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
5255 cur_data
= argptr
+ spec
->next
;
5256 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
5262 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
5263 int count
= *(uint32_t*)hdata
;
5264 uint64_t *hdev
= hdata
+ 8;
5265 uint64_t *gdev
= argptr
+ 8;
5268 *(uint32_t*)argptr
= tswap32(count
);
5269 for (i
= 0; i
< count
; i
++) {
5270 *gdev
= tswap64(*hdev
);
5276 case DM_LIST_VERSIONS
:
5278 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
5279 uint32_t remaining_data
= guest_data_size
;
5280 void *cur_data
= argptr
;
5281 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
5282 int vers_size
= thunk_type_size(arg_type
, 0);
5285 uint32_t next
= vers
->next
;
5287 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
5289 if (remaining_data
< vers
->next
) {
5290 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5293 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
5294 strcpy(cur_data
+ vers_size
, vers
->name
);
5295 cur_data
+= vers
->next
;
5296 remaining_data
-= vers
->next
;
5300 vers
= (void*)vers
+ next
;
5305 unlock_user(argptr
, guest_data
, 0);
5306 ret
= -TARGET_EINVAL
;
5309 unlock_user(argptr
, guest_data
, guest_data_size
);
5311 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5313 ret
= -TARGET_EFAULT
;
5316 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5317 unlock_user(argptr
, arg
, target_size
);
5324 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5325 int cmd
, abi_long arg
)
5329 const argtype
*arg_type
= ie
->arg_type
;
5330 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
5333 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
5334 struct blkpg_partition host_part
;
5336 /* Read and convert blkpg */
5338 target_size
= thunk_type_size(arg_type
, 0);
5339 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5341 ret
= -TARGET_EFAULT
;
5344 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5345 unlock_user(argptr
, arg
, 0);
5347 switch (host_blkpg
->op
) {
5348 case BLKPG_ADD_PARTITION
:
5349 case BLKPG_DEL_PARTITION
:
5350 /* payload is struct blkpg_partition */
5353 /* Unknown opcode */
5354 ret
= -TARGET_EINVAL
;
5358 /* Read and convert blkpg->data */
5359 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
5360 target_size
= thunk_type_size(part_arg_type
, 0);
5361 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5363 ret
= -TARGET_EFAULT
;
5366 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
5367 unlock_user(argptr
, arg
, 0);
5369 /* Swizzle the data pointer to our local copy and call! */
5370 host_blkpg
->data
= &host_part
;
5371 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
5377 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5378 int fd
, int cmd
, abi_long arg
)
5380 const argtype
*arg_type
= ie
->arg_type
;
5381 const StructEntry
*se
;
5382 const argtype
*field_types
;
5383 const int *dst_offsets
, *src_offsets
;
5386 abi_ulong
*target_rt_dev_ptr
;
5387 unsigned long *host_rt_dev_ptr
;
5391 assert(ie
->access
== IOC_W
);
5392 assert(*arg_type
== TYPE_PTR
);
5394 assert(*arg_type
== TYPE_STRUCT
);
5395 target_size
= thunk_type_size(arg_type
, 0);
5396 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5398 return -TARGET_EFAULT
;
5401 assert(*arg_type
== (int)STRUCT_rtentry
);
5402 se
= struct_entries
+ *arg_type
++;
5403 assert(se
->convert
[0] == NULL
);
5404 /* convert struct here to be able to catch rt_dev string */
5405 field_types
= se
->field_types
;
5406 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
5407 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
5408 for (i
= 0; i
< se
->nb_fields
; i
++) {
5409 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
5410 assert(*field_types
== TYPE_PTRVOID
);
5411 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
5412 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
5413 if (*target_rt_dev_ptr
!= 0) {
5414 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
5415 tswapal(*target_rt_dev_ptr
));
5416 if (!*host_rt_dev_ptr
) {
5417 unlock_user(argptr
, arg
, 0);
5418 return -TARGET_EFAULT
;
5421 *host_rt_dev_ptr
= 0;
5426 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
5427 argptr
+ src_offsets
[i
],
5428 field_types
, THUNK_HOST
);
5430 unlock_user(argptr
, arg
, 0);
5432 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5433 if (*host_rt_dev_ptr
!= 0) {
5434 unlock_user((void *)*host_rt_dev_ptr
,
5435 *target_rt_dev_ptr
, 0);
5440 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5441 int fd
, int cmd
, abi_long arg
)
5443 int sig
= target_to_host_signal(arg
);
5444 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
5447 static IOCTLEntry ioctl_entries
[] = {
5448 #define IOCTL(cmd, access, ...) \
5449 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5450 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5451 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5456 /* ??? Implement proper locking for ioctls. */
5457 /* do_ioctl() Must return target values and target errnos. */
5458 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
5460 const IOCTLEntry
*ie
;
5461 const argtype
*arg_type
;
5463 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
5469 if (ie
->target_cmd
== 0) {
5470 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
5471 return -TARGET_ENOSYS
;
5473 if (ie
->target_cmd
== cmd
)
5477 arg_type
= ie
->arg_type
;
5479 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
5482 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
5485 switch(arg_type
[0]) {
5488 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
5492 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
5496 target_size
= thunk_type_size(arg_type
, 0);
5497 switch(ie
->access
) {
5499 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5500 if (!is_error(ret
)) {
5501 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5503 return -TARGET_EFAULT
;
5504 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5505 unlock_user(argptr
, arg
, target_size
);
5509 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5511 return -TARGET_EFAULT
;
5512 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5513 unlock_user(argptr
, arg
, 0);
5514 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5518 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5520 return -TARGET_EFAULT
;
5521 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5522 unlock_user(argptr
, arg
, 0);
5523 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5524 if (!is_error(ret
)) {
5525 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5527 return -TARGET_EFAULT
;
5528 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5529 unlock_user(argptr
, arg
, target_size
);
5535 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5536 (long)cmd
, arg_type
[0]);
5537 ret
= -TARGET_ENOSYS
;
5543 static const bitmask_transtbl iflag_tbl
[] = {
5544 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
5545 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
5546 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
5547 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
5548 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
5549 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
5550 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
5551 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
5552 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
5553 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
5554 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
5555 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
5556 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
5557 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
5561 static const bitmask_transtbl oflag_tbl
[] = {
5562 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
5563 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
5564 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
5565 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
5566 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
5567 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
5568 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
5569 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
5570 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
5571 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
5572 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
5573 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
5574 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
5575 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
5576 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
5577 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
5578 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
5579 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
5580 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
5581 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
5582 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
5583 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
5584 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
5585 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
5589 static const bitmask_transtbl cflag_tbl
[] = {
5590 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
5591 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
5592 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
5593 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
5594 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
5595 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
5596 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
5597 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
5598 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
5599 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
5600 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
5601 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
5602 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
5603 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
5604 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
5605 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
5606 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
5607 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
5608 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
5609 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
5610 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
5611 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
5612 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
5613 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
5614 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
5615 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
5616 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
5617 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
5618 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
5619 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
5620 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
5624 static const bitmask_transtbl lflag_tbl
[] = {
5625 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
5626 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
5627 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
5628 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
5629 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
5630 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
5631 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
5632 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
5633 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
5634 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
5635 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
5636 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
5637 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
5638 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
5639 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5643 static void target_to_host_termios (void *dst
, const void *src
)
5645 struct host_termios
*host
= dst
;
5646 const struct target_termios
*target
= src
;
5649 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
5651 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
5653 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
5655 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
5656 host
->c_line
= target
->c_line
;
5658 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
5659 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
5660 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
5661 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
5662 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
5663 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
5664 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
5665 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
5666 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
5667 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
5668 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
5669 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
5670 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
5671 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
5672 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
5673 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
5674 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
5675 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
5678 static void host_to_target_termios (void *dst
, const void *src
)
5680 struct target_termios
*target
= dst
;
5681 const struct host_termios
*host
= src
;
5684 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
5686 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
5688 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
5690 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
5691 target
->c_line
= host
->c_line
;
5693 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
5694 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
5695 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
5696 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
5697 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
5698 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
5699 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
5700 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
5701 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
5702 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
5703 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
5704 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
5705 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
5706 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
5707 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
5708 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
5709 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
5710 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
5713 static const StructEntry struct_termios_def
= {
5714 .convert
= { host_to_target_termios
, target_to_host_termios
},
5715 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
5716 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
5719 static bitmask_transtbl mmap_flags_tbl
[] = {
5720 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
5721 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
5722 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
5723 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
5724 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
5725 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
5726 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
5727 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
5728 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
, MAP_NORESERVE
,
5733 #if defined(TARGET_I386)
5735 /* NOTE: there is really one LDT for all the threads */
5736 static uint8_t *ldt_table
;
5738 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
5745 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
5746 if (size
> bytecount
)
5748 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
5750 return -TARGET_EFAULT
;
5751 /* ??? Should this by byteswapped? */
5752 memcpy(p
, ldt_table
, size
);
5753 unlock_user(p
, ptr
, size
);
5757 /* XXX: add locking support */
5758 static abi_long
write_ldt(CPUX86State
*env
,
5759 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
5761 struct target_modify_ldt_ldt_s ldt_info
;
5762 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5763 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5764 int seg_not_present
, useable
, lm
;
5765 uint32_t *lp
, entry_1
, entry_2
;
5767 if (bytecount
!= sizeof(ldt_info
))
5768 return -TARGET_EINVAL
;
5769 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
5770 return -TARGET_EFAULT
;
5771 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5772 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5773 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5774 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5775 unlock_user_struct(target_ldt_info
, ptr
, 0);
5777 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
5778 return -TARGET_EINVAL
;
5779 seg_32bit
= ldt_info
.flags
& 1;
5780 contents
= (ldt_info
.flags
>> 1) & 3;
5781 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5782 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5783 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5784 useable
= (ldt_info
.flags
>> 6) & 1;
5788 lm
= (ldt_info
.flags
>> 7) & 1;
5790 if (contents
== 3) {
5792 return -TARGET_EINVAL
;
5793 if (seg_not_present
== 0)
5794 return -TARGET_EINVAL
;
5796 /* allocate the LDT */
5798 env
->ldt
.base
= target_mmap(0,
5799 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
5800 PROT_READ
|PROT_WRITE
,
5801 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
5802 if (env
->ldt
.base
== -1)
5803 return -TARGET_ENOMEM
;
5804 memset(g2h(env
->ldt
.base
), 0,
5805 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
5806 env
->ldt
.limit
= 0xffff;
5807 ldt_table
= g2h(env
->ldt
.base
);
5810 /* NOTE: same code as Linux kernel */
5811 /* Allow LDTs to be cleared by the user. */
5812 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5815 read_exec_only
== 1 &&
5817 limit_in_pages
== 0 &&
5818 seg_not_present
== 1 &&
5826 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5827 (ldt_info
.limit
& 0x0ffff);
5828 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5829 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5830 (ldt_info
.limit
& 0xf0000) |
5831 ((read_exec_only
^ 1) << 9) |
5833 ((seg_not_present
^ 1) << 15) |
5835 (limit_in_pages
<< 23) |
5839 entry_2
|= (useable
<< 20);
5841 /* Install the new entry ... */
5843 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
5844 lp
[0] = tswap32(entry_1
);
5845 lp
[1] = tswap32(entry_2
);
5849 /* specific and weird i386 syscalls */
5850 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
5851 unsigned long bytecount
)
5857 ret
= read_ldt(ptr
, bytecount
);
5860 ret
= write_ldt(env
, ptr
, bytecount
, 1);
5863 ret
= write_ldt(env
, ptr
, bytecount
, 0);
5866 ret
= -TARGET_ENOSYS
;
5872 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5873 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5875 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5876 struct target_modify_ldt_ldt_s ldt_info
;
5877 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5878 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5879 int seg_not_present
, useable
, lm
;
5880 uint32_t *lp
, entry_1
, entry_2
;
5883 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5884 if (!target_ldt_info
)
5885 return -TARGET_EFAULT
;
5886 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5887 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5888 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5889 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5890 if (ldt_info
.entry_number
== -1) {
5891 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
5892 if (gdt_table
[i
] == 0) {
5893 ldt_info
.entry_number
= i
;
5894 target_ldt_info
->entry_number
= tswap32(i
);
5899 unlock_user_struct(target_ldt_info
, ptr
, 1);
5901 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
5902 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
5903 return -TARGET_EINVAL
;
5904 seg_32bit
= ldt_info
.flags
& 1;
5905 contents
= (ldt_info
.flags
>> 1) & 3;
5906 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5907 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5908 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5909 useable
= (ldt_info
.flags
>> 6) & 1;
5913 lm
= (ldt_info
.flags
>> 7) & 1;
5916 if (contents
== 3) {
5917 if (seg_not_present
== 0)
5918 return -TARGET_EINVAL
;
5921 /* NOTE: same code as Linux kernel */
5922 /* Allow LDTs to be cleared by the user. */
5923 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5924 if ((contents
== 0 &&
5925 read_exec_only
== 1 &&
5927 limit_in_pages
== 0 &&
5928 seg_not_present
== 1 &&
5936 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5937 (ldt_info
.limit
& 0x0ffff);
5938 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5939 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5940 (ldt_info
.limit
& 0xf0000) |
5941 ((read_exec_only
^ 1) << 9) |
5943 ((seg_not_present
^ 1) << 15) |
5945 (limit_in_pages
<< 23) |
5950 /* Install the new entry ... */
5952 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
5953 lp
[0] = tswap32(entry_1
);
5954 lp
[1] = tswap32(entry_2
);
5958 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5960 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5961 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5962 uint32_t base_addr
, limit
, flags
;
5963 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
5964 int seg_not_present
, useable
, lm
;
5965 uint32_t *lp
, entry_1
, entry_2
;
5967 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5968 if (!target_ldt_info
)
5969 return -TARGET_EFAULT
;
5970 idx
= tswap32(target_ldt_info
->entry_number
);
5971 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
5972 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
5973 unlock_user_struct(target_ldt_info
, ptr
, 1);
5974 return -TARGET_EINVAL
;
5976 lp
= (uint32_t *)(gdt_table
+ idx
);
5977 entry_1
= tswap32(lp
[0]);
5978 entry_2
= tswap32(lp
[1]);
5980 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
5981 contents
= (entry_2
>> 10) & 3;
5982 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
5983 seg_32bit
= (entry_2
>> 22) & 1;
5984 limit_in_pages
= (entry_2
>> 23) & 1;
5985 useable
= (entry_2
>> 20) & 1;
5989 lm
= (entry_2
>> 21) & 1;
5991 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
5992 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
5993 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
5994 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
5995 base_addr
= (entry_1
>> 16) |
5996 (entry_2
& 0xff000000) |
5997 ((entry_2
& 0xff) << 16);
5998 target_ldt_info
->base_addr
= tswapal(base_addr
);
5999 target_ldt_info
->limit
= tswap32(limit
);
6000 target_ldt_info
->flags
= tswap32(flags
);
6001 unlock_user_struct(target_ldt_info
, ptr
, 1);
6004 #endif /* TARGET_I386 && TARGET_ABI32 */
6006 #ifndef TARGET_ABI32
6007 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
6014 case TARGET_ARCH_SET_GS
:
6015 case TARGET_ARCH_SET_FS
:
6016 if (code
== TARGET_ARCH_SET_GS
)
6020 cpu_x86_load_seg(env
, idx
, 0);
6021 env
->segs
[idx
].base
= addr
;
6023 case TARGET_ARCH_GET_GS
:
6024 case TARGET_ARCH_GET_FS
:
6025 if (code
== TARGET_ARCH_GET_GS
)
6029 val
= env
->segs
[idx
].base
;
6030 if (put_user(val
, addr
, abi_ulong
))
6031 ret
= -TARGET_EFAULT
;
6034 ret
= -TARGET_EINVAL
;
6041 #endif /* defined(TARGET_I386) */
6043 #define NEW_STACK_SIZE 0x40000
6046 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
6049 pthread_mutex_t mutex
;
6050 pthread_cond_t cond
;
6053 abi_ulong child_tidptr
;
6054 abi_ulong parent_tidptr
;
6058 static void *clone_func(void *arg
)
6060 new_thread_info
*info
= arg
;
6065 rcu_register_thread();
6067 cpu
= ENV_GET_CPU(env
);
6069 ts
= (TaskState
*)cpu
->opaque
;
6070 info
->tid
= gettid();
6071 cpu
->host_tid
= info
->tid
;
6073 if (info
->child_tidptr
)
6074 put_user_u32(info
->tid
, info
->child_tidptr
);
6075 if (info
->parent_tidptr
)
6076 put_user_u32(info
->tid
, info
->parent_tidptr
);
6077 /* Enable signals. */
6078 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
6079 /* Signal to the parent that we're ready. */
6080 pthread_mutex_lock(&info
->mutex
);
6081 pthread_cond_broadcast(&info
->cond
);
6082 pthread_mutex_unlock(&info
->mutex
);
6083 /* Wait until the parent has finshed initializing the tls state. */
6084 pthread_mutex_lock(&clone_lock
);
6085 pthread_mutex_unlock(&clone_lock
);
6091 /* do_fork() Must return host values and target errnos (unlike most
6092 do_*() functions). */
6093 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
6094 abi_ulong parent_tidptr
, target_ulong newtls
,
6095 abi_ulong child_tidptr
)
6097 CPUState
*cpu
= ENV_GET_CPU(env
);
6101 CPUArchState
*new_env
;
6104 flags
&= ~CLONE_IGNORED_FLAGS
;
6106 /* Emulate vfork() with fork() */
6107 if (flags
& CLONE_VFORK
)
6108 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
6110 if (flags
& CLONE_VM
) {
6111 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
6112 new_thread_info info
;
6113 pthread_attr_t attr
;
6115 if (((flags
& CLONE_THREAD_FLAGS
) != CLONE_THREAD_FLAGS
) ||
6116 (flags
& CLONE_INVALID_THREAD_FLAGS
)) {
6117 return -TARGET_EINVAL
;
6120 ts
= g_new0(TaskState
, 1);
6121 init_task_state(ts
);
6122 /* we create a new CPU instance. */
6123 new_env
= cpu_copy(env
);
6124 /* Init regs that differ from the parent. */
6125 cpu_clone_regs(new_env
, newsp
);
6126 new_cpu
= ENV_GET_CPU(new_env
);
6127 new_cpu
->opaque
= ts
;
6128 ts
->bprm
= parent_ts
->bprm
;
6129 ts
->info
= parent_ts
->info
;
6130 ts
->signal_mask
= parent_ts
->signal_mask
;
6132 if (flags
& CLONE_CHILD_CLEARTID
) {
6133 ts
->child_tidptr
= child_tidptr
;
6136 if (flags
& CLONE_SETTLS
) {
6137 cpu_set_tls (new_env
, newtls
);
6140 /* Grab a mutex so that thread setup appears atomic. */
6141 pthread_mutex_lock(&clone_lock
);
6143 memset(&info
, 0, sizeof(info
));
6144 pthread_mutex_init(&info
.mutex
, NULL
);
6145 pthread_mutex_lock(&info
.mutex
);
6146 pthread_cond_init(&info
.cond
, NULL
);
6148 if (flags
& CLONE_CHILD_SETTID
) {
6149 info
.child_tidptr
= child_tidptr
;
6151 if (flags
& CLONE_PARENT_SETTID
) {
6152 info
.parent_tidptr
= parent_tidptr
;
6155 ret
= pthread_attr_init(&attr
);
6156 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
6157 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
6158 /* It is not safe to deliver signals until the child has finished
6159 initializing, so temporarily block all signals. */
6160 sigfillset(&sigmask
);
6161 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
6163 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
6164 /* TODO: Free new CPU state if thread creation failed. */
6166 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
6167 pthread_attr_destroy(&attr
);
6169 /* Wait for the child to initialize. */
6170 pthread_cond_wait(&info
.cond
, &info
.mutex
);
6175 pthread_mutex_unlock(&info
.mutex
);
6176 pthread_cond_destroy(&info
.cond
);
6177 pthread_mutex_destroy(&info
.mutex
);
6178 pthread_mutex_unlock(&clone_lock
);
6180 /* if no CLONE_VM, we consider it is a fork */
6181 if (flags
& CLONE_INVALID_FORK_FLAGS
) {
6182 return -TARGET_EINVAL
;
6185 /* We can't support custom termination signals */
6186 if ((flags
& CSIGNAL
) != TARGET_SIGCHLD
) {
6187 return -TARGET_EINVAL
;
6190 if (block_signals()) {
6191 return -TARGET_ERESTARTSYS
;
6197 /* Child Process. */
6199 cpu_clone_regs(env
, newsp
);
6201 /* There is a race condition here. The parent process could
6202 theoretically read the TID in the child process before the child
6203 tid is set. This would require using either ptrace
6204 (not implemented) or having *_tidptr to point at a shared memory
6205 mapping. We can't repeat the spinlock hack used above because
6206 the child process gets its own copy of the lock. */
6207 if (flags
& CLONE_CHILD_SETTID
)
6208 put_user_u32(gettid(), child_tidptr
);
6209 if (flags
& CLONE_PARENT_SETTID
)
6210 put_user_u32(gettid(), parent_tidptr
);
6211 ts
= (TaskState
*)cpu
->opaque
;
6212 if (flags
& CLONE_SETTLS
)
6213 cpu_set_tls (env
, newtls
);
6214 if (flags
& CLONE_CHILD_CLEARTID
)
6215 ts
->child_tidptr
= child_tidptr
;
6223 /* warning : doesn't handle linux specific flags... */
6224 static int target_to_host_fcntl_cmd(int cmd
)
6227 case TARGET_F_DUPFD
:
6228 case TARGET_F_GETFD
:
6229 case TARGET_F_SETFD
:
6230 case TARGET_F_GETFL
:
6231 case TARGET_F_SETFL
:
6233 case TARGET_F_GETLK
:
6235 case TARGET_F_SETLK
:
6237 case TARGET_F_SETLKW
:
6239 case TARGET_F_GETOWN
:
6241 case TARGET_F_SETOWN
:
6243 case TARGET_F_GETSIG
:
6245 case TARGET_F_SETSIG
:
6247 #if TARGET_ABI_BITS == 32
6248 case TARGET_F_GETLK64
:
6250 case TARGET_F_SETLK64
:
6252 case TARGET_F_SETLKW64
:
6255 case TARGET_F_SETLEASE
:
6257 case TARGET_F_GETLEASE
:
6259 #ifdef F_DUPFD_CLOEXEC
6260 case TARGET_F_DUPFD_CLOEXEC
:
6261 return F_DUPFD_CLOEXEC
;
6263 case TARGET_F_NOTIFY
:
6266 case TARGET_F_GETOWN_EX
:
6270 case TARGET_F_SETOWN_EX
:
6274 case TARGET_F_SETPIPE_SZ
:
6275 return F_SETPIPE_SZ
;
6276 case TARGET_F_GETPIPE_SZ
:
6277 return F_GETPIPE_SZ
;
6280 return -TARGET_EINVAL
;
6282 return -TARGET_EINVAL
;
6285 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
6286 static const bitmask_transtbl flock_tbl
[] = {
6287 TRANSTBL_CONVERT(F_RDLCK
),
6288 TRANSTBL_CONVERT(F_WRLCK
),
6289 TRANSTBL_CONVERT(F_UNLCK
),
6290 TRANSTBL_CONVERT(F_EXLCK
),
6291 TRANSTBL_CONVERT(F_SHLCK
),
6295 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
6296 abi_ulong target_flock_addr
)
6298 struct target_flock
*target_fl
;
6301 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6302 return -TARGET_EFAULT
;
6305 __get_user(l_type
, &target_fl
->l_type
);
6306 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6307 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6308 __get_user(fl
->l_start
, &target_fl
->l_start
);
6309 __get_user(fl
->l_len
, &target_fl
->l_len
);
6310 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6311 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6315 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
6316 const struct flock64
*fl
)
6318 struct target_flock
*target_fl
;
6321 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6322 return -TARGET_EFAULT
;
6325 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6326 __put_user(l_type
, &target_fl
->l_type
);
6327 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6328 __put_user(fl
->l_start
, &target_fl
->l_start
);
6329 __put_user(fl
->l_len
, &target_fl
->l_len
);
6330 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6331 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6335 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
6336 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
6338 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6339 static inline abi_long
copy_from_user_eabi_flock64(struct flock64
*fl
,
6340 abi_ulong target_flock_addr
)
6342 struct target_eabi_flock64
*target_fl
;
6345 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6346 return -TARGET_EFAULT
;
6349 __get_user(l_type
, &target_fl
->l_type
);
6350 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6351 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6352 __get_user(fl
->l_start
, &target_fl
->l_start
);
6353 __get_user(fl
->l_len
, &target_fl
->l_len
);
6354 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6355 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6359 static inline abi_long
copy_to_user_eabi_flock64(abi_ulong target_flock_addr
,
6360 const struct flock64
*fl
)
6362 struct target_eabi_flock64
*target_fl
;
6365 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6366 return -TARGET_EFAULT
;
6369 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6370 __put_user(l_type
, &target_fl
->l_type
);
6371 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6372 __put_user(fl
->l_start
, &target_fl
->l_start
);
6373 __put_user(fl
->l_len
, &target_fl
->l_len
);
6374 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6375 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6380 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
6381 abi_ulong target_flock_addr
)
6383 struct target_flock64
*target_fl
;
6386 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6387 return -TARGET_EFAULT
;
6390 __get_user(l_type
, &target_fl
->l_type
);
6391 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6392 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6393 __get_user(fl
->l_start
, &target_fl
->l_start
);
6394 __get_user(fl
->l_len
, &target_fl
->l_len
);
6395 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6396 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6400 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
6401 const struct flock64
*fl
)
6403 struct target_flock64
*target_fl
;
6406 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6407 return -TARGET_EFAULT
;
6410 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6411 __put_user(l_type
, &target_fl
->l_type
);
6412 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6413 __put_user(fl
->l_start
, &target_fl
->l_start
);
6414 __put_user(fl
->l_len
, &target_fl
->l_len
);
6415 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6416 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6420 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
6422 struct flock64 fl64
;
6424 struct f_owner_ex fox
;
6425 struct target_f_owner_ex
*target_fox
;
6428 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
6430 if (host_cmd
== -TARGET_EINVAL
)
6434 case TARGET_F_GETLK
:
6435 ret
= copy_from_user_flock(&fl64
, arg
);
6439 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6441 ret
= copy_to_user_flock(arg
, &fl64
);
6445 case TARGET_F_SETLK
:
6446 case TARGET_F_SETLKW
:
6447 ret
= copy_from_user_flock(&fl64
, arg
);
6451 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6454 case TARGET_F_GETLK64
:
6455 ret
= copy_from_user_flock64(&fl64
, arg
);
6459 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6461 ret
= copy_to_user_flock64(arg
, &fl64
);
6464 case TARGET_F_SETLK64
:
6465 case TARGET_F_SETLKW64
:
6466 ret
= copy_from_user_flock64(&fl64
, arg
);
6470 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6473 case TARGET_F_GETFL
:
6474 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6476 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
6480 case TARGET_F_SETFL
:
6481 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
6482 target_to_host_bitmask(arg
,
6487 case TARGET_F_GETOWN_EX
:
6488 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6490 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
6491 return -TARGET_EFAULT
;
6492 target_fox
->type
= tswap32(fox
.type
);
6493 target_fox
->pid
= tswap32(fox
.pid
);
6494 unlock_user_struct(target_fox
, arg
, 1);
6500 case TARGET_F_SETOWN_EX
:
6501 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
6502 return -TARGET_EFAULT
;
6503 fox
.type
= tswap32(target_fox
->type
);
6504 fox
.pid
= tswap32(target_fox
->pid
);
6505 unlock_user_struct(target_fox
, arg
, 0);
6506 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6510 case TARGET_F_SETOWN
:
6511 case TARGET_F_GETOWN
:
6512 case TARGET_F_SETSIG
:
6513 case TARGET_F_GETSIG
:
6514 case TARGET_F_SETLEASE
:
6515 case TARGET_F_GETLEASE
:
6516 case TARGET_F_SETPIPE_SZ
:
6517 case TARGET_F_GETPIPE_SZ
:
6518 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6522 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
6530 static inline int high2lowuid(int uid
)
6538 static inline int high2lowgid(int gid
)
6546 static inline int low2highuid(int uid
)
6548 if ((int16_t)uid
== -1)
6554 static inline int low2highgid(int gid
)
6556 if ((int16_t)gid
== -1)
6561 static inline int tswapid(int id
)
6566 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6568 #else /* !USE_UID16 */
6569 static inline int high2lowuid(int uid
)
6573 static inline int high2lowgid(int gid
)
6577 static inline int low2highuid(int uid
)
6581 static inline int low2highgid(int gid
)
6585 static inline int tswapid(int id
)
6590 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6592 #endif /* USE_UID16 */
6594 /* We must do direct syscalls for setting UID/GID, because we want to
6595 * implement the Linux system call semantics of "change only for this thread",
6596 * not the libc/POSIX semantics of "change for all threads in process".
6597 * (See http://ewontfix.com/17/ for more details.)
6598 * We use the 32-bit version of the syscalls if present; if it is not
6599 * then either the host architecture supports 32-bit UIDs natively with
6600 * the standard syscall, or the 16-bit UID is the best we can do.
6602 #ifdef __NR_setuid32
6603 #define __NR_sys_setuid __NR_setuid32
6605 #define __NR_sys_setuid __NR_setuid
6607 #ifdef __NR_setgid32
6608 #define __NR_sys_setgid __NR_setgid32
6610 #define __NR_sys_setgid __NR_setgid
6612 #ifdef __NR_setresuid32
6613 #define __NR_sys_setresuid __NR_setresuid32
6615 #define __NR_sys_setresuid __NR_setresuid
6617 #ifdef __NR_setresgid32
6618 #define __NR_sys_setresgid __NR_setresgid32
6620 #define __NR_sys_setresgid __NR_setresgid
6623 _syscall1(int, sys_setuid
, uid_t
, uid
)
6624 _syscall1(int, sys_setgid
, gid_t
, gid
)
6625 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
6626 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
6628 void syscall_init(void)
6631 const argtype
*arg_type
;
6635 thunk_init(STRUCT_MAX
);
6637 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6638 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6639 #include "syscall_types.h"
6641 #undef STRUCT_SPECIAL
6643 /* Build target_to_host_errno_table[] table from
6644 * host_to_target_errno_table[]. */
6645 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
6646 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
6649 /* we patch the ioctl size if necessary. We rely on the fact that
6650 no ioctl has all the bits at '1' in the size field */
6652 while (ie
->target_cmd
!= 0) {
6653 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
6654 TARGET_IOC_SIZEMASK
) {
6655 arg_type
= ie
->arg_type
;
6656 if (arg_type
[0] != TYPE_PTR
) {
6657 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
6662 size
= thunk_type_size(arg_type
, 0);
6663 ie
->target_cmd
= (ie
->target_cmd
&
6664 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
6665 (size
<< TARGET_IOC_SIZESHIFT
);
6668 /* automatic consistency check if same arch */
6669 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6670 (defined(__x86_64__) && defined(TARGET_X86_64))
6671 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
6672 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6673 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
6680 #if TARGET_ABI_BITS == 32
6681 static inline uint64_t target_offset64(uint32_t word0
, uint32_t word1
)
6683 #ifdef TARGET_WORDS_BIGENDIAN
6684 return ((uint64_t)word0
<< 32) | word1
;
6686 return ((uint64_t)word1
<< 32) | word0
;
6689 #else /* TARGET_ABI_BITS == 32 */
6690 static inline uint64_t target_offset64(uint64_t word0
, uint64_t word1
)
6694 #endif /* TARGET_ABI_BITS != 32 */
6696 #ifdef TARGET_NR_truncate64
6697 static inline abi_long
target_truncate64(void *cpu_env
, const char *arg1
,
6702 if (regpairs_aligned(cpu_env
)) {
6706 return get_errno(truncate64(arg1
, target_offset64(arg2
, arg3
)));
6710 #ifdef TARGET_NR_ftruncate64
6711 static inline abi_long
target_ftruncate64(void *cpu_env
, abi_long arg1
,
6716 if (regpairs_aligned(cpu_env
)) {
6720 return get_errno(ftruncate64(arg1
, target_offset64(arg2
, arg3
)));
6724 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
6725 abi_ulong target_addr
)
6727 struct target_timespec
*target_ts
;
6729 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
6730 return -TARGET_EFAULT
;
6731 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6732 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6733 unlock_user_struct(target_ts
, target_addr
, 0);
6737 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
6738 struct timespec
*host_ts
)
6740 struct target_timespec
*target_ts
;
6742 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
6743 return -TARGET_EFAULT
;
6744 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6745 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6746 unlock_user_struct(target_ts
, target_addr
, 1);
6750 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
6751 abi_ulong target_addr
)
6753 struct target_itimerspec
*target_itspec
;
6755 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
6756 return -TARGET_EFAULT
;
6759 host_itspec
->it_interval
.tv_sec
=
6760 tswapal(target_itspec
->it_interval
.tv_sec
);
6761 host_itspec
->it_interval
.tv_nsec
=
6762 tswapal(target_itspec
->it_interval
.tv_nsec
);
6763 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
6764 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
6766 unlock_user_struct(target_itspec
, target_addr
, 1);
6770 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
6771 struct itimerspec
*host_its
)
6773 struct target_itimerspec
*target_itspec
;
6775 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
6776 return -TARGET_EFAULT
;
6779 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
6780 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
6782 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
6783 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
6785 unlock_user_struct(target_itspec
, target_addr
, 0);
6789 static inline abi_long
target_to_host_timex(struct timex
*host_tx
,
6790 abi_long target_addr
)
6792 struct target_timex
*target_tx
;
6794 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
6795 return -TARGET_EFAULT
;
6798 __get_user(host_tx
->modes
, &target_tx
->modes
);
6799 __get_user(host_tx
->offset
, &target_tx
->offset
);
6800 __get_user(host_tx
->freq
, &target_tx
->freq
);
6801 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6802 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
6803 __get_user(host_tx
->status
, &target_tx
->status
);
6804 __get_user(host_tx
->constant
, &target_tx
->constant
);
6805 __get_user(host_tx
->precision
, &target_tx
->precision
);
6806 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6807 __get_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6808 __get_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6809 __get_user(host_tx
->tick
, &target_tx
->tick
);
6810 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6811 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
6812 __get_user(host_tx
->shift
, &target_tx
->shift
);
6813 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
6814 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
6815 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
6816 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6817 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6818 __get_user(host_tx
->tai
, &target_tx
->tai
);
6820 unlock_user_struct(target_tx
, target_addr
, 0);
6824 static inline abi_long
host_to_target_timex(abi_long target_addr
,
6825 struct timex
*host_tx
)
6827 struct target_timex
*target_tx
;
6829 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
6830 return -TARGET_EFAULT
;
6833 __put_user(host_tx
->modes
, &target_tx
->modes
);
6834 __put_user(host_tx
->offset
, &target_tx
->offset
);
6835 __put_user(host_tx
->freq
, &target_tx
->freq
);
6836 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6837 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
6838 __put_user(host_tx
->status
, &target_tx
->status
);
6839 __put_user(host_tx
->constant
, &target_tx
->constant
);
6840 __put_user(host_tx
->precision
, &target_tx
->precision
);
6841 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6842 __put_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6843 __put_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6844 __put_user(host_tx
->tick
, &target_tx
->tick
);
6845 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6846 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
6847 __put_user(host_tx
->shift
, &target_tx
->shift
);
6848 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
6849 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
6850 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
6851 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6852 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6853 __put_user(host_tx
->tai
, &target_tx
->tai
);
6855 unlock_user_struct(target_tx
, target_addr
, 1);
6860 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
6861 abi_ulong target_addr
)
6863 struct target_sigevent
*target_sevp
;
6865 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
6866 return -TARGET_EFAULT
;
6869 /* This union is awkward on 64 bit systems because it has a 32 bit
6870 * integer and a pointer in it; we follow the conversion approach
6871 * used for handling sigval types in signal.c so the guest should get
6872 * the correct value back even if we did a 64 bit byteswap and it's
6873 * using the 32 bit integer.
6875 host_sevp
->sigev_value
.sival_ptr
=
6876 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
6877 host_sevp
->sigev_signo
=
6878 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
6879 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
6880 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
6882 unlock_user_struct(target_sevp
, target_addr
, 1);
6886 #if defined(TARGET_NR_mlockall)
6887 static inline int target_to_host_mlockall_arg(int arg
)
6891 if (arg
& TARGET_MLOCKALL_MCL_CURRENT
) {
6892 result
|= MCL_CURRENT
;
6894 if (arg
& TARGET_MLOCKALL_MCL_FUTURE
) {
6895 result
|= MCL_FUTURE
;
6901 static inline abi_long
host_to_target_stat64(void *cpu_env
,
6902 abi_ulong target_addr
,
6903 struct stat
*host_st
)
6905 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6906 if (((CPUARMState
*)cpu_env
)->eabi
) {
6907 struct target_eabi_stat64
*target_st
;
6909 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
6910 return -TARGET_EFAULT
;
6911 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
6912 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
6913 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
6914 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6915 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
6917 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
6918 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
6919 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
6920 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
6921 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
6922 __put_user(host_st
->st_size
, &target_st
->st_size
);
6923 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
6924 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
6925 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
6926 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
6927 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
6928 unlock_user_struct(target_st
, target_addr
, 1);
6932 #if defined(TARGET_HAS_STRUCT_STAT64)
6933 struct target_stat64
*target_st
;
6935 struct target_stat
*target_st
;
6938 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
6939 return -TARGET_EFAULT
;
6940 memset(target_st
, 0, sizeof(*target_st
));
6941 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
6942 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
6943 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6944 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
6946 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
6947 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
6948 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
6949 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
6950 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
6951 /* XXX: better use of kernel struct */
6952 __put_user(host_st
->st_size
, &target_st
->st_size
);
6953 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
6954 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
6955 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
6956 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
6957 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
6958 unlock_user_struct(target_st
, target_addr
, 1);
6964 /* ??? Using host futex calls even when target atomic operations
6965 are not really atomic probably breaks things. However implementing
6966 futexes locally would make futexes shared between multiple processes
6967 tricky. However they're probably useless because guest atomic
6968 operations won't work either. */
6969 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
6970 target_ulong uaddr2
, int val3
)
6972 struct timespec ts
, *pts
;
6975 /* ??? We assume FUTEX_* constants are the same on both host
6977 #ifdef FUTEX_CMD_MASK
6978 base_op
= op
& FUTEX_CMD_MASK
;
6984 case FUTEX_WAIT_BITSET
:
6987 target_to_host_timespec(pts
, timeout
);
6991 return get_errno(safe_futex(g2h(uaddr
), op
, tswap32(val
),
6994 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6996 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6998 case FUTEX_CMP_REQUEUE
:
7000 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7001 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7002 But the prototype takes a `struct timespec *'; insert casts
7003 to satisfy the compiler. We do not need to tswap TIMEOUT
7004 since it's not compared to guest memory. */
7005 pts
= (struct timespec
*)(uintptr_t) timeout
;
7006 return get_errno(safe_futex(g2h(uaddr
), op
, val
, pts
,
7008 (base_op
== FUTEX_CMP_REQUEUE
7012 return -TARGET_ENOSYS
;
7015 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7016 static abi_long
do_name_to_handle_at(abi_long dirfd
, abi_long pathname
,
7017 abi_long handle
, abi_long mount_id
,
7020 struct file_handle
*target_fh
;
7021 struct file_handle
*fh
;
7025 unsigned int size
, total_size
;
7027 if (get_user_s32(size
, handle
)) {
7028 return -TARGET_EFAULT
;
7031 name
= lock_user_string(pathname
);
7033 return -TARGET_EFAULT
;
7036 total_size
= sizeof(struct file_handle
) + size
;
7037 target_fh
= lock_user(VERIFY_WRITE
, handle
, total_size
, 0);
7039 unlock_user(name
, pathname
, 0);
7040 return -TARGET_EFAULT
;
7043 fh
= g_malloc0(total_size
);
7044 fh
->handle_bytes
= size
;
7046 ret
= get_errno(name_to_handle_at(dirfd
, path(name
), fh
, &mid
, flags
));
7047 unlock_user(name
, pathname
, 0);
7049 /* man name_to_handle_at(2):
7050 * Other than the use of the handle_bytes field, the caller should treat
7051 * the file_handle structure as an opaque data type
7054 memcpy(target_fh
, fh
, total_size
);
7055 target_fh
->handle_bytes
= tswap32(fh
->handle_bytes
);
7056 target_fh
->handle_type
= tswap32(fh
->handle_type
);
7058 unlock_user(target_fh
, handle
, total_size
);
7060 if (put_user_s32(mid
, mount_id
)) {
7061 return -TARGET_EFAULT
;
7069 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7070 static abi_long
do_open_by_handle_at(abi_long mount_fd
, abi_long handle
,
7073 struct file_handle
*target_fh
;
7074 struct file_handle
*fh
;
7075 unsigned int size
, total_size
;
7078 if (get_user_s32(size
, handle
)) {
7079 return -TARGET_EFAULT
;
7082 total_size
= sizeof(struct file_handle
) + size
;
7083 target_fh
= lock_user(VERIFY_READ
, handle
, total_size
, 1);
7085 return -TARGET_EFAULT
;
7088 fh
= g_memdup(target_fh
, total_size
);
7089 fh
->handle_bytes
= size
;
7090 fh
->handle_type
= tswap32(target_fh
->handle_type
);
7092 ret
= get_errno(open_by_handle_at(mount_fd
, fh
,
7093 target_to_host_bitmask(flags
, fcntl_flags_tbl
)));
7097 unlock_user(target_fh
, handle
, total_size
);
7103 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7105 /* signalfd siginfo conversion */
7108 host_to_target_signalfd_siginfo(struct signalfd_siginfo
*tinfo
,
7109 const struct signalfd_siginfo
*info
)
7111 int sig
= host_to_target_signal(info
->ssi_signo
);
7113 /* linux/signalfd.h defines a ssi_addr_lsb
7114 * not defined in sys/signalfd.h but used by some kernels
7117 #ifdef BUS_MCEERR_AO
7118 if (tinfo
->ssi_signo
== SIGBUS
&&
7119 (tinfo
->ssi_code
== BUS_MCEERR_AR
||
7120 tinfo
->ssi_code
== BUS_MCEERR_AO
)) {
7121 uint16_t *ssi_addr_lsb
= (uint16_t *)(&info
->ssi_addr
+ 1);
7122 uint16_t *tssi_addr_lsb
= (uint16_t *)(&tinfo
->ssi_addr
+ 1);
7123 *tssi_addr_lsb
= tswap16(*ssi_addr_lsb
);
7127 tinfo
->ssi_signo
= tswap32(sig
);
7128 tinfo
->ssi_errno
= tswap32(tinfo
->ssi_errno
);
7129 tinfo
->ssi_code
= tswap32(info
->ssi_code
);
7130 tinfo
->ssi_pid
= tswap32(info
->ssi_pid
);
7131 tinfo
->ssi_uid
= tswap32(info
->ssi_uid
);
7132 tinfo
->ssi_fd
= tswap32(info
->ssi_fd
);
7133 tinfo
->ssi_tid
= tswap32(info
->ssi_tid
);
7134 tinfo
->ssi_band
= tswap32(info
->ssi_band
);
7135 tinfo
->ssi_overrun
= tswap32(info
->ssi_overrun
);
7136 tinfo
->ssi_trapno
= tswap32(info
->ssi_trapno
);
7137 tinfo
->ssi_status
= tswap32(info
->ssi_status
);
7138 tinfo
->ssi_int
= tswap32(info
->ssi_int
);
7139 tinfo
->ssi_ptr
= tswap64(info
->ssi_ptr
);
7140 tinfo
->ssi_utime
= tswap64(info
->ssi_utime
);
7141 tinfo
->ssi_stime
= tswap64(info
->ssi_stime
);
7142 tinfo
->ssi_addr
= tswap64(info
->ssi_addr
);
7145 static abi_long
host_to_target_data_signalfd(void *buf
, size_t len
)
7149 for (i
= 0; i
< len
; i
+= sizeof(struct signalfd_siginfo
)) {
7150 host_to_target_signalfd_siginfo(buf
+ i
, buf
+ i
);
7156 static TargetFdTrans target_signalfd_trans
= {
7157 .host_to_target_data
= host_to_target_data_signalfd
,
7160 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
7163 target_sigset_t
*target_mask
;
7167 if (flags
& ~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
)) {
7168 return -TARGET_EINVAL
;
7170 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
7171 return -TARGET_EFAULT
;
7174 target_to_host_sigset(&host_mask
, target_mask
);
7176 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
7178 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
7180 fd_trans_register(ret
, &target_signalfd_trans
);
7183 unlock_user_struct(target_mask
, mask
, 0);
7189 /* Map host to target signal numbers for the wait family of syscalls.
7190 Assume all other status bits are the same. */
7191 int host_to_target_waitstatus(int status
)
7193 if (WIFSIGNALED(status
)) {
7194 return host_to_target_signal(WTERMSIG(status
)) | (status
& ~0x7f);
7196 if (WIFSTOPPED(status
)) {
7197 return (host_to_target_signal(WSTOPSIG(status
)) << 8)
7203 static int open_self_cmdline(void *cpu_env
, int fd
)
7206 bool word_skipped
= false;
7208 fd_orig
= open("/proc/self/cmdline", O_RDONLY
);
7218 nb_read
= read(fd_orig
, buf
, sizeof(buf
));
7221 fd_orig
= close(fd_orig
);
7224 } else if (nb_read
== 0) {
7228 if (!word_skipped
) {
7229 /* Skip the first string, which is the path to qemu-*-static
7230 instead of the actual command. */
7231 cp_buf
= memchr(buf
, 0, nb_read
);
7233 /* Null byte found, skip one string */
7235 nb_read
-= cp_buf
- buf
;
7236 word_skipped
= true;
7241 if (write(fd
, cp_buf
, nb_read
) != nb_read
) {
7250 return close(fd_orig
);
7253 static int open_self_maps(void *cpu_env
, int fd
)
7255 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7256 TaskState
*ts
= cpu
->opaque
;
7262 fp
= fopen("/proc/self/maps", "r");
7267 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7268 int fields
, dev_maj
, dev_min
, inode
;
7269 uint64_t min
, max
, offset
;
7270 char flag_r
, flag_w
, flag_x
, flag_p
;
7271 char path
[512] = "";
7272 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
7273 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
7274 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
7276 if ((fields
< 10) || (fields
> 11)) {
7279 if (h2g_valid(min
)) {
7280 int flags
= page_get_flags(h2g(min
));
7281 max
= h2g_valid(max
- 1) ? max
: (uintptr_t)g2h(GUEST_ADDR_MAX
);
7282 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
7285 if (h2g(min
) == ts
->info
->stack_limit
) {
7286 pstrcpy(path
, sizeof(path
), " [stack]");
7288 dprintf(fd
, TARGET_ABI_FMT_lx
"-" TARGET_ABI_FMT_lx
7289 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
7290 h2g(min
), h2g(max
- 1) + 1, flag_r
, flag_w
,
7291 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
7292 path
[0] ? " " : "", path
);
7302 static int open_self_stat(void *cpu_env
, int fd
)
7304 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7305 TaskState
*ts
= cpu
->opaque
;
7306 abi_ulong start_stack
= ts
->info
->start_stack
;
7309 for (i
= 0; i
< 44; i
++) {
7317 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
7318 } else if (i
== 1) {
7320 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
7321 } else if (i
== 27) {
7324 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
7326 /* for the rest, there is MasterCard */
7327 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
7331 if (write(fd
, buf
, len
) != len
) {
7339 static int open_self_auxv(void *cpu_env
, int fd
)
7341 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7342 TaskState
*ts
= cpu
->opaque
;
7343 abi_ulong auxv
= ts
->info
->saved_auxv
;
7344 abi_ulong len
= ts
->info
->auxv_len
;
7348 * Auxiliary vector is stored in target process stack.
7349 * read in whole auxv vector and copy it to file
7351 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
7355 r
= write(fd
, ptr
, len
);
7362 lseek(fd
, 0, SEEK_SET
);
7363 unlock_user(ptr
, auxv
, len
);
7369 static int is_proc_myself(const char *filename
, const char *entry
)
7371 if (!strncmp(filename
, "/proc/", strlen("/proc/"))) {
7372 filename
+= strlen("/proc/");
7373 if (!strncmp(filename
, "self/", strlen("self/"))) {
7374 filename
+= strlen("self/");
7375 } else if (*filename
>= '1' && *filename
<= '9') {
7377 snprintf(myself
, sizeof(myself
), "%d/", getpid());
7378 if (!strncmp(filename
, myself
, strlen(myself
))) {
7379 filename
+= strlen(myself
);
7386 if (!strcmp(filename
, entry
)) {
7393 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7394 static int is_proc(const char *filename
, const char *entry
)
7396 return strcmp(filename
, entry
) == 0;
7399 static int open_net_route(void *cpu_env
, int fd
)
7406 fp
= fopen("/proc/net/route", "r");
7413 read
= getline(&line
, &len
, fp
);
7414 dprintf(fd
, "%s", line
);
7418 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7420 uint32_t dest
, gw
, mask
;
7421 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
7422 sscanf(line
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7423 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
7424 &mask
, &mtu
, &window
, &irtt
);
7425 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7426 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
7427 metric
, tswap32(mask
), mtu
, window
, irtt
);
7437 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
7440 const char *filename
;
7441 int (*fill
)(void *cpu_env
, int fd
);
7442 int (*cmp
)(const char *s1
, const char *s2
);
7444 const struct fake_open
*fake_open
;
7445 static const struct fake_open fakes
[] = {
7446 { "maps", open_self_maps
, is_proc_myself
},
7447 { "stat", open_self_stat
, is_proc_myself
},
7448 { "auxv", open_self_auxv
, is_proc_myself
},
7449 { "cmdline", open_self_cmdline
, is_proc_myself
},
7450 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7451 { "/proc/net/route", open_net_route
, is_proc
},
7453 { NULL
, NULL
, NULL
}
7456 if (is_proc_myself(pathname
, "exe")) {
7457 int execfd
= qemu_getauxval(AT_EXECFD
);
7458 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
7461 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
7462 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
7467 if (fake_open
->filename
) {
7469 char filename
[PATH_MAX
];
7472 /* create temporary file to map stat to */
7473 tmpdir
= getenv("TMPDIR");
7476 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
7477 fd
= mkstemp(filename
);
7483 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
7489 lseek(fd
, 0, SEEK_SET
);
7494 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
7497 #define TIMER_MAGIC 0x0caf0000
7498 #define TIMER_MAGIC_MASK 0xffff0000
7500 /* Convert QEMU provided timer ID back to internal 16bit index format */
7501 static target_timer_t
get_timer_id(abi_long arg
)
7503 target_timer_t timerid
= arg
;
7505 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
7506 return -TARGET_EINVAL
;
7511 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
7512 return -TARGET_EINVAL
;
7518 /* do_syscall() should always have a single exit point at the end so
7519 that actions, such as logging of syscall results, can be performed.
7520 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7521 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
7522 abi_long arg2
, abi_long arg3
, abi_long arg4
,
7523 abi_long arg5
, abi_long arg6
, abi_long arg7
,
7526 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
7532 #if defined(DEBUG_ERESTARTSYS)
7533 /* Debug-only code for exercising the syscall-restart code paths
7534 * in the per-architecture cpu main loops: restart every syscall
7535 * the guest makes once before letting it through.
7542 return -TARGET_ERESTARTSYS
;
7548 gemu_log("syscall %d", num
);
7550 trace_guest_user_syscall(cpu
, num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
7552 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7555 case TARGET_NR_exit
:
7556 /* In old applications this may be used to implement _exit(2).
7557 However in threaded applictions it is used for thread termination,
7558 and _exit_group is used for application termination.
7559 Do thread termination if we have more then one thread. */
7561 if (block_signals()) {
7562 ret
= -TARGET_ERESTARTSYS
;
7568 if (CPU_NEXT(first_cpu
)) {
7571 /* Remove the CPU from the list. */
7572 QTAILQ_REMOVE(&cpus
, cpu
, node
);
7577 if (ts
->child_tidptr
) {
7578 put_user_u32(0, ts
->child_tidptr
);
7579 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
7583 object_unref(OBJECT(cpu
));
7585 rcu_unregister_thread();
7593 gdb_exit(cpu_env
, arg1
);
7595 ret
= 0; /* avoid warning */
7597 case TARGET_NR_read
:
7601 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7603 ret
= get_errno(safe_read(arg1
, p
, arg3
));
7605 fd_trans_host_to_target_data(arg1
)) {
7606 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
7608 unlock_user(p
, arg2
, ret
);
7611 case TARGET_NR_write
:
7612 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
7614 ret
= get_errno(safe_write(arg1
, p
, arg3
));
7615 unlock_user(p
, arg2
, 0);
7617 #ifdef TARGET_NR_open
7618 case TARGET_NR_open
:
7619 if (!(p
= lock_user_string(arg1
)))
7621 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
7622 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
7624 fd_trans_unregister(ret
);
7625 unlock_user(p
, arg1
, 0);
7628 case TARGET_NR_openat
:
7629 if (!(p
= lock_user_string(arg2
)))
7631 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
7632 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
7634 fd_trans_unregister(ret
);
7635 unlock_user(p
, arg2
, 0);
7637 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7638 case TARGET_NR_name_to_handle_at
:
7639 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
7642 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7643 case TARGET_NR_open_by_handle_at
:
7644 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
7645 fd_trans_unregister(ret
);
7648 case TARGET_NR_close
:
7649 fd_trans_unregister(arg1
);
7650 ret
= get_errno(close(arg1
));
7655 #ifdef TARGET_NR_fork
7656 case TARGET_NR_fork
:
7657 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
7660 #ifdef TARGET_NR_waitpid
7661 case TARGET_NR_waitpid
:
7664 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
7665 if (!is_error(ret
) && arg2
&& ret
7666 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
7671 #ifdef TARGET_NR_waitid
7672 case TARGET_NR_waitid
:
7676 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
7677 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
7678 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
7680 host_to_target_siginfo(p
, &info
);
7681 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
7686 #ifdef TARGET_NR_creat /* not on alpha */
7687 case TARGET_NR_creat
:
7688 if (!(p
= lock_user_string(arg1
)))
7690 ret
= get_errno(creat(p
, arg2
));
7691 fd_trans_unregister(ret
);
7692 unlock_user(p
, arg1
, 0);
7695 #ifdef TARGET_NR_link
7696 case TARGET_NR_link
:
7699 p
= lock_user_string(arg1
);
7700 p2
= lock_user_string(arg2
);
7702 ret
= -TARGET_EFAULT
;
7704 ret
= get_errno(link(p
, p2
));
7705 unlock_user(p2
, arg2
, 0);
7706 unlock_user(p
, arg1
, 0);
7710 #if defined(TARGET_NR_linkat)
7711 case TARGET_NR_linkat
:
7716 p
= lock_user_string(arg2
);
7717 p2
= lock_user_string(arg4
);
7719 ret
= -TARGET_EFAULT
;
7721 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
7722 unlock_user(p
, arg2
, 0);
7723 unlock_user(p2
, arg4
, 0);
7727 #ifdef TARGET_NR_unlink
7728 case TARGET_NR_unlink
:
7729 if (!(p
= lock_user_string(arg1
)))
7731 ret
= get_errno(unlink(p
));
7732 unlock_user(p
, arg1
, 0);
7735 #if defined(TARGET_NR_unlinkat)
7736 case TARGET_NR_unlinkat
:
7737 if (!(p
= lock_user_string(arg2
)))
7739 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
7740 unlock_user(p
, arg2
, 0);
7743 case TARGET_NR_execve
:
7745 char **argp
, **envp
;
7748 abi_ulong guest_argp
;
7749 abi_ulong guest_envp
;
7756 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
7757 if (get_user_ual(addr
, gp
))
7765 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
7766 if (get_user_ual(addr
, gp
))
7773 argp
= alloca((argc
+ 1) * sizeof(void *));
7774 envp
= alloca((envc
+ 1) * sizeof(void *));
7776 for (gp
= guest_argp
, q
= argp
; gp
;
7777 gp
+= sizeof(abi_ulong
), q
++) {
7778 if (get_user_ual(addr
, gp
))
7782 if (!(*q
= lock_user_string(addr
)))
7784 total_size
+= strlen(*q
) + 1;
7788 for (gp
= guest_envp
, q
= envp
; gp
;
7789 gp
+= sizeof(abi_ulong
), q
++) {
7790 if (get_user_ual(addr
, gp
))
7794 if (!(*q
= lock_user_string(addr
)))
7796 total_size
+= strlen(*q
) + 1;
7800 if (!(p
= lock_user_string(arg1
)))
7802 /* Although execve() is not an interruptible syscall it is
7803 * a special case where we must use the safe_syscall wrapper:
7804 * if we allow a signal to happen before we make the host
7805 * syscall then we will 'lose' it, because at the point of
7806 * execve the process leaves QEMU's control. So we use the
7807 * safe syscall wrapper to ensure that we either take the
7808 * signal as a guest signal, or else it does not happen
7809 * before the execve completes and makes it the other
7810 * program's problem.
7812 ret
= get_errno(safe_execve(p
, argp
, envp
));
7813 unlock_user(p
, arg1
, 0);
7818 ret
= -TARGET_EFAULT
;
7821 for (gp
= guest_argp
, q
= argp
; *q
;
7822 gp
+= sizeof(abi_ulong
), q
++) {
7823 if (get_user_ual(addr
, gp
)
7826 unlock_user(*q
, addr
, 0);
7828 for (gp
= guest_envp
, q
= envp
; *q
;
7829 gp
+= sizeof(abi_ulong
), q
++) {
7830 if (get_user_ual(addr
, gp
)
7833 unlock_user(*q
, addr
, 0);
7837 case TARGET_NR_chdir
:
7838 if (!(p
= lock_user_string(arg1
)))
7840 ret
= get_errno(chdir(p
));
7841 unlock_user(p
, arg1
, 0);
7843 #ifdef TARGET_NR_time
7844 case TARGET_NR_time
:
7847 ret
= get_errno(time(&host_time
));
7850 && put_user_sal(host_time
, arg1
))
7855 #ifdef TARGET_NR_mknod
7856 case TARGET_NR_mknod
:
7857 if (!(p
= lock_user_string(arg1
)))
7859 ret
= get_errno(mknod(p
, arg2
, arg3
));
7860 unlock_user(p
, arg1
, 0);
7863 #if defined(TARGET_NR_mknodat)
7864 case TARGET_NR_mknodat
:
7865 if (!(p
= lock_user_string(arg2
)))
7867 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
7868 unlock_user(p
, arg2
, 0);
7871 #ifdef TARGET_NR_chmod
7872 case TARGET_NR_chmod
:
7873 if (!(p
= lock_user_string(arg1
)))
7875 ret
= get_errno(chmod(p
, arg2
));
7876 unlock_user(p
, arg1
, 0);
7879 #ifdef TARGET_NR_break
7880 case TARGET_NR_break
:
7883 #ifdef TARGET_NR_oldstat
7884 case TARGET_NR_oldstat
:
7887 case TARGET_NR_lseek
:
7888 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
7890 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7891 /* Alpha specific */
7892 case TARGET_NR_getxpid
:
7893 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
7894 ret
= get_errno(getpid());
7897 #ifdef TARGET_NR_getpid
7898 case TARGET_NR_getpid
:
7899 ret
= get_errno(getpid());
7902 case TARGET_NR_mount
:
7904 /* need to look at the data field */
7908 p
= lock_user_string(arg1
);
7916 p2
= lock_user_string(arg2
);
7919 unlock_user(p
, arg1
, 0);
7925 p3
= lock_user_string(arg3
);
7928 unlock_user(p
, arg1
, 0);
7930 unlock_user(p2
, arg2
, 0);
7937 /* FIXME - arg5 should be locked, but it isn't clear how to
7938 * do that since it's not guaranteed to be a NULL-terminated
7942 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
7944 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
7946 ret
= get_errno(ret
);
7949 unlock_user(p
, arg1
, 0);
7951 unlock_user(p2
, arg2
, 0);
7953 unlock_user(p3
, arg3
, 0);
7957 #ifdef TARGET_NR_umount
7958 case TARGET_NR_umount
:
7959 if (!(p
= lock_user_string(arg1
)))
7961 ret
= get_errno(umount(p
));
7962 unlock_user(p
, arg1
, 0);
7965 #ifdef TARGET_NR_stime /* not on alpha */
7966 case TARGET_NR_stime
:
7969 if (get_user_sal(host_time
, arg1
))
7971 ret
= get_errno(stime(&host_time
));
7975 case TARGET_NR_ptrace
:
7977 #ifdef TARGET_NR_alarm /* not on alpha */
7978 case TARGET_NR_alarm
:
7982 #ifdef TARGET_NR_oldfstat
7983 case TARGET_NR_oldfstat
:
7986 #ifdef TARGET_NR_pause /* not on alpha */
7987 case TARGET_NR_pause
:
7988 if (!block_signals()) {
7989 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
7991 ret
= -TARGET_EINTR
;
7994 #ifdef TARGET_NR_utime
7995 case TARGET_NR_utime
:
7997 struct utimbuf tbuf
, *host_tbuf
;
7998 struct target_utimbuf
*target_tbuf
;
8000 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
8002 tbuf
.actime
= tswapal(target_tbuf
->actime
);
8003 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
8004 unlock_user_struct(target_tbuf
, arg2
, 0);
8009 if (!(p
= lock_user_string(arg1
)))
8011 ret
= get_errno(utime(p
, host_tbuf
));
8012 unlock_user(p
, arg1
, 0);
8016 #ifdef TARGET_NR_utimes
8017 case TARGET_NR_utimes
:
8019 struct timeval
*tvp
, tv
[2];
8021 if (copy_from_user_timeval(&tv
[0], arg2
)
8022 || copy_from_user_timeval(&tv
[1],
8023 arg2
+ sizeof(struct target_timeval
)))
8029 if (!(p
= lock_user_string(arg1
)))
8031 ret
= get_errno(utimes(p
, tvp
));
8032 unlock_user(p
, arg1
, 0);
8036 #if defined(TARGET_NR_futimesat)
8037 case TARGET_NR_futimesat
:
8039 struct timeval
*tvp
, tv
[2];
8041 if (copy_from_user_timeval(&tv
[0], arg3
)
8042 || copy_from_user_timeval(&tv
[1],
8043 arg3
+ sizeof(struct target_timeval
)))
8049 if (!(p
= lock_user_string(arg2
)))
8051 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
8052 unlock_user(p
, arg2
, 0);
8056 #ifdef TARGET_NR_stty
8057 case TARGET_NR_stty
:
8060 #ifdef TARGET_NR_gtty
8061 case TARGET_NR_gtty
:
8064 #ifdef TARGET_NR_access
8065 case TARGET_NR_access
:
8066 if (!(p
= lock_user_string(arg1
)))
8068 ret
= get_errno(access(path(p
), arg2
));
8069 unlock_user(p
, arg1
, 0);
8072 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8073 case TARGET_NR_faccessat
:
8074 if (!(p
= lock_user_string(arg2
)))
8076 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
8077 unlock_user(p
, arg2
, 0);
8080 #ifdef TARGET_NR_nice /* not on alpha */
8081 case TARGET_NR_nice
:
8082 ret
= get_errno(nice(arg1
));
8085 #ifdef TARGET_NR_ftime
8086 case TARGET_NR_ftime
:
8089 case TARGET_NR_sync
:
8093 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8094 case TARGET_NR_syncfs
:
8095 ret
= get_errno(syncfs(arg1
));
8098 case TARGET_NR_kill
:
8099 ret
= get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
8101 #ifdef TARGET_NR_rename
8102 case TARGET_NR_rename
:
8105 p
= lock_user_string(arg1
);
8106 p2
= lock_user_string(arg2
);
8108 ret
= -TARGET_EFAULT
;
8110 ret
= get_errno(rename(p
, p2
));
8111 unlock_user(p2
, arg2
, 0);
8112 unlock_user(p
, arg1
, 0);
8116 #if defined(TARGET_NR_renameat)
8117 case TARGET_NR_renameat
:
8120 p
= lock_user_string(arg2
);
8121 p2
= lock_user_string(arg4
);
8123 ret
= -TARGET_EFAULT
;
8125 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
8126 unlock_user(p2
, arg4
, 0);
8127 unlock_user(p
, arg2
, 0);
8131 #ifdef TARGET_NR_mkdir
8132 case TARGET_NR_mkdir
:
8133 if (!(p
= lock_user_string(arg1
)))
8135 ret
= get_errno(mkdir(p
, arg2
));
8136 unlock_user(p
, arg1
, 0);
8139 #if defined(TARGET_NR_mkdirat)
8140 case TARGET_NR_mkdirat
:
8141 if (!(p
= lock_user_string(arg2
)))
8143 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
8144 unlock_user(p
, arg2
, 0);
8147 #ifdef TARGET_NR_rmdir
8148 case TARGET_NR_rmdir
:
8149 if (!(p
= lock_user_string(arg1
)))
8151 ret
= get_errno(rmdir(p
));
8152 unlock_user(p
, arg1
, 0);
8156 ret
= get_errno(dup(arg1
));
8158 fd_trans_dup(arg1
, ret
);
8161 #ifdef TARGET_NR_pipe
8162 case TARGET_NR_pipe
:
8163 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
8166 #ifdef TARGET_NR_pipe2
8167 case TARGET_NR_pipe2
:
8168 ret
= do_pipe(cpu_env
, arg1
,
8169 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
8172 case TARGET_NR_times
:
8174 struct target_tms
*tmsp
;
8176 ret
= get_errno(times(&tms
));
8178 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
8181 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
8182 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
8183 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
8184 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
8187 ret
= host_to_target_clock_t(ret
);
8190 #ifdef TARGET_NR_prof
8191 case TARGET_NR_prof
:
8194 #ifdef TARGET_NR_signal
8195 case TARGET_NR_signal
:
8198 case TARGET_NR_acct
:
8200 ret
= get_errno(acct(NULL
));
8202 if (!(p
= lock_user_string(arg1
)))
8204 ret
= get_errno(acct(path(p
)));
8205 unlock_user(p
, arg1
, 0);
8208 #ifdef TARGET_NR_umount2
8209 case TARGET_NR_umount2
:
8210 if (!(p
= lock_user_string(arg1
)))
8212 ret
= get_errno(umount2(p
, arg2
));
8213 unlock_user(p
, arg1
, 0);
8216 #ifdef TARGET_NR_lock
8217 case TARGET_NR_lock
:
8220 case TARGET_NR_ioctl
:
8221 ret
= do_ioctl(arg1
, arg2
, arg3
);
8223 case TARGET_NR_fcntl
:
8224 ret
= do_fcntl(arg1
, arg2
, arg3
);
8226 #ifdef TARGET_NR_mpx
8230 case TARGET_NR_setpgid
:
8231 ret
= get_errno(setpgid(arg1
, arg2
));
8233 #ifdef TARGET_NR_ulimit
8234 case TARGET_NR_ulimit
:
8237 #ifdef TARGET_NR_oldolduname
8238 case TARGET_NR_oldolduname
:
8241 case TARGET_NR_umask
:
8242 ret
= get_errno(umask(arg1
));
8244 case TARGET_NR_chroot
:
8245 if (!(p
= lock_user_string(arg1
)))
8247 ret
= get_errno(chroot(p
));
8248 unlock_user(p
, arg1
, 0);
8250 #ifdef TARGET_NR_ustat
8251 case TARGET_NR_ustat
:
8254 #ifdef TARGET_NR_dup2
8255 case TARGET_NR_dup2
:
8256 ret
= get_errno(dup2(arg1
, arg2
));
8258 fd_trans_dup(arg1
, arg2
);
8262 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8263 case TARGET_NR_dup3
:
8264 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
8266 fd_trans_dup(arg1
, arg2
);
8270 #ifdef TARGET_NR_getppid /* not on alpha */
8271 case TARGET_NR_getppid
:
8272 ret
= get_errno(getppid());
8275 #ifdef TARGET_NR_getpgrp
8276 case TARGET_NR_getpgrp
:
8277 ret
= get_errno(getpgrp());
8280 case TARGET_NR_setsid
:
8281 ret
= get_errno(setsid());
8283 #ifdef TARGET_NR_sigaction
8284 case TARGET_NR_sigaction
:
8286 #if defined(TARGET_ALPHA)
8287 struct target_sigaction act
, oact
, *pact
= 0;
8288 struct target_old_sigaction
*old_act
;
8290 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8292 act
._sa_handler
= old_act
->_sa_handler
;
8293 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8294 act
.sa_flags
= old_act
->sa_flags
;
8295 act
.sa_restorer
= 0;
8296 unlock_user_struct(old_act
, arg2
, 0);
8299 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8300 if (!is_error(ret
) && arg3
) {
8301 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8303 old_act
->_sa_handler
= oact
._sa_handler
;
8304 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8305 old_act
->sa_flags
= oact
.sa_flags
;
8306 unlock_user_struct(old_act
, arg3
, 1);
8308 #elif defined(TARGET_MIPS)
8309 struct target_sigaction act
, oact
, *pact
, *old_act
;
8312 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8314 act
._sa_handler
= old_act
->_sa_handler
;
8315 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
8316 act
.sa_flags
= old_act
->sa_flags
;
8317 unlock_user_struct(old_act
, arg2
, 0);
8323 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8325 if (!is_error(ret
) && arg3
) {
8326 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8328 old_act
->_sa_handler
= oact
._sa_handler
;
8329 old_act
->sa_flags
= oact
.sa_flags
;
8330 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
8331 old_act
->sa_mask
.sig
[1] = 0;
8332 old_act
->sa_mask
.sig
[2] = 0;
8333 old_act
->sa_mask
.sig
[3] = 0;
8334 unlock_user_struct(old_act
, arg3
, 1);
8337 struct target_old_sigaction
*old_act
;
8338 struct target_sigaction act
, oact
, *pact
;
8340 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8342 act
._sa_handler
= old_act
->_sa_handler
;
8343 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8344 act
.sa_flags
= old_act
->sa_flags
;
8345 act
.sa_restorer
= old_act
->sa_restorer
;
8346 unlock_user_struct(old_act
, arg2
, 0);
8351 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8352 if (!is_error(ret
) && arg3
) {
8353 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8355 old_act
->_sa_handler
= oact
._sa_handler
;
8356 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8357 old_act
->sa_flags
= oact
.sa_flags
;
8358 old_act
->sa_restorer
= oact
.sa_restorer
;
8359 unlock_user_struct(old_act
, arg3
, 1);
8365 case TARGET_NR_rt_sigaction
:
8367 #if defined(TARGET_ALPHA)
8368 struct target_sigaction act
, oact
, *pact
= 0;
8369 struct target_rt_sigaction
*rt_act
;
8371 if (arg4
!= sizeof(target_sigset_t
)) {
8372 ret
= -TARGET_EINVAL
;
8376 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
8378 act
._sa_handler
= rt_act
->_sa_handler
;
8379 act
.sa_mask
= rt_act
->sa_mask
;
8380 act
.sa_flags
= rt_act
->sa_flags
;
8381 act
.sa_restorer
= arg5
;
8382 unlock_user_struct(rt_act
, arg2
, 0);
8385 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8386 if (!is_error(ret
) && arg3
) {
8387 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
8389 rt_act
->_sa_handler
= oact
._sa_handler
;
8390 rt_act
->sa_mask
= oact
.sa_mask
;
8391 rt_act
->sa_flags
= oact
.sa_flags
;
8392 unlock_user_struct(rt_act
, arg3
, 1);
8395 struct target_sigaction
*act
;
8396 struct target_sigaction
*oact
;
8398 if (arg4
!= sizeof(target_sigset_t
)) {
8399 ret
= -TARGET_EINVAL
;
8403 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
8408 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
8409 ret
= -TARGET_EFAULT
;
8410 goto rt_sigaction_fail
;
8414 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
8417 unlock_user_struct(act
, arg2
, 0);
8419 unlock_user_struct(oact
, arg3
, 1);
8423 #ifdef TARGET_NR_sgetmask /* not on alpha */
8424 case TARGET_NR_sgetmask
:
8427 abi_ulong target_set
;
8428 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8430 host_to_target_old_sigset(&target_set
, &cur_set
);
8436 #ifdef TARGET_NR_ssetmask /* not on alpha */
8437 case TARGET_NR_ssetmask
:
8439 sigset_t set
, oset
, cur_set
;
8440 abi_ulong target_set
= arg1
;
8441 /* We only have one word of the new mask so we must read
8442 * the rest of it with do_sigprocmask() and OR in this word.
8443 * We are guaranteed that a do_sigprocmask() that only queries
8444 * the signal mask will not fail.
8446 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8448 target_to_host_old_sigset(&set
, &target_set
);
8449 sigorset(&set
, &set
, &cur_set
);
8450 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
8452 host_to_target_old_sigset(&target_set
, &oset
);
8458 #ifdef TARGET_NR_sigprocmask
8459 case TARGET_NR_sigprocmask
:
8461 #if defined(TARGET_ALPHA)
8462 sigset_t set
, oldset
;
8467 case TARGET_SIG_BLOCK
:
8470 case TARGET_SIG_UNBLOCK
:
8473 case TARGET_SIG_SETMASK
:
8477 ret
= -TARGET_EINVAL
;
8481 target_to_host_old_sigset(&set
, &mask
);
8483 ret
= do_sigprocmask(how
, &set
, &oldset
);
8484 if (!is_error(ret
)) {
8485 host_to_target_old_sigset(&mask
, &oldset
);
8487 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
8490 sigset_t set
, oldset
, *set_ptr
;
8495 case TARGET_SIG_BLOCK
:
8498 case TARGET_SIG_UNBLOCK
:
8501 case TARGET_SIG_SETMASK
:
8505 ret
= -TARGET_EINVAL
;
8508 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8510 target_to_host_old_sigset(&set
, p
);
8511 unlock_user(p
, arg2
, 0);
8517 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8518 if (!is_error(ret
) && arg3
) {
8519 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8521 host_to_target_old_sigset(p
, &oldset
);
8522 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8528 case TARGET_NR_rt_sigprocmask
:
8531 sigset_t set
, oldset
, *set_ptr
;
8533 if (arg4
!= sizeof(target_sigset_t
)) {
8534 ret
= -TARGET_EINVAL
;
8540 case TARGET_SIG_BLOCK
:
8543 case TARGET_SIG_UNBLOCK
:
8546 case TARGET_SIG_SETMASK
:
8550 ret
= -TARGET_EINVAL
;
8553 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8555 target_to_host_sigset(&set
, p
);
8556 unlock_user(p
, arg2
, 0);
8562 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8563 if (!is_error(ret
) && arg3
) {
8564 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8566 host_to_target_sigset(p
, &oldset
);
8567 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8571 #ifdef TARGET_NR_sigpending
8572 case TARGET_NR_sigpending
:
8575 ret
= get_errno(sigpending(&set
));
8576 if (!is_error(ret
)) {
8577 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8579 host_to_target_old_sigset(p
, &set
);
8580 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8585 case TARGET_NR_rt_sigpending
:
8589 /* Yes, this check is >, not != like most. We follow the kernel's
8590 * logic and it does it like this because it implements
8591 * NR_sigpending through the same code path, and in that case
8592 * the old_sigset_t is smaller in size.
8594 if (arg2
> sizeof(target_sigset_t
)) {
8595 ret
= -TARGET_EINVAL
;
8599 ret
= get_errno(sigpending(&set
));
8600 if (!is_error(ret
)) {
8601 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8603 host_to_target_sigset(p
, &set
);
8604 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8608 #ifdef TARGET_NR_sigsuspend
8609 case TARGET_NR_sigsuspend
:
8611 TaskState
*ts
= cpu
->opaque
;
8612 #if defined(TARGET_ALPHA)
8613 abi_ulong mask
= arg1
;
8614 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
8616 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8618 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
8619 unlock_user(p
, arg1
, 0);
8621 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8623 if (ret
!= -TARGET_ERESTARTSYS
) {
8624 ts
->in_sigsuspend
= 1;
8629 case TARGET_NR_rt_sigsuspend
:
8631 TaskState
*ts
= cpu
->opaque
;
8633 if (arg2
!= sizeof(target_sigset_t
)) {
8634 ret
= -TARGET_EINVAL
;
8637 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8639 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
8640 unlock_user(p
, arg1
, 0);
8641 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8643 if (ret
!= -TARGET_ERESTARTSYS
) {
8644 ts
->in_sigsuspend
= 1;
8648 case TARGET_NR_rt_sigtimedwait
:
8651 struct timespec uts
, *puts
;
8654 if (arg4
!= sizeof(target_sigset_t
)) {
8655 ret
= -TARGET_EINVAL
;
8659 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8661 target_to_host_sigset(&set
, p
);
8662 unlock_user(p
, arg1
, 0);
8665 target_to_host_timespec(puts
, arg3
);
8669 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
8671 if (!is_error(ret
)) {
8673 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
8678 host_to_target_siginfo(p
, &uinfo
);
8679 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
8681 ret
= host_to_target_signal(ret
);
8685 case TARGET_NR_rt_sigqueueinfo
:
8689 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
8693 target_to_host_siginfo(&uinfo
, p
);
8694 unlock_user(p
, arg1
, 0);
8695 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
8698 #ifdef TARGET_NR_sigreturn
8699 case TARGET_NR_sigreturn
:
8700 if (block_signals()) {
8701 ret
= -TARGET_ERESTARTSYS
;
8703 ret
= do_sigreturn(cpu_env
);
8707 case TARGET_NR_rt_sigreturn
:
8708 if (block_signals()) {
8709 ret
= -TARGET_ERESTARTSYS
;
8711 ret
= do_rt_sigreturn(cpu_env
);
8714 case TARGET_NR_sethostname
:
8715 if (!(p
= lock_user_string(arg1
)))
8717 ret
= get_errno(sethostname(p
, arg2
));
8718 unlock_user(p
, arg1
, 0);
8720 case TARGET_NR_setrlimit
:
8722 int resource
= target_to_host_resource(arg1
);
8723 struct target_rlimit
*target_rlim
;
8725 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
8727 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
8728 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
8729 unlock_user_struct(target_rlim
, arg2
, 0);
8730 ret
= get_errno(setrlimit(resource
, &rlim
));
8733 case TARGET_NR_getrlimit
:
8735 int resource
= target_to_host_resource(arg1
);
8736 struct target_rlimit
*target_rlim
;
8739 ret
= get_errno(getrlimit(resource
, &rlim
));
8740 if (!is_error(ret
)) {
8741 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
8743 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
8744 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
8745 unlock_user_struct(target_rlim
, arg2
, 1);
8749 case TARGET_NR_getrusage
:
8751 struct rusage rusage
;
8752 ret
= get_errno(getrusage(arg1
, &rusage
));
8753 if (!is_error(ret
)) {
8754 ret
= host_to_target_rusage(arg2
, &rusage
);
8758 case TARGET_NR_gettimeofday
:
8761 ret
= get_errno(gettimeofday(&tv
, NULL
));
8762 if (!is_error(ret
)) {
8763 if (copy_to_user_timeval(arg1
, &tv
))
8768 case TARGET_NR_settimeofday
:
8770 struct timeval tv
, *ptv
= NULL
;
8771 struct timezone tz
, *ptz
= NULL
;
8774 if (copy_from_user_timeval(&tv
, arg1
)) {
8781 if (copy_from_user_timezone(&tz
, arg2
)) {
8787 ret
= get_errno(settimeofday(ptv
, ptz
));
8790 #if defined(TARGET_NR_select)
8791 case TARGET_NR_select
:
8792 #if defined(TARGET_WANT_NI_OLD_SELECT)
8793 /* some architectures used to have old_select here
8794 * but now ENOSYS it.
8796 ret
= -TARGET_ENOSYS
;
8797 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8798 ret
= do_old_select(arg1
);
8800 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
8804 #ifdef TARGET_NR_pselect6
8805 case TARGET_NR_pselect6
:
8807 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
8808 fd_set rfds
, wfds
, efds
;
8809 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
8810 struct timespec ts
, *ts_ptr
;
8813 * The 6th arg is actually two args smashed together,
8814 * so we cannot use the C library.
8822 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
8823 target_sigset_t
*target_sigset
;
8831 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
8835 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
8839 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
8845 * This takes a timespec, and not a timeval, so we cannot
8846 * use the do_select() helper ...
8849 if (target_to_host_timespec(&ts
, ts_addr
)) {
8857 /* Extract the two packed args for the sigset */
8860 sig
.size
= SIGSET_T_SIZE
;
8862 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
8866 arg_sigset
= tswapal(arg7
[0]);
8867 arg_sigsize
= tswapal(arg7
[1]);
8868 unlock_user(arg7
, arg6
, 0);
8872 if (arg_sigsize
!= sizeof(*target_sigset
)) {
8873 /* Like the kernel, we enforce correct size sigsets */
8874 ret
= -TARGET_EINVAL
;
8877 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
8878 sizeof(*target_sigset
), 1);
8879 if (!target_sigset
) {
8882 target_to_host_sigset(&set
, target_sigset
);
8883 unlock_user(target_sigset
, arg_sigset
, 0);
8891 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
8894 if (!is_error(ret
)) {
8895 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
8897 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
8899 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
8902 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
8908 #ifdef TARGET_NR_symlink
8909 case TARGET_NR_symlink
:
8912 p
= lock_user_string(arg1
);
8913 p2
= lock_user_string(arg2
);
8915 ret
= -TARGET_EFAULT
;
8917 ret
= get_errno(symlink(p
, p2
));
8918 unlock_user(p2
, arg2
, 0);
8919 unlock_user(p
, arg1
, 0);
8923 #if defined(TARGET_NR_symlinkat)
8924 case TARGET_NR_symlinkat
:
8927 p
= lock_user_string(arg1
);
8928 p2
= lock_user_string(arg3
);
8930 ret
= -TARGET_EFAULT
;
8932 ret
= get_errno(symlinkat(p
, arg2
, p2
));
8933 unlock_user(p2
, arg3
, 0);
8934 unlock_user(p
, arg1
, 0);
8938 #ifdef TARGET_NR_oldlstat
8939 case TARGET_NR_oldlstat
:
8942 #ifdef TARGET_NR_readlink
8943 case TARGET_NR_readlink
:
8946 p
= lock_user_string(arg1
);
8947 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8949 ret
= -TARGET_EFAULT
;
8951 /* Short circuit this for the magic exe check. */
8952 ret
= -TARGET_EINVAL
;
8953 } else if (is_proc_myself((const char *)p
, "exe")) {
8954 char real
[PATH_MAX
], *temp
;
8955 temp
= realpath(exec_path
, real
);
8956 /* Return value is # of bytes that we wrote to the buffer. */
8958 ret
= get_errno(-1);
8960 /* Don't worry about sign mismatch as earlier mapping
8961 * logic would have thrown a bad address error. */
8962 ret
= MIN(strlen(real
), arg3
);
8963 /* We cannot NUL terminate the string. */
8964 memcpy(p2
, real
, ret
);
8967 ret
= get_errno(readlink(path(p
), p2
, arg3
));
8969 unlock_user(p2
, arg2
, ret
);
8970 unlock_user(p
, arg1
, 0);
8974 #if defined(TARGET_NR_readlinkat)
8975 case TARGET_NR_readlinkat
:
8978 p
= lock_user_string(arg2
);
8979 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8981 ret
= -TARGET_EFAULT
;
8982 } else if (is_proc_myself((const char *)p
, "exe")) {
8983 char real
[PATH_MAX
], *temp
;
8984 temp
= realpath(exec_path
, real
);
8985 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
8986 snprintf((char *)p2
, arg4
, "%s", real
);
8988 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
8990 unlock_user(p2
, arg3
, ret
);
8991 unlock_user(p
, arg2
, 0);
8995 #ifdef TARGET_NR_uselib
8996 case TARGET_NR_uselib
:
8999 #ifdef TARGET_NR_swapon
9000 case TARGET_NR_swapon
:
9001 if (!(p
= lock_user_string(arg1
)))
9003 ret
= get_errno(swapon(p
, arg2
));
9004 unlock_user(p
, arg1
, 0);
9007 case TARGET_NR_reboot
:
9008 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
9009 /* arg4 must be ignored in all other cases */
9010 p
= lock_user_string(arg4
);
9014 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
9015 unlock_user(p
, arg4
, 0);
9017 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
9020 #ifdef TARGET_NR_readdir
9021 case TARGET_NR_readdir
:
9024 #ifdef TARGET_NR_mmap
9025 case TARGET_NR_mmap
:
9026 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9027 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9028 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9029 || defined(TARGET_S390X)
9032 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
9033 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
9041 unlock_user(v
, arg1
, 0);
9042 ret
= get_errno(target_mmap(v1
, v2
, v3
,
9043 target_to_host_bitmask(v4
, mmap_flags_tbl
),
9047 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9048 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9054 #ifdef TARGET_NR_mmap2
9055 case TARGET_NR_mmap2
:
9057 #define MMAP_SHIFT 12
9059 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9060 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9062 arg6
<< MMAP_SHIFT
));
9065 case TARGET_NR_munmap
:
9066 ret
= get_errno(target_munmap(arg1
, arg2
));
9068 case TARGET_NR_mprotect
:
9070 TaskState
*ts
= cpu
->opaque
;
9071 /* Special hack to detect libc making the stack executable. */
9072 if ((arg3
& PROT_GROWSDOWN
)
9073 && arg1
>= ts
->info
->stack_limit
9074 && arg1
<= ts
->info
->start_stack
) {
9075 arg3
&= ~PROT_GROWSDOWN
;
9076 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
9077 arg1
= ts
->info
->stack_limit
;
9080 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
9082 #ifdef TARGET_NR_mremap
9083 case TARGET_NR_mremap
:
9084 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
9087 /* ??? msync/mlock/munlock are broken for softmmu. */
9088 #ifdef TARGET_NR_msync
9089 case TARGET_NR_msync
:
9090 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
9093 #ifdef TARGET_NR_mlock
9094 case TARGET_NR_mlock
:
9095 ret
= get_errno(mlock(g2h(arg1
), arg2
));
9098 #ifdef TARGET_NR_munlock
9099 case TARGET_NR_munlock
:
9100 ret
= get_errno(munlock(g2h(arg1
), arg2
));
9103 #ifdef TARGET_NR_mlockall
9104 case TARGET_NR_mlockall
:
9105 ret
= get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
9108 #ifdef TARGET_NR_munlockall
9109 case TARGET_NR_munlockall
:
9110 ret
= get_errno(munlockall());
9113 case TARGET_NR_truncate
:
9114 if (!(p
= lock_user_string(arg1
)))
9116 ret
= get_errno(truncate(p
, arg2
));
9117 unlock_user(p
, arg1
, 0);
9119 case TARGET_NR_ftruncate
:
9120 ret
= get_errno(ftruncate(arg1
, arg2
));
9122 case TARGET_NR_fchmod
:
9123 ret
= get_errno(fchmod(arg1
, arg2
));
9125 #if defined(TARGET_NR_fchmodat)
9126 case TARGET_NR_fchmodat
:
9127 if (!(p
= lock_user_string(arg2
)))
9129 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
9130 unlock_user(p
, arg2
, 0);
9133 case TARGET_NR_getpriority
:
9134 /* Note that negative values are valid for getpriority, so we must
9135 differentiate based on errno settings. */
9137 ret
= getpriority(arg1
, arg2
);
9138 if (ret
== -1 && errno
!= 0) {
9139 ret
= -host_to_target_errno(errno
);
9143 /* Return value is the unbiased priority. Signal no error. */
9144 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
9146 /* Return value is a biased priority to avoid negative numbers. */
9150 case TARGET_NR_setpriority
:
9151 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
9153 #ifdef TARGET_NR_profil
9154 case TARGET_NR_profil
:
9157 case TARGET_NR_statfs
:
9158 if (!(p
= lock_user_string(arg1
)))
9160 ret
= get_errno(statfs(path(p
), &stfs
));
9161 unlock_user(p
, arg1
, 0);
9163 if (!is_error(ret
)) {
9164 struct target_statfs
*target_stfs
;
9166 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
9168 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9169 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9170 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9171 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9172 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9173 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9174 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9175 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9176 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9177 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9178 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9179 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9180 unlock_user_struct(target_stfs
, arg2
, 1);
9183 case TARGET_NR_fstatfs
:
9184 ret
= get_errno(fstatfs(arg1
, &stfs
));
9185 goto convert_statfs
;
9186 #ifdef TARGET_NR_statfs64
9187 case TARGET_NR_statfs64
:
9188 if (!(p
= lock_user_string(arg1
)))
9190 ret
= get_errno(statfs(path(p
), &stfs
));
9191 unlock_user(p
, arg1
, 0);
9193 if (!is_error(ret
)) {
9194 struct target_statfs64
*target_stfs
;
9196 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
9198 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9199 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9200 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9201 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9202 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9203 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9204 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9205 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9206 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9207 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9208 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9209 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9210 unlock_user_struct(target_stfs
, arg3
, 1);
9213 case TARGET_NR_fstatfs64
:
9214 ret
= get_errno(fstatfs(arg1
, &stfs
));
9215 goto convert_statfs64
;
9217 #ifdef TARGET_NR_ioperm
9218 case TARGET_NR_ioperm
:
9221 #ifdef TARGET_NR_socketcall
9222 case TARGET_NR_socketcall
:
9223 ret
= do_socketcall(arg1
, arg2
);
9226 #ifdef TARGET_NR_accept
9227 case TARGET_NR_accept
:
9228 ret
= do_accept4(arg1
, arg2
, arg3
, 0);
9231 #ifdef TARGET_NR_accept4
9232 case TARGET_NR_accept4
:
9233 ret
= do_accept4(arg1
, arg2
, arg3
, arg4
);
9236 #ifdef TARGET_NR_bind
9237 case TARGET_NR_bind
:
9238 ret
= do_bind(arg1
, arg2
, arg3
);
9241 #ifdef TARGET_NR_connect
9242 case TARGET_NR_connect
:
9243 ret
= do_connect(arg1
, arg2
, arg3
);
9246 #ifdef TARGET_NR_getpeername
9247 case TARGET_NR_getpeername
:
9248 ret
= do_getpeername(arg1
, arg2
, arg3
);
9251 #ifdef TARGET_NR_getsockname
9252 case TARGET_NR_getsockname
:
9253 ret
= do_getsockname(arg1
, arg2
, arg3
);
9256 #ifdef TARGET_NR_getsockopt
9257 case TARGET_NR_getsockopt
:
9258 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
9261 #ifdef TARGET_NR_listen
9262 case TARGET_NR_listen
:
9263 ret
= get_errno(listen(arg1
, arg2
));
9266 #ifdef TARGET_NR_recv
9267 case TARGET_NR_recv
:
9268 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
9271 #ifdef TARGET_NR_recvfrom
9272 case TARGET_NR_recvfrom
:
9273 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9276 #ifdef TARGET_NR_recvmsg
9277 case TARGET_NR_recvmsg
:
9278 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
9281 #ifdef TARGET_NR_send
9282 case TARGET_NR_send
:
9283 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
9286 #ifdef TARGET_NR_sendmsg
9287 case TARGET_NR_sendmsg
:
9288 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
9291 #ifdef TARGET_NR_sendmmsg
9292 case TARGET_NR_sendmmsg
:
9293 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
9295 case TARGET_NR_recvmmsg
:
9296 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
9299 #ifdef TARGET_NR_sendto
9300 case TARGET_NR_sendto
:
9301 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9304 #ifdef TARGET_NR_shutdown
9305 case TARGET_NR_shutdown
:
9306 ret
= get_errno(shutdown(arg1
, arg2
));
9309 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9310 case TARGET_NR_getrandom
:
9311 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
9315 ret
= get_errno(getrandom(p
, arg2
, arg3
));
9316 unlock_user(p
, arg1
, ret
);
9319 #ifdef TARGET_NR_socket
9320 case TARGET_NR_socket
:
9321 ret
= do_socket(arg1
, arg2
, arg3
);
9322 fd_trans_unregister(ret
);
9325 #ifdef TARGET_NR_socketpair
9326 case TARGET_NR_socketpair
:
9327 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
9330 #ifdef TARGET_NR_setsockopt
9331 case TARGET_NR_setsockopt
:
9332 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
9335 #if defined(TARGET_NR_syslog)
9336 case TARGET_NR_syslog
:
9341 case TARGET_SYSLOG_ACTION_CLOSE
: /* Close log */
9342 case TARGET_SYSLOG_ACTION_OPEN
: /* Open log */
9343 case TARGET_SYSLOG_ACTION_CLEAR
: /* Clear ring buffer */
9344 case TARGET_SYSLOG_ACTION_CONSOLE_OFF
: /* Disable logging */
9345 case TARGET_SYSLOG_ACTION_CONSOLE_ON
: /* Enable logging */
9346 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL
: /* Set messages level */
9347 case TARGET_SYSLOG_ACTION_SIZE_UNREAD
: /* Number of chars */
9348 case TARGET_SYSLOG_ACTION_SIZE_BUFFER
: /* Size of the buffer */
9350 ret
= get_errno(sys_syslog((int)arg1
, NULL
, (int)arg3
));
9353 case TARGET_SYSLOG_ACTION_READ
: /* Read from log */
9354 case TARGET_SYSLOG_ACTION_READ_CLEAR
: /* Read/clear msgs */
9355 case TARGET_SYSLOG_ACTION_READ_ALL
: /* Read last messages */
9357 ret
= -TARGET_EINVAL
;
9365 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9367 ret
= -TARGET_EFAULT
;
9370 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
9371 unlock_user(p
, arg2
, arg3
);
9381 case TARGET_NR_setitimer
:
9383 struct itimerval value
, ovalue
, *pvalue
;
9387 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
9388 || copy_from_user_timeval(&pvalue
->it_value
,
9389 arg2
+ sizeof(struct target_timeval
)))
9394 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
9395 if (!is_error(ret
) && arg3
) {
9396 if (copy_to_user_timeval(arg3
,
9397 &ovalue
.it_interval
)
9398 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
9404 case TARGET_NR_getitimer
:
9406 struct itimerval value
;
9408 ret
= get_errno(getitimer(arg1
, &value
));
9409 if (!is_error(ret
) && arg2
) {
9410 if (copy_to_user_timeval(arg2
,
9412 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
9418 #ifdef TARGET_NR_stat
9419 case TARGET_NR_stat
:
9420 if (!(p
= lock_user_string(arg1
)))
9422 ret
= get_errno(stat(path(p
), &st
));
9423 unlock_user(p
, arg1
, 0);
9426 #ifdef TARGET_NR_lstat
9427 case TARGET_NR_lstat
:
9428 if (!(p
= lock_user_string(arg1
)))
9430 ret
= get_errno(lstat(path(p
), &st
));
9431 unlock_user(p
, arg1
, 0);
9434 case TARGET_NR_fstat
:
9436 ret
= get_errno(fstat(arg1
, &st
));
9437 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9440 if (!is_error(ret
)) {
9441 struct target_stat
*target_st
;
9443 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
9445 memset(target_st
, 0, sizeof(*target_st
));
9446 __put_user(st
.st_dev
, &target_st
->st_dev
);
9447 __put_user(st
.st_ino
, &target_st
->st_ino
);
9448 __put_user(st
.st_mode
, &target_st
->st_mode
);
9449 __put_user(st
.st_uid
, &target_st
->st_uid
);
9450 __put_user(st
.st_gid
, &target_st
->st_gid
);
9451 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
9452 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
9453 __put_user(st
.st_size
, &target_st
->st_size
);
9454 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
9455 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
9456 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
9457 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
9458 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
9459 unlock_user_struct(target_st
, arg2
, 1);
9463 #ifdef TARGET_NR_olduname
9464 case TARGET_NR_olduname
:
9467 #ifdef TARGET_NR_iopl
9468 case TARGET_NR_iopl
:
9471 case TARGET_NR_vhangup
:
9472 ret
= get_errno(vhangup());
9474 #ifdef TARGET_NR_idle
9475 case TARGET_NR_idle
:
9478 #ifdef TARGET_NR_syscall
9479 case TARGET_NR_syscall
:
9480 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
9481 arg6
, arg7
, arg8
, 0);
9484 case TARGET_NR_wait4
:
9487 abi_long status_ptr
= arg2
;
9488 struct rusage rusage
, *rusage_ptr
;
9489 abi_ulong target_rusage
= arg4
;
9490 abi_long rusage_err
;
9492 rusage_ptr
= &rusage
;
9495 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
9496 if (!is_error(ret
)) {
9497 if (status_ptr
&& ret
) {
9498 status
= host_to_target_waitstatus(status
);
9499 if (put_user_s32(status
, status_ptr
))
9502 if (target_rusage
) {
9503 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
9511 #ifdef TARGET_NR_swapoff
9512 case TARGET_NR_swapoff
:
9513 if (!(p
= lock_user_string(arg1
)))
9515 ret
= get_errno(swapoff(p
));
9516 unlock_user(p
, arg1
, 0);
9519 case TARGET_NR_sysinfo
:
9521 struct target_sysinfo
*target_value
;
9522 struct sysinfo value
;
9523 ret
= get_errno(sysinfo(&value
));
9524 if (!is_error(ret
) && arg1
)
9526 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
9528 __put_user(value
.uptime
, &target_value
->uptime
);
9529 __put_user(value
.loads
[0], &target_value
->loads
[0]);
9530 __put_user(value
.loads
[1], &target_value
->loads
[1]);
9531 __put_user(value
.loads
[2], &target_value
->loads
[2]);
9532 __put_user(value
.totalram
, &target_value
->totalram
);
9533 __put_user(value
.freeram
, &target_value
->freeram
);
9534 __put_user(value
.sharedram
, &target_value
->sharedram
);
9535 __put_user(value
.bufferram
, &target_value
->bufferram
);
9536 __put_user(value
.totalswap
, &target_value
->totalswap
);
9537 __put_user(value
.freeswap
, &target_value
->freeswap
);
9538 __put_user(value
.procs
, &target_value
->procs
);
9539 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
9540 __put_user(value
.freehigh
, &target_value
->freehigh
);
9541 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
9542 unlock_user_struct(target_value
, arg1
, 1);
9546 #ifdef TARGET_NR_ipc
9548 ret
= do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9551 #ifdef TARGET_NR_semget
9552 case TARGET_NR_semget
:
9553 ret
= get_errno(semget(arg1
, arg2
, arg3
));
9556 #ifdef TARGET_NR_semop
9557 case TARGET_NR_semop
:
9558 ret
= do_semop(arg1
, arg2
, arg3
);
9561 #ifdef TARGET_NR_semctl
9562 case TARGET_NR_semctl
:
9563 ret
= do_semctl(arg1
, arg2
, arg3
, arg4
);
9566 #ifdef TARGET_NR_msgctl
9567 case TARGET_NR_msgctl
:
9568 ret
= do_msgctl(arg1
, arg2
, arg3
);
9571 #ifdef TARGET_NR_msgget
9572 case TARGET_NR_msgget
:
9573 ret
= get_errno(msgget(arg1
, arg2
));
9576 #ifdef TARGET_NR_msgrcv
9577 case TARGET_NR_msgrcv
:
9578 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
9581 #ifdef TARGET_NR_msgsnd
9582 case TARGET_NR_msgsnd
:
9583 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
9586 #ifdef TARGET_NR_shmget
9587 case TARGET_NR_shmget
:
9588 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
9591 #ifdef TARGET_NR_shmctl
9592 case TARGET_NR_shmctl
:
9593 ret
= do_shmctl(arg1
, arg2
, arg3
);
9596 #ifdef TARGET_NR_shmat
9597 case TARGET_NR_shmat
:
9598 ret
= do_shmat(cpu_env
, arg1
, arg2
, arg3
);
9601 #ifdef TARGET_NR_shmdt
9602 case TARGET_NR_shmdt
:
9603 ret
= do_shmdt(arg1
);
9606 case TARGET_NR_fsync
:
9607 ret
= get_errno(fsync(arg1
));
9609 case TARGET_NR_clone
:
9610 /* Linux manages to have three different orderings for its
9611 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9612 * match the kernel's CONFIG_CLONE_* settings.
9613 * Microblaze is further special in that it uses a sixth
9614 * implicit argument to clone for the TLS pointer.
9616 #if defined(TARGET_MICROBLAZE)
9617 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
9618 #elif defined(TARGET_CLONE_BACKWARDS)
9619 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
9620 #elif defined(TARGET_CLONE_BACKWARDS2)
9621 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
9623 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
9626 #ifdef __NR_exit_group
9627 /* new thread calls */
9628 case TARGET_NR_exit_group
:
9632 gdb_exit(cpu_env
, arg1
);
9633 ret
= get_errno(exit_group(arg1
));
9636 case TARGET_NR_setdomainname
:
9637 if (!(p
= lock_user_string(arg1
)))
9639 ret
= get_errno(setdomainname(p
, arg2
));
9640 unlock_user(p
, arg1
, 0);
9642 case TARGET_NR_uname
:
9643 /* no need to transcode because we use the linux syscall */
9645 struct new_utsname
* buf
;
9647 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
9649 ret
= get_errno(sys_uname(buf
));
9650 if (!is_error(ret
)) {
9651 /* Overwrite the native machine name with whatever is being
9653 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
9654 /* Allow the user to override the reported release. */
9655 if (qemu_uname_release
&& *qemu_uname_release
) {
9656 g_strlcpy(buf
->release
, qemu_uname_release
,
9657 sizeof(buf
->release
));
9660 unlock_user_struct(buf
, arg1
, 1);
9664 case TARGET_NR_modify_ldt
:
9665 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
9667 #if !defined(TARGET_X86_64)
9668 case TARGET_NR_vm86old
:
9670 case TARGET_NR_vm86
:
9671 ret
= do_vm86(cpu_env
, arg1
, arg2
);
9675 case TARGET_NR_adjtimex
:
9677 struct timex host_buf
;
9679 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
9682 ret
= get_errno(adjtimex(&host_buf
));
9683 if (!is_error(ret
)) {
9684 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
9690 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9691 case TARGET_NR_clock_adjtime
:
9693 struct timex htx
, *phtx
= &htx
;
9695 if (target_to_host_timex(phtx
, arg2
) != 0) {
9698 ret
= get_errno(clock_adjtime(arg1
, phtx
));
9699 if (!is_error(ret
) && phtx
) {
9700 if (host_to_target_timex(arg2
, phtx
) != 0) {
9707 #ifdef TARGET_NR_create_module
9708 case TARGET_NR_create_module
:
9710 case TARGET_NR_init_module
:
9711 case TARGET_NR_delete_module
:
9712 #ifdef TARGET_NR_get_kernel_syms
9713 case TARGET_NR_get_kernel_syms
:
9716 case TARGET_NR_quotactl
:
9718 case TARGET_NR_getpgid
:
9719 ret
= get_errno(getpgid(arg1
));
9721 case TARGET_NR_fchdir
:
9722 ret
= get_errno(fchdir(arg1
));
9724 #ifdef TARGET_NR_bdflush /* not on x86_64 */
9725 case TARGET_NR_bdflush
:
9728 #ifdef TARGET_NR_sysfs
9729 case TARGET_NR_sysfs
:
9732 case TARGET_NR_personality
:
9733 ret
= get_errno(personality(arg1
));
9735 #ifdef TARGET_NR_afs_syscall
9736 case TARGET_NR_afs_syscall
:
9739 #ifdef TARGET_NR__llseek /* Not on alpha */
9740 case TARGET_NR__llseek
:
9743 #if !defined(__NR_llseek)
9744 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
9746 ret
= get_errno(res
);
9751 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
9753 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
9759 #ifdef TARGET_NR_getdents
9760 case TARGET_NR_getdents
:
9761 #ifdef __NR_getdents
9762 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9764 struct target_dirent
*target_dirp
;
9765 struct linux_dirent
*dirp
;
9766 abi_long count
= arg3
;
9768 dirp
= g_try_malloc(count
);
9770 ret
= -TARGET_ENOMEM
;
9774 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9775 if (!is_error(ret
)) {
9776 struct linux_dirent
*de
;
9777 struct target_dirent
*tde
;
9779 int reclen
, treclen
;
9780 int count1
, tnamelen
;
9784 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9788 reclen
= de
->d_reclen
;
9789 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
9790 assert(tnamelen
>= 0);
9791 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
9792 assert(count1
+ treclen
<= count
);
9793 tde
->d_reclen
= tswap16(treclen
);
9794 tde
->d_ino
= tswapal(de
->d_ino
);
9795 tde
->d_off
= tswapal(de
->d_off
);
9796 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
9797 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9799 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9803 unlock_user(target_dirp
, arg2
, ret
);
9809 struct linux_dirent
*dirp
;
9810 abi_long count
= arg3
;
9812 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9814 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9815 if (!is_error(ret
)) {
9816 struct linux_dirent
*de
;
9821 reclen
= de
->d_reclen
;
9824 de
->d_reclen
= tswap16(reclen
);
9825 tswapls(&de
->d_ino
);
9826 tswapls(&de
->d_off
);
9827 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9831 unlock_user(dirp
, arg2
, ret
);
9835 /* Implement getdents in terms of getdents64 */
9837 struct linux_dirent64
*dirp
;
9838 abi_long count
= arg3
;
9840 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
9844 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9845 if (!is_error(ret
)) {
9846 /* Convert the dirent64 structs to target dirent. We do this
9847 * in-place, since we can guarantee that a target_dirent is no
9848 * larger than a dirent64; however this means we have to be
9849 * careful to read everything before writing in the new format.
9851 struct linux_dirent64
*de
;
9852 struct target_dirent
*tde
;
9857 tde
= (struct target_dirent
*)dirp
;
9859 int namelen
, treclen
;
9860 int reclen
= de
->d_reclen
;
9861 uint64_t ino
= de
->d_ino
;
9862 int64_t off
= de
->d_off
;
9863 uint8_t type
= de
->d_type
;
9865 namelen
= strlen(de
->d_name
);
9866 treclen
= offsetof(struct target_dirent
, d_name
)
9868 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
9870 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
9871 tde
->d_ino
= tswapal(ino
);
9872 tde
->d_off
= tswapal(off
);
9873 tde
->d_reclen
= tswap16(treclen
);
9874 /* The target_dirent type is in what was formerly a padding
9875 * byte at the end of the structure:
9877 *(((char *)tde
) + treclen
- 1) = type
;
9879 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9880 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9886 unlock_user(dirp
, arg2
, ret
);
9890 #endif /* TARGET_NR_getdents */
9891 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9892 case TARGET_NR_getdents64
:
9894 struct linux_dirent64
*dirp
;
9895 abi_long count
= arg3
;
9896 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9898 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9899 if (!is_error(ret
)) {
9900 struct linux_dirent64
*de
;
9905 reclen
= de
->d_reclen
;
9908 de
->d_reclen
= tswap16(reclen
);
9909 tswap64s((uint64_t *)&de
->d_ino
);
9910 tswap64s((uint64_t *)&de
->d_off
);
9911 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9915 unlock_user(dirp
, arg2
, ret
);
9918 #endif /* TARGET_NR_getdents64 */
9919 #if defined(TARGET_NR__newselect)
9920 case TARGET_NR__newselect
:
9921 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9924 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9925 # ifdef TARGET_NR_poll
9926 case TARGET_NR_poll
:
9928 # ifdef TARGET_NR_ppoll
9929 case TARGET_NR_ppoll
:
9932 struct target_pollfd
*target_pfd
;
9933 unsigned int nfds
= arg2
;
9940 if (nfds
> (INT_MAX
/ sizeof(struct target_pollfd
))) {
9941 ret
= -TARGET_EINVAL
;
9945 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
9946 sizeof(struct target_pollfd
) * nfds
, 1);
9951 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
9952 for (i
= 0; i
< nfds
; i
++) {
9953 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
9954 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
9959 # ifdef TARGET_NR_ppoll
9960 case TARGET_NR_ppoll
:
9962 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
9963 target_sigset_t
*target_set
;
9964 sigset_t _set
, *set
= &_set
;
9967 if (target_to_host_timespec(timeout_ts
, arg3
)) {
9968 unlock_user(target_pfd
, arg1
, 0);
9976 if (arg5
!= sizeof(target_sigset_t
)) {
9977 unlock_user(target_pfd
, arg1
, 0);
9978 ret
= -TARGET_EINVAL
;
9982 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
9984 unlock_user(target_pfd
, arg1
, 0);
9987 target_to_host_sigset(set
, target_set
);
9992 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
9993 set
, SIGSET_T_SIZE
));
9995 if (!is_error(ret
) && arg3
) {
9996 host_to_target_timespec(arg3
, timeout_ts
);
9999 unlock_user(target_set
, arg4
, 0);
10004 # ifdef TARGET_NR_poll
10005 case TARGET_NR_poll
:
10007 struct timespec ts
, *pts
;
10010 /* Convert ms to secs, ns */
10011 ts
.tv_sec
= arg3
/ 1000;
10012 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
10015 /* -ve poll() timeout means "infinite" */
10018 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
10023 g_assert_not_reached();
10026 if (!is_error(ret
)) {
10027 for(i
= 0; i
< nfds
; i
++) {
10028 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
10031 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
10035 case TARGET_NR_flock
:
10036 /* NOTE: the flock constant seems to be the same for every
10038 ret
= get_errno(safe_flock(arg1
, arg2
));
10040 case TARGET_NR_readv
:
10042 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10044 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
10045 unlock_iovec(vec
, arg2
, arg3
, 1);
10047 ret
= -host_to_target_errno(errno
);
10051 case TARGET_NR_writev
:
10053 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10055 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
10056 unlock_iovec(vec
, arg2
, arg3
, 0);
10058 ret
= -host_to_target_errno(errno
);
10062 case TARGET_NR_getsid
:
10063 ret
= get_errno(getsid(arg1
));
10065 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10066 case TARGET_NR_fdatasync
:
10067 ret
= get_errno(fdatasync(arg1
));
10070 #ifdef TARGET_NR__sysctl
10071 case TARGET_NR__sysctl
:
10072 /* We don't implement this, but ENOTDIR is always a safe
10074 ret
= -TARGET_ENOTDIR
;
10077 case TARGET_NR_sched_getaffinity
:
10079 unsigned int mask_size
;
10080 unsigned long *mask
;
10083 * sched_getaffinity needs multiples of ulong, so need to take
10084 * care of mismatches between target ulong and host ulong sizes.
10086 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10087 ret
= -TARGET_EINVAL
;
10090 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10092 mask
= alloca(mask_size
);
10093 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
10095 if (!is_error(ret
)) {
10097 /* More data returned than the caller's buffer will fit.
10098 * This only happens if sizeof(abi_long) < sizeof(long)
10099 * and the caller passed us a buffer holding an odd number
10100 * of abi_longs. If the host kernel is actually using the
10101 * extra 4 bytes then fail EINVAL; otherwise we can just
10102 * ignore them and only copy the interesting part.
10104 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
10105 if (numcpus
> arg2
* 8) {
10106 ret
= -TARGET_EINVAL
;
10112 if (copy_to_user(arg3
, mask
, ret
)) {
10118 case TARGET_NR_sched_setaffinity
:
10120 unsigned int mask_size
;
10121 unsigned long *mask
;
10124 * sched_setaffinity needs multiples of ulong, so need to take
10125 * care of mismatches between target ulong and host ulong sizes.
10127 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10128 ret
= -TARGET_EINVAL
;
10131 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10133 mask
= alloca(mask_size
);
10134 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
10137 memcpy(mask
, p
, arg2
);
10138 unlock_user_struct(p
, arg2
, 0);
10140 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
10143 case TARGET_NR_sched_setparam
:
10145 struct sched_param
*target_schp
;
10146 struct sched_param schp
;
10149 return -TARGET_EINVAL
;
10151 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
10153 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10154 unlock_user_struct(target_schp
, arg2
, 0);
10155 ret
= get_errno(sched_setparam(arg1
, &schp
));
10158 case TARGET_NR_sched_getparam
:
10160 struct sched_param
*target_schp
;
10161 struct sched_param schp
;
10164 return -TARGET_EINVAL
;
10166 ret
= get_errno(sched_getparam(arg1
, &schp
));
10167 if (!is_error(ret
)) {
10168 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
10170 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
10171 unlock_user_struct(target_schp
, arg2
, 1);
10175 case TARGET_NR_sched_setscheduler
:
10177 struct sched_param
*target_schp
;
10178 struct sched_param schp
;
10180 return -TARGET_EINVAL
;
10182 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
10184 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10185 unlock_user_struct(target_schp
, arg3
, 0);
10186 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
10189 case TARGET_NR_sched_getscheduler
:
10190 ret
= get_errno(sched_getscheduler(arg1
));
10192 case TARGET_NR_sched_yield
:
10193 ret
= get_errno(sched_yield());
10195 case TARGET_NR_sched_get_priority_max
:
10196 ret
= get_errno(sched_get_priority_max(arg1
));
10198 case TARGET_NR_sched_get_priority_min
:
10199 ret
= get_errno(sched_get_priority_min(arg1
));
10201 case TARGET_NR_sched_rr_get_interval
:
10203 struct timespec ts
;
10204 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10205 if (!is_error(ret
)) {
10206 ret
= host_to_target_timespec(arg2
, &ts
);
10210 case TARGET_NR_nanosleep
:
10212 struct timespec req
, rem
;
10213 target_to_host_timespec(&req
, arg1
);
10214 ret
= get_errno(safe_nanosleep(&req
, &rem
));
10215 if (is_error(ret
) && arg2
) {
10216 host_to_target_timespec(arg2
, &rem
);
10220 #ifdef TARGET_NR_query_module
10221 case TARGET_NR_query_module
:
10222 goto unimplemented
;
10224 #ifdef TARGET_NR_nfsservctl
10225 case TARGET_NR_nfsservctl
:
10226 goto unimplemented
;
10228 case TARGET_NR_prctl
:
10230 case PR_GET_PDEATHSIG
:
10233 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
10234 if (!is_error(ret
) && arg2
10235 && put_user_ual(deathsig
, arg2
)) {
10243 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
10247 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10248 arg3
, arg4
, arg5
));
10249 unlock_user(name
, arg2
, 16);
10254 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
10258 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10259 arg3
, arg4
, arg5
));
10260 unlock_user(name
, arg2
, 0);
10265 /* Most prctl options have no pointer arguments */
10266 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
10270 #ifdef TARGET_NR_arch_prctl
10271 case TARGET_NR_arch_prctl
:
10272 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10273 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
10276 goto unimplemented
;
10279 #ifdef TARGET_NR_pread64
10280 case TARGET_NR_pread64
:
10281 if (regpairs_aligned(cpu_env
)) {
10285 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
10287 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10288 unlock_user(p
, arg2
, ret
);
10290 case TARGET_NR_pwrite64
:
10291 if (regpairs_aligned(cpu_env
)) {
10295 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
10297 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10298 unlock_user(p
, arg2
, 0);
10301 case TARGET_NR_getcwd
:
10302 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
10304 ret
= get_errno(sys_getcwd1(p
, arg2
));
10305 unlock_user(p
, arg1
, ret
);
10307 case TARGET_NR_capget
:
10308 case TARGET_NR_capset
:
10310 struct target_user_cap_header
*target_header
;
10311 struct target_user_cap_data
*target_data
= NULL
;
10312 struct __user_cap_header_struct header
;
10313 struct __user_cap_data_struct data
[2];
10314 struct __user_cap_data_struct
*dataptr
= NULL
;
10315 int i
, target_datalen
;
10316 int data_items
= 1;
10318 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
10321 header
.version
= tswap32(target_header
->version
);
10322 header
.pid
= tswap32(target_header
->pid
);
10324 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
10325 /* Version 2 and up takes pointer to two user_data structs */
10329 target_datalen
= sizeof(*target_data
) * data_items
;
10332 if (num
== TARGET_NR_capget
) {
10333 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
10335 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
10337 if (!target_data
) {
10338 unlock_user_struct(target_header
, arg1
, 0);
10342 if (num
== TARGET_NR_capset
) {
10343 for (i
= 0; i
< data_items
; i
++) {
10344 data
[i
].effective
= tswap32(target_data
[i
].effective
);
10345 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
10346 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
10353 if (num
== TARGET_NR_capget
) {
10354 ret
= get_errno(capget(&header
, dataptr
));
10356 ret
= get_errno(capset(&header
, dataptr
));
10359 /* The kernel always updates version for both capget and capset */
10360 target_header
->version
= tswap32(header
.version
);
10361 unlock_user_struct(target_header
, arg1
, 1);
10364 if (num
== TARGET_NR_capget
) {
10365 for (i
= 0; i
< data_items
; i
++) {
10366 target_data
[i
].effective
= tswap32(data
[i
].effective
);
10367 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
10368 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
10370 unlock_user(target_data
, arg2
, target_datalen
);
10372 unlock_user(target_data
, arg2
, 0);
10377 case TARGET_NR_sigaltstack
:
10378 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
10381 #ifdef CONFIG_SENDFILE
10382 case TARGET_NR_sendfile
:
10384 off_t
*offp
= NULL
;
10387 ret
= get_user_sal(off
, arg3
);
10388 if (is_error(ret
)) {
10393 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10394 if (!is_error(ret
) && arg3
) {
10395 abi_long ret2
= put_user_sal(off
, arg3
);
10396 if (is_error(ret2
)) {
10402 #ifdef TARGET_NR_sendfile64
10403 case TARGET_NR_sendfile64
:
10405 off_t
*offp
= NULL
;
10408 ret
= get_user_s64(off
, arg3
);
10409 if (is_error(ret
)) {
10414 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10415 if (!is_error(ret
) && arg3
) {
10416 abi_long ret2
= put_user_s64(off
, arg3
);
10417 if (is_error(ret2
)) {
10425 case TARGET_NR_sendfile
:
10426 #ifdef TARGET_NR_sendfile64
10427 case TARGET_NR_sendfile64
:
10429 goto unimplemented
;
10432 #ifdef TARGET_NR_getpmsg
10433 case TARGET_NR_getpmsg
:
10434 goto unimplemented
;
10436 #ifdef TARGET_NR_putpmsg
10437 case TARGET_NR_putpmsg
:
10438 goto unimplemented
;
10440 #ifdef TARGET_NR_vfork
10441 case TARGET_NR_vfork
:
10442 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
10446 #ifdef TARGET_NR_ugetrlimit
10447 case TARGET_NR_ugetrlimit
:
10449 struct rlimit rlim
;
10450 int resource
= target_to_host_resource(arg1
);
10451 ret
= get_errno(getrlimit(resource
, &rlim
));
10452 if (!is_error(ret
)) {
10453 struct target_rlimit
*target_rlim
;
10454 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
10456 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
10457 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
10458 unlock_user_struct(target_rlim
, arg2
, 1);
10463 #ifdef TARGET_NR_truncate64
10464 case TARGET_NR_truncate64
:
10465 if (!(p
= lock_user_string(arg1
)))
10467 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
10468 unlock_user(p
, arg1
, 0);
10471 #ifdef TARGET_NR_ftruncate64
10472 case TARGET_NR_ftruncate64
:
10473 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
10476 #ifdef TARGET_NR_stat64
10477 case TARGET_NR_stat64
:
10478 if (!(p
= lock_user_string(arg1
)))
10480 ret
= get_errno(stat(path(p
), &st
));
10481 unlock_user(p
, arg1
, 0);
10482 if (!is_error(ret
))
10483 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10486 #ifdef TARGET_NR_lstat64
10487 case TARGET_NR_lstat64
:
10488 if (!(p
= lock_user_string(arg1
)))
10490 ret
= get_errno(lstat(path(p
), &st
));
10491 unlock_user(p
, arg1
, 0);
10492 if (!is_error(ret
))
10493 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10496 #ifdef TARGET_NR_fstat64
10497 case TARGET_NR_fstat64
:
10498 ret
= get_errno(fstat(arg1
, &st
));
10499 if (!is_error(ret
))
10500 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10503 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10504 #ifdef TARGET_NR_fstatat64
10505 case TARGET_NR_fstatat64
:
10507 #ifdef TARGET_NR_newfstatat
10508 case TARGET_NR_newfstatat
:
10510 if (!(p
= lock_user_string(arg2
)))
10512 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
10513 if (!is_error(ret
))
10514 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
10517 #ifdef TARGET_NR_lchown
10518 case TARGET_NR_lchown
:
10519 if (!(p
= lock_user_string(arg1
)))
10521 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10522 unlock_user(p
, arg1
, 0);
10525 #ifdef TARGET_NR_getuid
10526 case TARGET_NR_getuid
:
10527 ret
= get_errno(high2lowuid(getuid()));
10530 #ifdef TARGET_NR_getgid
10531 case TARGET_NR_getgid
:
10532 ret
= get_errno(high2lowgid(getgid()));
10535 #ifdef TARGET_NR_geteuid
10536 case TARGET_NR_geteuid
:
10537 ret
= get_errno(high2lowuid(geteuid()));
10540 #ifdef TARGET_NR_getegid
10541 case TARGET_NR_getegid
:
10542 ret
= get_errno(high2lowgid(getegid()));
10545 case TARGET_NR_setreuid
:
10546 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
10548 case TARGET_NR_setregid
:
10549 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
10551 case TARGET_NR_getgroups
:
10553 int gidsetsize
= arg1
;
10554 target_id
*target_grouplist
;
10558 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10559 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10560 if (gidsetsize
== 0)
10562 if (!is_error(ret
)) {
10563 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
10564 if (!target_grouplist
)
10566 for(i
= 0;i
< ret
; i
++)
10567 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
10568 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
10572 case TARGET_NR_setgroups
:
10574 int gidsetsize
= arg1
;
10575 target_id
*target_grouplist
;
10576 gid_t
*grouplist
= NULL
;
10579 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10580 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
10581 if (!target_grouplist
) {
10582 ret
= -TARGET_EFAULT
;
10585 for (i
= 0; i
< gidsetsize
; i
++) {
10586 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
10588 unlock_user(target_grouplist
, arg2
, 0);
10590 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
10593 case TARGET_NR_fchown
:
10594 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
10596 #if defined(TARGET_NR_fchownat)
10597 case TARGET_NR_fchownat
:
10598 if (!(p
= lock_user_string(arg2
)))
10600 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
10601 low2highgid(arg4
), arg5
));
10602 unlock_user(p
, arg2
, 0);
10605 #ifdef TARGET_NR_setresuid
10606 case TARGET_NR_setresuid
:
10607 ret
= get_errno(sys_setresuid(low2highuid(arg1
),
10609 low2highuid(arg3
)));
10612 #ifdef TARGET_NR_getresuid
10613 case TARGET_NR_getresuid
:
10615 uid_t ruid
, euid
, suid
;
10616 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10617 if (!is_error(ret
)) {
10618 if (put_user_id(high2lowuid(ruid
), arg1
)
10619 || put_user_id(high2lowuid(euid
), arg2
)
10620 || put_user_id(high2lowuid(suid
), arg3
))
10626 #ifdef TARGET_NR_getresgid
10627 case TARGET_NR_setresgid
:
10628 ret
= get_errno(sys_setresgid(low2highgid(arg1
),
10630 low2highgid(arg3
)));
10633 #ifdef TARGET_NR_getresgid
10634 case TARGET_NR_getresgid
:
10636 gid_t rgid
, egid
, sgid
;
10637 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10638 if (!is_error(ret
)) {
10639 if (put_user_id(high2lowgid(rgid
), arg1
)
10640 || put_user_id(high2lowgid(egid
), arg2
)
10641 || put_user_id(high2lowgid(sgid
), arg3
))
10647 #ifdef TARGET_NR_chown
10648 case TARGET_NR_chown
:
10649 if (!(p
= lock_user_string(arg1
)))
10651 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10652 unlock_user(p
, arg1
, 0);
10655 case TARGET_NR_setuid
:
10656 ret
= get_errno(sys_setuid(low2highuid(arg1
)));
10658 case TARGET_NR_setgid
:
10659 ret
= get_errno(sys_setgid(low2highgid(arg1
)));
10661 case TARGET_NR_setfsuid
:
10662 ret
= get_errno(setfsuid(arg1
));
10664 case TARGET_NR_setfsgid
:
10665 ret
= get_errno(setfsgid(arg1
));
10668 #ifdef TARGET_NR_lchown32
10669 case TARGET_NR_lchown32
:
10670 if (!(p
= lock_user_string(arg1
)))
10672 ret
= get_errno(lchown(p
, arg2
, arg3
));
10673 unlock_user(p
, arg1
, 0);
10676 #ifdef TARGET_NR_getuid32
10677 case TARGET_NR_getuid32
:
10678 ret
= get_errno(getuid());
10682 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10683 /* Alpha specific */
10684 case TARGET_NR_getxuid
:
10688 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
10690 ret
= get_errno(getuid());
10693 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10694 /* Alpha specific */
10695 case TARGET_NR_getxgid
:
10699 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
10701 ret
= get_errno(getgid());
10704 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10705 /* Alpha specific */
10706 case TARGET_NR_osf_getsysinfo
:
10707 ret
= -TARGET_EOPNOTSUPP
;
10709 case TARGET_GSI_IEEE_FP_CONTROL
:
10711 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
10713 /* Copied from linux ieee_fpcr_to_swcr. */
10714 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
10715 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
10716 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
10717 | SWCR_TRAP_ENABLE_DZE
10718 | SWCR_TRAP_ENABLE_OVF
);
10719 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
10720 | SWCR_TRAP_ENABLE_INE
);
10721 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
10722 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
10724 if (put_user_u64 (swcr
, arg2
))
10730 /* case GSI_IEEE_STATE_AT_SIGNAL:
10731 -- Not implemented in linux kernel.
10733 -- Retrieves current unaligned access state; not much used.
10734 case GSI_PROC_TYPE:
10735 -- Retrieves implver information; surely not used.
10736 case GSI_GET_HWRPB:
10737 -- Grabs a copy of the HWRPB; surely not used.
10742 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10743 /* Alpha specific */
10744 case TARGET_NR_osf_setsysinfo
:
10745 ret
= -TARGET_EOPNOTSUPP
;
10747 case TARGET_SSI_IEEE_FP_CONTROL
:
10749 uint64_t swcr
, fpcr
, orig_fpcr
;
10751 if (get_user_u64 (swcr
, arg2
)) {
10754 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10755 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
10757 /* Copied from linux ieee_swcr_to_fpcr. */
10758 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
10759 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
10760 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
10761 | SWCR_TRAP_ENABLE_DZE
10762 | SWCR_TRAP_ENABLE_OVF
)) << 48;
10763 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
10764 | SWCR_TRAP_ENABLE_INE
)) << 57;
10765 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
10766 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
10768 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10773 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
10775 uint64_t exc
, fpcr
, orig_fpcr
;
10778 if (get_user_u64(exc
, arg2
)) {
10782 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10784 /* We only add to the exception status here. */
10785 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
10787 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10790 /* Old exceptions are not signaled. */
10791 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
10793 /* If any exceptions set by this call,
10794 and are unmasked, send a signal. */
10796 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
10797 si_code
= TARGET_FPE_FLTRES
;
10799 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
10800 si_code
= TARGET_FPE_FLTUND
;
10802 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
10803 si_code
= TARGET_FPE_FLTOVF
;
10805 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
10806 si_code
= TARGET_FPE_FLTDIV
;
10808 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
10809 si_code
= TARGET_FPE_FLTINV
;
10811 if (si_code
!= 0) {
10812 target_siginfo_t info
;
10813 info
.si_signo
= SIGFPE
;
10815 info
.si_code
= si_code
;
10816 info
._sifields
._sigfault
._addr
10817 = ((CPUArchState
*)cpu_env
)->pc
;
10818 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
10819 QEMU_SI_FAULT
, &info
);
10824 /* case SSI_NVPAIRS:
10825 -- Used with SSIN_UACPROC to enable unaligned accesses.
10826 case SSI_IEEE_STATE_AT_SIGNAL:
10827 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10828 -- Not implemented in linux kernel
10833 #ifdef TARGET_NR_osf_sigprocmask
10834 /* Alpha specific. */
10835 case TARGET_NR_osf_sigprocmask
:
10839 sigset_t set
, oldset
;
10842 case TARGET_SIG_BLOCK
:
10845 case TARGET_SIG_UNBLOCK
:
10848 case TARGET_SIG_SETMASK
:
10852 ret
= -TARGET_EINVAL
;
10856 target_to_host_old_sigset(&set
, &mask
);
10857 ret
= do_sigprocmask(how
, &set
, &oldset
);
10859 host_to_target_old_sigset(&mask
, &oldset
);
10866 #ifdef TARGET_NR_getgid32
10867 case TARGET_NR_getgid32
:
10868 ret
= get_errno(getgid());
10871 #ifdef TARGET_NR_geteuid32
10872 case TARGET_NR_geteuid32
:
10873 ret
= get_errno(geteuid());
10876 #ifdef TARGET_NR_getegid32
10877 case TARGET_NR_getegid32
:
10878 ret
= get_errno(getegid());
10881 #ifdef TARGET_NR_setreuid32
10882 case TARGET_NR_setreuid32
:
10883 ret
= get_errno(setreuid(arg1
, arg2
));
10886 #ifdef TARGET_NR_setregid32
10887 case TARGET_NR_setregid32
:
10888 ret
= get_errno(setregid(arg1
, arg2
));
10891 #ifdef TARGET_NR_getgroups32
10892 case TARGET_NR_getgroups32
:
10894 int gidsetsize
= arg1
;
10895 uint32_t *target_grouplist
;
10899 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10900 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10901 if (gidsetsize
== 0)
10903 if (!is_error(ret
)) {
10904 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
10905 if (!target_grouplist
) {
10906 ret
= -TARGET_EFAULT
;
10909 for(i
= 0;i
< ret
; i
++)
10910 target_grouplist
[i
] = tswap32(grouplist
[i
]);
10911 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
10916 #ifdef TARGET_NR_setgroups32
10917 case TARGET_NR_setgroups32
:
10919 int gidsetsize
= arg1
;
10920 uint32_t *target_grouplist
;
10924 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10925 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
10926 if (!target_grouplist
) {
10927 ret
= -TARGET_EFAULT
;
10930 for(i
= 0;i
< gidsetsize
; i
++)
10931 grouplist
[i
] = tswap32(target_grouplist
[i
]);
10932 unlock_user(target_grouplist
, arg2
, 0);
10933 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
10937 #ifdef TARGET_NR_fchown32
10938 case TARGET_NR_fchown32
:
10939 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
10942 #ifdef TARGET_NR_setresuid32
10943 case TARGET_NR_setresuid32
:
10944 ret
= get_errno(sys_setresuid(arg1
, arg2
, arg3
));
10947 #ifdef TARGET_NR_getresuid32
10948 case TARGET_NR_getresuid32
:
10950 uid_t ruid
, euid
, suid
;
10951 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10952 if (!is_error(ret
)) {
10953 if (put_user_u32(ruid
, arg1
)
10954 || put_user_u32(euid
, arg2
)
10955 || put_user_u32(suid
, arg3
))
10961 #ifdef TARGET_NR_setresgid32
10962 case TARGET_NR_setresgid32
:
10963 ret
= get_errno(sys_setresgid(arg1
, arg2
, arg3
));
10966 #ifdef TARGET_NR_getresgid32
10967 case TARGET_NR_getresgid32
:
10969 gid_t rgid
, egid
, sgid
;
10970 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10971 if (!is_error(ret
)) {
10972 if (put_user_u32(rgid
, arg1
)
10973 || put_user_u32(egid
, arg2
)
10974 || put_user_u32(sgid
, arg3
))
10980 #ifdef TARGET_NR_chown32
10981 case TARGET_NR_chown32
:
10982 if (!(p
= lock_user_string(arg1
)))
10984 ret
= get_errno(chown(p
, arg2
, arg3
));
10985 unlock_user(p
, arg1
, 0);
10988 #ifdef TARGET_NR_setuid32
10989 case TARGET_NR_setuid32
:
10990 ret
= get_errno(sys_setuid(arg1
));
10993 #ifdef TARGET_NR_setgid32
10994 case TARGET_NR_setgid32
:
10995 ret
= get_errno(sys_setgid(arg1
));
10998 #ifdef TARGET_NR_setfsuid32
10999 case TARGET_NR_setfsuid32
:
11000 ret
= get_errno(setfsuid(arg1
));
11003 #ifdef TARGET_NR_setfsgid32
11004 case TARGET_NR_setfsgid32
:
11005 ret
= get_errno(setfsgid(arg1
));
11009 case TARGET_NR_pivot_root
:
11010 goto unimplemented
;
11011 #ifdef TARGET_NR_mincore
11012 case TARGET_NR_mincore
:
11015 ret
= -TARGET_EFAULT
;
11016 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
11018 if (!(p
= lock_user_string(arg3
)))
11020 ret
= get_errno(mincore(a
, arg2
, p
));
11021 unlock_user(p
, arg3
, ret
);
11023 unlock_user(a
, arg1
, 0);
11027 #ifdef TARGET_NR_arm_fadvise64_64
11028 case TARGET_NR_arm_fadvise64_64
:
11029 /* arm_fadvise64_64 looks like fadvise64_64 but
11030 * with different argument order: fd, advice, offset, len
11031 * rather than the usual fd, offset, len, advice.
11032 * Note that offset and len are both 64-bit so appear as
11033 * pairs of 32-bit registers.
11035 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
11036 target_offset64(arg5
, arg6
), arg2
);
11037 ret
= -host_to_target_errno(ret
);
11041 #if TARGET_ABI_BITS == 32
11043 #ifdef TARGET_NR_fadvise64_64
11044 case TARGET_NR_fadvise64_64
:
11045 /* 6 args: fd, offset (high, low), len (high, low), advice */
11046 if (regpairs_aligned(cpu_env
)) {
11047 /* offset is in (3,4), len in (5,6) and advice in 7 */
11054 ret
= -host_to_target_errno(posix_fadvise(arg1
,
11055 target_offset64(arg2
, arg3
),
11056 target_offset64(arg4
, arg5
),
11061 #ifdef TARGET_NR_fadvise64
11062 case TARGET_NR_fadvise64
:
11063 /* 5 args: fd, offset (high, low), len, advice */
11064 if (regpairs_aligned(cpu_env
)) {
11065 /* offset is in (3,4), len in 5 and advice in 6 */
11071 ret
= -host_to_target_errno(posix_fadvise(arg1
,
11072 target_offset64(arg2
, arg3
),
11077 #else /* not a 32-bit ABI */
11078 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11079 #ifdef TARGET_NR_fadvise64_64
11080 case TARGET_NR_fadvise64_64
:
11082 #ifdef TARGET_NR_fadvise64
11083 case TARGET_NR_fadvise64
:
11085 #ifdef TARGET_S390X
11087 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
11088 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
11089 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
11090 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
11094 ret
= -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
11097 #endif /* end of 64-bit ABI fadvise handling */
11099 #ifdef TARGET_NR_madvise
11100 case TARGET_NR_madvise
:
11101 /* A straight passthrough may not be safe because qemu sometimes
11102 turns private file-backed mappings into anonymous mappings.
11103 This will break MADV_DONTNEED.
11104 This is a hint, so ignoring and returning success is ok. */
11105 ret
= get_errno(0);
11108 #if TARGET_ABI_BITS == 32
11109 case TARGET_NR_fcntl64
:
11113 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
11114 to_flock64_fn
*copyto
= copy_to_user_flock64
;
11117 if (((CPUARMState
*)cpu_env
)->eabi
) {
11118 copyfrom
= copy_from_user_eabi_flock64
;
11119 copyto
= copy_to_user_eabi_flock64
;
11123 cmd
= target_to_host_fcntl_cmd(arg2
);
11124 if (cmd
== -TARGET_EINVAL
) {
11130 case TARGET_F_GETLK64
:
11131 ret
= copyfrom(&fl
, arg3
);
11135 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
11137 ret
= copyto(arg3
, &fl
);
11141 case TARGET_F_SETLK64
:
11142 case TARGET_F_SETLKW64
:
11143 ret
= copyfrom(&fl
, arg3
);
11147 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11150 ret
= do_fcntl(arg1
, arg2
, arg3
);
11156 #ifdef TARGET_NR_cacheflush
11157 case TARGET_NR_cacheflush
:
11158 /* self-modifying code is handled automatically, so nothing needed */
11162 #ifdef TARGET_NR_security
11163 case TARGET_NR_security
:
11164 goto unimplemented
;
11166 #ifdef TARGET_NR_getpagesize
11167 case TARGET_NR_getpagesize
:
11168 ret
= TARGET_PAGE_SIZE
;
11171 case TARGET_NR_gettid
:
11172 ret
= get_errno(gettid());
11174 #ifdef TARGET_NR_readahead
11175 case TARGET_NR_readahead
:
11176 #if TARGET_ABI_BITS == 32
11177 if (regpairs_aligned(cpu_env
)) {
11182 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
11184 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
11189 #ifdef TARGET_NR_setxattr
11190 case TARGET_NR_listxattr
:
11191 case TARGET_NR_llistxattr
:
11195 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11197 ret
= -TARGET_EFAULT
;
11201 p
= lock_user_string(arg1
);
11203 if (num
== TARGET_NR_listxattr
) {
11204 ret
= get_errno(listxattr(p
, b
, arg3
));
11206 ret
= get_errno(llistxattr(p
, b
, arg3
));
11209 ret
= -TARGET_EFAULT
;
11211 unlock_user(p
, arg1
, 0);
11212 unlock_user(b
, arg2
, arg3
);
11215 case TARGET_NR_flistxattr
:
11219 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11221 ret
= -TARGET_EFAULT
;
11225 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
11226 unlock_user(b
, arg2
, arg3
);
11229 case TARGET_NR_setxattr
:
11230 case TARGET_NR_lsetxattr
:
11232 void *p
, *n
, *v
= 0;
11234 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11236 ret
= -TARGET_EFAULT
;
11240 p
= lock_user_string(arg1
);
11241 n
= lock_user_string(arg2
);
11243 if (num
== TARGET_NR_setxattr
) {
11244 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
11246 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
11249 ret
= -TARGET_EFAULT
;
11251 unlock_user(p
, arg1
, 0);
11252 unlock_user(n
, arg2
, 0);
11253 unlock_user(v
, arg3
, 0);
11256 case TARGET_NR_fsetxattr
:
11260 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11262 ret
= -TARGET_EFAULT
;
11266 n
= lock_user_string(arg2
);
11268 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
11270 ret
= -TARGET_EFAULT
;
11272 unlock_user(n
, arg2
, 0);
11273 unlock_user(v
, arg3
, 0);
11276 case TARGET_NR_getxattr
:
11277 case TARGET_NR_lgetxattr
:
11279 void *p
, *n
, *v
= 0;
11281 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11283 ret
= -TARGET_EFAULT
;
11287 p
= lock_user_string(arg1
);
11288 n
= lock_user_string(arg2
);
11290 if (num
== TARGET_NR_getxattr
) {
11291 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
11293 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
11296 ret
= -TARGET_EFAULT
;
11298 unlock_user(p
, arg1
, 0);
11299 unlock_user(n
, arg2
, 0);
11300 unlock_user(v
, arg3
, arg4
);
11303 case TARGET_NR_fgetxattr
:
11307 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11309 ret
= -TARGET_EFAULT
;
11313 n
= lock_user_string(arg2
);
11315 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
11317 ret
= -TARGET_EFAULT
;
11319 unlock_user(n
, arg2
, 0);
11320 unlock_user(v
, arg3
, arg4
);
11323 case TARGET_NR_removexattr
:
11324 case TARGET_NR_lremovexattr
:
11327 p
= lock_user_string(arg1
);
11328 n
= lock_user_string(arg2
);
11330 if (num
== TARGET_NR_removexattr
) {
11331 ret
= get_errno(removexattr(p
, n
));
11333 ret
= get_errno(lremovexattr(p
, n
));
11336 ret
= -TARGET_EFAULT
;
11338 unlock_user(p
, arg1
, 0);
11339 unlock_user(n
, arg2
, 0);
11342 case TARGET_NR_fremovexattr
:
11345 n
= lock_user_string(arg2
);
11347 ret
= get_errno(fremovexattr(arg1
, n
));
11349 ret
= -TARGET_EFAULT
;
11351 unlock_user(n
, arg2
, 0);
11355 #endif /* CONFIG_ATTR */
11356 #ifdef TARGET_NR_set_thread_area
11357 case TARGET_NR_set_thread_area
:
11358 #if defined(TARGET_MIPS)
11359 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
11362 #elif defined(TARGET_CRIS)
11364 ret
= -TARGET_EINVAL
;
11366 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
11370 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11371 ret
= do_set_thread_area(cpu_env
, arg1
);
11373 #elif defined(TARGET_M68K)
11375 TaskState
*ts
= cpu
->opaque
;
11376 ts
->tp_value
= arg1
;
11381 goto unimplemented_nowarn
;
11384 #ifdef TARGET_NR_get_thread_area
11385 case TARGET_NR_get_thread_area
:
11386 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11387 ret
= do_get_thread_area(cpu_env
, arg1
);
11389 #elif defined(TARGET_M68K)
11391 TaskState
*ts
= cpu
->opaque
;
11392 ret
= ts
->tp_value
;
11396 goto unimplemented_nowarn
;
11399 #ifdef TARGET_NR_getdomainname
11400 case TARGET_NR_getdomainname
:
11401 goto unimplemented_nowarn
;
11404 #ifdef TARGET_NR_clock_gettime
11405 case TARGET_NR_clock_gettime
:
11407 struct timespec ts
;
11408 ret
= get_errno(clock_gettime(arg1
, &ts
));
11409 if (!is_error(ret
)) {
11410 host_to_target_timespec(arg2
, &ts
);
11415 #ifdef TARGET_NR_clock_getres
11416 case TARGET_NR_clock_getres
:
11418 struct timespec ts
;
11419 ret
= get_errno(clock_getres(arg1
, &ts
));
11420 if (!is_error(ret
)) {
11421 host_to_target_timespec(arg2
, &ts
);
11426 #ifdef TARGET_NR_clock_nanosleep
11427 case TARGET_NR_clock_nanosleep
:
11429 struct timespec ts
;
11430 target_to_host_timespec(&ts
, arg3
);
11431 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
11432 &ts
, arg4
? &ts
: NULL
));
11434 host_to_target_timespec(arg4
, &ts
);
11436 #if defined(TARGET_PPC)
11437 /* clock_nanosleep is odd in that it returns positive errno values.
11438 * On PPC, CR0 bit 3 should be set in such a situation. */
11439 if (ret
&& ret
!= -TARGET_ERESTARTSYS
) {
11440 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
11447 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11448 case TARGET_NR_set_tid_address
:
11449 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
11453 case TARGET_NR_tkill
:
11454 ret
= get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
11457 case TARGET_NR_tgkill
:
11458 ret
= get_errno(safe_tgkill((int)arg1
, (int)arg2
,
11459 target_to_host_signal(arg3
)));
11462 #ifdef TARGET_NR_set_robust_list
11463 case TARGET_NR_set_robust_list
:
11464 case TARGET_NR_get_robust_list
:
11465 /* The ABI for supporting robust futexes has userspace pass
11466 * the kernel a pointer to a linked list which is updated by
11467 * userspace after the syscall; the list is walked by the kernel
11468 * when the thread exits. Since the linked list in QEMU guest
11469 * memory isn't a valid linked list for the host and we have
11470 * no way to reliably intercept the thread-death event, we can't
11471 * support these. Silently return ENOSYS so that guest userspace
11472 * falls back to a non-robust futex implementation (which should
11473 * be OK except in the corner case of the guest crashing while
11474 * holding a mutex that is shared with another process via
11477 goto unimplemented_nowarn
;
11480 #if defined(TARGET_NR_utimensat)
11481 case TARGET_NR_utimensat
:
11483 struct timespec
*tsp
, ts
[2];
11487 target_to_host_timespec(ts
, arg3
);
11488 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
11492 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
11494 if (!(p
= lock_user_string(arg2
))) {
11495 ret
= -TARGET_EFAULT
;
11498 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
11499 unlock_user(p
, arg2
, 0);
11504 case TARGET_NR_futex
:
11505 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11507 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11508 case TARGET_NR_inotify_init
:
11509 ret
= get_errno(sys_inotify_init());
11512 #ifdef CONFIG_INOTIFY1
11513 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11514 case TARGET_NR_inotify_init1
:
11515 ret
= get_errno(sys_inotify_init1(arg1
));
11519 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11520 case TARGET_NR_inotify_add_watch
:
11521 p
= lock_user_string(arg2
);
11522 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
11523 unlock_user(p
, arg2
, 0);
11526 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11527 case TARGET_NR_inotify_rm_watch
:
11528 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
11532 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11533 case TARGET_NR_mq_open
:
11535 struct mq_attr posix_mq_attr
;
11538 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
11539 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
11542 p
= lock_user_string(arg1
- 1);
11546 ret
= get_errno(mq_open(p
, host_flags
, arg3
, &posix_mq_attr
));
11547 unlock_user (p
, arg1
, 0);
11551 case TARGET_NR_mq_unlink
:
11552 p
= lock_user_string(arg1
- 1);
11554 ret
= -TARGET_EFAULT
;
11557 ret
= get_errno(mq_unlink(p
));
11558 unlock_user (p
, arg1
, 0);
11561 case TARGET_NR_mq_timedsend
:
11563 struct timespec ts
;
11565 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11567 target_to_host_timespec(&ts
, arg5
);
11568 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
11569 host_to_target_timespec(arg5
, &ts
);
11571 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
11573 unlock_user (p
, arg2
, arg3
);
11577 case TARGET_NR_mq_timedreceive
:
11579 struct timespec ts
;
11582 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11584 target_to_host_timespec(&ts
, arg5
);
11585 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11587 host_to_target_timespec(arg5
, &ts
);
11589 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11592 unlock_user (p
, arg2
, arg3
);
11594 put_user_u32(prio
, arg4
);
11598 /* Not implemented for now... */
11599 /* case TARGET_NR_mq_notify: */
11602 case TARGET_NR_mq_getsetattr
:
11604 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
11607 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
11608 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
11611 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
11612 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
11619 #ifdef CONFIG_SPLICE
11620 #ifdef TARGET_NR_tee
11621 case TARGET_NR_tee
:
11623 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
11627 #ifdef TARGET_NR_splice
11628 case TARGET_NR_splice
:
11630 loff_t loff_in
, loff_out
;
11631 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
11633 if (get_user_u64(loff_in
, arg2
)) {
11636 ploff_in
= &loff_in
;
11639 if (get_user_u64(loff_out
, arg4
)) {
11642 ploff_out
= &loff_out
;
11644 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
11646 if (put_user_u64(loff_in
, arg2
)) {
11651 if (put_user_u64(loff_out
, arg4
)) {
11658 #ifdef TARGET_NR_vmsplice
11659 case TARGET_NR_vmsplice
:
11661 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
11663 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
11664 unlock_iovec(vec
, arg2
, arg3
, 0);
11666 ret
= -host_to_target_errno(errno
);
11671 #endif /* CONFIG_SPLICE */
11672 #ifdef CONFIG_EVENTFD
11673 #if defined(TARGET_NR_eventfd)
11674 case TARGET_NR_eventfd
:
11675 ret
= get_errno(eventfd(arg1
, 0));
11676 fd_trans_unregister(ret
);
11679 #if defined(TARGET_NR_eventfd2)
11680 case TARGET_NR_eventfd2
:
11682 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
11683 if (arg2
& TARGET_O_NONBLOCK
) {
11684 host_flags
|= O_NONBLOCK
;
11686 if (arg2
& TARGET_O_CLOEXEC
) {
11687 host_flags
|= O_CLOEXEC
;
11689 ret
= get_errno(eventfd(arg1
, host_flags
));
11690 fd_trans_unregister(ret
);
11694 #endif /* CONFIG_EVENTFD */
11695 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11696 case TARGET_NR_fallocate
:
11697 #if TARGET_ABI_BITS == 32
11698 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
11699 target_offset64(arg5
, arg6
)));
11701 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
11705 #if defined(CONFIG_SYNC_FILE_RANGE)
11706 #if defined(TARGET_NR_sync_file_range)
11707 case TARGET_NR_sync_file_range
:
11708 #if TARGET_ABI_BITS == 32
11709 #if defined(TARGET_MIPS)
11710 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11711 target_offset64(arg5
, arg6
), arg7
));
11713 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
11714 target_offset64(arg4
, arg5
), arg6
));
11715 #endif /* !TARGET_MIPS */
11717 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
11721 #if defined(TARGET_NR_sync_file_range2)
11722 case TARGET_NR_sync_file_range2
:
11723 /* This is like sync_file_range but the arguments are reordered */
11724 #if TARGET_ABI_BITS == 32
11725 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11726 target_offset64(arg5
, arg6
), arg2
));
11728 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
11733 #if defined(TARGET_NR_signalfd4)
11734 case TARGET_NR_signalfd4
:
11735 ret
= do_signalfd4(arg1
, arg2
, arg4
);
11738 #if defined(TARGET_NR_signalfd)
11739 case TARGET_NR_signalfd
:
11740 ret
= do_signalfd4(arg1
, arg2
, 0);
11743 #if defined(CONFIG_EPOLL)
11744 #if defined(TARGET_NR_epoll_create)
11745 case TARGET_NR_epoll_create
:
11746 ret
= get_errno(epoll_create(arg1
));
11749 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11750 case TARGET_NR_epoll_create1
:
11751 ret
= get_errno(epoll_create1(arg1
));
11754 #if defined(TARGET_NR_epoll_ctl)
11755 case TARGET_NR_epoll_ctl
:
11757 struct epoll_event ep
;
11758 struct epoll_event
*epp
= 0;
11760 struct target_epoll_event
*target_ep
;
11761 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
11764 ep
.events
= tswap32(target_ep
->events
);
11765 /* The epoll_data_t union is just opaque data to the kernel,
11766 * so we transfer all 64 bits across and need not worry what
11767 * actual data type it is.
11769 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
11770 unlock_user_struct(target_ep
, arg4
, 0);
11773 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
11778 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11779 #if defined(TARGET_NR_epoll_wait)
11780 case TARGET_NR_epoll_wait
:
11782 #if defined(TARGET_NR_epoll_pwait)
11783 case TARGET_NR_epoll_pwait
:
11786 struct target_epoll_event
*target_ep
;
11787 struct epoll_event
*ep
;
11789 int maxevents
= arg3
;
11790 int timeout
= arg4
;
11792 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
11793 ret
= -TARGET_EINVAL
;
11797 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
11798 maxevents
* sizeof(struct target_epoll_event
), 1);
11803 ep
= g_try_new(struct epoll_event
, maxevents
);
11805 unlock_user(target_ep
, arg2
, 0);
11806 ret
= -TARGET_ENOMEM
;
11811 #if defined(TARGET_NR_epoll_pwait)
11812 case TARGET_NR_epoll_pwait
:
11814 target_sigset_t
*target_set
;
11815 sigset_t _set
, *set
= &_set
;
11818 if (arg6
!= sizeof(target_sigset_t
)) {
11819 ret
= -TARGET_EINVAL
;
11823 target_set
= lock_user(VERIFY_READ
, arg5
,
11824 sizeof(target_sigset_t
), 1);
11826 ret
= -TARGET_EFAULT
;
11829 target_to_host_sigset(set
, target_set
);
11830 unlock_user(target_set
, arg5
, 0);
11835 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
11836 set
, SIGSET_T_SIZE
));
11840 #if defined(TARGET_NR_epoll_wait)
11841 case TARGET_NR_epoll_wait
:
11842 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
11847 ret
= -TARGET_ENOSYS
;
11849 if (!is_error(ret
)) {
11851 for (i
= 0; i
< ret
; i
++) {
11852 target_ep
[i
].events
= tswap32(ep
[i
].events
);
11853 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
11855 unlock_user(target_ep
, arg2
,
11856 ret
* sizeof(struct target_epoll_event
));
11858 unlock_user(target_ep
, arg2
, 0);
11865 #ifdef TARGET_NR_prlimit64
11866 case TARGET_NR_prlimit64
:
11868 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11869 struct target_rlimit64
*target_rnew
, *target_rold
;
11870 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
11871 int resource
= target_to_host_resource(arg2
);
11873 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
11876 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
11877 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
11878 unlock_user_struct(target_rnew
, arg3
, 0);
11882 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
11883 if (!is_error(ret
) && arg4
) {
11884 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
11887 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
11888 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
11889 unlock_user_struct(target_rold
, arg4
, 1);
11894 #ifdef TARGET_NR_gethostname
11895 case TARGET_NR_gethostname
:
11897 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
11899 ret
= get_errno(gethostname(name
, arg2
));
11900 unlock_user(name
, arg1
, arg2
);
11902 ret
= -TARGET_EFAULT
;
11907 #ifdef TARGET_NR_atomic_cmpxchg_32
11908 case TARGET_NR_atomic_cmpxchg_32
:
11910 /* should use start_exclusive from main.c */
11911 abi_ulong mem_value
;
11912 if (get_user_u32(mem_value
, arg6
)) {
11913 target_siginfo_t info
;
11914 info
.si_signo
= SIGSEGV
;
11916 info
.si_code
= TARGET_SEGV_MAPERR
;
11917 info
._sifields
._sigfault
._addr
= arg6
;
11918 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
11919 QEMU_SI_FAULT
, &info
);
11923 if (mem_value
== arg2
)
11924 put_user_u32(arg1
, arg6
);
11929 #ifdef TARGET_NR_atomic_barrier
11930 case TARGET_NR_atomic_barrier
:
11932 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
11938 #ifdef TARGET_NR_timer_create
11939 case TARGET_NR_timer_create
:
11941 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11943 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
11946 int timer_index
= next_free_host_timer();
11948 if (timer_index
< 0) {
11949 ret
= -TARGET_EAGAIN
;
11951 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
11954 phost_sevp
= &host_sevp
;
11955 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
11961 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
11965 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
11974 #ifdef TARGET_NR_timer_settime
11975 case TARGET_NR_timer_settime
:
11977 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11978 * struct itimerspec * old_value */
11979 target_timer_t timerid
= get_timer_id(arg1
);
11983 } else if (arg3
== 0) {
11984 ret
= -TARGET_EINVAL
;
11986 timer_t htimer
= g_posix_timers
[timerid
];
11987 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
11989 target_to_host_itimerspec(&hspec_new
, arg3
);
11991 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
11992 host_to_target_itimerspec(arg2
, &hspec_old
);
11998 #ifdef TARGET_NR_timer_gettime
11999 case TARGET_NR_timer_gettime
:
12001 /* args: timer_t timerid, struct itimerspec *curr_value */
12002 target_timer_t timerid
= get_timer_id(arg1
);
12006 } else if (!arg2
) {
12007 ret
= -TARGET_EFAULT
;
12009 timer_t htimer
= g_posix_timers
[timerid
];
12010 struct itimerspec hspec
;
12011 ret
= get_errno(timer_gettime(htimer
, &hspec
));
12013 if (host_to_target_itimerspec(arg2
, &hspec
)) {
12014 ret
= -TARGET_EFAULT
;
12021 #ifdef TARGET_NR_timer_getoverrun
12022 case TARGET_NR_timer_getoverrun
:
12024 /* args: timer_t timerid */
12025 target_timer_t timerid
= get_timer_id(arg1
);
12030 timer_t htimer
= g_posix_timers
[timerid
];
12031 ret
= get_errno(timer_getoverrun(htimer
));
12033 fd_trans_unregister(ret
);
12038 #ifdef TARGET_NR_timer_delete
12039 case TARGET_NR_timer_delete
:
12041 /* args: timer_t timerid */
12042 target_timer_t timerid
= get_timer_id(arg1
);
12047 timer_t htimer
= g_posix_timers
[timerid
];
12048 ret
= get_errno(timer_delete(htimer
));
12049 g_posix_timers
[timerid
] = 0;
12055 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12056 case TARGET_NR_timerfd_create
:
12057 ret
= get_errno(timerfd_create(arg1
,
12058 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
12062 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12063 case TARGET_NR_timerfd_gettime
:
12065 struct itimerspec its_curr
;
12067 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
12069 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
12076 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12077 case TARGET_NR_timerfd_settime
:
12079 struct itimerspec its_new
, its_old
, *p_new
;
12082 if (target_to_host_itimerspec(&its_new
, arg3
)) {
12090 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
12092 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
12099 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12100 case TARGET_NR_ioprio_get
:
12101 ret
= get_errno(ioprio_get(arg1
, arg2
));
12105 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12106 case TARGET_NR_ioprio_set
:
12107 ret
= get_errno(ioprio_set(arg1
, arg2
, arg3
));
12111 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12112 case TARGET_NR_setns
:
12113 ret
= get_errno(setns(arg1
, arg2
));
12116 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12117 case TARGET_NR_unshare
:
12118 ret
= get_errno(unshare(arg1
));
12121 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12122 case TARGET_NR_kcmp
:
12123 ret
= get_errno(kcmp(arg1
, arg2
, arg3
, arg4
, arg5
));
12129 gemu_log("qemu: Unsupported syscall: %d\n", num
);
12130 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
12131 unimplemented_nowarn
:
12133 ret
= -TARGET_ENOSYS
;
12138 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
12141 print_syscall_ret(num
, ret
);
12142 trace_guest_user_syscall_ret(cpu
, num
, ret
);
12145 ret
= -TARGET_EFAULT
;