4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
36 #include <linux/capability.h>
39 int __clone2(int (*fn
)(void *), void *child_stack_base
,
40 size_t stack_size
, int flags
, void *arg
, ...);
42 #include <sys/socket.h>
46 #include <sys/times.h>
49 #include <sys/statfs.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include "qemu-common.h"
60 #include <sys/timerfd.h>
66 #include <sys/eventfd.h>
69 #include <sys/epoll.h>
72 #include "qemu/xattr.h"
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
91 #include <linux/mtio.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
107 #include <linux/if_bridge.h>
109 #include <linux/audit.h>
110 #include "linux_loop.h"
116 #define CLONE_IO 0x80000000 /* Clone io context */
119 /* We can't directly call the host clone syscall, because this will
120 * badly confuse libc (breaking mutexes, for example). So we must
121 * divide clone flags into:
122 * * flag combinations that look like pthread_create()
123 * * flag combinations that look like fork()
124 * * flags we can implement within QEMU itself
125 * * flags we can't support and will return an error for
127 /* For thread creation, all these flags must be present; for
128 * fork, none must be present.
130 #define CLONE_THREAD_FLAGS \
131 (CLONE_VM | CLONE_FS | CLONE_FILES | \
132 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
134 /* These flags are ignored:
135 * CLONE_DETACHED is now ignored by the kernel;
136 * CLONE_IO is just an optimisation hint to the I/O scheduler
138 #define CLONE_IGNORED_FLAGS \
139 (CLONE_DETACHED | CLONE_IO)
141 /* Flags for fork which we can implement within QEMU itself */
142 #define CLONE_OPTIONAL_FORK_FLAGS \
143 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
144 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
146 /* Flags for thread creation which we can implement within QEMU itself */
147 #define CLONE_OPTIONAL_THREAD_FLAGS \
148 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
149 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
151 #define CLONE_INVALID_FORK_FLAGS \
152 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
154 #define CLONE_INVALID_THREAD_FLAGS \
155 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
156 CLONE_IGNORED_FLAGS))
158 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
159 * have almost all been allocated. We cannot support any of
160 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
161 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
162 * The checks against the invalid thread masks above will catch these.
163 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
167 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
168 * once. This exercises the codepaths for restart.
170 //#define DEBUG_ERESTARTSYS
172 //#include <linux/msdos_fs.h>
173 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
174 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
/* Generators for thin host-syscall wrappers: each defines a static
 * function `name` that invokes the raw host syscall __NR_<name> with
 * the given typed arguments and returns its raw result.
 */
#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
231 #define __NR_sys_uname __NR_uname
232 #define __NR_sys_getcwd1 __NR_getcwd
233 #define __NR_sys_getdents __NR_getdents
234 #define __NR_sys_getdents64 __NR_getdents64
235 #define __NR_sys_getpriority __NR_getpriority
236 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
237 #define __NR_sys_syslog __NR_syslog
238 #define __NR_sys_futex __NR_futex
239 #define __NR_sys_inotify_init __NR_inotify_init
240 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
241 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
243 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
245 #define __NR__llseek __NR_lseek
248 /* Newer kernel ports have llseek() instead of _llseek() */
249 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
250 #define TARGET_NR__llseek TARGET_NR_llseek
254 _syscall0(int, gettid
)
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    /* No host __NR_gettid available; report "not implemented". */
    return -ENOSYS;
}
262 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
263 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
265 #if !defined(__NR_getdents) || \
266 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
267 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
269 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
270 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
271 loff_t
*, res
, uint
, wh
);
273 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
274 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
275 #ifdef __NR_exit_group
276 _syscall1(int,exit_group
,int,error_code
)
278 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
279 _syscall1(int,set_tid_address
,int *,tidptr
)
281 #if defined(TARGET_NR_futex) && defined(__NR_futex)
282 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
283 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
285 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
286 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
287 unsigned long *, user_mask_ptr
);
288 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
289 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
290 unsigned long *, user_mask_ptr
);
291 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
293 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
294 struct __user_cap_data_struct
*, data
);
295 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
296 struct __user_cap_data_struct
*, data
);
297 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
298 _syscall2(int, ioprio_get
, int, which
, int, who
)
300 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
301 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
303 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
304 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
307 static bitmask_transtbl fcntl_flags_tbl
[] = {
308 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
309 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
310 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
311 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
312 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
313 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
314 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
315 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
316 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
317 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
318 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
319 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
320 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
321 #if defined(O_DIRECT)
322 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
324 #if defined(O_NOATIME)
325 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
327 #if defined(O_CLOEXEC)
328 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
331 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
333 /* Don't terminate the list prematurely on 64-bit host+guest. */
334 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
335 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
342 QEMU_IFLA_BR_FORWARD_DELAY
,
343 QEMU_IFLA_BR_HELLO_TIME
,
344 QEMU_IFLA_BR_MAX_AGE
,
345 QEMU_IFLA_BR_AGEING_TIME
,
346 QEMU_IFLA_BR_STP_STATE
,
347 QEMU_IFLA_BR_PRIORITY
,
348 QEMU_IFLA_BR_VLAN_FILTERING
,
349 QEMU_IFLA_BR_VLAN_PROTOCOL
,
350 QEMU_IFLA_BR_GROUP_FWD_MASK
,
351 QEMU_IFLA_BR_ROOT_ID
,
352 QEMU_IFLA_BR_BRIDGE_ID
,
353 QEMU_IFLA_BR_ROOT_PORT
,
354 QEMU_IFLA_BR_ROOT_PATH_COST
,
355 QEMU_IFLA_BR_TOPOLOGY_CHANGE
,
356 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED
,
357 QEMU_IFLA_BR_HELLO_TIMER
,
358 QEMU_IFLA_BR_TCN_TIMER
,
359 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER
,
360 QEMU_IFLA_BR_GC_TIMER
,
361 QEMU_IFLA_BR_GROUP_ADDR
,
362 QEMU_IFLA_BR_FDB_FLUSH
,
363 QEMU_IFLA_BR_MCAST_ROUTER
,
364 QEMU_IFLA_BR_MCAST_SNOOPING
,
365 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR
,
366 QEMU_IFLA_BR_MCAST_QUERIER
,
367 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY
,
368 QEMU_IFLA_BR_MCAST_HASH_MAX
,
369 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT
,
370 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT
,
371 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL
,
372 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL
,
373 QEMU_IFLA_BR_MCAST_QUERIER_INTVL
,
374 QEMU_IFLA_BR_MCAST_QUERY_INTVL
,
375 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
,
376 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL
,
377 QEMU_IFLA_BR_NF_CALL_IPTABLES
,
378 QEMU_IFLA_BR_NF_CALL_IP6TABLES
,
379 QEMU_IFLA_BR_NF_CALL_ARPTABLES
,
380 QEMU_IFLA_BR_VLAN_DEFAULT_PVID
,
382 QEMU_IFLA_BR_VLAN_STATS_ENABLED
,
383 QEMU_IFLA_BR_MCAST_STATS_ENABLED
,
407 QEMU_IFLA_NET_NS_PID
,
410 QEMU_IFLA_VFINFO_LIST
,
418 QEMU_IFLA_PROMISCUITY
,
419 QEMU_IFLA_NUM_TX_QUEUES
,
420 QEMU_IFLA_NUM_RX_QUEUES
,
422 QEMU_IFLA_PHYS_PORT_ID
,
423 QEMU_IFLA_CARRIER_CHANGES
,
424 QEMU_IFLA_PHYS_SWITCH_ID
,
425 QEMU_IFLA_LINK_NETNSID
,
426 QEMU_IFLA_PHYS_PORT_NAME
,
427 QEMU_IFLA_PROTO_DOWN
,
428 QEMU_IFLA_GSO_MAX_SEGS
,
429 QEMU_IFLA_GSO_MAX_SIZE
,
436 QEMU_IFLA_BRPORT_UNSPEC
,
437 QEMU_IFLA_BRPORT_STATE
,
438 QEMU_IFLA_BRPORT_PRIORITY
,
439 QEMU_IFLA_BRPORT_COST
,
440 QEMU_IFLA_BRPORT_MODE
,
441 QEMU_IFLA_BRPORT_GUARD
,
442 QEMU_IFLA_BRPORT_PROTECT
,
443 QEMU_IFLA_BRPORT_FAST_LEAVE
,
444 QEMU_IFLA_BRPORT_LEARNING
,
445 QEMU_IFLA_BRPORT_UNICAST_FLOOD
,
446 QEMU_IFLA_BRPORT_PROXYARP
,
447 QEMU_IFLA_BRPORT_LEARNING_SYNC
,
448 QEMU_IFLA_BRPORT_PROXYARP_WIFI
,
449 QEMU_IFLA_BRPORT_ROOT_ID
,
450 QEMU_IFLA_BRPORT_BRIDGE_ID
,
451 QEMU_IFLA_BRPORT_DESIGNATED_PORT
,
452 QEMU_IFLA_BRPORT_DESIGNATED_COST
,
455 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
,
456 QEMU_IFLA_BRPORT_CONFIG_PENDING
,
457 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER
,
458 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER
,
459 QEMU_IFLA_BRPORT_HOLD_TIMER
,
460 QEMU_IFLA_BRPORT_FLUSH
,
461 QEMU_IFLA_BRPORT_MULTICAST_ROUTER
,
462 QEMU_IFLA_BRPORT_PAD
,
463 QEMU___IFLA_BRPORT_MAX
467 QEMU_IFLA_INFO_UNSPEC
,
470 QEMU_IFLA_INFO_XSTATS
,
471 QEMU_IFLA_INFO_SLAVE_KIND
,
472 QEMU_IFLA_INFO_SLAVE_DATA
,
473 QEMU___IFLA_INFO_MAX
,
477 QEMU_IFLA_INET_UNSPEC
,
479 QEMU___IFLA_INET_MAX
,
483 QEMU_IFLA_INET6_UNSPEC
,
484 QEMU_IFLA_INET6_FLAGS
,
485 QEMU_IFLA_INET6_CONF
,
486 QEMU_IFLA_INET6_STATS
,
487 QEMU_IFLA_INET6_MCAST
,
488 QEMU_IFLA_INET6_CACHEINFO
,
489 QEMU_IFLA_INET6_ICMP6STATS
,
490 QEMU_IFLA_INET6_TOKEN
,
491 QEMU_IFLA_INET6_ADDR_GEN_MODE
,
492 QEMU___IFLA_INET6_MAX
495 typedef abi_long (*TargetFdDataFunc
)(void *, size_t);
496 typedef abi_long (*TargetFdAddrFunc
)(void *, abi_ulong
, socklen_t
);
497 typedef struct TargetFdTrans
{
498 TargetFdDataFunc host_to_target_data
;
499 TargetFdDataFunc target_to_host_data
;
500 TargetFdAddrFunc target_to_host_addr
;
503 static TargetFdTrans
**target_fd_trans
;
505 static unsigned int target_fd_max
;
507 static TargetFdDataFunc
fd_trans_target_to_host_data(int fd
)
509 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
510 return target_fd_trans
[fd
]->target_to_host_data
;
515 static TargetFdDataFunc
fd_trans_host_to_target_data(int fd
)
517 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
518 return target_fd_trans
[fd
]->host_to_target_data
;
523 static TargetFdAddrFunc
fd_trans_target_to_host_addr(int fd
)
525 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
526 return target_fd_trans
[fd
]->target_to_host_addr
;
531 static void fd_trans_register(int fd
, TargetFdTrans
*trans
)
535 if (fd
>= target_fd_max
) {
536 oldmax
= target_fd_max
;
537 target_fd_max
= ((fd
>> 6) + 1) << 6; /* by slice of 64 entries */
538 target_fd_trans
= g_renew(TargetFdTrans
*,
539 target_fd_trans
, target_fd_max
);
540 memset((void *)(target_fd_trans
+ oldmax
), 0,
541 (target_fd_max
- oldmax
) * sizeof(TargetFdTrans
*));
543 target_fd_trans
[fd
] = trans
;
546 static void fd_trans_unregister(int fd
)
548 if (fd
>= 0 && fd
< target_fd_max
) {
549 target_fd_trans
[fd
] = NULL
;
553 static void fd_trans_dup(int oldfd
, int newfd
)
555 fd_trans_unregister(newfd
);
556 if (oldfd
< target_fd_max
&& target_fd_trans
[oldfd
]) {
557 fd_trans_register(newfd
, target_fd_trans
[oldfd
]);
/* getcwd() emulation: fills buf with the current directory and returns
 * the byte count including the trailing NUL (matching the kernel's
 * getcwd syscall), or -1 with errno set by getcwd().
 */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return (-1);
    }
    return strlen(buf)+1;
}
570 #ifdef TARGET_NR_utimensat
571 #if defined(__NR_utimensat)
572 #define __NR_sys_utimensat __NR_utimensat
573 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
574 const struct timespec
*,tsp
,int,flags
)
/* Fallback when the host has no utimensat syscall: always fail with
 * ENOSYS so the caller can report the syscall as unimplemented.
 */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
583 #endif /* TARGET_NR_utimensat */
585 #ifdef CONFIG_INOTIFY
586 #include <sys/inotify.h>
588 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper around the libc inotify_init(); returns a new inotify
 * fd or -1 with errno set.
 */
static int sys_inotify_init(void)
{
  return (inotify_init());
}
594 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Thin wrapper around the libc inotify_add_watch(); returns the watch
 * descriptor or -1 with errno set.
 */
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
600 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Thin wrapper around the libc inotify_rm_watch(); returns 0 on
 * success or -1 with errno set.
 */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
606 #ifdef CONFIG_INOTIFY1
607 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Thin wrapper around the libc inotify_init1(); returns a new inotify
 * fd honouring flags (IN_NONBLOCK/IN_CLOEXEC) or -1 with errno set.
 */
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
615 /* Userspace can usually survive runtime without inotify */
616 #undef TARGET_NR_inotify_init
617 #undef TARGET_NR_inotify_init1
618 #undef TARGET_NR_inotify_add_watch
619 #undef TARGET_NR_inotify_rm_watch
620 #endif /* CONFIG_INOTIFY */
622 #if defined(TARGET_NR_prlimit64)
623 #ifndef __NR_prlimit64
624 # define __NR_prlimit64 -1
626 #define __NR_sys_prlimit64 __NR_prlimit64
627 /* The glibc rlimit structure may not be that used by the underlying syscall */
628 struct host_rlimit64
{
632 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
633 const struct host_rlimit64
*, new_limit
,
634 struct host_rlimit64
*, old_limit
)
638 #if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
640 static timer_t g_posix_timers
[32] = { 0, } ;
642 static inline int next_free_host_timer(void)
645 /* FIXME: Does finding the next free slot require a lock? */
646 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
647 if (g_posix_timers
[k
] == 0) {
648 g_posix_timers
[k
] = (timer_t
) 1;
656 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
658 static inline int regpairs_aligned(void *cpu_env
) {
659 return ((((CPUARMState
*)cpu_env
)->eabi
) == 1) ;
661 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
662 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
663 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
665 * of registers which translates to the same as ARM/MIPS, because we start with
667 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
669 static inline int regpairs_aligned(void *cpu_env
) { return 0; }
672 #define ERRNO_TABLE_SIZE 1200
674 /* target_to_host_errno_table[] is initialized from
675 * host_to_target_errno_table[] in syscall_init(). */
676 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
680 * This list is the union of errno values overridden in asm-<arch>/errno.h
681 * minus the errnos that are not actually generic to all archs.
683 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
684 [EAGAIN
] = TARGET_EAGAIN
,
685 [EIDRM
] = TARGET_EIDRM
,
686 [ECHRNG
] = TARGET_ECHRNG
,
687 [EL2NSYNC
] = TARGET_EL2NSYNC
,
688 [EL3HLT
] = TARGET_EL3HLT
,
689 [EL3RST
] = TARGET_EL3RST
,
690 [ELNRNG
] = TARGET_ELNRNG
,
691 [EUNATCH
] = TARGET_EUNATCH
,
692 [ENOCSI
] = TARGET_ENOCSI
,
693 [EL2HLT
] = TARGET_EL2HLT
,
694 [EDEADLK
] = TARGET_EDEADLK
,
695 [ENOLCK
] = TARGET_ENOLCK
,
696 [EBADE
] = TARGET_EBADE
,
697 [EBADR
] = TARGET_EBADR
,
698 [EXFULL
] = TARGET_EXFULL
,
699 [ENOANO
] = TARGET_ENOANO
,
700 [EBADRQC
] = TARGET_EBADRQC
,
701 [EBADSLT
] = TARGET_EBADSLT
,
702 [EBFONT
] = TARGET_EBFONT
,
703 [ENOSTR
] = TARGET_ENOSTR
,
704 [ENODATA
] = TARGET_ENODATA
,
705 [ETIME
] = TARGET_ETIME
,
706 [ENOSR
] = TARGET_ENOSR
,
707 [ENONET
] = TARGET_ENONET
,
708 [ENOPKG
] = TARGET_ENOPKG
,
709 [EREMOTE
] = TARGET_EREMOTE
,
710 [ENOLINK
] = TARGET_ENOLINK
,
711 [EADV
] = TARGET_EADV
,
712 [ESRMNT
] = TARGET_ESRMNT
,
713 [ECOMM
] = TARGET_ECOMM
,
714 [EPROTO
] = TARGET_EPROTO
,
715 [EDOTDOT
] = TARGET_EDOTDOT
,
716 [EMULTIHOP
] = TARGET_EMULTIHOP
,
717 [EBADMSG
] = TARGET_EBADMSG
,
718 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
719 [EOVERFLOW
] = TARGET_EOVERFLOW
,
720 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
721 [EBADFD
] = TARGET_EBADFD
,
722 [EREMCHG
] = TARGET_EREMCHG
,
723 [ELIBACC
] = TARGET_ELIBACC
,
724 [ELIBBAD
] = TARGET_ELIBBAD
,
725 [ELIBSCN
] = TARGET_ELIBSCN
,
726 [ELIBMAX
] = TARGET_ELIBMAX
,
727 [ELIBEXEC
] = TARGET_ELIBEXEC
,
728 [EILSEQ
] = TARGET_EILSEQ
,
729 [ENOSYS
] = TARGET_ENOSYS
,
730 [ELOOP
] = TARGET_ELOOP
,
731 [ERESTART
] = TARGET_ERESTART
,
732 [ESTRPIPE
] = TARGET_ESTRPIPE
,
733 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
734 [EUSERS
] = TARGET_EUSERS
,
735 [ENOTSOCK
] = TARGET_ENOTSOCK
,
736 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
737 [EMSGSIZE
] = TARGET_EMSGSIZE
,
738 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
739 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
740 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
741 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
742 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
743 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
744 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
745 [EADDRINUSE
] = TARGET_EADDRINUSE
,
746 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
747 [ENETDOWN
] = TARGET_ENETDOWN
,
748 [ENETUNREACH
] = TARGET_ENETUNREACH
,
749 [ENETRESET
] = TARGET_ENETRESET
,
750 [ECONNABORTED
] = TARGET_ECONNABORTED
,
751 [ECONNRESET
] = TARGET_ECONNRESET
,
752 [ENOBUFS
] = TARGET_ENOBUFS
,
753 [EISCONN
] = TARGET_EISCONN
,
754 [ENOTCONN
] = TARGET_ENOTCONN
,
755 [EUCLEAN
] = TARGET_EUCLEAN
,
756 [ENOTNAM
] = TARGET_ENOTNAM
,
757 [ENAVAIL
] = TARGET_ENAVAIL
,
758 [EISNAM
] = TARGET_EISNAM
,
759 [EREMOTEIO
] = TARGET_EREMOTEIO
,
760 [EDQUOT
] = TARGET_EDQUOT
,
761 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
762 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
763 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
764 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
765 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
766 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
767 [EALREADY
] = TARGET_EALREADY
,
768 [EINPROGRESS
] = TARGET_EINPROGRESS
,
769 [ESTALE
] = TARGET_ESTALE
,
770 [ECANCELED
] = TARGET_ECANCELED
,
771 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
772 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
774 [ENOKEY
] = TARGET_ENOKEY
,
777 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
780 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
783 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
786 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
788 #ifdef ENOTRECOVERABLE
789 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
793 static inline int host_to_target_errno(int err
)
795 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
796 host_to_target_errno_table
[err
]) {
797 return host_to_target_errno_table
[err
];
802 static inline int target_to_host_errno(int err
)
804 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
805 target_to_host_errno_table
[err
]) {
806 return target_to_host_errno_table
[err
];
811 static inline abi_long
get_errno(abi_long ret
)
814 return -host_to_target_errno(errno
);
819 static inline int is_error(abi_long ret
)
821 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
824 const char *target_strerror(int err
)
826 if (err
== TARGET_ERESTARTSYS
) {
827 return "To be restarted";
829 if (err
== TARGET_QEMU_ESIGRETURN
) {
830 return "Successful exit from sigreturn";
833 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
836 return strerror(target_to_host_errno(err
));
/* Generators for restartable-syscall wrappers: each defines a static
 * function safe_<name> that issues __NR_<name> via safe_syscall(), the
 * variant that cooperates with QEMU's signal handling so interrupted
 * calls can be restarted correctly.
 */
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
886 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
887 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
888 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
889 int, flags
, mode_t
, mode
)
890 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
891 struct rusage
*, rusage
)
892 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
893 int, options
, struct rusage
*, rusage
)
894 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
895 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
896 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
897 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
898 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
900 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
901 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
903 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
904 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
905 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
906 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
907 safe_syscall2(int, tkill
, int, tid
, int, sig
)
908 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
909 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
910 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
911 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
913 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
914 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
915 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
916 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
917 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
918 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
919 safe_syscall2(int, flock
, int, fd
, int, operation
)
920 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
921 const struct timespec
*, uts
, size_t, sigsetsize
)
922 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
924 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
925 struct timespec
*, rem
)
926 #ifdef TARGET_NR_clock_nanosleep
927 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
928 const struct timespec
*, req
, struct timespec
*, rem
)
931 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
933 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
934 long, msgtype
, int, flags
)
935 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
936 unsigned, nsops
, const struct timespec
*, timeout
)
938 /* This host kernel architecture uses a single ipc syscall; fake up
939 * wrappers for the sub-operations to hide this implementation detail.
940 * Annoyingly we can't include linux/ipc.h to get the constant definitions
941 * for the call parameter because some structs in there conflict with the
942 * sys/ipc.h ones. So we just define them here, and rely on them being
943 * the same for all host architectures.
945 #define Q_SEMTIMEDOP 4
948 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
950 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
951 void *, ptr
, long, fifth
)
952 static int safe_msgsnd(int msgid
, const void *msgp
, size_t sz
, int flags
)
954 return safe_ipc(Q_IPCCALL(0, Q_MSGSND
), msgid
, sz
, flags
, (void *)msgp
, 0);
956 static int safe_msgrcv(int msgid
, void *msgp
, size_t sz
, long type
, int flags
)
958 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV
), msgid
, sz
, flags
, msgp
, type
);
960 static int safe_semtimedop(int semid
, struct sembuf
*tsops
, unsigned nsops
,
961 const struct timespec
*timeout
)
963 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP
), semid
, nsops
, 0, tsops
,
967 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
968 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
969 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
970 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
971 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
973 /* We do ioctl like this rather than via safe_syscall3 to preserve the
974 * "third argument might be integer or pointer or not present" behaviour of
977 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
978 /* Similarly for fcntl. Note that callers must always:
979 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
980 * use the flock64 struct rather than unsuffixed flock
981 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
984 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
986 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
989 static inline int host_to_target_sock_type(int host_type
)
993 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
995 target_type
= TARGET_SOCK_DGRAM
;
998 target_type
= TARGET_SOCK_STREAM
;
1001 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
1005 #if defined(SOCK_CLOEXEC)
1006 if (host_type
& SOCK_CLOEXEC
) {
1007 target_type
|= TARGET_SOCK_CLOEXEC
;
1011 #if defined(SOCK_NONBLOCK)
1012 if (host_type
& SOCK_NONBLOCK
) {
1013 target_type
|= TARGET_SOCK_NONBLOCK
;
1020 static abi_ulong target_brk
;
1021 static abi_ulong target_original_brk
;
1022 static abi_ulong brk_page
;
1024 void target_set_brk(abi_ulong new_brk
)
1026 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
1027 brk_page
= HOST_PAGE_ALIGN(target_brk
);
1030 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
1031 #define DEBUGF_BRK(message, args...)
1033 /* do_brk() must return target values and target errnos. */
1034 abi_long
do_brk(abi_ulong new_brk
)
1036 abi_long mapped_addr
;
1037 abi_ulong new_alloc_size
;
1039 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
1042 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
1045 if (new_brk
< target_original_brk
) {
1046 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
1051 /* If the new brk is less than the highest page reserved to the
1052 * target heap allocation, set it and we're almost done... */
1053 if (new_brk
<= brk_page
) {
1054 /* Heap contents are initialized to zero, as for anonymous
1056 if (new_brk
> target_brk
) {
1057 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
1059 target_brk
= new_brk
;
1060 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
1064 /* We need to allocate more memory after the brk... Note that
1065 * we don't use MAP_FIXED because that will map over the top of
1066 * any existing mapping (like the one with the host libc or qemu
1067 * itself); instead we treat "mapped but at wrong address" as
1068 * a failure and unmap again.
1070 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
1071 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
1072 PROT_READ
|PROT_WRITE
,
1073 MAP_ANON
|MAP_PRIVATE
, 0, 0));
1075 if (mapped_addr
== brk_page
) {
1076 /* Heap contents are initialized to zero, as for anonymous
1077 * mapped pages. Technically the new pages are already
1078 * initialized to zero since they *are* anonymous mapped
1079 * pages, however we have to take care with the contents that
1080 * come from the remaining part of the previous page: it may
1081 * contains garbage data due to a previous heap usage (grown
1082 * then shrunken). */
1083 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
1085 target_brk
= new_brk
;
1086 brk_page
= HOST_PAGE_ALIGN(target_brk
);
1087 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
1090 } else if (mapped_addr
!= -1) {
1091 /* Mapped but at wrong address, meaning there wasn't actually
1092 * enough space for this brk.
1094 target_munmap(mapped_addr
, new_alloc_size
);
1096 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
1099 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
1102 #if defined(TARGET_ALPHA)
1103 /* We (partially) emulate OSF/1 on Alpha, which requires we
1104 return a proper errno, not an unchanged brk value. */
1105 return -TARGET_ENOMEM
;
1107 /* For everything else, return the previous break. */
1111 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
1112 abi_ulong target_fds_addr
,
1116 abi_ulong b
, *target_fds
;
1118 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1119 if (!(target_fds
= lock_user(VERIFY_READ
,
1121 sizeof(abi_ulong
) * nw
,
1123 return -TARGET_EFAULT
;
1127 for (i
= 0; i
< nw
; i
++) {
1128 /* grab the abi_ulong */
1129 __get_user(b
, &target_fds
[i
]);
1130 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1131 /* check the bit inside the abi_ulong */
1138 unlock_user(target_fds
, target_fds_addr
, 0);
1143 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
1144 abi_ulong target_fds_addr
,
1147 if (target_fds_addr
) {
1148 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
1149 return -TARGET_EFAULT
;
1157 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
1163 abi_ulong
*target_fds
;
1165 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1166 if (!(target_fds
= lock_user(VERIFY_WRITE
,
1168 sizeof(abi_ulong
) * nw
,
1170 return -TARGET_EFAULT
;
1173 for (i
= 0; i
< nw
; i
++) {
1175 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1176 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
1179 __put_user(v
, &target_fds
[i
]);
1182 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
1187 #if defined(__alpha__)
1188 #define HOST_HZ 1024
1193 static inline abi_long
host_to_target_clock_t(long ticks
)
1195 #if HOST_HZ == TARGET_HZ
1198 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1202 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1203 const struct rusage
*rusage
)
1205 struct target_rusage
*target_rusage
;
1207 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1208 return -TARGET_EFAULT
;
1209 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1210 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1211 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1212 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1213 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1214 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1215 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1216 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1217 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1218 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1219 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1220 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1221 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1222 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1223 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1224 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1225 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1226 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1227 unlock_user_struct(target_rusage
, target_addr
, 1);
1232 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1234 abi_ulong target_rlim_swap
;
1237 target_rlim_swap
= tswapal(target_rlim
);
1238 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1239 return RLIM_INFINITY
;
1241 result
= target_rlim_swap
;
1242 if (target_rlim_swap
!= (rlim_t
)result
)
1243 return RLIM_INFINITY
;
1248 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1250 abi_ulong target_rlim_swap
;
1253 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1254 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1256 target_rlim_swap
= rlim
;
1257 result
= tswapal(target_rlim_swap
);
1262 static inline int target_to_host_resource(int code
)
1265 case TARGET_RLIMIT_AS
:
1267 case TARGET_RLIMIT_CORE
:
1269 case TARGET_RLIMIT_CPU
:
1271 case TARGET_RLIMIT_DATA
:
1273 case TARGET_RLIMIT_FSIZE
:
1274 return RLIMIT_FSIZE
;
1275 case TARGET_RLIMIT_LOCKS
:
1276 return RLIMIT_LOCKS
;
1277 case TARGET_RLIMIT_MEMLOCK
:
1278 return RLIMIT_MEMLOCK
;
1279 case TARGET_RLIMIT_MSGQUEUE
:
1280 return RLIMIT_MSGQUEUE
;
1281 case TARGET_RLIMIT_NICE
:
1283 case TARGET_RLIMIT_NOFILE
:
1284 return RLIMIT_NOFILE
;
1285 case TARGET_RLIMIT_NPROC
:
1286 return RLIMIT_NPROC
;
1287 case TARGET_RLIMIT_RSS
:
1289 case TARGET_RLIMIT_RTPRIO
:
1290 return RLIMIT_RTPRIO
;
1291 case TARGET_RLIMIT_SIGPENDING
:
1292 return RLIMIT_SIGPENDING
;
1293 case TARGET_RLIMIT_STACK
:
1294 return RLIMIT_STACK
;
1300 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1301 abi_ulong target_tv_addr
)
1303 struct target_timeval
*target_tv
;
1305 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
1306 return -TARGET_EFAULT
;
1308 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1309 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1311 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1316 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1317 const struct timeval
*tv
)
1319 struct target_timeval
*target_tv
;
1321 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
1322 return -TARGET_EFAULT
;
1324 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1325 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1327 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1332 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1333 abi_ulong target_tz_addr
)
1335 struct target_timezone
*target_tz
;
1337 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1338 return -TARGET_EFAULT
;
1341 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1342 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1344 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1349 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1352 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1353 abi_ulong target_mq_attr_addr
)
1355 struct target_mq_attr
*target_mq_attr
;
1357 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1358 target_mq_attr_addr
, 1))
1359 return -TARGET_EFAULT
;
1361 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1362 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1363 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1364 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1366 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1371 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1372 const struct mq_attr
*attr
)
1374 struct target_mq_attr
*target_mq_attr
;
1376 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1377 target_mq_attr_addr
, 0))
1378 return -TARGET_EFAULT
;
1380 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1381 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1382 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1383 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1385 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos.
 * Converts the three guest fd sets and optional timeval, issues
 * safe_pselect6(), then copies the (possibly updated) sets and timeout
 * back to the guest on success. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        /* pselect6 takes a timespec, so convert from the timeval. */
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
#endif
#if defined(TARGET_WANT_OLD_SYS_SELECT)
/* The old select() syscall passes a pointer to a struct holding all five
 * arguments; unpack it and forward to do_select(). */
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
1473 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1476 return pipe2(host_pipe
, flags
);
1482 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1483 int flags
, int is_pipe2
)
1487 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1490 return get_errno(ret
);
1492 /* Several targets have special calling conventions for the original
1493 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1495 #if defined(TARGET_ALPHA)
1496 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1497 return host_pipe
[0];
1498 #elif defined(TARGET_MIPS)
1499 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1500 return host_pipe
[0];
1501 #elif defined(TARGET_SH4)
1502 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1503 return host_pipe
[0];
1504 #elif defined(TARGET_SPARC)
1505 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1506 return host_pipe
[0];
1510 if (put_user_s32(host_pipe
[0], pipedes
)
1511 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1512 return -TARGET_EFAULT
;
1513 return get_errno(ret
);
1516 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1517 abi_ulong target_addr
,
1520 struct target_ip_mreqn
*target_smreqn
;
1522 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1524 return -TARGET_EFAULT
;
1525 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1526 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1527 if (len
== sizeof(struct target_ip_mreqn
))
1528 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1529 unlock_user(target_smreqn
, target_addr
, 0);
1534 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1535 abi_ulong target_addr
,
1538 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1539 sa_family_t sa_family
;
1540 struct target_sockaddr
*target_saddr
;
1542 if (fd_trans_target_to_host_addr(fd
)) {
1543 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1546 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1548 return -TARGET_EFAULT
;
1550 sa_family
= tswap16(target_saddr
->sa_family
);
1552 /* Oops. The caller might send a incomplete sun_path; sun_path
1553 * must be terminated by \0 (see the manual page), but
1554 * unfortunately it is quite common to specify sockaddr_un
1555 * length as "strlen(x->sun_path)" while it should be
1556 * "strlen(...) + 1". We'll fix that here if needed.
1557 * Linux kernel has a similar feature.
1560 if (sa_family
== AF_UNIX
) {
1561 if (len
< unix_maxlen
&& len
> 0) {
1562 char *cp
= (char*)target_saddr
;
1564 if ( cp
[len
-1] && !cp
[len
] )
1567 if (len
> unix_maxlen
)
1571 memcpy(addr
, target_saddr
, len
);
1572 addr
->sa_family
= sa_family
;
1573 if (sa_family
== AF_NETLINK
) {
1574 struct sockaddr_nl
*nladdr
;
1576 nladdr
= (struct sockaddr_nl
*)addr
;
1577 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1578 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1579 } else if (sa_family
== AF_PACKET
) {
1580 struct target_sockaddr_ll
*lladdr
;
1582 lladdr
= (struct target_sockaddr_ll
*)addr
;
1583 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1584 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1586 unlock_user(target_saddr
, target_addr
, 0);
1591 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1592 struct sockaddr
*addr
,
1595 struct target_sockaddr
*target_saddr
;
1601 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1603 return -TARGET_EFAULT
;
1604 memcpy(target_saddr
, addr
, len
);
1605 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1606 sizeof(target_saddr
->sa_family
)) {
1607 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1609 if (addr
->sa_family
== AF_NETLINK
&& len
>= sizeof(struct sockaddr_nl
)) {
1610 struct sockaddr_nl
*target_nl
= (struct sockaddr_nl
*)target_saddr
;
1611 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1612 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1613 } else if (addr
->sa_family
== AF_PACKET
) {
1614 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1615 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1616 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1618 unlock_user(target_saddr
, target_addr
, len
);
1623 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1624 struct target_msghdr
*target_msgh
)
1626 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1627 abi_long msg_controllen
;
1628 abi_ulong target_cmsg_addr
;
1629 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1630 socklen_t space
= 0;
1632 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1633 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1635 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1636 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1637 target_cmsg_start
= target_cmsg
;
1639 return -TARGET_EFAULT
;
1641 while (cmsg
&& target_cmsg
) {
1642 void *data
= CMSG_DATA(cmsg
);
1643 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1645 int len
= tswapal(target_cmsg
->cmsg_len
)
1646 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1648 space
+= CMSG_SPACE(len
);
1649 if (space
> msgh
->msg_controllen
) {
1650 space
-= CMSG_SPACE(len
);
1651 /* This is a QEMU bug, since we allocated the payload
1652 * area ourselves (unlike overflow in host-to-target
1653 * conversion, which is just the guest giving us a buffer
1654 * that's too small). It can't happen for the payload types
1655 * we currently support; if it becomes an issue in future
1656 * we would need to improve our allocation strategy to
1657 * something more intelligent than "twice the size of the
1658 * target buffer we're reading from".
1660 gemu_log("Host cmsg overflow\n");
1664 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1665 cmsg
->cmsg_level
= SOL_SOCKET
;
1667 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1669 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1670 cmsg
->cmsg_len
= CMSG_LEN(len
);
1672 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1673 int *fd
= (int *)data
;
1674 int *target_fd
= (int *)target_data
;
1675 int i
, numfds
= len
/ sizeof(int);
1677 for (i
= 0; i
< numfds
; i
++) {
1678 __get_user(fd
[i
], target_fd
+ i
);
1680 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1681 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1682 struct ucred
*cred
= (struct ucred
*)data
;
1683 struct target_ucred
*target_cred
=
1684 (struct target_ucred
*)target_data
;
1686 __get_user(cred
->pid
, &target_cred
->pid
);
1687 __get_user(cred
->uid
, &target_cred
->uid
);
1688 __get_user(cred
->gid
, &target_cred
->gid
);
1690 gemu_log("Unsupported ancillary data: %d/%d\n",
1691 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1692 memcpy(data
, target_data
, len
);
1695 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1696 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1699 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1701 msgh
->msg_controllen
= space
;
1705 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1706 struct msghdr
*msgh
)
1708 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1709 abi_long msg_controllen
;
1710 abi_ulong target_cmsg_addr
;
1711 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1712 socklen_t space
= 0;
1714 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1715 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1717 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1718 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1719 target_cmsg_start
= target_cmsg
;
1721 return -TARGET_EFAULT
;
1723 while (cmsg
&& target_cmsg
) {
1724 void *data
= CMSG_DATA(cmsg
);
1725 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1727 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1728 int tgt_len
, tgt_space
;
1730 /* We never copy a half-header but may copy half-data;
1731 * this is Linux's behaviour in put_cmsg(). Note that
1732 * truncation here is a guest problem (which we report
1733 * to the guest via the CTRUNC bit), unlike truncation
1734 * in target_to_host_cmsg, which is a QEMU bug.
1736 if (msg_controllen
< sizeof(struct cmsghdr
)) {
1737 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1741 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1742 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1744 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1746 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1748 tgt_len
= TARGET_CMSG_LEN(len
);
1750 /* Payload types which need a different size of payload on
1751 * the target must adjust tgt_len here.
1753 switch (cmsg
->cmsg_level
) {
1755 switch (cmsg
->cmsg_type
) {
1757 tgt_len
= sizeof(struct target_timeval
);
1766 if (msg_controllen
< tgt_len
) {
1767 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1768 tgt_len
= msg_controllen
;
1771 /* We must now copy-and-convert len bytes of payload
1772 * into tgt_len bytes of destination space. Bear in mind
1773 * that in both source and destination we may be dealing
1774 * with a truncated value!
1776 switch (cmsg
->cmsg_level
) {
1778 switch (cmsg
->cmsg_type
) {
1781 int *fd
= (int *)data
;
1782 int *target_fd
= (int *)target_data
;
1783 int i
, numfds
= tgt_len
/ sizeof(int);
1785 for (i
= 0; i
< numfds
; i
++) {
1786 __put_user(fd
[i
], target_fd
+ i
);
1792 struct timeval
*tv
= (struct timeval
*)data
;
1793 struct target_timeval
*target_tv
=
1794 (struct target_timeval
*)target_data
;
1796 if (len
!= sizeof(struct timeval
) ||
1797 tgt_len
!= sizeof(struct target_timeval
)) {
1801 /* copy struct timeval to target */
1802 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1803 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1806 case SCM_CREDENTIALS
:
1808 struct ucred
*cred
= (struct ucred
*)data
;
1809 struct target_ucred
*target_cred
=
1810 (struct target_ucred
*)target_data
;
1812 __put_user(cred
->pid
, &target_cred
->pid
);
1813 __put_user(cred
->uid
, &target_cred
->uid
);
1814 __put_user(cred
->gid
, &target_cred
->gid
);
1824 gemu_log("Unsupported ancillary data: %d/%d\n",
1825 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1826 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1827 if (tgt_len
> len
) {
1828 memset(target_data
+ len
, 0, tgt_len
- len
);
1832 target_cmsg
->cmsg_len
= tswapal(tgt_len
);
1833 tgt_space
= TARGET_CMSG_SPACE(len
);
1834 if (msg_controllen
< tgt_space
) {
1835 tgt_space
= msg_controllen
;
1837 msg_controllen
-= tgt_space
;
1839 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1840 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1843 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1845 target_msgh
->msg_controllen
= tswapal(space
);
/* Byte-swap every field of a netlink message header in place. */
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
    nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
    nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
}
1858 static abi_long
host_to_target_for_each_nlmsg(struct nlmsghdr
*nlh
,
1860 abi_long (*host_to_target_nlmsg
)
1861 (struct nlmsghdr
*))
1866 while (len
> sizeof(struct nlmsghdr
)) {
1868 nlmsg_len
= nlh
->nlmsg_len
;
1869 if (nlmsg_len
< sizeof(struct nlmsghdr
) ||
1874 switch (nlh
->nlmsg_type
) {
1876 tswap_nlmsghdr(nlh
);
1882 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
1883 e
->error
= tswap32(e
->error
);
1884 tswap_nlmsghdr(&e
->msg
);
1885 tswap_nlmsghdr(nlh
);
1889 ret
= host_to_target_nlmsg(nlh
);
1891 tswap_nlmsghdr(nlh
);
1896 tswap_nlmsghdr(nlh
);
1897 len
-= NLMSG_ALIGN(nlmsg_len
);
1898 nlh
= (struct nlmsghdr
*)(((char*)nlh
) + NLMSG_ALIGN(nlmsg_len
));
1903 static abi_long
target_to_host_for_each_nlmsg(struct nlmsghdr
*nlh
,
1905 abi_long (*target_to_host_nlmsg
)
1906 (struct nlmsghdr
*))
1910 while (len
> sizeof(struct nlmsghdr
)) {
1911 if (tswap32(nlh
->nlmsg_len
) < sizeof(struct nlmsghdr
) ||
1912 tswap32(nlh
->nlmsg_len
) > len
) {
1915 tswap_nlmsghdr(nlh
);
1916 switch (nlh
->nlmsg_type
) {
1923 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
1924 e
->error
= tswap32(e
->error
);
1925 tswap_nlmsghdr(&e
->msg
);
1929 ret
= target_to_host_nlmsg(nlh
);
1934 len
-= NLMSG_ALIGN(nlh
->nlmsg_len
);
1935 nlh
= (struct nlmsghdr
*)(((char *)nlh
) + NLMSG_ALIGN(nlh
->nlmsg_len
));
1940 #ifdef CONFIG_RTNETLINK
1941 static abi_long
host_to_target_for_each_nlattr(struct nlattr
*nlattr
,
1942 size_t len
, void *context
,
1943 abi_long (*host_to_target_nlattr
)
1947 unsigned short nla_len
;
1950 while (len
> sizeof(struct nlattr
)) {
1951 nla_len
= nlattr
->nla_len
;
1952 if (nla_len
< sizeof(struct nlattr
) ||
1956 ret
= host_to_target_nlattr(nlattr
, context
);
1957 nlattr
->nla_len
= tswap16(nlattr
->nla_len
);
1958 nlattr
->nla_type
= tswap16(nlattr
->nla_type
);
1962 len
-= NLA_ALIGN(nla_len
);
1963 nlattr
= (struct nlattr
*)(((char *)nlattr
) + NLA_ALIGN(nla_len
));
1968 static abi_long
host_to_target_for_each_rtattr(struct rtattr
*rtattr
,
1970 abi_long (*host_to_target_rtattr
)
1973 unsigned short rta_len
;
1976 while (len
> sizeof(struct rtattr
)) {
1977 rta_len
= rtattr
->rta_len
;
1978 if (rta_len
< sizeof(struct rtattr
) ||
1982 ret
= host_to_target_rtattr(rtattr
);
1983 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
1984 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
1988 len
-= RTA_ALIGN(rta_len
);
1989 rtattr
= (struct rtattr
*)(((char *)rtattr
) + RTA_ALIGN(rta_len
));
1994 #define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
1996 static abi_long
host_to_target_data_bridge_nlattr(struct nlattr
*nlattr
,
2003 switch (nlattr
->nla_type
) {
2005 case QEMU_IFLA_BR_FDB_FLUSH
:
2008 case QEMU_IFLA_BR_GROUP_ADDR
:
2011 case QEMU_IFLA_BR_VLAN_FILTERING
:
2012 case QEMU_IFLA_BR_TOPOLOGY_CHANGE
:
2013 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED
:
2014 case QEMU_IFLA_BR_MCAST_ROUTER
:
2015 case QEMU_IFLA_BR_MCAST_SNOOPING
:
2016 case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR
:
2017 case QEMU_IFLA_BR_MCAST_QUERIER
:
2018 case QEMU_IFLA_BR_NF_CALL_IPTABLES
:
2019 case QEMU_IFLA_BR_NF_CALL_IP6TABLES
:
2020 case QEMU_IFLA_BR_NF_CALL_ARPTABLES
:
2023 case QEMU_IFLA_BR_PRIORITY
:
2024 case QEMU_IFLA_BR_VLAN_PROTOCOL
:
2025 case QEMU_IFLA_BR_GROUP_FWD_MASK
:
2026 case QEMU_IFLA_BR_ROOT_PORT
:
2027 case QEMU_IFLA_BR_VLAN_DEFAULT_PVID
:
2028 u16
= NLA_DATA(nlattr
);
2029 *u16
= tswap16(*u16
);
2032 case QEMU_IFLA_BR_FORWARD_DELAY
:
2033 case QEMU_IFLA_BR_HELLO_TIME
:
2034 case QEMU_IFLA_BR_MAX_AGE
:
2035 case QEMU_IFLA_BR_AGEING_TIME
:
2036 case QEMU_IFLA_BR_STP_STATE
:
2037 case QEMU_IFLA_BR_ROOT_PATH_COST
:
2038 case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY
:
2039 case QEMU_IFLA_BR_MCAST_HASH_MAX
:
2040 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT
:
2041 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT
:
2042 u32
= NLA_DATA(nlattr
);
2043 *u32
= tswap32(*u32
);
2046 case QEMU_IFLA_BR_HELLO_TIMER
:
2047 case QEMU_IFLA_BR_TCN_TIMER
:
2048 case QEMU_IFLA_BR_GC_TIMER
:
2049 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER
:
2050 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL
:
2051 case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL
:
2052 case QEMU_IFLA_BR_MCAST_QUERIER_INTVL
:
2053 case QEMU_IFLA_BR_MCAST_QUERY_INTVL
:
2054 case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
:
2055 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL
:
2056 u64
= NLA_DATA(nlattr
);
2057 *u64
= tswap64(*u64
);
2059 /* ifla_bridge_id: uin8_t[] */
2060 case QEMU_IFLA_BR_ROOT_ID
:
2061 case QEMU_IFLA_BR_BRIDGE_ID
:
2064 gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr
->nla_type
);
2070 static abi_long
host_to_target_slave_data_bridge_nlattr(struct nlattr
*nlattr
,
2077 switch (nlattr
->nla_type
) {
2079 case QEMU_IFLA_BRPORT_STATE
:
2080 case QEMU_IFLA_BRPORT_MODE
:
2081 case QEMU_IFLA_BRPORT_GUARD
:
2082 case QEMU_IFLA_BRPORT_PROTECT
:
2083 case QEMU_IFLA_BRPORT_FAST_LEAVE
:
2084 case QEMU_IFLA_BRPORT_LEARNING
:
2085 case QEMU_IFLA_BRPORT_UNICAST_FLOOD
:
2086 case QEMU_IFLA_BRPORT_PROXYARP
:
2087 case QEMU_IFLA_BRPORT_LEARNING_SYNC
:
2088 case QEMU_IFLA_BRPORT_PROXYARP_WIFI
:
2089 case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
:
2090 case QEMU_IFLA_BRPORT_CONFIG_PENDING
:
2091 case QEMU_IFLA_BRPORT_MULTICAST_ROUTER
:
2094 case QEMU_IFLA_BRPORT_PRIORITY
:
2095 case QEMU_IFLA_BRPORT_DESIGNATED_PORT
:
2096 case QEMU_IFLA_BRPORT_DESIGNATED_COST
:
2097 case QEMU_IFLA_BRPORT_ID
:
2098 case QEMU_IFLA_BRPORT_NO
:
2099 u16
= NLA_DATA(nlattr
);
2100 *u16
= tswap16(*u16
);
2103 case QEMU_IFLA_BRPORT_COST
:
2104 u32
= NLA_DATA(nlattr
);
2105 *u32
= tswap32(*u32
);
2108 case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER
:
2109 case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER
:
2110 case QEMU_IFLA_BRPORT_HOLD_TIMER
:
2111 u64
= NLA_DATA(nlattr
);
2112 *u64
= tswap64(*u64
);
2114 /* ifla_bridge_id: uint8_t[] */
2115 case QEMU_IFLA_BRPORT_ROOT_ID
:
2116 case QEMU_IFLA_BRPORT_BRIDGE_ID
:
2119 gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr
->nla_type
);
/* Context shared across IFLA_LINKINFO attributes: the link "kind" strings
 * (and their lengths) seen so far, used to pick the nested-data parser. */
struct linkinfo_context {
    int len;
    char *name;
    int slave_len;
    char *slave_name;
};
2132 static abi_long
host_to_target_data_linkinfo_nlattr(struct nlattr
*nlattr
,
2135 struct linkinfo_context
*li_context
= context
;
2137 switch (nlattr
->nla_type
) {
2139 case QEMU_IFLA_INFO_KIND
:
2140 li_context
->name
= NLA_DATA(nlattr
);
2141 li_context
->len
= nlattr
->nla_len
- NLA_HDRLEN
;
2143 case QEMU_IFLA_INFO_SLAVE_KIND
:
2144 li_context
->slave_name
= NLA_DATA(nlattr
);
2145 li_context
->slave_len
= nlattr
->nla_len
- NLA_HDRLEN
;
2148 case QEMU_IFLA_INFO_XSTATS
:
2149 /* FIXME: only used by CAN */
2152 case QEMU_IFLA_INFO_DATA
:
2153 if (strncmp(li_context
->name
, "bridge",
2154 li_context
->len
) == 0) {
2155 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
2158 host_to_target_data_bridge_nlattr
);
2160 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context
->name
);
2163 case QEMU_IFLA_INFO_SLAVE_DATA
:
2164 if (strncmp(li_context
->slave_name
, "bridge",
2165 li_context
->slave_len
) == 0) {
2166 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
2169 host_to_target_slave_data_bridge_nlattr
);
2171 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2172 li_context
->slave_name
);
2176 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr
->nla_type
);
2183 static abi_long
host_to_target_data_inet_nlattr(struct nlattr
*nlattr
,
2189 switch (nlattr
->nla_type
) {
2190 case QEMU_IFLA_INET_CONF
:
2191 u32
= NLA_DATA(nlattr
);
2192 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u32
);
2194 u32
[i
] = tswap32(u32
[i
]);
2198 gemu_log("Unknown host AF_INET type: %d\n", nlattr
->nla_type
);
2203 static abi_long
host_to_target_data_inet6_nlattr(struct nlattr
*nlattr
,
2208 struct ifla_cacheinfo
*ci
;
2211 switch (nlattr
->nla_type
) {
2213 case QEMU_IFLA_INET6_TOKEN
:
2216 case QEMU_IFLA_INET6_ADDR_GEN_MODE
:
2219 case QEMU_IFLA_INET6_FLAGS
:
2220 u32
= NLA_DATA(nlattr
);
2221 *u32
= tswap32(*u32
);
2224 case QEMU_IFLA_INET6_CONF
:
2225 u32
= NLA_DATA(nlattr
);
2226 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u32
);
2228 u32
[i
] = tswap32(u32
[i
]);
2231 /* ifla_cacheinfo */
2232 case QEMU_IFLA_INET6_CACHEINFO
:
2233 ci
= NLA_DATA(nlattr
);
2234 ci
->max_reasm_len
= tswap32(ci
->max_reasm_len
);
2235 ci
->tstamp
= tswap32(ci
->tstamp
);
2236 ci
->reachable_time
= tswap32(ci
->reachable_time
);
2237 ci
->retrans_time
= tswap32(ci
->retrans_time
);
2240 case QEMU_IFLA_INET6_STATS
:
2241 case QEMU_IFLA_INET6_ICMP6STATS
:
2242 u64
= NLA_DATA(nlattr
);
2243 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u64
);
2245 u64
[i
] = tswap64(u64
[i
]);
2249 gemu_log("Unknown host AF_INET6 type: %d\n", nlattr
->nla_type
);
2254 static abi_long
host_to_target_data_spec_nlattr(struct nlattr
*nlattr
,
2257 switch (nlattr
->nla_type
) {
2259 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2261 host_to_target_data_inet_nlattr
);
2263 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2265 host_to_target_data_inet6_nlattr
);
2267 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr
->nla_type
);
2273 static abi_long
host_to_target_data_link_rtattr(struct rtattr
*rtattr
)
2276 struct rtnl_link_stats
*st
;
2277 struct rtnl_link_stats64
*st64
;
2278 struct rtnl_link_ifmap
*map
;
2279 struct linkinfo_context li_context
;
2281 switch (rtattr
->rta_type
) {
2283 case QEMU_IFLA_ADDRESS
:
2284 case QEMU_IFLA_BROADCAST
:
2286 case QEMU_IFLA_IFNAME
:
2287 case QEMU_IFLA_QDISC
:
2290 case QEMU_IFLA_OPERSTATE
:
2291 case QEMU_IFLA_LINKMODE
:
2292 case QEMU_IFLA_CARRIER
:
2293 case QEMU_IFLA_PROTO_DOWN
:
2297 case QEMU_IFLA_LINK
:
2298 case QEMU_IFLA_WEIGHT
:
2299 case QEMU_IFLA_TXQLEN
:
2300 case QEMU_IFLA_CARRIER_CHANGES
:
2301 case QEMU_IFLA_NUM_RX_QUEUES
:
2302 case QEMU_IFLA_NUM_TX_QUEUES
:
2303 case QEMU_IFLA_PROMISCUITY
:
2304 case QEMU_IFLA_EXT_MASK
:
2305 case QEMU_IFLA_LINK_NETNSID
:
2306 case QEMU_IFLA_GROUP
:
2307 case QEMU_IFLA_MASTER
:
2308 case QEMU_IFLA_NUM_VF
:
2309 u32
= RTA_DATA(rtattr
);
2310 *u32
= tswap32(*u32
);
2312 /* struct rtnl_link_stats */
2313 case QEMU_IFLA_STATS
:
2314 st
= RTA_DATA(rtattr
);
2315 st
->rx_packets
= tswap32(st
->rx_packets
);
2316 st
->tx_packets
= tswap32(st
->tx_packets
);
2317 st
->rx_bytes
= tswap32(st
->rx_bytes
);
2318 st
->tx_bytes
= tswap32(st
->tx_bytes
);
2319 st
->rx_errors
= tswap32(st
->rx_errors
);
2320 st
->tx_errors
= tswap32(st
->tx_errors
);
2321 st
->rx_dropped
= tswap32(st
->rx_dropped
);
2322 st
->tx_dropped
= tswap32(st
->tx_dropped
);
2323 st
->multicast
= tswap32(st
->multicast
);
2324 st
->collisions
= tswap32(st
->collisions
);
2326 /* detailed rx_errors: */
2327 st
->rx_length_errors
= tswap32(st
->rx_length_errors
);
2328 st
->rx_over_errors
= tswap32(st
->rx_over_errors
);
2329 st
->rx_crc_errors
= tswap32(st
->rx_crc_errors
);
2330 st
->rx_frame_errors
= tswap32(st
->rx_frame_errors
);
2331 st
->rx_fifo_errors
= tswap32(st
->rx_fifo_errors
);
2332 st
->rx_missed_errors
= tswap32(st
->rx_missed_errors
);
2334 /* detailed tx_errors */
2335 st
->tx_aborted_errors
= tswap32(st
->tx_aborted_errors
);
2336 st
->tx_carrier_errors
= tswap32(st
->tx_carrier_errors
);
2337 st
->tx_fifo_errors
= tswap32(st
->tx_fifo_errors
);
2338 st
->tx_heartbeat_errors
= tswap32(st
->tx_heartbeat_errors
);
2339 st
->tx_window_errors
= tswap32(st
->tx_window_errors
);
2342 st
->rx_compressed
= tswap32(st
->rx_compressed
);
2343 st
->tx_compressed
= tswap32(st
->tx_compressed
);
2345 /* struct rtnl_link_stats64 */
2346 case QEMU_IFLA_STATS64
:
2347 st64
= RTA_DATA(rtattr
);
2348 st64
->rx_packets
= tswap64(st64
->rx_packets
);
2349 st64
->tx_packets
= tswap64(st64
->tx_packets
);
2350 st64
->rx_bytes
= tswap64(st64
->rx_bytes
);
2351 st64
->tx_bytes
= tswap64(st64
->tx_bytes
);
2352 st64
->rx_errors
= tswap64(st64
->rx_errors
);
2353 st64
->tx_errors
= tswap64(st64
->tx_errors
);
2354 st64
->rx_dropped
= tswap64(st64
->rx_dropped
);
2355 st64
->tx_dropped
= tswap64(st64
->tx_dropped
);
2356 st64
->multicast
= tswap64(st64
->multicast
);
2357 st64
->collisions
= tswap64(st64
->collisions
);
2359 /* detailed rx_errors: */
2360 st64
->rx_length_errors
= tswap64(st64
->rx_length_errors
);
2361 st64
->rx_over_errors
= tswap64(st64
->rx_over_errors
);
2362 st64
->rx_crc_errors
= tswap64(st64
->rx_crc_errors
);
2363 st64
->rx_frame_errors
= tswap64(st64
->rx_frame_errors
);
2364 st64
->rx_fifo_errors
= tswap64(st64
->rx_fifo_errors
);
2365 st64
->rx_missed_errors
= tswap64(st64
->rx_missed_errors
);
2367 /* detailed tx_errors */
2368 st64
->tx_aborted_errors
= tswap64(st64
->tx_aborted_errors
);
2369 st64
->tx_carrier_errors
= tswap64(st64
->tx_carrier_errors
);
2370 st64
->tx_fifo_errors
= tswap64(st64
->tx_fifo_errors
);
2371 st64
->tx_heartbeat_errors
= tswap64(st64
->tx_heartbeat_errors
);
2372 st64
->tx_window_errors
= tswap64(st64
->tx_window_errors
);
2375 st64
->rx_compressed
= tswap64(st64
->rx_compressed
);
2376 st64
->tx_compressed
= tswap64(st64
->tx_compressed
);
2378 /* struct rtnl_link_ifmap */
2380 map
= RTA_DATA(rtattr
);
2381 map
->mem_start
= tswap64(map
->mem_start
);
2382 map
->mem_end
= tswap64(map
->mem_end
);
2383 map
->base_addr
= tswap64(map
->base_addr
);
2384 map
->irq
= tswap16(map
->irq
);
2387 case QEMU_IFLA_LINKINFO
:
2388 memset(&li_context
, 0, sizeof(li_context
));
2389 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2391 host_to_target_data_linkinfo_nlattr
);
2392 case QEMU_IFLA_AF_SPEC
:
2393 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2395 host_to_target_data_spec_nlattr
);
2397 gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr
->rta_type
);
2403 static abi_long
host_to_target_data_addr_rtattr(struct rtattr
*rtattr
)
2406 struct ifa_cacheinfo
*ci
;
2408 switch (rtattr
->rta_type
) {
2409 /* binary: depends on family type */
2419 u32
= RTA_DATA(rtattr
);
2420 *u32
= tswap32(*u32
);
2422 /* struct ifa_cacheinfo */
2424 ci
= RTA_DATA(rtattr
);
2425 ci
->ifa_prefered
= tswap32(ci
->ifa_prefered
);
2426 ci
->ifa_valid
= tswap32(ci
->ifa_valid
);
2427 ci
->cstamp
= tswap32(ci
->cstamp
);
2428 ci
->tstamp
= tswap32(ci
->tstamp
);
2431 gemu_log("Unknown host IFA type: %d\n", rtattr
->rta_type
);
2437 static abi_long
host_to_target_data_route_rtattr(struct rtattr
*rtattr
)
2440 switch (rtattr
->rta_type
) {
2441 /* binary: depends on family type */
2450 u32
= RTA_DATA(rtattr
);
2451 *u32
= tswap32(*u32
);
2454 gemu_log("Unknown host RTA type: %d\n", rtattr
->rta_type
);
2460 static abi_long
host_to_target_link_rtattr(struct rtattr
*rtattr
,
2461 uint32_t rtattr_len
)
2463 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2464 host_to_target_data_link_rtattr
);
2467 static abi_long
host_to_target_addr_rtattr(struct rtattr
*rtattr
,
2468 uint32_t rtattr_len
)
2470 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2471 host_to_target_data_addr_rtattr
);
2474 static abi_long
host_to_target_route_rtattr(struct rtattr
*rtattr
,
2475 uint32_t rtattr_len
)
2477 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2478 host_to_target_data_route_rtattr
);
2481 static abi_long
host_to_target_data_route(struct nlmsghdr
*nlh
)
2484 struct ifinfomsg
*ifi
;
2485 struct ifaddrmsg
*ifa
;
2488 nlmsg_len
= nlh
->nlmsg_len
;
2489 switch (nlh
->nlmsg_type
) {
2493 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2494 ifi
= NLMSG_DATA(nlh
);
2495 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2496 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2497 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2498 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2499 host_to_target_link_rtattr(IFLA_RTA(ifi
),
2500 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifi
)));
2506 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2507 ifa
= NLMSG_DATA(nlh
);
2508 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2509 host_to_target_addr_rtattr(IFA_RTA(ifa
),
2510 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifa
)));
2516 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2517 rtm
= NLMSG_DATA(nlh
);
2518 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2519 host_to_target_route_rtattr(RTM_RTA(rtm
),
2520 nlmsg_len
- NLMSG_LENGTH(sizeof(*rtm
)));
2524 return -TARGET_EINVAL
;
2529 static inline abi_long
host_to_target_nlmsg_route(struct nlmsghdr
*nlh
,
2532 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_route
);
2535 static abi_long
target_to_host_for_each_rtattr(struct rtattr
*rtattr
,
2537 abi_long (*target_to_host_rtattr
)
2542 while (len
>= sizeof(struct rtattr
)) {
2543 if (tswap16(rtattr
->rta_len
) < sizeof(struct rtattr
) ||
2544 tswap16(rtattr
->rta_len
) > len
) {
2547 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
2548 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
2549 ret
= target_to_host_rtattr(rtattr
);
2553 len
-= RTA_ALIGN(rtattr
->rta_len
);
2554 rtattr
= (struct rtattr
*)(((char *)rtattr
) +
2555 RTA_ALIGN(rtattr
->rta_len
));
2560 static abi_long
target_to_host_data_link_rtattr(struct rtattr
*rtattr
)
2562 switch (rtattr
->rta_type
) {
2564 gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr
->rta_type
);
2570 static abi_long
target_to_host_data_addr_rtattr(struct rtattr
*rtattr
)
2572 switch (rtattr
->rta_type
) {
2573 /* binary: depends on family type */
2578 gemu_log("Unknown target IFA type: %d\n", rtattr
->rta_type
);
2584 static abi_long
target_to_host_data_route_rtattr(struct rtattr
*rtattr
)
2587 switch (rtattr
->rta_type
) {
2588 /* binary: depends on family type */
2595 u32
= RTA_DATA(rtattr
);
2596 *u32
= tswap32(*u32
);
2599 gemu_log("Unknown target RTA type: %d\n", rtattr
->rta_type
);
2605 static void target_to_host_link_rtattr(struct rtattr
*rtattr
,
2606 uint32_t rtattr_len
)
2608 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2609 target_to_host_data_link_rtattr
);
2612 static void target_to_host_addr_rtattr(struct rtattr
*rtattr
,
2613 uint32_t rtattr_len
)
2615 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2616 target_to_host_data_addr_rtattr
);
2619 static void target_to_host_route_rtattr(struct rtattr
*rtattr
,
2620 uint32_t rtattr_len
)
2622 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2623 target_to_host_data_route_rtattr
);
2626 static abi_long
target_to_host_data_route(struct nlmsghdr
*nlh
)
2628 struct ifinfomsg
*ifi
;
2629 struct ifaddrmsg
*ifa
;
2632 switch (nlh
->nlmsg_type
) {
2637 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2638 ifi
= NLMSG_DATA(nlh
);
2639 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2640 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2641 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2642 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2643 target_to_host_link_rtattr(IFLA_RTA(ifi
), nlh
->nlmsg_len
-
2644 NLMSG_LENGTH(sizeof(*ifi
)));
2650 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2651 ifa
= NLMSG_DATA(nlh
);
2652 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2653 target_to_host_addr_rtattr(IFA_RTA(ifa
), nlh
->nlmsg_len
-
2654 NLMSG_LENGTH(sizeof(*ifa
)));
2661 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2662 rtm
= NLMSG_DATA(nlh
);
2663 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2664 target_to_host_route_rtattr(RTM_RTA(rtm
), nlh
->nlmsg_len
-
2665 NLMSG_LENGTH(sizeof(*rtm
)));
2669 return -TARGET_EOPNOTSUPP
;
2674 static abi_long
target_to_host_nlmsg_route(struct nlmsghdr
*nlh
, size_t len
)
2676 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_route
);
2678 #endif /* CONFIG_RTNETLINK */
2680 static abi_long
host_to_target_data_audit(struct nlmsghdr
*nlh
)
2682 switch (nlh
->nlmsg_type
) {
2684 gemu_log("Unknown host audit message type %d\n",
2686 return -TARGET_EINVAL
;
2691 static inline abi_long
host_to_target_nlmsg_audit(struct nlmsghdr
*nlh
,
2694 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_audit
);
2697 static abi_long
target_to_host_data_audit(struct nlmsghdr
*nlh
)
2699 switch (nlh
->nlmsg_type
) {
2701 case AUDIT_FIRST_USER_MSG
... AUDIT_LAST_USER_MSG
:
2702 case AUDIT_FIRST_USER_MSG2
... AUDIT_LAST_USER_MSG2
:
2705 gemu_log("Unknown target audit message type %d\n",
2707 return -TARGET_EINVAL
;
2713 static abi_long
target_to_host_nlmsg_audit(struct nlmsghdr
*nlh
, size_t len
)
2715 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_audit
);
2718 /* do_setsockopt() Must return target values and target errnos. */
2719 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
2720 abi_ulong optval_addr
, socklen_t optlen
)
2724 struct ip_mreqn
*ip_mreq
;
2725 struct ip_mreq_source
*ip_mreq_source
;
2729 /* TCP options all take an 'int' value. */
2730 if (optlen
< sizeof(uint32_t))
2731 return -TARGET_EINVAL
;
2733 if (get_user_u32(val
, optval_addr
))
2734 return -TARGET_EFAULT
;
2735 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2742 case IP_ROUTER_ALERT
:
2746 case IP_MTU_DISCOVER
:
2752 case IP_MULTICAST_TTL
:
2753 case IP_MULTICAST_LOOP
:
2755 if (optlen
>= sizeof(uint32_t)) {
2756 if (get_user_u32(val
, optval_addr
))
2757 return -TARGET_EFAULT
;
2758 } else if (optlen
>= 1) {
2759 if (get_user_u8(val
, optval_addr
))
2760 return -TARGET_EFAULT
;
2762 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2764 case IP_ADD_MEMBERSHIP
:
2765 case IP_DROP_MEMBERSHIP
:
2766 if (optlen
< sizeof (struct target_ip_mreq
) ||
2767 optlen
> sizeof (struct target_ip_mreqn
))
2768 return -TARGET_EINVAL
;
2770 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
2771 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
2772 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
2775 case IP_BLOCK_SOURCE
:
2776 case IP_UNBLOCK_SOURCE
:
2777 case IP_ADD_SOURCE_MEMBERSHIP
:
2778 case IP_DROP_SOURCE_MEMBERSHIP
:
2779 if (optlen
!= sizeof (struct target_ip_mreq_source
))
2780 return -TARGET_EINVAL
;
2782 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2783 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2784 unlock_user (ip_mreq_source
, optval_addr
, 0);
2793 case IPV6_MTU_DISCOVER
:
2796 case IPV6_RECVPKTINFO
:
2798 if (optlen
< sizeof(uint32_t)) {
2799 return -TARGET_EINVAL
;
2801 if (get_user_u32(val
, optval_addr
)) {
2802 return -TARGET_EFAULT
;
2804 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2805 &val
, sizeof(val
)));
2814 /* struct icmp_filter takes an u32 value */
2815 if (optlen
< sizeof(uint32_t)) {
2816 return -TARGET_EINVAL
;
2819 if (get_user_u32(val
, optval_addr
)) {
2820 return -TARGET_EFAULT
;
2822 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2823 &val
, sizeof(val
)));
2830 case TARGET_SOL_SOCKET
:
2832 case TARGET_SO_RCVTIMEO
:
2836 optname
= SO_RCVTIMEO
;
2839 if (optlen
!= sizeof(struct target_timeval
)) {
2840 return -TARGET_EINVAL
;
2843 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2844 return -TARGET_EFAULT
;
2847 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2851 case TARGET_SO_SNDTIMEO
:
2852 optname
= SO_SNDTIMEO
;
2854 case TARGET_SO_ATTACH_FILTER
:
2856 struct target_sock_fprog
*tfprog
;
2857 struct target_sock_filter
*tfilter
;
2858 struct sock_fprog fprog
;
2859 struct sock_filter
*filter
;
2862 if (optlen
!= sizeof(*tfprog
)) {
2863 return -TARGET_EINVAL
;
2865 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2866 return -TARGET_EFAULT
;
2868 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2869 tswapal(tfprog
->filter
), 0)) {
2870 unlock_user_struct(tfprog
, optval_addr
, 1);
2871 return -TARGET_EFAULT
;
2874 fprog
.len
= tswap16(tfprog
->len
);
2875 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2876 if (filter
== NULL
) {
2877 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2878 unlock_user_struct(tfprog
, optval_addr
, 1);
2879 return -TARGET_ENOMEM
;
2881 for (i
= 0; i
< fprog
.len
; i
++) {
2882 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2883 filter
[i
].jt
= tfilter
[i
].jt
;
2884 filter
[i
].jf
= tfilter
[i
].jf
;
2885 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2887 fprog
.filter
= filter
;
2889 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2890 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2893 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2894 unlock_user_struct(tfprog
, optval_addr
, 1);
2897 case TARGET_SO_BINDTODEVICE
:
2899 char *dev_ifname
, *addr_ifname
;
2901 if (optlen
> IFNAMSIZ
- 1) {
2902 optlen
= IFNAMSIZ
- 1;
2904 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2906 return -TARGET_EFAULT
;
2908 optname
= SO_BINDTODEVICE
;
2909 addr_ifname
= alloca(IFNAMSIZ
);
2910 memcpy(addr_ifname
, dev_ifname
, optlen
);
2911 addr_ifname
[optlen
] = 0;
2912 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2913 addr_ifname
, optlen
));
2914 unlock_user (dev_ifname
, optval_addr
, 0);
2917 /* Options with 'int' argument. */
2918 case TARGET_SO_DEBUG
:
2921 case TARGET_SO_REUSEADDR
:
2922 optname
= SO_REUSEADDR
;
2924 case TARGET_SO_TYPE
:
2927 case TARGET_SO_ERROR
:
2930 case TARGET_SO_DONTROUTE
:
2931 optname
= SO_DONTROUTE
;
2933 case TARGET_SO_BROADCAST
:
2934 optname
= SO_BROADCAST
;
2936 case TARGET_SO_SNDBUF
:
2937 optname
= SO_SNDBUF
;
2939 case TARGET_SO_SNDBUFFORCE
:
2940 optname
= SO_SNDBUFFORCE
;
2942 case TARGET_SO_RCVBUF
:
2943 optname
= SO_RCVBUF
;
2945 case TARGET_SO_RCVBUFFORCE
:
2946 optname
= SO_RCVBUFFORCE
;
2948 case TARGET_SO_KEEPALIVE
:
2949 optname
= SO_KEEPALIVE
;
2951 case TARGET_SO_OOBINLINE
:
2952 optname
= SO_OOBINLINE
;
2954 case TARGET_SO_NO_CHECK
:
2955 optname
= SO_NO_CHECK
;
2957 case TARGET_SO_PRIORITY
:
2958 optname
= SO_PRIORITY
;
2961 case TARGET_SO_BSDCOMPAT
:
2962 optname
= SO_BSDCOMPAT
;
2965 case TARGET_SO_PASSCRED
:
2966 optname
= SO_PASSCRED
;
2968 case TARGET_SO_PASSSEC
:
2969 optname
= SO_PASSSEC
;
2971 case TARGET_SO_TIMESTAMP
:
2972 optname
= SO_TIMESTAMP
;
2974 case TARGET_SO_RCVLOWAT
:
2975 optname
= SO_RCVLOWAT
;
2981 if (optlen
< sizeof(uint32_t))
2982 return -TARGET_EINVAL
;
2984 if (get_user_u32(val
, optval_addr
))
2985 return -TARGET_EFAULT
;
2986 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2990 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
2991 ret
= -TARGET_ENOPROTOOPT
;
2996 /* do_getsockopt() Must return target values and target errnos. */
2997 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2998 abi_ulong optval_addr
, abi_ulong optlen
)
3005 case TARGET_SOL_SOCKET
:
3008 /* These don't just return a single integer */
3009 case TARGET_SO_LINGER
:
3010 case TARGET_SO_RCVTIMEO
:
3011 case TARGET_SO_SNDTIMEO
:
3012 case TARGET_SO_PEERNAME
:
3014 case TARGET_SO_PEERCRED
: {
3017 struct target_ucred
*tcr
;
3019 if (get_user_u32(len
, optlen
)) {
3020 return -TARGET_EFAULT
;
3023 return -TARGET_EINVAL
;
3027 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
3035 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
3036 return -TARGET_EFAULT
;
3038 __put_user(cr
.pid
, &tcr
->pid
);
3039 __put_user(cr
.uid
, &tcr
->uid
);
3040 __put_user(cr
.gid
, &tcr
->gid
);
3041 unlock_user_struct(tcr
, optval_addr
, 1);
3042 if (put_user_u32(len
, optlen
)) {
3043 return -TARGET_EFAULT
;
3047 /* Options with 'int' argument. */
3048 case TARGET_SO_DEBUG
:
3051 case TARGET_SO_REUSEADDR
:
3052 optname
= SO_REUSEADDR
;
3054 case TARGET_SO_TYPE
:
3057 case TARGET_SO_ERROR
:
3060 case TARGET_SO_DONTROUTE
:
3061 optname
= SO_DONTROUTE
;
3063 case TARGET_SO_BROADCAST
:
3064 optname
= SO_BROADCAST
;
3066 case TARGET_SO_SNDBUF
:
3067 optname
= SO_SNDBUF
;
3069 case TARGET_SO_RCVBUF
:
3070 optname
= SO_RCVBUF
;
3072 case TARGET_SO_KEEPALIVE
:
3073 optname
= SO_KEEPALIVE
;
3075 case TARGET_SO_OOBINLINE
:
3076 optname
= SO_OOBINLINE
;
3078 case TARGET_SO_NO_CHECK
:
3079 optname
= SO_NO_CHECK
;
3081 case TARGET_SO_PRIORITY
:
3082 optname
= SO_PRIORITY
;
3085 case TARGET_SO_BSDCOMPAT
:
3086 optname
= SO_BSDCOMPAT
;
3089 case TARGET_SO_PASSCRED
:
3090 optname
= SO_PASSCRED
;
3092 case TARGET_SO_TIMESTAMP
:
3093 optname
= SO_TIMESTAMP
;
3095 case TARGET_SO_RCVLOWAT
:
3096 optname
= SO_RCVLOWAT
;
3098 case TARGET_SO_ACCEPTCONN
:
3099 optname
= SO_ACCEPTCONN
;
3106 /* TCP options all take an 'int' value. */
3108 if (get_user_u32(len
, optlen
))
3109 return -TARGET_EFAULT
;
3111 return -TARGET_EINVAL
;
3113 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3116 if (optname
== SO_TYPE
) {
3117 val
= host_to_target_sock_type(val
);
3122 if (put_user_u32(val
, optval_addr
))
3123 return -TARGET_EFAULT
;
3125 if (put_user_u8(val
, optval_addr
))
3126 return -TARGET_EFAULT
;
3128 if (put_user_u32(len
, optlen
))
3129 return -TARGET_EFAULT
;
3136 case IP_ROUTER_ALERT
:
3140 case IP_MTU_DISCOVER
:
3146 case IP_MULTICAST_TTL
:
3147 case IP_MULTICAST_LOOP
:
3148 if (get_user_u32(len
, optlen
))
3149 return -TARGET_EFAULT
;
3151 return -TARGET_EINVAL
;
3153 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3156 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
3158 if (put_user_u32(len
, optlen
)
3159 || put_user_u8(val
, optval_addr
))
3160 return -TARGET_EFAULT
;
3162 if (len
> sizeof(int))
3164 if (put_user_u32(len
, optlen
)
3165 || put_user_u32(val
, optval_addr
))
3166 return -TARGET_EFAULT
;
3170 ret
= -TARGET_ENOPROTOOPT
;
3176 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
3178 ret
= -TARGET_EOPNOTSUPP
;
3184 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
3185 abi_ulong count
, int copy
)
3187 struct target_iovec
*target_vec
;
3189 abi_ulong total_len
, max_len
;
3192 bool bad_address
= false;
3198 if (count
> IOV_MAX
) {
3203 vec
= g_try_new0(struct iovec
, count
);
3209 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3210 count
* sizeof(struct target_iovec
), 1);
3211 if (target_vec
== NULL
) {
3216 /* ??? If host page size > target page size, this will result in a
3217 value larger than what we can actually support. */
3218 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
3221 for (i
= 0; i
< count
; i
++) {
3222 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3223 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3228 } else if (len
== 0) {
3229 /* Zero length pointer is ignored. */
3230 vec
[i
].iov_base
= 0;
3232 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
3233 /* If the first buffer pointer is bad, this is a fault. But
3234 * subsequent bad buffers will result in a partial write; this
3235 * is realized by filling the vector with null pointers and
3237 if (!vec
[i
].iov_base
) {
3248 if (len
> max_len
- total_len
) {
3249 len
= max_len
- total_len
;
3252 vec
[i
].iov_len
= len
;
3256 unlock_user(target_vec
, target_addr
, 0);
3261 if (tswapal(target_vec
[i
].iov_len
) > 0) {
3262 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
3265 unlock_user(target_vec
, target_addr
, 0);
3272 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
3273 abi_ulong count
, int copy
)
3275 struct target_iovec
*target_vec
;
3278 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3279 count
* sizeof(struct target_iovec
), 1);
3281 for (i
= 0; i
< count
; i
++) {
3282 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3283 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3287 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
3289 unlock_user(target_vec
, target_addr
, 0);
3295 static inline int target_to_host_sock_type(int *type
)
3298 int target_type
= *type
;
3300 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
3301 case TARGET_SOCK_DGRAM
:
3302 host_type
= SOCK_DGRAM
;
3304 case TARGET_SOCK_STREAM
:
3305 host_type
= SOCK_STREAM
;
3308 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
3311 if (target_type
& TARGET_SOCK_CLOEXEC
) {
3312 #if defined(SOCK_CLOEXEC)
3313 host_type
|= SOCK_CLOEXEC
;
3315 return -TARGET_EINVAL
;
3318 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3319 #if defined(SOCK_NONBLOCK)
3320 host_type
|= SOCK_NONBLOCK
;
3321 #elif !defined(O_NONBLOCK)
3322 return -TARGET_EINVAL
;
/* Try to emulate socket type flags after socket creation.
 * When the host lacks SOCK_NONBLOCK, apply O_NONBLOCK via fcntl instead.
 * Returns 'fd' on success, -TARGET_EINVAL on fcntl failure.
 */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
3344 static abi_long
packet_target_to_host_sockaddr(void *host_addr
,
3345 abi_ulong target_addr
,
3348 struct sockaddr
*addr
= host_addr
;
3349 struct target_sockaddr
*target_saddr
;
3351 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
3352 if (!target_saddr
) {
3353 return -TARGET_EFAULT
;
3356 memcpy(addr
, target_saddr
, len
);
3357 addr
->sa_family
= tswap16(target_saddr
->sa_family
);
3358 /* spkt_protocol is big-endian */
3360 unlock_user(target_saddr
, target_addr
, 0);
3364 static TargetFdTrans target_packet_trans
= {
3365 .target_to_host_addr
= packet_target_to_host_sockaddr
,
3368 #ifdef CONFIG_RTNETLINK
3369 static abi_long
netlink_route_target_to_host(void *buf
, size_t len
)
3373 ret
= target_to_host_nlmsg_route(buf
, len
);
3381 static abi_long
netlink_route_host_to_target(void *buf
, size_t len
)
3385 ret
= host_to_target_nlmsg_route(buf
, len
);
3393 static TargetFdTrans target_netlink_route_trans
= {
3394 .target_to_host_data
= netlink_route_target_to_host
,
3395 .host_to_target_data
= netlink_route_host_to_target
,
3397 #endif /* CONFIG_RTNETLINK */
3399 static abi_long
netlink_audit_target_to_host(void *buf
, size_t len
)
3403 ret
= target_to_host_nlmsg_audit(buf
, len
);
3411 static abi_long
netlink_audit_host_to_target(void *buf
, size_t len
)
3415 ret
= host_to_target_nlmsg_audit(buf
, len
);
3423 static TargetFdTrans target_netlink_audit_trans
= {
3424 .target_to_host_data
= netlink_audit_target_to_host
,
3425 .host_to_target_data
= netlink_audit_host_to_target
,
3428 /* do_socket() Must return target values and target errnos. */
3429 static abi_long
do_socket(int domain
, int type
, int protocol
)
3431 int target_type
= type
;
3434 ret
= target_to_host_sock_type(&type
);
3439 if (domain
== PF_NETLINK
&& !(
3440 #ifdef CONFIG_RTNETLINK
3441 protocol
== NETLINK_ROUTE
||
3443 protocol
== NETLINK_KOBJECT_UEVENT
||
3444 protocol
== NETLINK_AUDIT
)) {
3445 return -EPFNOSUPPORT
;
3448 if (domain
== AF_PACKET
||
3449 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
3450 protocol
= tswap16(protocol
);
3453 ret
= get_errno(socket(domain
, type
, protocol
));
3455 ret
= sock_flags_fixup(ret
, target_type
);
3456 if (type
== SOCK_PACKET
) {
3457 /* Manage an obsolete case :
3458 * if socket type is SOCK_PACKET, bind by name
3460 fd_trans_register(ret
, &target_packet_trans
);
3461 } else if (domain
== PF_NETLINK
) {
3463 #ifdef CONFIG_RTNETLINK
3465 fd_trans_register(ret
, &target_netlink_route_trans
);
3468 case NETLINK_KOBJECT_UEVENT
:
3469 /* nothing to do: messages are strings */
3472 fd_trans_register(ret
, &target_netlink_audit_trans
);
3475 g_assert_not_reached();
3482 /* do_bind() Must return target values and target errnos. */
3483 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
3489 if ((int)addrlen
< 0) {
3490 return -TARGET_EINVAL
;
3493 addr
= alloca(addrlen
+1);
3495 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3499 return get_errno(bind(sockfd
, addr
, addrlen
));
3502 /* do_connect() Must return target values and target errnos. */
3503 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
3509 if ((int)addrlen
< 0) {
3510 return -TARGET_EINVAL
;
3513 addr
= alloca(addrlen
+1);
3515 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3519 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
3522 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3523 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
3524 int flags
, int send
)
3530 abi_ulong target_vec
;
3532 if (msgp
->msg_name
) {
3533 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
3534 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
3535 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
3536 tswapal(msgp
->msg_name
),
3538 if (ret
== -TARGET_EFAULT
) {
3539 /* For connected sockets msg_name and msg_namelen must
3540 * be ignored, so returning EFAULT immediately is wrong.
3541 * Instead, pass a bad msg_name to the host kernel, and
3542 * let it decide whether to return EFAULT or not.
3544 msg
.msg_name
= (void *)-1;
3549 msg
.msg_name
= NULL
;
3550 msg
.msg_namelen
= 0;
3552 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3553 msg
.msg_control
= alloca(msg
.msg_controllen
);
3554 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3556 count
= tswapal(msgp
->msg_iovlen
);
3557 target_vec
= tswapal(msgp
->msg_iov
);
3559 if (count
> IOV_MAX
) {
3560 /* sendrcvmsg returns a different errno for this condition than
3561 * readv/writev, so we must catch it here before lock_iovec() does.
3563 ret
= -TARGET_EMSGSIZE
;
3567 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3568 target_vec
, count
, send
);
3570 ret
= -host_to_target_errno(errno
);
3573 msg
.msg_iovlen
= count
;
3577 if (fd_trans_target_to_host_data(fd
)) {
3580 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3581 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3582 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3583 msg
.msg_iov
->iov_len
);
3585 msg
.msg_iov
->iov_base
= host_msg
;
3586 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3590 ret
= target_to_host_cmsg(&msg
, msgp
);
3592 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3596 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3597 if (!is_error(ret
)) {
3599 if (fd_trans_host_to_target_data(fd
)) {
3600 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3603 ret
= host_to_target_cmsg(msgp
, &msg
);
3605 if (!is_error(ret
)) {
3606 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3607 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
3608 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3609 msg
.msg_name
, msg
.msg_namelen
);
3621 unlock_iovec(vec
, target_vec
, count
, !send
);
3626 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3627 int flags
, int send
)
3630 struct target_msghdr
*msgp
;
3632 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3636 return -TARGET_EFAULT
;
3638 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3639 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3643 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3644 * so it might not have this *mmsg-specific flag either.
3646 #ifndef MSG_WAITFORONE
3647 #define MSG_WAITFORONE 0x10000
3650 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3651 unsigned int vlen
, unsigned int flags
,
3654 struct target_mmsghdr
*mmsgp
;
3658 if (vlen
> UIO_MAXIOV
) {
3662 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3664 return -TARGET_EFAULT
;
3667 for (i
= 0; i
< vlen
; i
++) {
3668 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3669 if (is_error(ret
)) {
3672 mmsgp
[i
].msg_len
= tswap32(ret
);
3673 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3674 if (flags
& MSG_WAITFORONE
) {
3675 flags
|= MSG_DONTWAIT
;
3679 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3681 /* Return number of datagrams sent if we sent any at all;
3682 * otherwise return the error.
3690 /* do_accept4() Must return target values and target errnos. */
3691 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3692 abi_ulong target_addrlen_addr
, int flags
)
3699 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3701 if (target_addr
== 0) {
3702 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3705 /* linux returns EINVAL if addrlen pointer is invalid */
3706 if (get_user_u32(addrlen
, target_addrlen_addr
))
3707 return -TARGET_EINVAL
;
3709 if ((int)addrlen
< 0) {
3710 return -TARGET_EINVAL
;
3713 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3714 return -TARGET_EINVAL
;
3716 addr
= alloca(addrlen
);
3718 ret
= get_errno(safe_accept4(fd
, addr
, &addrlen
, host_flags
));
3719 if (!is_error(ret
)) {
3720 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3721 if (put_user_u32(addrlen
, target_addrlen_addr
))
3722 ret
= -TARGET_EFAULT
;
3727 /* do_getpeername() Must return target values and target errnos. */
3728 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3729 abi_ulong target_addrlen_addr
)
3735 if (get_user_u32(addrlen
, target_addrlen_addr
))
3736 return -TARGET_EFAULT
;
3738 if ((int)addrlen
< 0) {
3739 return -TARGET_EINVAL
;
3742 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3743 return -TARGET_EFAULT
;
3745 addr
= alloca(addrlen
);
3747 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
3748 if (!is_error(ret
)) {
3749 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3750 if (put_user_u32(addrlen
, target_addrlen_addr
))
3751 ret
= -TARGET_EFAULT
;
3756 /* do_getsockname() Must return target values and target errnos. */
3757 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3758 abi_ulong target_addrlen_addr
)
3764 if (get_user_u32(addrlen
, target_addrlen_addr
))
3765 return -TARGET_EFAULT
;
3767 if ((int)addrlen
< 0) {
3768 return -TARGET_EINVAL
;
3771 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3772 return -TARGET_EFAULT
;
3774 addr
= alloca(addrlen
);
3776 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
3777 if (!is_error(ret
)) {
3778 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3779 if (put_user_u32(addrlen
, target_addrlen_addr
))
3780 ret
= -TARGET_EFAULT
;
3785 /* do_socketpair() Must return target values and target errnos. */
3786 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3787 abi_ulong target_tab_addr
)
3792 target_to_host_sock_type(&type
);
3794 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3795 if (!is_error(ret
)) {
3796 if (put_user_s32(tab
[0], target_tab_addr
)
3797 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3798 ret
= -TARGET_EFAULT
;
3803 /* do_sendto() Must return target values and target errnos. */
3804 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3805 abi_ulong target_addr
, socklen_t addrlen
)
3809 void *copy_msg
= NULL
;
3812 if ((int)addrlen
< 0) {
3813 return -TARGET_EINVAL
;
3816 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3818 return -TARGET_EFAULT
;
3819 if (fd_trans_target_to_host_data(fd
)) {
3820 copy_msg
= host_msg
;
3821 host_msg
= g_malloc(len
);
3822 memcpy(host_msg
, copy_msg
, len
);
3823 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3829 addr
= alloca(addrlen
+1);
3830 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3834 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3836 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3841 host_msg
= copy_msg
;
3843 unlock_user(host_msg
, msg
, 0);
3847 /* do_recvfrom() Must return target values and target errnos. */
3848 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3849 abi_ulong target_addr
,
3850 abi_ulong target_addrlen
)
3857 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3859 return -TARGET_EFAULT
;
3861 if (get_user_u32(addrlen
, target_addrlen
)) {
3862 ret
= -TARGET_EFAULT
;
3865 if ((int)addrlen
< 0) {
3866 ret
= -TARGET_EINVAL
;
3869 addr
= alloca(addrlen
);
3870 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
3873 addr
= NULL
; /* To keep compiler quiet. */
3874 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
3876 if (!is_error(ret
)) {
3877 if (fd_trans_host_to_target_data(fd
)) {
3878 ret
= fd_trans_host_to_target_data(fd
)(host_msg
, ret
);
3881 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3882 if (put_user_u32(addrlen
, target_addrlen
)) {
3883 ret
= -TARGET_EFAULT
;
3887 unlock_user(host_msg
, msg
, len
);
3890 unlock_user(host_msg
, msg
, 0);
#ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per call */
        [SOCKOP_socket] = 3,      /* domain, type, protocol */
        [SOCKOP_bind] = 3,        /* sockfd, addr, addrlen */
        [SOCKOP_connect] = 3,     /* sockfd, addr, addrlen */
        [SOCKOP_listen] = 2,      /* sockfd, backlog */
        [SOCKOP_accept] = 3,      /* sockfd, addr, addrlen */
        [SOCKOP_accept4] = 4,     /* sockfd, addr, addrlen, flags */
        [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_socketpair] = 4,  /* domain, type, protocol, tab */
        [SOCKOP_send] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_recv] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_sendto] = 6,      /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_recvfrom] = 6,    /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_shutdown] = 2,    /* sockfd, how */
        [SOCKOP_sendmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_recvmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_sendmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_recvmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_setsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
        [SOCKOP_getsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
    };
    abi_long args[6]; /* max 6 args */
    unsigned i;

    /* first, collect the arguments in args[] according to nargs[] */
    if (num >= 0 && num < ARRAY_SIZE(nargs)) {
        assert(ARRAY_SIZE(args) >= nargs[num]); /* ensure space for the args */
        for (i = 0; i < nargs[num]; ++i) {
            if (get_user_ual(args[i], vptr + i * sizeof(abi_long)) != 0) {
                return -TARGET_EFAULT;
            }
        }
    }

    /* now when we have the args, actually handle the call */
    switch (num) {
    case SOCKOP_socket: /* domain, type, protocol */
        return do_socket(args[0], args[1], args[2]);
    case SOCKOP_bind: /* sockfd, addr, addrlen */
        return do_bind(args[0], args[1], args[2]);
    case SOCKOP_connect: /* sockfd, addr, addrlen */
        return do_connect(args[0], args[1], args[2]);
    case SOCKOP_listen: /* sockfd, backlog */
        return get_errno(listen(args[0], args[1]));
    case SOCKOP_accept: /* sockfd, addr, addrlen */
        return do_accept4(args[0], args[1], args[2], 0);
    case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
        return do_accept4(args[0], args[1], args[2], args[3]);
    case SOCKOP_getsockname: /* sockfd, addr, addrlen */
        return do_getsockname(args[0], args[1], args[2]);
    case SOCKOP_getpeername: /* sockfd, addr, addrlen */
        return do_getpeername(args[0], args[1], args[2]);
    case SOCKOP_socketpair: /* domain, type, protocol, tab */
        return do_socketpair(args[0], args[1], args[2], args[3]);
    case SOCKOP_send: /* sockfd, msg, len, flags */
        return do_sendto(args[0], args[1], args[2], args[3], 0, 0);
    case SOCKOP_recv: /* sockfd, msg, len, flags */
        return do_recvfrom(args[0], args[1], args[2], args[3], 0, 0);
    case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(args[0], args[1], args[2], args[3],
                         args[4], args[5]);
    case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(args[0], args[1], args[2], args[3],
                           args[4], args[5]);
    case SOCKOP_shutdown: /* sockfd, how */
        return get_errno(shutdown(args[0], args[1]));
    case SOCKOP_sendmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(args[0], args[1], args[2], 1);
    case SOCKOP_recvmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(args[0], args[1], args[2], 0);
    case SOCKOP_sendmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(args[0], args[1], args[2], args[3], 1);
    case SOCKOP_recvmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(args[0], args[1], args[2], args[3], 0);
    case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(args[0], args[1], args[2], args[3], args[4]);
    case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(args[0], args[1], args[2], args[3], args[4]);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_ENOSYS;
    }
}
#endif
3983 #define N_SHM_REGIONS 32
3985 static struct shm_region
{
3989 } shm_regions
[N_SHM_REGIONS
];
3991 #ifndef TARGET_SEMID64_DS
3992 /* asm-generic version of this struct */
3993 struct target_semid64_ds
3995 struct target_ipc_perm sem_perm
;
3996 abi_ulong sem_otime
;
3997 #if TARGET_ABI_BITS == 32
3998 abi_ulong __unused1
;
4000 abi_ulong sem_ctime
;
4001 #if TARGET_ABI_BITS == 32
4002 abi_ulong __unused2
;
4004 abi_ulong sem_nsems
;
4005 abi_ulong __unused3
;
4006 abi_ulong __unused4
;
4010 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
4011 abi_ulong target_addr
)
4013 struct target_ipc_perm
*target_ip
;
4014 struct target_semid64_ds
*target_sd
;
4016 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4017 return -TARGET_EFAULT
;
4018 target_ip
= &(target_sd
->sem_perm
);
4019 host_ip
->__key
= tswap32(target_ip
->__key
);
4020 host_ip
->uid
= tswap32(target_ip
->uid
);
4021 host_ip
->gid
= tswap32(target_ip
->gid
);
4022 host_ip
->cuid
= tswap32(target_ip
->cuid
);
4023 host_ip
->cgid
= tswap32(target_ip
->cgid
);
4024 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4025 host_ip
->mode
= tswap32(target_ip
->mode
);
4027 host_ip
->mode
= tswap16(target_ip
->mode
);
4029 #if defined(TARGET_PPC)
4030 host_ip
->__seq
= tswap32(target_ip
->__seq
);
4032 host_ip
->__seq
= tswap16(target_ip
->__seq
);
4034 unlock_user_struct(target_sd
, target_addr
, 0);
4038 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
4039 struct ipc_perm
*host_ip
)
4041 struct target_ipc_perm
*target_ip
;
4042 struct target_semid64_ds
*target_sd
;
4044 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4045 return -TARGET_EFAULT
;
4046 target_ip
= &(target_sd
->sem_perm
);
4047 target_ip
->__key
= tswap32(host_ip
->__key
);
4048 target_ip
->uid
= tswap32(host_ip
->uid
);
4049 target_ip
->gid
= tswap32(host_ip
->gid
);
4050 target_ip
->cuid
= tswap32(host_ip
->cuid
);
4051 target_ip
->cgid
= tswap32(host_ip
->cgid
);
4052 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4053 target_ip
->mode
= tswap32(host_ip
->mode
);
4055 target_ip
->mode
= tswap16(host_ip
->mode
);
4057 #if defined(TARGET_PPC)
4058 target_ip
->__seq
= tswap32(host_ip
->__seq
);
4060 target_ip
->__seq
= tswap16(host_ip
->__seq
);
4062 unlock_user_struct(target_sd
, target_addr
, 1);
4066 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
4067 abi_ulong target_addr
)
4069 struct target_semid64_ds
*target_sd
;
4071 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4072 return -TARGET_EFAULT
;
4073 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
4074 return -TARGET_EFAULT
;
4075 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
4076 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
4077 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
4078 unlock_user_struct(target_sd
, target_addr
, 0);
4082 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
4083 struct semid_ds
*host_sd
)
4085 struct target_semid64_ds
*target_sd
;
4087 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4088 return -TARGET_EFAULT
;
4089 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
4090 return -TARGET_EFAULT
;
4091 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
4092 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
4093 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
4094 unlock_user_struct(target_sd
, target_addr
, 1);
4098 struct target_seminfo
{
4111 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
4112 struct seminfo
*host_seminfo
)
4114 struct target_seminfo
*target_seminfo
;
4115 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
4116 return -TARGET_EFAULT
;
4117 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
4118 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
4119 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
4120 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
4121 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
4122 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
4123 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
4124 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
4125 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
4126 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
4127 unlock_user_struct(target_seminfo
, target_addr
, 1);
4133 struct semid_ds
*buf
;
4134 unsigned short *array
;
4135 struct seminfo
*__buf
;
4138 union target_semun
{
4145 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
4146 abi_ulong target_addr
)
4149 unsigned short *array
;
4151 struct semid_ds semid_ds
;
4154 semun
.buf
= &semid_ds
;
4156 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4158 return get_errno(ret
);
4160 nsems
= semid_ds
.sem_nsems
;
4162 *host_array
= g_try_new(unsigned short, nsems
);
4164 return -TARGET_ENOMEM
;
4166 array
= lock_user(VERIFY_READ
, target_addr
,
4167 nsems
*sizeof(unsigned short), 1);
4169 g_free(*host_array
);
4170 return -TARGET_EFAULT
;
4173 for(i
=0; i
<nsems
; i
++) {
4174 __get_user((*host_array
)[i
], &array
[i
]);
4176 unlock_user(array
, target_addr
, 0);
4181 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
4182 unsigned short **host_array
)
4185 unsigned short *array
;
4187 struct semid_ds semid_ds
;
4190 semun
.buf
= &semid_ds
;
4192 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4194 return get_errno(ret
);
4196 nsems
= semid_ds
.sem_nsems
;
4198 array
= lock_user(VERIFY_WRITE
, target_addr
,
4199 nsems
*sizeof(unsigned short), 0);
4201 return -TARGET_EFAULT
;
4203 for(i
=0; i
<nsems
; i
++) {
4204 __put_user((*host_array
)[i
], &array
[i
]);
4206 g_free(*host_array
);
4207 unlock_user(array
, target_addr
, 1);
4212 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
4213 abi_ulong target_arg
)
4215 union target_semun target_su
= { .buf
= target_arg
};
4217 struct semid_ds dsarg
;
4218 unsigned short *array
= NULL
;
4219 struct seminfo seminfo
;
4220 abi_long ret
= -TARGET_EINVAL
;
4227 /* In 64 bit cross-endian situations, we will erroneously pick up
4228 * the wrong half of the union for the "val" element. To rectify
4229 * this, the entire 8-byte structure is byteswapped, followed by
4230 * a swap of the 4 byte val field. In other cases, the data is
4231 * already in proper host byte order. */
4232 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
4233 target_su
.buf
= tswapal(target_su
.buf
);
4234 arg
.val
= tswap32(target_su
.val
);
4236 arg
.val
= target_su
.val
;
4238 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4242 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
4246 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4247 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
4254 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
4258 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4259 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
4265 arg
.__buf
= &seminfo
;
4266 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4267 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
4275 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
4282 struct target_sembuf
{
4283 unsigned short sem_num
;
4288 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
4289 abi_ulong target_addr
,
4292 struct target_sembuf
*target_sembuf
;
4295 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
4296 nsops
*sizeof(struct target_sembuf
), 1);
4298 return -TARGET_EFAULT
;
4300 for(i
=0; i
<nsops
; i
++) {
4301 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
4302 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
4303 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
4306 unlock_user(target_sembuf
, target_addr
, 0);
4311 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
4313 struct sembuf sops
[nsops
];
4315 if (target_to_host_sembuf(sops
, ptr
, nsops
))
4316 return -TARGET_EFAULT
;
4318 return get_errno(safe_semtimedop(semid
, sops
, nsops
, NULL
));
4321 struct target_msqid_ds
4323 struct target_ipc_perm msg_perm
;
4324 abi_ulong msg_stime
;
4325 #if TARGET_ABI_BITS == 32
4326 abi_ulong __unused1
;
4328 abi_ulong msg_rtime
;
4329 #if TARGET_ABI_BITS == 32
4330 abi_ulong __unused2
;
4332 abi_ulong msg_ctime
;
4333 #if TARGET_ABI_BITS == 32
4334 abi_ulong __unused3
;
4336 abi_ulong __msg_cbytes
;
4338 abi_ulong msg_qbytes
;
4339 abi_ulong msg_lspid
;
4340 abi_ulong msg_lrpid
;
4341 abi_ulong __unused4
;
4342 abi_ulong __unused5
;
4345 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
4346 abi_ulong target_addr
)
4348 struct target_msqid_ds
*target_md
;
4350 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
4351 return -TARGET_EFAULT
;
4352 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
4353 return -TARGET_EFAULT
;
4354 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
4355 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
4356 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
4357 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
4358 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
4359 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
4360 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
4361 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
4362 unlock_user_struct(target_md
, target_addr
, 0);
4366 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
4367 struct msqid_ds
*host_md
)
4369 struct target_msqid_ds
*target_md
;
4371 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
4372 return -TARGET_EFAULT
;
4373 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
4374 return -TARGET_EFAULT
;
4375 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
4376 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
4377 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
4378 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
4379 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
4380 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
4381 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
4382 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
4383 unlock_user_struct(target_md
, target_addr
, 1);
4387 struct target_msginfo
{
4395 unsigned short int msgseg
;
4398 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
4399 struct msginfo
*host_msginfo
)
4401 struct target_msginfo
*target_msginfo
;
4402 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
4403 return -TARGET_EFAULT
;
4404 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
4405 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
4406 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
4407 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
4408 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
4409 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
4410 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
4411 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
4412 unlock_user_struct(target_msginfo
, target_addr
, 1);
4416 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
4418 struct msqid_ds dsarg
;
4419 struct msginfo msginfo
;
4420 abi_long ret
= -TARGET_EINVAL
;
4428 if (target_to_host_msqid_ds(&dsarg
,ptr
))
4429 return -TARGET_EFAULT
;
4430 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
4431 if (host_to_target_msqid_ds(ptr
,&dsarg
))
4432 return -TARGET_EFAULT
;
4435 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
4439 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
4440 if (host_to_target_msginfo(ptr
, &msginfo
))
4441 return -TARGET_EFAULT
;
4448 struct target_msgbuf
{
4453 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
4454 ssize_t msgsz
, int msgflg
)
4456 struct target_msgbuf
*target_mb
;
4457 struct msgbuf
*host_mb
;
4461 return -TARGET_EINVAL
;
4464 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
4465 return -TARGET_EFAULT
;
4466 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4468 unlock_user_struct(target_mb
, msgp
, 0);
4469 return -TARGET_ENOMEM
;
4471 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
4472 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
4473 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
4475 unlock_user_struct(target_mb
, msgp
, 0);
4480 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
4481 ssize_t msgsz
, abi_long msgtyp
,
4484 struct target_msgbuf
*target_mb
;
4486 struct msgbuf
*host_mb
;
4490 return -TARGET_EINVAL
;
4493 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
4494 return -TARGET_EFAULT
;
4496 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4498 ret
= -TARGET_ENOMEM
;
4501 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
4504 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
4505 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
4506 if (!target_mtext
) {
4507 ret
= -TARGET_EFAULT
;
4510 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
4511 unlock_user(target_mtext
, target_mtext_addr
, ret
);
4514 target_mb
->mtype
= tswapal(host_mb
->mtype
);
4518 unlock_user_struct(target_mb
, msgp
, 1);
4523 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
4524 abi_ulong target_addr
)
4526 struct target_shmid_ds
*target_sd
;
4528 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4529 return -TARGET_EFAULT
;
4530 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
4531 return -TARGET_EFAULT
;
4532 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4533 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4534 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4535 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4536 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4537 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4538 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4539 unlock_user_struct(target_sd
, target_addr
, 0);
4543 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
4544 struct shmid_ds
*host_sd
)
4546 struct target_shmid_ds
*target_sd
;
4548 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4549 return -TARGET_EFAULT
;
4550 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
4551 return -TARGET_EFAULT
;
4552 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4553 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4554 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4555 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4556 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4557 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4558 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4559 unlock_user_struct(target_sd
, target_addr
, 1);
4563 struct target_shminfo
{
4571 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
4572 struct shminfo
*host_shminfo
)
4574 struct target_shminfo
*target_shminfo
;
4575 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
4576 return -TARGET_EFAULT
;
4577 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
4578 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
4579 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
4580 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
4581 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
4582 unlock_user_struct(target_shminfo
, target_addr
, 1);
4586 struct target_shm_info
{
4591 abi_ulong swap_attempts
;
4592 abi_ulong swap_successes
;
4595 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
4596 struct shm_info
*host_shm_info
)
4598 struct target_shm_info
*target_shm_info
;
4599 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
4600 return -TARGET_EFAULT
;
4601 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
4602 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
4603 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
4604 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
4605 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
4606 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
4607 unlock_user_struct(target_shm_info
, target_addr
, 1);
4611 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
4613 struct shmid_ds dsarg
;
4614 struct shminfo shminfo
;
4615 struct shm_info shm_info
;
4616 abi_long ret
= -TARGET_EINVAL
;
4624 if (target_to_host_shmid_ds(&dsarg
, buf
))
4625 return -TARGET_EFAULT
;
4626 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
4627 if (host_to_target_shmid_ds(buf
, &dsarg
))
4628 return -TARGET_EFAULT
;
4631 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
4632 if (host_to_target_shminfo(buf
, &shminfo
))
4633 return -TARGET_EFAULT
;
4636 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
4637 if (host_to_target_shm_info(buf
, &shm_info
))
4638 return -TARGET_EFAULT
;
4643 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
4650 #ifndef TARGET_FORCE_SHMLBA
4651 /* For most architectures, SHMLBA is the same as the page size;
4652 * some architectures have larger values, in which case they should
4653 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4654 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4655 * and defining its own value for SHMLBA.
4657 * The kernel also permits SHMLBA to be set by the architecture to a
4658 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4659 * this means that addresses are rounded to the large size if
4660 * SHM_RND is set but addresses not aligned to that size are not rejected
4661 * as long as they are at least page-aligned. Since the only architecture
4662 * which uses this is ia64 this code doesn't provide for that oddity.
4664 static inline abi_ulong
target_shmlba(CPUArchState
*cpu_env
)
4666 return TARGET_PAGE_SIZE
;
4670 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
4671 int shmid
, abi_ulong shmaddr
, int shmflg
)
4675 struct shmid_ds shm_info
;
4679 /* find out the length of the shared memory segment */
4680 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4681 if (is_error(ret
)) {
4682 /* can't get length, bail out */
4686 shmlba
= target_shmlba(cpu_env
);
4688 if (shmaddr
& (shmlba
- 1)) {
4689 if (shmflg
& SHM_RND
) {
4690 shmaddr
&= ~(shmlba
- 1);
4692 return -TARGET_EINVAL
;
4699 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
4701 abi_ulong mmap_start
;
4703 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
4705 if (mmap_start
== -1) {
4707 host_raddr
= (void *)-1;
4709 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
4712 if (host_raddr
== (void *)-1) {
4714 return get_errno((long)host_raddr
);
4716 raddr
=h2g((unsigned long)host_raddr
);
4718 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
4719 PAGE_VALID
| PAGE_READ
|
4720 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
4722 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4723 if (!shm_regions
[i
].in_use
) {
4724 shm_regions
[i
].in_use
= true;
4725 shm_regions
[i
].start
= raddr
;
4726 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4736 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4740 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4741 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4742 shm_regions
[i
].in_use
= false;
4743 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
4748 return get_errno(shmdt(g2h(shmaddr
)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    /* High 16 bits of 'call' carry the IPC interface version. */
    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old-style msgrcv passes a kludge struct holding the
                 * buffer pointer and the message type. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr)) {
                return get_errno(raddr);
            }
            /* Store the attach address through the third argument. */
            if (put_user_ual(raddr, third)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;

    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux
           platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;

    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
4858 /* kernel structure types definitions */
4860 #define STRUCT(name, ...) STRUCT_ ## name,
4861 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4863 #include "syscall_types.h"
4867 #undef STRUCT_SPECIAL
4869 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4870 #define STRUCT_SPECIAL(name)
4871 #include "syscall_types.h"
4873 #undef STRUCT_SPECIAL
4875 typedef struct IOCTLEntry IOCTLEntry
;
4877 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4878 int fd
, int cmd
, abi_long arg
);
4882 unsigned int host_cmd
;
4885 do_ioctl_fn
*do_ioctl
;
4886 const argtype arg_type
[5];
4889 #define IOC_R 0x0001
4890 #define IOC_W 0x0002
4891 #define IOC_RW (IOC_R | IOC_W)
4893 #define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;

    /* Copy in the fixed-size fiemap header from the guest. */
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof(*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
4984 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4985 int fd
, int cmd
, abi_long arg
)
4987 const argtype
*arg_type
= ie
->arg_type
;
4991 struct ifconf
*host_ifconf
;
4993 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4994 int target_ifreq_size
;
4999 abi_long target_ifc_buf
;
5003 assert(arg_type
[0] == TYPE_PTR
);
5004 assert(ie
->access
== IOC_RW
);
5007 target_size
= thunk_type_size(arg_type
, 0);
5009 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5011 return -TARGET_EFAULT
;
5012 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5013 unlock_user(argptr
, arg
, 0);
5015 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
5016 target_ifc_len
= host_ifconf
->ifc_len
;
5017 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
5019 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
5020 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
5021 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
5023 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
5024 if (outbufsz
> MAX_STRUCT_SIZE
) {
5025 /* We can't fit all the extents into the fixed size buffer.
5026 * Allocate one that is large enough and use it instead.
5028 host_ifconf
= malloc(outbufsz
);
5030 return -TARGET_ENOMEM
;
5032 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
5035 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
5037 host_ifconf
->ifc_len
= host_ifc_len
;
5038 host_ifconf
->ifc_buf
= host_ifc_buf
;
5040 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
5041 if (!is_error(ret
)) {
5042 /* convert host ifc_len to target ifc_len */
5044 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
5045 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
5046 host_ifconf
->ifc_len
= target_ifc_len
;
5048 /* restore target ifc_buf */
5050 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
5052 /* copy struct ifconf to target user */
5054 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5056 return -TARGET_EFAULT
;
5057 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
5058 unlock_user(argptr
, arg
, target_size
);
5060 /* copy ifreq[] to target user */
5062 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
5063 for (i
= 0; i
< nb_ifreq
; i
++) {
5064 thunk_convert(argptr
+ i
* target_ifreq_size
,
5065 host_ifc_buf
+ i
* sizeof(struct ifreq
),
5066 ifreq_arg_type
, THUNK_TARGET
);
5068 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
5078 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5079 int cmd
, abi_long arg
)
5082 struct dm_ioctl
*host_dm
;
5083 abi_long guest_data
;
5084 uint32_t guest_data_size
;
5086 const argtype
*arg_type
= ie
->arg_type
;
5088 void *big_buf
= NULL
;
5092 target_size
= thunk_type_size(arg_type
, 0);
5093 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5095 ret
= -TARGET_EFAULT
;
5098 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5099 unlock_user(argptr
, arg
, 0);
5101 /* buf_temp is too small, so fetch things into a bigger buffer */
5102 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
5103 memcpy(big_buf
, buf_temp
, target_size
);
5107 guest_data
= arg
+ host_dm
->data_start
;
5108 if ((guest_data
- arg
) < 0) {
5109 ret
= -TARGET_EINVAL
;
5112 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5113 host_data
= (char*)host_dm
+ host_dm
->data_start
;
5115 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
5117 ret
= -TARGET_EFAULT
;
5121 switch (ie
->host_cmd
) {
5123 case DM_LIST_DEVICES
:
5126 case DM_DEV_SUSPEND
:
5129 case DM_TABLE_STATUS
:
5130 case DM_TABLE_CLEAR
:
5132 case DM_LIST_VERSIONS
:
5136 case DM_DEV_SET_GEOMETRY
:
5137 /* data contains only strings */
5138 memcpy(host_data
, argptr
, guest_data_size
);
5141 memcpy(host_data
, argptr
, guest_data_size
);
5142 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
5146 void *gspec
= argptr
;
5147 void *cur_data
= host_data
;
5148 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5149 int spec_size
= thunk_type_size(arg_type
, 0);
5152 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5153 struct dm_target_spec
*spec
= cur_data
;
5157 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
5158 slen
= strlen((char*)gspec
+ spec_size
) + 1;
5160 spec
->next
= sizeof(*spec
) + slen
;
5161 strcpy((char*)&spec
[1], gspec
+ spec_size
);
5163 cur_data
+= spec
->next
;
5168 ret
= -TARGET_EINVAL
;
5169 unlock_user(argptr
, guest_data
, 0);
5172 unlock_user(argptr
, guest_data
, 0);
5174 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5175 if (!is_error(ret
)) {
5176 guest_data
= arg
+ host_dm
->data_start
;
5177 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5178 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
5179 switch (ie
->host_cmd
) {
5184 case DM_DEV_SUSPEND
:
5187 case DM_TABLE_CLEAR
:
5189 case DM_DEV_SET_GEOMETRY
:
5190 /* no return data */
5192 case DM_LIST_DEVICES
:
5194 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
5195 uint32_t remaining_data
= guest_data_size
;
5196 void *cur_data
= argptr
;
5197 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
5198 int nl_size
= 12; /* can't use thunk_size due to alignment */
5201 uint32_t next
= nl
->next
;
5203 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
5205 if (remaining_data
< nl
->next
) {
5206 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5209 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
5210 strcpy(cur_data
+ nl_size
, nl
->name
);
5211 cur_data
+= nl
->next
;
5212 remaining_data
-= nl
->next
;
5216 nl
= (void*)nl
+ next
;
5221 case DM_TABLE_STATUS
:
5223 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
5224 void *cur_data
= argptr
;
5225 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5226 int spec_size
= thunk_type_size(arg_type
, 0);
5229 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5230 uint32_t next
= spec
->next
;
5231 int slen
= strlen((char*)&spec
[1]) + 1;
5232 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
5233 if (guest_data_size
< spec
->next
) {
5234 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5237 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
5238 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
5239 cur_data
= argptr
+ spec
->next
;
5240 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
5246 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
5247 int count
= *(uint32_t*)hdata
;
5248 uint64_t *hdev
= hdata
+ 8;
5249 uint64_t *gdev
= argptr
+ 8;
5252 *(uint32_t*)argptr
= tswap32(count
);
5253 for (i
= 0; i
< count
; i
++) {
5254 *gdev
= tswap64(*hdev
);
5260 case DM_LIST_VERSIONS
:
5262 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
5263 uint32_t remaining_data
= guest_data_size
;
5264 void *cur_data
= argptr
;
5265 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
5266 int vers_size
= thunk_type_size(arg_type
, 0);
5269 uint32_t next
= vers
->next
;
5271 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
5273 if (remaining_data
< vers
->next
) {
5274 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5277 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
5278 strcpy(cur_data
+ vers_size
, vers
->name
);
5279 cur_data
+= vers
->next
;
5280 remaining_data
-= vers
->next
;
5284 vers
= (void*)vers
+ next
;
5289 unlock_user(argptr
, guest_data
, 0);
5290 ret
= -TARGET_EINVAL
;
5293 unlock_user(argptr
, guest_data
, guest_data_size
);
5295 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5297 ret
= -TARGET_EFAULT
;
5300 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5301 unlock_user(argptr
, arg
, target_size
);
5308 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5309 int cmd
, abi_long arg
)
5313 const argtype
*arg_type
= ie
->arg_type
;
5314 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
5317 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
5318 struct blkpg_partition host_part
;
5320 /* Read and convert blkpg */
5322 target_size
= thunk_type_size(arg_type
, 0);
5323 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5325 ret
= -TARGET_EFAULT
;
5328 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5329 unlock_user(argptr
, arg
, 0);
5331 switch (host_blkpg
->op
) {
5332 case BLKPG_ADD_PARTITION
:
5333 case BLKPG_DEL_PARTITION
:
5334 /* payload is struct blkpg_partition */
5337 /* Unknown opcode */
5338 ret
= -TARGET_EINVAL
;
5342 /* Read and convert blkpg->data */
5343 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
5344 target_size
= thunk_type_size(part_arg_type
, 0);
5345 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5347 ret
= -TARGET_EFAULT
;
5350 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
5351 unlock_user(argptr
, arg
, 0);
5353 /* Swizzle the data pointer to our local copy and call! */
5354 host_blkpg
->data
= &host_part
;
5355 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
5361 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5362 int fd
, int cmd
, abi_long arg
)
5364 const argtype
*arg_type
= ie
->arg_type
;
5365 const StructEntry
*se
;
5366 const argtype
*field_types
;
5367 const int *dst_offsets
, *src_offsets
;
5370 abi_ulong
*target_rt_dev_ptr
;
5371 unsigned long *host_rt_dev_ptr
;
5375 assert(ie
->access
== IOC_W
);
5376 assert(*arg_type
== TYPE_PTR
);
5378 assert(*arg_type
== TYPE_STRUCT
);
5379 target_size
= thunk_type_size(arg_type
, 0);
5380 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5382 return -TARGET_EFAULT
;
5385 assert(*arg_type
== (int)STRUCT_rtentry
);
5386 se
= struct_entries
+ *arg_type
++;
5387 assert(se
->convert
[0] == NULL
);
5388 /* convert struct here to be able to catch rt_dev string */
5389 field_types
= se
->field_types
;
5390 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
5391 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
5392 for (i
= 0; i
< se
->nb_fields
; i
++) {
5393 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
5394 assert(*field_types
== TYPE_PTRVOID
);
5395 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
5396 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
5397 if (*target_rt_dev_ptr
!= 0) {
5398 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
5399 tswapal(*target_rt_dev_ptr
));
5400 if (!*host_rt_dev_ptr
) {
5401 unlock_user(argptr
, arg
, 0);
5402 return -TARGET_EFAULT
;
5405 *host_rt_dev_ptr
= 0;
5410 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
5411 argptr
+ src_offsets
[i
],
5412 field_types
, THUNK_HOST
);
5414 unlock_user(argptr
, arg
, 0);
5416 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5417 if (*host_rt_dev_ptr
!= 0) {
5418 unlock_user((void *)*host_rt_dev_ptr
,
5419 *target_rt_dev_ptr
, 0);
5424 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5425 int fd
, int cmd
, abi_long arg
)
5427 int sig
= target_to_host_signal(arg
);
5428 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
5431 static IOCTLEntry ioctl_entries
[] = {
5432 #define IOCTL(cmd, access, ...) \
5433 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5434 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5435 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5440 /* ??? Implement proper locking for ioctls. */
5441 /* do_ioctl() Must return target values and target errnos. */
5442 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
5444 const IOCTLEntry
*ie
;
5445 const argtype
*arg_type
;
5447 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
5453 if (ie
->target_cmd
== 0) {
5454 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
5455 return -TARGET_ENOSYS
;
5457 if (ie
->target_cmd
== cmd
)
5461 arg_type
= ie
->arg_type
;
5463 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
5466 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
5469 switch(arg_type
[0]) {
5472 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
5476 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
5480 target_size
= thunk_type_size(arg_type
, 0);
5481 switch(ie
->access
) {
5483 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5484 if (!is_error(ret
)) {
5485 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5487 return -TARGET_EFAULT
;
5488 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5489 unlock_user(argptr
, arg
, target_size
);
5493 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5495 return -TARGET_EFAULT
;
5496 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5497 unlock_user(argptr
, arg
, 0);
5498 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5502 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5504 return -TARGET_EFAULT
;
5505 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5506 unlock_user(argptr
, arg
, 0);
5507 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5508 if (!is_error(ret
)) {
5509 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5511 return -TARGET_EFAULT
;
5512 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5513 unlock_user(argptr
, arg
, target_size
);
5519 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5520 (long)cmd
, arg_type
[0]);
5521 ret
= -TARGET_ENOSYS
;
5527 static const bitmask_transtbl iflag_tbl
[] = {
5528 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
5529 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
5530 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
5531 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
5532 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
5533 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
5534 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
5535 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
5536 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
5537 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
5538 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
5539 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
5540 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
5541 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
5545 static const bitmask_transtbl oflag_tbl
[] = {
5546 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
5547 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
5548 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
5549 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
5550 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
5551 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
5552 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
5553 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
5554 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
5555 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
5556 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
5557 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
5558 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
5559 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
5560 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
5561 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
5562 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
5563 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
5564 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
5565 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
5566 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
5567 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
5568 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
5569 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
5573 static const bitmask_transtbl cflag_tbl
[] = {
5574 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
5575 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
5576 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
5577 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
5578 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
5579 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
5580 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
5581 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
5582 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
5583 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
5584 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
5585 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
5586 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
5587 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
5588 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
5589 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
5590 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
5591 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
5592 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
5593 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
5594 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
5595 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
5596 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
5597 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
5598 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
5599 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
5600 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
5601 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
5602 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
5603 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
5604 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
5608 static const bitmask_transtbl lflag_tbl
[] = {
5609 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
5610 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
5611 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
5612 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
5613 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
5614 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
5615 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
5616 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
5617 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
5618 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
5619 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
5620 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
5621 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
5622 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
5623 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5627 static void target_to_host_termios (void *dst
, const void *src
)
5629 struct host_termios
*host
= dst
;
5630 const struct target_termios
*target
= src
;
5633 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
5635 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
5637 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
5639 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
5640 host
->c_line
= target
->c_line
;
5642 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
5643 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
5644 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
5645 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
5646 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
5647 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
5648 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
5649 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
5650 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
5651 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
5652 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
5653 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
5654 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
5655 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
5656 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
5657 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
5658 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
5659 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
5662 static void host_to_target_termios (void *dst
, const void *src
)
5664 struct target_termios
*target
= dst
;
5665 const struct host_termios
*host
= src
;
5668 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
5670 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
5672 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
5674 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
5675 target
->c_line
= host
->c_line
;
5677 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
5678 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
5679 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
5680 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
5681 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
5682 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
5683 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
5684 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
5685 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
5686 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
5687 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
5688 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
5689 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
5690 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
5691 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
5692 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
5693 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
5694 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
5697 static const StructEntry struct_termios_def
= {
5698 .convert
= { host_to_target_termios
, target_to_host_termios
},
5699 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
5700 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
5703 static bitmask_transtbl mmap_flags_tbl
[] = {
5704 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
5705 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
5706 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
5707 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
5708 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
5709 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
5710 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
5711 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
5712 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
, MAP_NORESERVE
,
5717 #if defined(TARGET_I386)
5719 /* NOTE: there is really one LDT for all the threads */
5720 static uint8_t *ldt_table
;
5722 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
5729 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
5730 if (size
> bytecount
)
5732 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
5734 return -TARGET_EFAULT
;
5735 /* ??? Should this by byteswapped? */
5736 memcpy(p
, ldt_table
, size
);
5737 unlock_user(p
, ptr
, size
);
5741 /* XXX: add locking support */
5742 static abi_long
write_ldt(CPUX86State
*env
,
5743 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
5745 struct target_modify_ldt_ldt_s ldt_info
;
5746 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5747 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5748 int seg_not_present
, useable
, lm
;
5749 uint32_t *lp
, entry_1
, entry_2
;
5751 if (bytecount
!= sizeof(ldt_info
))
5752 return -TARGET_EINVAL
;
5753 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
5754 return -TARGET_EFAULT
;
5755 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5756 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5757 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5758 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5759 unlock_user_struct(target_ldt_info
, ptr
, 0);
5761 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
5762 return -TARGET_EINVAL
;
5763 seg_32bit
= ldt_info
.flags
& 1;
5764 contents
= (ldt_info
.flags
>> 1) & 3;
5765 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5766 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5767 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5768 useable
= (ldt_info
.flags
>> 6) & 1;
5772 lm
= (ldt_info
.flags
>> 7) & 1;
5774 if (contents
== 3) {
5776 return -TARGET_EINVAL
;
5777 if (seg_not_present
== 0)
5778 return -TARGET_EINVAL
;
5780 /* allocate the LDT */
5782 env
->ldt
.base
= target_mmap(0,
5783 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
5784 PROT_READ
|PROT_WRITE
,
5785 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
5786 if (env
->ldt
.base
== -1)
5787 return -TARGET_ENOMEM
;
5788 memset(g2h(env
->ldt
.base
), 0,
5789 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
5790 env
->ldt
.limit
= 0xffff;
5791 ldt_table
= g2h(env
->ldt
.base
);
5794 /* NOTE: same code as Linux kernel */
5795 /* Allow LDTs to be cleared by the user. */
5796 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5799 read_exec_only
== 1 &&
5801 limit_in_pages
== 0 &&
5802 seg_not_present
== 1 &&
5810 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5811 (ldt_info
.limit
& 0x0ffff);
5812 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5813 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5814 (ldt_info
.limit
& 0xf0000) |
5815 ((read_exec_only
^ 1) << 9) |
5817 ((seg_not_present
^ 1) << 15) |
5819 (limit_in_pages
<< 23) |
5823 entry_2
|= (useable
<< 20);
5825 /* Install the new entry ... */
5827 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
5828 lp
[0] = tswap32(entry_1
);
5829 lp
[1] = tswap32(entry_2
);
5833 /* specific and weird i386 syscalls */
5834 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
5835 unsigned long bytecount
)
5841 ret
= read_ldt(ptr
, bytecount
);
5844 ret
= write_ldt(env
, ptr
, bytecount
, 1);
5847 ret
= write_ldt(env
, ptr
, bytecount
, 0);
5850 ret
= -TARGET_ENOSYS
;
5856 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5857 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5859 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5860 struct target_modify_ldt_ldt_s ldt_info
;
5861 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5862 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5863 int seg_not_present
, useable
, lm
;
5864 uint32_t *lp
, entry_1
, entry_2
;
5867 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5868 if (!target_ldt_info
)
5869 return -TARGET_EFAULT
;
5870 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5871 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5872 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5873 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5874 if (ldt_info
.entry_number
== -1) {
5875 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
5876 if (gdt_table
[i
] == 0) {
5877 ldt_info
.entry_number
= i
;
5878 target_ldt_info
->entry_number
= tswap32(i
);
5883 unlock_user_struct(target_ldt_info
, ptr
, 1);
5885 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
5886 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
5887 return -TARGET_EINVAL
;
5888 seg_32bit
= ldt_info
.flags
& 1;
5889 contents
= (ldt_info
.flags
>> 1) & 3;
5890 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5891 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5892 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5893 useable
= (ldt_info
.flags
>> 6) & 1;
5897 lm
= (ldt_info
.flags
>> 7) & 1;
5900 if (contents
== 3) {
5901 if (seg_not_present
== 0)
5902 return -TARGET_EINVAL
;
5905 /* NOTE: same code as Linux kernel */
5906 /* Allow LDTs to be cleared by the user. */
5907 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5908 if ((contents
== 0 &&
5909 read_exec_only
== 1 &&
5911 limit_in_pages
== 0 &&
5912 seg_not_present
== 1 &&
5920 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5921 (ldt_info
.limit
& 0x0ffff);
5922 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5923 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5924 (ldt_info
.limit
& 0xf0000) |
5925 ((read_exec_only
^ 1) << 9) |
5927 ((seg_not_present
^ 1) << 15) |
5929 (limit_in_pages
<< 23) |
5934 /* Install the new entry ... */
5936 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
5937 lp
[0] = tswap32(entry_1
);
5938 lp
[1] = tswap32(entry_2
);
5942 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5944 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5945 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5946 uint32_t base_addr
, limit
, flags
;
5947 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
5948 int seg_not_present
, useable
, lm
;
5949 uint32_t *lp
, entry_1
, entry_2
;
5951 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5952 if (!target_ldt_info
)
5953 return -TARGET_EFAULT
;
5954 idx
= tswap32(target_ldt_info
->entry_number
);
5955 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
5956 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
5957 unlock_user_struct(target_ldt_info
, ptr
, 1);
5958 return -TARGET_EINVAL
;
5960 lp
= (uint32_t *)(gdt_table
+ idx
);
5961 entry_1
= tswap32(lp
[0]);
5962 entry_2
= tswap32(lp
[1]);
5964 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
5965 contents
= (entry_2
>> 10) & 3;
5966 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
5967 seg_32bit
= (entry_2
>> 22) & 1;
5968 limit_in_pages
= (entry_2
>> 23) & 1;
5969 useable
= (entry_2
>> 20) & 1;
5973 lm
= (entry_2
>> 21) & 1;
5975 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
5976 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
5977 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
5978 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
5979 base_addr
= (entry_1
>> 16) |
5980 (entry_2
& 0xff000000) |
5981 ((entry_2
& 0xff) << 16);
5982 target_ldt_info
->base_addr
= tswapal(base_addr
);
5983 target_ldt_info
->limit
= tswap32(limit
);
5984 target_ldt_info
->flags
= tswap32(flags
);
5985 unlock_user_struct(target_ldt_info
, ptr
, 1);
5988 #endif /* TARGET_I386 && TARGET_ABI32 */
5990 #ifndef TARGET_ABI32
5991 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
5998 case TARGET_ARCH_SET_GS
:
5999 case TARGET_ARCH_SET_FS
:
6000 if (code
== TARGET_ARCH_SET_GS
)
6004 cpu_x86_load_seg(env
, idx
, 0);
6005 env
->segs
[idx
].base
= addr
;
6007 case TARGET_ARCH_GET_GS
:
6008 case TARGET_ARCH_GET_FS
:
6009 if (code
== TARGET_ARCH_GET_GS
)
6013 val
= env
->segs
[idx
].base
;
6014 if (put_user(val
, addr
, abi_ulong
))
6015 ret
= -TARGET_EFAULT
;
6018 ret
= -TARGET_EINVAL
;
6025 #endif /* defined(TARGET_I386) */
6027 #define NEW_STACK_SIZE 0x40000
6030 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
6033 pthread_mutex_t mutex
;
6034 pthread_cond_t cond
;
6037 abi_ulong child_tidptr
;
6038 abi_ulong parent_tidptr
;
6042 static void *clone_func(void *arg
)
6044 new_thread_info
*info
= arg
;
6049 rcu_register_thread();
6051 cpu
= ENV_GET_CPU(env
);
6053 ts
= (TaskState
*)cpu
->opaque
;
6054 info
->tid
= gettid();
6055 cpu
->host_tid
= info
->tid
;
6057 if (info
->child_tidptr
)
6058 put_user_u32(info
->tid
, info
->child_tidptr
);
6059 if (info
->parent_tidptr
)
6060 put_user_u32(info
->tid
, info
->parent_tidptr
);
6061 /* Enable signals. */
6062 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
6063 /* Signal to the parent that we're ready. */
6064 pthread_mutex_lock(&info
->mutex
);
6065 pthread_cond_broadcast(&info
->cond
);
6066 pthread_mutex_unlock(&info
->mutex
);
6067 /* Wait until the parent has finshed initializing the tls state. */
6068 pthread_mutex_lock(&clone_lock
);
6069 pthread_mutex_unlock(&clone_lock
);
6075 /* do_fork() Must return host values and target errnos (unlike most
6076 do_*() functions). */
6077 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
6078 abi_ulong parent_tidptr
, target_ulong newtls
,
6079 abi_ulong child_tidptr
)
6081 CPUState
*cpu
= ENV_GET_CPU(env
);
6085 CPUArchState
*new_env
;
6088 flags
&= ~CLONE_IGNORED_FLAGS
;
6090 /* Emulate vfork() with fork() */
6091 if (flags
& CLONE_VFORK
)
6092 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
6094 if (flags
& CLONE_VM
) {
6095 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
6096 new_thread_info info
;
6097 pthread_attr_t attr
;
6099 if (((flags
& CLONE_THREAD_FLAGS
) != CLONE_THREAD_FLAGS
) ||
6100 (flags
& CLONE_INVALID_THREAD_FLAGS
)) {
6101 return -TARGET_EINVAL
;
6104 ts
= g_new0(TaskState
, 1);
6105 init_task_state(ts
);
6106 /* we create a new CPU instance. */
6107 new_env
= cpu_copy(env
);
6108 /* Init regs that differ from the parent. */
6109 cpu_clone_regs(new_env
, newsp
);
6110 new_cpu
= ENV_GET_CPU(new_env
);
6111 new_cpu
->opaque
= ts
;
6112 ts
->bprm
= parent_ts
->bprm
;
6113 ts
->info
= parent_ts
->info
;
6114 ts
->signal_mask
= parent_ts
->signal_mask
;
6116 if (flags
& CLONE_CHILD_CLEARTID
) {
6117 ts
->child_tidptr
= child_tidptr
;
6120 if (flags
& CLONE_SETTLS
) {
6121 cpu_set_tls (new_env
, newtls
);
6124 /* Grab a mutex so that thread setup appears atomic. */
6125 pthread_mutex_lock(&clone_lock
);
6127 memset(&info
, 0, sizeof(info
));
6128 pthread_mutex_init(&info
.mutex
, NULL
);
6129 pthread_mutex_lock(&info
.mutex
);
6130 pthread_cond_init(&info
.cond
, NULL
);
6132 if (flags
& CLONE_CHILD_SETTID
) {
6133 info
.child_tidptr
= child_tidptr
;
6135 if (flags
& CLONE_PARENT_SETTID
) {
6136 info
.parent_tidptr
= parent_tidptr
;
6139 ret
= pthread_attr_init(&attr
);
6140 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
6141 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
6142 /* It is not safe to deliver signals until the child has finished
6143 initializing, so temporarily block all signals. */
6144 sigfillset(&sigmask
);
6145 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
6147 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
6148 /* TODO: Free new CPU state if thread creation failed. */
6150 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
6151 pthread_attr_destroy(&attr
);
6153 /* Wait for the child to initialize. */
6154 pthread_cond_wait(&info
.cond
, &info
.mutex
);
6159 pthread_mutex_unlock(&info
.mutex
);
6160 pthread_cond_destroy(&info
.cond
);
6161 pthread_mutex_destroy(&info
.mutex
);
6162 pthread_mutex_unlock(&clone_lock
);
6164 /* if no CLONE_VM, we consider it is a fork */
6165 if (flags
& CLONE_INVALID_FORK_FLAGS
) {
6166 return -TARGET_EINVAL
;
6169 /* We can't support custom termination signals */
6170 if ((flags
& CSIGNAL
) != TARGET_SIGCHLD
) {
6171 return -TARGET_EINVAL
;
6174 if (block_signals()) {
6175 return -TARGET_ERESTARTSYS
;
6181 /* Child Process. */
6183 cpu_clone_regs(env
, newsp
);
6185 /* There is a race condition here. The parent process could
6186 theoretically read the TID in the child process before the child
6187 tid is set. This would require using either ptrace
6188 (not implemented) or having *_tidptr to point at a shared memory
6189 mapping. We can't repeat the spinlock hack used above because
6190 the child process gets its own copy of the lock. */
6191 if (flags
& CLONE_CHILD_SETTID
)
6192 put_user_u32(gettid(), child_tidptr
);
6193 if (flags
& CLONE_PARENT_SETTID
)
6194 put_user_u32(gettid(), parent_tidptr
);
6195 ts
= (TaskState
*)cpu
->opaque
;
6196 if (flags
& CLONE_SETTLS
)
6197 cpu_set_tls (env
, newtls
);
6198 if (flags
& CLONE_CHILD_CLEARTID
)
6199 ts
->child_tidptr
= child_tidptr
;
6207 /* warning : doesn't handle linux specific flags... */
6208 static int target_to_host_fcntl_cmd(int cmd
)
6211 case TARGET_F_DUPFD
:
6212 case TARGET_F_GETFD
:
6213 case TARGET_F_SETFD
:
6214 case TARGET_F_GETFL
:
6215 case TARGET_F_SETFL
:
6217 case TARGET_F_GETLK
:
6219 case TARGET_F_SETLK
:
6221 case TARGET_F_SETLKW
:
6223 case TARGET_F_GETOWN
:
6225 case TARGET_F_SETOWN
:
6227 case TARGET_F_GETSIG
:
6229 case TARGET_F_SETSIG
:
6231 #if TARGET_ABI_BITS == 32
6232 case TARGET_F_GETLK64
:
6234 case TARGET_F_SETLK64
:
6236 case TARGET_F_SETLKW64
:
6239 case TARGET_F_SETLEASE
:
6241 case TARGET_F_GETLEASE
:
6243 #ifdef F_DUPFD_CLOEXEC
6244 case TARGET_F_DUPFD_CLOEXEC
:
6245 return F_DUPFD_CLOEXEC
;
6247 case TARGET_F_NOTIFY
:
6250 case TARGET_F_GETOWN_EX
:
6254 case TARGET_F_SETOWN_EX
:
6258 case TARGET_F_SETPIPE_SZ
:
6259 return F_SETPIPE_SZ
;
6260 case TARGET_F_GETPIPE_SZ
:
6261 return F_GETPIPE_SZ
;
6264 return -TARGET_EINVAL
;
6266 return -TARGET_EINVAL
;
6269 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
6270 static const bitmask_transtbl flock_tbl
[] = {
6271 TRANSTBL_CONVERT(F_RDLCK
),
6272 TRANSTBL_CONVERT(F_WRLCK
),
6273 TRANSTBL_CONVERT(F_UNLCK
),
6274 TRANSTBL_CONVERT(F_EXLCK
),
6275 TRANSTBL_CONVERT(F_SHLCK
),
6279 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
6280 abi_ulong target_flock_addr
)
6282 struct target_flock
*target_fl
;
6285 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6286 return -TARGET_EFAULT
;
6289 __get_user(l_type
, &target_fl
->l_type
);
6290 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6291 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6292 __get_user(fl
->l_start
, &target_fl
->l_start
);
6293 __get_user(fl
->l_len
, &target_fl
->l_len
);
6294 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6295 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6299 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
6300 const struct flock64
*fl
)
6302 struct target_flock
*target_fl
;
6305 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6306 return -TARGET_EFAULT
;
6309 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6310 __put_user(l_type
, &target_fl
->l_type
);
6311 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6312 __put_user(fl
->l_start
, &target_fl
->l_start
);
6313 __put_user(fl
->l_len
, &target_fl
->l_len
);
6314 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6315 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6319 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
6320 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
6322 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6323 static inline abi_long
copy_from_user_eabi_flock64(struct flock64
*fl
,
6324 abi_ulong target_flock_addr
)
6326 struct target_eabi_flock64
*target_fl
;
6329 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6330 return -TARGET_EFAULT
;
6333 __get_user(l_type
, &target_fl
->l_type
);
6334 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6335 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6336 __get_user(fl
->l_start
, &target_fl
->l_start
);
6337 __get_user(fl
->l_len
, &target_fl
->l_len
);
6338 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6339 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6343 static inline abi_long
copy_to_user_eabi_flock64(abi_ulong target_flock_addr
,
6344 const struct flock64
*fl
)
6346 struct target_eabi_flock64
*target_fl
;
6349 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6350 return -TARGET_EFAULT
;
6353 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6354 __put_user(l_type
, &target_fl
->l_type
);
6355 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6356 __put_user(fl
->l_start
, &target_fl
->l_start
);
6357 __put_user(fl
->l_len
, &target_fl
->l_len
);
6358 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6359 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6364 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
6365 abi_ulong target_flock_addr
)
6367 struct target_flock64
*target_fl
;
6370 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6371 return -TARGET_EFAULT
;
6374 __get_user(l_type
, &target_fl
->l_type
);
6375 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6376 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6377 __get_user(fl
->l_start
, &target_fl
->l_start
);
6378 __get_user(fl
->l_len
, &target_fl
->l_len
);
6379 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6380 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6384 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
6385 const struct flock64
*fl
)
6387 struct target_flock64
*target_fl
;
6390 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6391 return -TARGET_EFAULT
;
6394 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6395 __put_user(l_type
, &target_fl
->l_type
);
6396 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6397 __put_user(fl
->l_start
, &target_fl
->l_start
);
6398 __put_user(fl
->l_len
, &target_fl
->l_len
);
6399 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6400 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6404 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
6406 struct flock64 fl64
;
6408 struct f_owner_ex fox
;
6409 struct target_f_owner_ex
*target_fox
;
6412 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
6414 if (host_cmd
== -TARGET_EINVAL
)
6418 case TARGET_F_GETLK
:
6419 ret
= copy_from_user_flock(&fl64
, arg
);
6423 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6425 ret
= copy_to_user_flock(arg
, &fl64
);
6429 case TARGET_F_SETLK
:
6430 case TARGET_F_SETLKW
:
6431 ret
= copy_from_user_flock(&fl64
, arg
);
6435 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6438 case TARGET_F_GETLK64
:
6439 ret
= copy_from_user_flock64(&fl64
, arg
);
6443 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6445 ret
= copy_to_user_flock64(arg
, &fl64
);
6448 case TARGET_F_SETLK64
:
6449 case TARGET_F_SETLKW64
:
6450 ret
= copy_from_user_flock64(&fl64
, arg
);
6454 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6457 case TARGET_F_GETFL
:
6458 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6460 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
6464 case TARGET_F_SETFL
:
6465 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
6466 target_to_host_bitmask(arg
,
6471 case TARGET_F_GETOWN_EX
:
6472 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6474 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
6475 return -TARGET_EFAULT
;
6476 target_fox
->type
= tswap32(fox
.type
);
6477 target_fox
->pid
= tswap32(fox
.pid
);
6478 unlock_user_struct(target_fox
, arg
, 1);
6484 case TARGET_F_SETOWN_EX
:
6485 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
6486 return -TARGET_EFAULT
;
6487 fox
.type
= tswap32(target_fox
->type
);
6488 fox
.pid
= tswap32(target_fox
->pid
);
6489 unlock_user_struct(target_fox
, arg
, 0);
6490 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6494 case TARGET_F_SETOWN
:
6495 case TARGET_F_GETOWN
:
6496 case TARGET_F_SETSIG
:
6497 case TARGET_F_GETSIG
:
6498 case TARGET_F_SETLEASE
:
6499 case TARGET_F_GETLEASE
:
6500 case TARGET_F_SETPIPE_SZ
:
6501 case TARGET_F_GETPIPE_SZ
:
6502 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6506 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
#ifdef USE_UID16
/* Helpers converting between 32-bit host uids/gids and the 16-bit
 * ids used by the target's legacy (UID16) syscalls.  Overflowing
 * values clamp to 65534 and the (uint16_t)-1 sentinel is widened to -1,
 * matching kernel behaviour.
 */
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}
static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}
static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}
static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */
/* Full 32-bit ids: all conversions are identity. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
6578 /* We must do direct syscalls for setting UID/GID, because we want to
6579 * implement the Linux system call semantics of "change only for this thread",
6580 * not the libc/POSIX semantics of "change for all threads in process".
6581 * (See http://ewontfix.com/17/ for more details.)
6582 * We use the 32-bit version of the syscalls if present; if it is not
6583 * then either the host architecture supports 32-bit UIDs natively with
6584 * the standard syscall, or the 16-bit UID is the best we can do.
6586 #ifdef __NR_setuid32
6587 #define __NR_sys_setuid __NR_setuid32
6589 #define __NR_sys_setuid __NR_setuid
6591 #ifdef __NR_setgid32
6592 #define __NR_sys_setgid __NR_setgid32
6594 #define __NR_sys_setgid __NR_setgid
6596 #ifdef __NR_setresuid32
6597 #define __NR_sys_setresuid __NR_setresuid32
6599 #define __NR_sys_setresuid __NR_setresuid
6601 #ifdef __NR_setresgid32
6602 #define __NR_sys_setresgid __NR_setresgid32
6604 #define __NR_sys_setresgid __NR_setresgid
6607 _syscall1(int, sys_setuid
, uid_t
, uid
)
6608 _syscall1(int, sys_setgid
, gid_t
, gid
)
6609 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
6610 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
6612 void syscall_init(void)
6615 const argtype
*arg_type
;
6619 thunk_init(STRUCT_MAX
);
6621 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6622 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6623 #include "syscall_types.h"
6625 #undef STRUCT_SPECIAL
6627 /* Build target_to_host_errno_table[] table from
6628 * host_to_target_errno_table[]. */
6629 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
6630 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
6633 /* we patch the ioctl size if necessary. We rely on the fact that
6634 no ioctl has all the bits at '1' in the size field */
6636 while (ie
->target_cmd
!= 0) {
6637 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
6638 TARGET_IOC_SIZEMASK
) {
6639 arg_type
= ie
->arg_type
;
6640 if (arg_type
[0] != TYPE_PTR
) {
6641 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
6646 size
= thunk_type_size(arg_type
, 0);
6647 ie
->target_cmd
= (ie
->target_cmd
&
6648 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
6649 (size
<< TARGET_IOC_SIZESHIFT
);
6652 /* automatic consistency check if same arch */
6653 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6654 (defined(__x86_64__) && defined(TARGET_X86_64))
6655 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
6656 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6657 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#if TARGET_ABI_BITS == 32
/* Combine the two guest registers holding the halves of a 64-bit
 * offset, respecting the target's endianness.
 */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABI: the offset already fits in one register; word1 is unused. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* truncate64: on ABIs that require aligned register pairs the 64-bit
 * length starts one register later, so shift the arguments first.
 */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair alignment handling as truncate64. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
6708 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
6709 abi_ulong target_addr
)
6711 struct target_timespec
*target_ts
;
6713 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
6714 return -TARGET_EFAULT
;
6715 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6716 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6717 unlock_user_struct(target_ts
, target_addr
, 0);
6721 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
6722 struct timespec
*host_ts
)
6724 struct target_timespec
*target_ts
;
6726 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
6727 return -TARGET_EFAULT
;
6728 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6729 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6730 unlock_user_struct(target_ts
, target_addr
, 1);
6734 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
6735 abi_ulong target_addr
)
6737 struct target_itimerspec
*target_itspec
;
6739 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
6740 return -TARGET_EFAULT
;
6743 host_itspec
->it_interval
.tv_sec
=
6744 tswapal(target_itspec
->it_interval
.tv_sec
);
6745 host_itspec
->it_interval
.tv_nsec
=
6746 tswapal(target_itspec
->it_interval
.tv_nsec
);
6747 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
6748 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
6750 unlock_user_struct(target_itspec
, target_addr
, 1);
6754 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
6755 struct itimerspec
*host_its
)
6757 struct target_itimerspec
*target_itspec
;
6759 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
6760 return -TARGET_EFAULT
;
6763 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
6764 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
6766 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
6767 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
6769 unlock_user_struct(target_itspec
, target_addr
, 0);
6773 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
6774 abi_ulong target_addr
)
6776 struct target_sigevent
*target_sevp
;
6778 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
6779 return -TARGET_EFAULT
;
6782 /* This union is awkward on 64 bit systems because it has a 32 bit
6783 * integer and a pointer in it; we follow the conversion approach
6784 * used for handling sigval types in signal.c so the guest should get
6785 * the correct value back even if we did a 64 bit byteswap and it's
6786 * using the 32 bit integer.
6788 host_sevp
->sigev_value
.sival_ptr
=
6789 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
6790 host_sevp
->sigev_signo
=
6791 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
6792 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
6793 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
6795 unlock_user_struct(target_sevp
, target_addr
, 1);
#if defined(TARGET_NR_mlockall)
/* Translate the guest mlockall() flag bits into host MCL_* bits. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }
    return result;
}
#endif
6814 static inline abi_long
host_to_target_stat64(void *cpu_env
,
6815 abi_ulong target_addr
,
6816 struct stat
*host_st
)
6818 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6819 if (((CPUARMState
*)cpu_env
)->eabi
) {
6820 struct target_eabi_stat64
*target_st
;
6822 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
6823 return -TARGET_EFAULT
;
6824 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
6825 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
6826 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
6827 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6828 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
6830 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
6831 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
6832 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
6833 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
6834 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
6835 __put_user(host_st
->st_size
, &target_st
->st_size
);
6836 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
6837 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
6838 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
6839 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
6840 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
6841 unlock_user_struct(target_st
, target_addr
, 1);
6845 #if defined(TARGET_HAS_STRUCT_STAT64)
6846 struct target_stat64
*target_st
;
6848 struct target_stat
*target_st
;
6851 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
6852 return -TARGET_EFAULT
;
6853 memset(target_st
, 0, sizeof(*target_st
));
6854 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
6855 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
6856 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6857 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
6859 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
6860 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
6861 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
6862 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
6863 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
6864 /* XXX: better use of kernel struct */
6865 __put_user(host_st
->st_size
, &target_st
->st_size
);
6866 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
6867 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
6868 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
6869 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
6870 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
6871 unlock_user_struct(target_st
, target_addr
, 1);
6877 /* ??? Using host futex calls even when target atomic operations
6878 are not really atomic probably breaks things. However implementing
6879 futexes locally would make futexes shared between multiple processes
6880 tricky. However they're probably useless because guest atomic
6881 operations won't work either. */
6882 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
6883 target_ulong uaddr2
, int val3
)
6885 struct timespec ts
, *pts
;
6888 /* ??? We assume FUTEX_* constants are the same on both host
6890 #ifdef FUTEX_CMD_MASK
6891 base_op
= op
& FUTEX_CMD_MASK
;
6897 case FUTEX_WAIT_BITSET
:
6900 target_to_host_timespec(pts
, timeout
);
6904 return get_errno(safe_futex(g2h(uaddr
), op
, tswap32(val
),
6907 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6909 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6911 case FUTEX_CMP_REQUEUE
:
6913 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6914 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6915 But the prototype takes a `struct timespec *'; insert casts
6916 to satisfy the compiler. We do not need to tswap TIMEOUT
6917 since it's not compared to guest memory. */
6918 pts
= (struct timespec
*)(uintptr_t) timeout
;
6919 return get_errno(safe_futex(g2h(uaddr
), op
, val
, pts
,
6921 (base_op
== FUTEX_CMP_REQUEUE
6925 return -TARGET_ENOSYS
;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate name_to_handle_at(2): build the handle in a host-side buffer
 * sized from the guest's handle_bytes field, then copy it back with the
 * header fields byte-swapped for the target.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate open_by_handle_at(2): copy the guest handle into a host
 * buffer (fixing the byte order of the header fields) and issue the
 * host call with translated open flags.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                                      target_to_host_bitmask(flags,
                                                             fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
7016 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7018 /* signalfd siginfo conversion */
/* Convert one signalfd_siginfo record from host to target format.
 * NOTE: the only caller passes tinfo == info (in-place conversion);
 * nevertheless we now read every source field through 'info' so the
 * function is also correct for distinct buffers.  Previously
 * ssi_errno, and the SIGBUS checks, read through the destination
 * pointer, which would read uninitialized data if the buffers ever
 * differed.
 */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */
#ifdef BUS_MCEERR_AO
    if (info->ssi_signo == SIGBUS &&
        (info->ssi_code == BUS_MCEERR_AR ||
         info->ssi_code == BUS_MCEERR_AO)) {
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    tinfo->ssi_errno = tswap32(info->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
7058 static abi_long
host_to_target_data_signalfd(void *buf
, size_t len
)
7062 for (i
= 0; i
< len
; i
+= sizeof(struct signalfd_siginfo
)) {
7063 host_to_target_signalfd_siginfo(buf
+ i
, buf
+ i
);
7069 static TargetFdTrans target_signalfd_trans
= {
7070 .host_to_target_data
= host_to_target_data_signalfd
,
7073 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
7076 target_sigset_t
*target_mask
;
7080 if (flags
& ~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
)) {
7081 return -TARGET_EINVAL
;
7083 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
7084 return -TARGET_EFAULT
;
7087 target_to_host_sigset(&host_mask
, target_mask
);
7089 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
7091 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
7093 fd_trans_register(ret
, &target_signalfd_trans
);
7096 unlock_user_struct(target_mask
, mask
, 0);
7102 /* Map host to target signal numbers for the wait family of syscalls.
7103 Assume all other status bits are the same. */
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
/* Fill fd with the contents of /proc/self/cmdline, minus the first
 * argument, which is the path to the qemu binary rather than the
 * command the guest thinks it ran.
 */
static int open_self_cmdline(void *cpu_env, int fd)
{
    int fd_orig = -1;
    bool word_skipped = false;

    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;
    }

    while (true) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            int e = errno;
            fd_orig = close(fd_orig);
            errno = e;
            return -1;
        } else if (nb_read == 0) {
            break;
        }

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command. */
            cp_buf = memchr(buf, 0, nb_read);
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;
            }
        }

        if (word_skipped) {
            if (write(fd, cp_buf, nb_read) != nb_read) {
                int e = errno;
                close(fd_orig);
                errno = e;
                return -1;
            }
        }
    }

    return close(fd_orig);
}
7166 static int open_self_maps(void *cpu_env
, int fd
)
7168 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7169 TaskState
*ts
= cpu
->opaque
;
7175 fp
= fopen("/proc/self/maps", "r");
7180 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7181 int fields
, dev_maj
, dev_min
, inode
;
7182 uint64_t min
, max
, offset
;
7183 char flag_r
, flag_w
, flag_x
, flag_p
;
7184 char path
[512] = "";
7185 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
7186 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
7187 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
7189 if ((fields
< 10) || (fields
> 11)) {
7192 if (h2g_valid(min
)) {
7193 int flags
= page_get_flags(h2g(min
));
7194 max
= h2g_valid(max
- 1) ? max
: (uintptr_t)g2h(GUEST_ADDR_MAX
);
7195 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
7198 if (h2g(min
) == ts
->info
->stack_limit
) {
7199 pstrcpy(path
, sizeof(path
), " [stack]");
7201 dprintf(fd
, TARGET_ABI_FMT_lx
"-" TARGET_ABI_FMT_lx
7202 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
7203 h2g(min
), h2g(max
- 1) + 1, flag_r
, flag_w
,
7204 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
7205 path
[0] ? " " : "", path
);
7215 static int open_self_stat(void *cpu_env
, int fd
)
7217 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7218 TaskState
*ts
= cpu
->opaque
;
7219 abi_ulong start_stack
= ts
->info
->start_stack
;
7222 for (i
= 0; i
< 44; i
++) {
7230 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
7231 } else if (i
== 1) {
7233 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
7234 } else if (i
== 27) {
7237 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
7239 /* for the rest, there is MasterCard */
7240 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
7244 if (write(fd
, buf
, len
) != len
) {
7252 static int open_self_auxv(void *cpu_env
, int fd
)
7254 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7255 TaskState
*ts
= cpu
->opaque
;
7256 abi_ulong auxv
= ts
->info
->saved_auxv
;
7257 abi_ulong len
= ts
->info
->auxv_len
;
7261 * Auxiliary vector is stored in target process stack.
7262 * read in whole auxv vector and copy it to file
7264 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
7268 r
= write(fd
, ptr
, len
);
7275 lseek(fd
, 0, SEEK_SET
);
7276 unlock_user(ptr
, auxv
, len
);
/* Return 1 if filename names the given entry of this process's /proc
 * directory — i.e. "/proc/self/<entry>" or "/proc/<our pid>/<entry>" —
 * and 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (strncmp(filename, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    filename += strlen("/proc/");

    if (strncmp(filename, "self/", strlen("self/")) == 0) {
        filename += strlen("self/");
    } else if (*filename >= '1' && *filename <= '9') {
        /* a numeric pid: only accept our own */
        char myself[80];
        snprintf(myself, sizeof(myself), "%d/", getpid());
        if (strncmp(filename, myself, strlen(myself)) != 0) {
            return 0;
        }
        filename += strlen(myself);
    } else {
        return 0;
    }

    return strcmp(filename, entry) == 0;
}
7306 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-match comparator used by the fake-/proc open table. */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
7312 static int open_net_route(void *cpu_env
, int fd
)
7319 fp
= fopen("/proc/net/route", "r");
7326 read
= getline(&line
, &len
, fp
);
7327 dprintf(fd
, "%s", line
);
7331 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7333 uint32_t dest
, gw
, mask
;
7334 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
7335 sscanf(line
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7336 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
7337 &mask
, &mtu
, &window
, &irtt
);
7338 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7339 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
7340 metric
, tswap32(mask
), mtu
, window
, irtt
);
7350 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
7353 const char *filename
;
7354 int (*fill
)(void *cpu_env
, int fd
);
7355 int (*cmp
)(const char *s1
, const char *s2
);
7357 const struct fake_open
*fake_open
;
7358 static const struct fake_open fakes
[] = {
7359 { "maps", open_self_maps
, is_proc_myself
},
7360 { "stat", open_self_stat
, is_proc_myself
},
7361 { "auxv", open_self_auxv
, is_proc_myself
},
7362 { "cmdline", open_self_cmdline
, is_proc_myself
},
7363 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7364 { "/proc/net/route", open_net_route
, is_proc
},
7366 { NULL
, NULL
, NULL
}
7369 if (is_proc_myself(pathname
, "exe")) {
7370 int execfd
= qemu_getauxval(AT_EXECFD
);
7371 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
7374 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
7375 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
7380 if (fake_open
->filename
) {
7382 char filename
[PATH_MAX
];
7385 /* create temporary file to map stat to */
7386 tmpdir
= getenv("TMPDIR");
7389 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
7390 fd
= mkstemp(filename
);
7396 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
7402 lseek(fd
, 0, SEEK_SET
);
7407 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
7410 #define TIMER_MAGIC 0x0caf0000
7411 #define TIMER_MAGIC_MASK 0xffff0000
7413 /* Convert QEMU provided timer ID back to internal 16bit index format */
7414 static target_timer_t
get_timer_id(abi_long arg
)
7416 target_timer_t timerid
= arg
;
7418 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
7419 return -TARGET_EINVAL
;
7424 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
7425 return -TARGET_EINVAL
;
7431 /* do_syscall() should always have a single exit point at the end so
7432 that actions, such as logging of syscall results, can be performed.
7433 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7434 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
7435 abi_long arg2
, abi_long arg3
, abi_long arg4
,
7436 abi_long arg5
, abi_long arg6
, abi_long arg7
,
7439 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
7445 #if defined(DEBUG_ERESTARTSYS)
7446 /* Debug-only code for exercising the syscall-restart code paths
7447 * in the per-architecture cpu main loops: restart every syscall
7448 * the guest makes once before letting it through.
7455 return -TARGET_ERESTARTSYS
;
7461 gemu_log("syscall %d", num
);
7463 trace_guest_user_syscall(cpu
, num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
7465 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7468 case TARGET_NR_exit
:
7469 /* In old applications this may be used to implement _exit(2).
7470 However in threaded applictions it is used for thread termination,
7471 and _exit_group is used for application termination.
7472 Do thread termination if we have more then one thread. */
7474 if (block_signals()) {
7475 ret
= -TARGET_ERESTARTSYS
;
7479 if (CPU_NEXT(first_cpu
)) {
7483 /* Remove the CPU from the list. */
7484 QTAILQ_REMOVE(&cpus
, cpu
, node
);
7487 if (ts
->child_tidptr
) {
7488 put_user_u32(0, ts
->child_tidptr
);
7489 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
7493 object_unref(OBJECT(cpu
));
7495 rcu_unregister_thread();
7501 gdb_exit(cpu_env
, arg1
);
7503 ret
= 0; /* avoid warning */
7505 case TARGET_NR_read
:
7509 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7511 ret
= get_errno(safe_read(arg1
, p
, arg3
));
7513 fd_trans_host_to_target_data(arg1
)) {
7514 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
7516 unlock_user(p
, arg2
, ret
);
7519 case TARGET_NR_write
:
7520 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
7522 ret
= get_errno(safe_write(arg1
, p
, arg3
));
7523 unlock_user(p
, arg2
, 0);
7525 #ifdef TARGET_NR_open
7526 case TARGET_NR_open
:
7527 if (!(p
= lock_user_string(arg1
)))
7529 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
7530 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
7532 fd_trans_unregister(ret
);
7533 unlock_user(p
, arg1
, 0);
7536 case TARGET_NR_openat
:
7537 if (!(p
= lock_user_string(arg2
)))
7539 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
7540 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
7542 fd_trans_unregister(ret
);
7543 unlock_user(p
, arg2
, 0);
7545 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7546 case TARGET_NR_name_to_handle_at
:
7547 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
7550 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7551 case TARGET_NR_open_by_handle_at
:
7552 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
7553 fd_trans_unregister(ret
);
7556 case TARGET_NR_close
:
7557 fd_trans_unregister(arg1
);
7558 ret
= get_errno(close(arg1
));
7563 #ifdef TARGET_NR_fork
7564 case TARGET_NR_fork
:
7565 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
7568 #ifdef TARGET_NR_waitpid
7569 case TARGET_NR_waitpid
:
7572 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
7573 if (!is_error(ret
) && arg2
&& ret
7574 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
7579 #ifdef TARGET_NR_waitid
7580 case TARGET_NR_waitid
:
7584 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
7585 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
7586 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
7588 host_to_target_siginfo(p
, &info
);
7589 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
7594 #ifdef TARGET_NR_creat /* not on alpha */
7595 case TARGET_NR_creat
:
7596 if (!(p
= lock_user_string(arg1
)))
7598 ret
= get_errno(creat(p
, arg2
));
7599 fd_trans_unregister(ret
);
7600 unlock_user(p
, arg1
, 0);
7603 #ifdef TARGET_NR_link
7604 case TARGET_NR_link
:
7607 p
= lock_user_string(arg1
);
7608 p2
= lock_user_string(arg2
);
7610 ret
= -TARGET_EFAULT
;
7612 ret
= get_errno(link(p
, p2
));
7613 unlock_user(p2
, arg2
, 0);
7614 unlock_user(p
, arg1
, 0);
7618 #if defined(TARGET_NR_linkat)
7619 case TARGET_NR_linkat
:
7624 p
= lock_user_string(arg2
);
7625 p2
= lock_user_string(arg4
);
7627 ret
= -TARGET_EFAULT
;
7629 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
7630 unlock_user(p
, arg2
, 0);
7631 unlock_user(p2
, arg4
, 0);
7635 #ifdef TARGET_NR_unlink
7636 case TARGET_NR_unlink
:
7637 if (!(p
= lock_user_string(arg1
)))
7639 ret
= get_errno(unlink(p
));
7640 unlock_user(p
, arg1
, 0);
7643 #if defined(TARGET_NR_unlinkat)
7644 case TARGET_NR_unlinkat
:
7645 if (!(p
= lock_user_string(arg2
)))
7647 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
7648 unlock_user(p
, arg2
, 0);
7651 case TARGET_NR_execve
:
7653 char **argp
, **envp
;
7656 abi_ulong guest_argp
;
7657 abi_ulong guest_envp
;
7664 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
7665 if (get_user_ual(addr
, gp
))
7673 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
7674 if (get_user_ual(addr
, gp
))
7681 argp
= alloca((argc
+ 1) * sizeof(void *));
7682 envp
= alloca((envc
+ 1) * sizeof(void *));
7684 for (gp
= guest_argp
, q
= argp
; gp
;
7685 gp
+= sizeof(abi_ulong
), q
++) {
7686 if (get_user_ual(addr
, gp
))
7690 if (!(*q
= lock_user_string(addr
)))
7692 total_size
+= strlen(*q
) + 1;
7696 for (gp
= guest_envp
, q
= envp
; gp
;
7697 gp
+= sizeof(abi_ulong
), q
++) {
7698 if (get_user_ual(addr
, gp
))
7702 if (!(*q
= lock_user_string(addr
)))
7704 total_size
+= strlen(*q
) + 1;
7708 if (!(p
= lock_user_string(arg1
)))
7710 /* Although execve() is not an interruptible syscall it is
7711 * a special case where we must use the safe_syscall wrapper:
7712 * if we allow a signal to happen before we make the host
7713 * syscall then we will 'lose' it, because at the point of
7714 * execve the process leaves QEMU's control. So we use the
7715 * safe syscall wrapper to ensure that we either take the
7716 * signal as a guest signal, or else it does not happen
7717 * before the execve completes and makes it the other
7718 * program's problem.
7720 ret
= get_errno(safe_execve(p
, argp
, envp
));
7721 unlock_user(p
, arg1
, 0);
7726 ret
= -TARGET_EFAULT
;
7729 for (gp
= guest_argp
, q
= argp
; *q
;
7730 gp
+= sizeof(abi_ulong
), q
++) {
7731 if (get_user_ual(addr
, gp
)
7734 unlock_user(*q
, addr
, 0);
7736 for (gp
= guest_envp
, q
= envp
; *q
;
7737 gp
+= sizeof(abi_ulong
), q
++) {
7738 if (get_user_ual(addr
, gp
)
7741 unlock_user(*q
, addr
, 0);
7745 case TARGET_NR_chdir
:
7746 if (!(p
= lock_user_string(arg1
)))
7748 ret
= get_errno(chdir(p
));
7749 unlock_user(p
, arg1
, 0);
7751 #ifdef TARGET_NR_time
7752 case TARGET_NR_time
:
7755 ret
= get_errno(time(&host_time
));
7758 && put_user_sal(host_time
, arg1
))
7763 #ifdef TARGET_NR_mknod
7764 case TARGET_NR_mknod
:
7765 if (!(p
= lock_user_string(arg1
)))
7767 ret
= get_errno(mknod(p
, arg2
, arg3
));
7768 unlock_user(p
, arg1
, 0);
7771 #if defined(TARGET_NR_mknodat)
7772 case TARGET_NR_mknodat
:
7773 if (!(p
= lock_user_string(arg2
)))
7775 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
7776 unlock_user(p
, arg2
, 0);
7779 #ifdef TARGET_NR_chmod
7780 case TARGET_NR_chmod
:
7781 if (!(p
= lock_user_string(arg1
)))
7783 ret
= get_errno(chmod(p
, arg2
));
7784 unlock_user(p
, arg1
, 0);
7787 #ifdef TARGET_NR_break
7788 case TARGET_NR_break
:
7791 #ifdef TARGET_NR_oldstat
7792 case TARGET_NR_oldstat
:
7795 case TARGET_NR_lseek
:
7796 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
7798 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7799 /* Alpha specific */
7800 case TARGET_NR_getxpid
:
7801 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
7802 ret
= get_errno(getpid());
7805 #ifdef TARGET_NR_getpid
7806 case TARGET_NR_getpid
:
7807 ret
= get_errno(getpid());
7810 case TARGET_NR_mount
:
7812 /* need to look at the data field */
7816 p
= lock_user_string(arg1
);
7824 p2
= lock_user_string(arg2
);
7827 unlock_user(p
, arg1
, 0);
7833 p3
= lock_user_string(arg3
);
7836 unlock_user(p
, arg1
, 0);
7838 unlock_user(p2
, arg2
, 0);
7845 /* FIXME - arg5 should be locked, but it isn't clear how to
7846 * do that since it's not guaranteed to be a NULL-terminated
7850 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
7852 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
7854 ret
= get_errno(ret
);
7857 unlock_user(p
, arg1
, 0);
7859 unlock_user(p2
, arg2
, 0);
7861 unlock_user(p3
, arg3
, 0);
7865 #ifdef TARGET_NR_umount
7866 case TARGET_NR_umount
:
7867 if (!(p
= lock_user_string(arg1
)))
7869 ret
= get_errno(umount(p
));
7870 unlock_user(p
, arg1
, 0);
7873 #ifdef TARGET_NR_stime /* not on alpha */
7874 case TARGET_NR_stime
:
7877 if (get_user_sal(host_time
, arg1
))
7879 ret
= get_errno(stime(&host_time
));
7883 case TARGET_NR_ptrace
:
7885 #ifdef TARGET_NR_alarm /* not on alpha */
7886 case TARGET_NR_alarm
:
7890 #ifdef TARGET_NR_oldfstat
7891 case TARGET_NR_oldfstat
:
7894 #ifdef TARGET_NR_pause /* not on alpha */
7895 case TARGET_NR_pause
:
7896 if (!block_signals()) {
7897 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
7899 ret
= -TARGET_EINTR
;
7902 #ifdef TARGET_NR_utime
7903 case TARGET_NR_utime
:
7905 struct utimbuf tbuf
, *host_tbuf
;
7906 struct target_utimbuf
*target_tbuf
;
7908 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
7910 tbuf
.actime
= tswapal(target_tbuf
->actime
);
7911 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
7912 unlock_user_struct(target_tbuf
, arg2
, 0);
7917 if (!(p
= lock_user_string(arg1
)))
7919 ret
= get_errno(utime(p
, host_tbuf
));
7920 unlock_user(p
, arg1
, 0);
7924 #ifdef TARGET_NR_utimes
7925 case TARGET_NR_utimes
:
7927 struct timeval
*tvp
, tv
[2];
7929 if (copy_from_user_timeval(&tv
[0], arg2
)
7930 || copy_from_user_timeval(&tv
[1],
7931 arg2
+ sizeof(struct target_timeval
)))
7937 if (!(p
= lock_user_string(arg1
)))
7939 ret
= get_errno(utimes(p
, tvp
));
7940 unlock_user(p
, arg1
, 0);
7944 #if defined(TARGET_NR_futimesat)
7945 case TARGET_NR_futimesat
:
7947 struct timeval
*tvp
, tv
[2];
7949 if (copy_from_user_timeval(&tv
[0], arg3
)
7950 || copy_from_user_timeval(&tv
[1],
7951 arg3
+ sizeof(struct target_timeval
)))
7957 if (!(p
= lock_user_string(arg2
)))
7959 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
7960 unlock_user(p
, arg2
, 0);
7964 #ifdef TARGET_NR_stty
7965 case TARGET_NR_stty
:
7968 #ifdef TARGET_NR_gtty
7969 case TARGET_NR_gtty
:
7972 #ifdef TARGET_NR_access
7973 case TARGET_NR_access
:
7974 if (!(p
= lock_user_string(arg1
)))
7976 ret
= get_errno(access(path(p
), arg2
));
7977 unlock_user(p
, arg1
, 0);
7980 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7981 case TARGET_NR_faccessat
:
7982 if (!(p
= lock_user_string(arg2
)))
7984 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
7985 unlock_user(p
, arg2
, 0);
7988 #ifdef TARGET_NR_nice /* not on alpha */
7989 case TARGET_NR_nice
:
7990 ret
= get_errno(nice(arg1
));
7993 #ifdef TARGET_NR_ftime
7994 case TARGET_NR_ftime
:
7997 case TARGET_NR_sync
:
8001 case TARGET_NR_kill
:
8002 ret
= get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
8004 #ifdef TARGET_NR_rename
8005 case TARGET_NR_rename
:
8008 p
= lock_user_string(arg1
);
8009 p2
= lock_user_string(arg2
);
8011 ret
= -TARGET_EFAULT
;
8013 ret
= get_errno(rename(p
, p2
));
8014 unlock_user(p2
, arg2
, 0);
8015 unlock_user(p
, arg1
, 0);
8019 #if defined(TARGET_NR_renameat)
8020 case TARGET_NR_renameat
:
8023 p
= lock_user_string(arg2
);
8024 p2
= lock_user_string(arg4
);
8026 ret
= -TARGET_EFAULT
;
8028 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
8029 unlock_user(p2
, arg4
, 0);
8030 unlock_user(p
, arg2
, 0);
8034 #ifdef TARGET_NR_mkdir
8035 case TARGET_NR_mkdir
:
8036 if (!(p
= lock_user_string(arg1
)))
8038 ret
= get_errno(mkdir(p
, arg2
));
8039 unlock_user(p
, arg1
, 0);
8042 #if defined(TARGET_NR_mkdirat)
8043 case TARGET_NR_mkdirat
:
8044 if (!(p
= lock_user_string(arg2
)))
8046 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
8047 unlock_user(p
, arg2
, 0);
8050 #ifdef TARGET_NR_rmdir
8051 case TARGET_NR_rmdir
:
8052 if (!(p
= lock_user_string(arg1
)))
8054 ret
= get_errno(rmdir(p
));
8055 unlock_user(p
, arg1
, 0);
8059 ret
= get_errno(dup(arg1
));
8061 fd_trans_dup(arg1
, ret
);
8064 #ifdef TARGET_NR_pipe
8065 case TARGET_NR_pipe
:
8066 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
8069 #ifdef TARGET_NR_pipe2
8070 case TARGET_NR_pipe2
:
8071 ret
= do_pipe(cpu_env
, arg1
,
8072 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
8075 case TARGET_NR_times
:
8077 struct target_tms
*tmsp
;
8079 ret
= get_errno(times(&tms
));
8081 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
8084 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
8085 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
8086 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
8087 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
8090 ret
= host_to_target_clock_t(ret
);
8093 #ifdef TARGET_NR_prof
8094 case TARGET_NR_prof
:
8097 #ifdef TARGET_NR_signal
8098 case TARGET_NR_signal
:
8101 case TARGET_NR_acct
:
8103 ret
= get_errno(acct(NULL
));
8105 if (!(p
= lock_user_string(arg1
)))
8107 ret
= get_errno(acct(path(p
)));
8108 unlock_user(p
, arg1
, 0);
8111 #ifdef TARGET_NR_umount2
8112 case TARGET_NR_umount2
:
8113 if (!(p
= lock_user_string(arg1
)))
8115 ret
= get_errno(umount2(p
, arg2
));
8116 unlock_user(p
, arg1
, 0);
8119 #ifdef TARGET_NR_lock
8120 case TARGET_NR_lock
:
8123 case TARGET_NR_ioctl
:
8124 ret
= do_ioctl(arg1
, arg2
, arg3
);
8126 case TARGET_NR_fcntl
:
8127 ret
= do_fcntl(arg1
, arg2
, arg3
);
8129 #ifdef TARGET_NR_mpx
8133 case TARGET_NR_setpgid
:
8134 ret
= get_errno(setpgid(arg1
, arg2
));
8136 #ifdef TARGET_NR_ulimit
8137 case TARGET_NR_ulimit
:
8140 #ifdef TARGET_NR_oldolduname
8141 case TARGET_NR_oldolduname
:
8144 case TARGET_NR_umask
:
8145 ret
= get_errno(umask(arg1
));
8147 case TARGET_NR_chroot
:
8148 if (!(p
= lock_user_string(arg1
)))
8150 ret
= get_errno(chroot(p
));
8151 unlock_user(p
, arg1
, 0);
8153 #ifdef TARGET_NR_ustat
8154 case TARGET_NR_ustat
:
8157 #ifdef TARGET_NR_dup2
8158 case TARGET_NR_dup2
:
8159 ret
= get_errno(dup2(arg1
, arg2
));
8161 fd_trans_dup(arg1
, arg2
);
8165 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8166 case TARGET_NR_dup3
:
8167 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
8169 fd_trans_dup(arg1
, arg2
);
8173 #ifdef TARGET_NR_getppid /* not on alpha */
8174 case TARGET_NR_getppid
:
8175 ret
= get_errno(getppid());
8178 #ifdef TARGET_NR_getpgrp
8179 case TARGET_NR_getpgrp
:
8180 ret
= get_errno(getpgrp());
8183 case TARGET_NR_setsid
:
8184 ret
= get_errno(setsid());
8186 #ifdef TARGET_NR_sigaction
8187 case TARGET_NR_sigaction
:
8189 #if defined(TARGET_ALPHA)
8190 struct target_sigaction act
, oact
, *pact
= 0;
8191 struct target_old_sigaction
*old_act
;
8193 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8195 act
._sa_handler
= old_act
->_sa_handler
;
8196 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8197 act
.sa_flags
= old_act
->sa_flags
;
8198 act
.sa_restorer
= 0;
8199 unlock_user_struct(old_act
, arg2
, 0);
8202 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8203 if (!is_error(ret
) && arg3
) {
8204 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8206 old_act
->_sa_handler
= oact
._sa_handler
;
8207 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8208 old_act
->sa_flags
= oact
.sa_flags
;
8209 unlock_user_struct(old_act
, arg3
, 1);
8211 #elif defined(TARGET_MIPS)
8212 struct target_sigaction act
, oact
, *pact
, *old_act
;
8215 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8217 act
._sa_handler
= old_act
->_sa_handler
;
8218 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
8219 act
.sa_flags
= old_act
->sa_flags
;
8220 unlock_user_struct(old_act
, arg2
, 0);
8226 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8228 if (!is_error(ret
) && arg3
) {
8229 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8231 old_act
->_sa_handler
= oact
._sa_handler
;
8232 old_act
->sa_flags
= oact
.sa_flags
;
8233 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
8234 old_act
->sa_mask
.sig
[1] = 0;
8235 old_act
->sa_mask
.sig
[2] = 0;
8236 old_act
->sa_mask
.sig
[3] = 0;
8237 unlock_user_struct(old_act
, arg3
, 1);
8240 struct target_old_sigaction
*old_act
;
8241 struct target_sigaction act
, oact
, *pact
;
8243 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8245 act
._sa_handler
= old_act
->_sa_handler
;
8246 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8247 act
.sa_flags
= old_act
->sa_flags
;
8248 act
.sa_restorer
= old_act
->sa_restorer
;
8249 unlock_user_struct(old_act
, arg2
, 0);
8254 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8255 if (!is_error(ret
) && arg3
) {
8256 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8258 old_act
->_sa_handler
= oact
._sa_handler
;
8259 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8260 old_act
->sa_flags
= oact
.sa_flags
;
8261 old_act
->sa_restorer
= oact
.sa_restorer
;
8262 unlock_user_struct(old_act
, arg3
, 1);
8268 case TARGET_NR_rt_sigaction
:
8270 #if defined(TARGET_ALPHA)
8271 struct target_sigaction act
, oact
, *pact
= 0;
8272 struct target_rt_sigaction
*rt_act
;
8274 if (arg4
!= sizeof(target_sigset_t
)) {
8275 ret
= -TARGET_EINVAL
;
8279 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
8281 act
._sa_handler
= rt_act
->_sa_handler
;
8282 act
.sa_mask
= rt_act
->sa_mask
;
8283 act
.sa_flags
= rt_act
->sa_flags
;
8284 act
.sa_restorer
= arg5
;
8285 unlock_user_struct(rt_act
, arg2
, 0);
8288 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8289 if (!is_error(ret
) && arg3
) {
8290 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
8292 rt_act
->_sa_handler
= oact
._sa_handler
;
8293 rt_act
->sa_mask
= oact
.sa_mask
;
8294 rt_act
->sa_flags
= oact
.sa_flags
;
8295 unlock_user_struct(rt_act
, arg3
, 1);
8298 struct target_sigaction
*act
;
8299 struct target_sigaction
*oact
;
8301 if (arg4
!= sizeof(target_sigset_t
)) {
8302 ret
= -TARGET_EINVAL
;
8306 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
8311 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
8312 ret
= -TARGET_EFAULT
;
8313 goto rt_sigaction_fail
;
8317 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
8320 unlock_user_struct(act
, arg2
, 0);
8322 unlock_user_struct(oact
, arg3
, 1);
8326 #ifdef TARGET_NR_sgetmask /* not on alpha */
8327 case TARGET_NR_sgetmask
:
8330 abi_ulong target_set
;
8331 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8333 host_to_target_old_sigset(&target_set
, &cur_set
);
8339 #ifdef TARGET_NR_ssetmask /* not on alpha */
8340 case TARGET_NR_ssetmask
:
8342 sigset_t set
, oset
, cur_set
;
8343 abi_ulong target_set
= arg1
;
8344 /* We only have one word of the new mask so we must read
8345 * the rest of it with do_sigprocmask() and OR in this word.
8346 * We are guaranteed that a do_sigprocmask() that only queries
8347 * the signal mask will not fail.
8349 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8351 target_to_host_old_sigset(&set
, &target_set
);
8352 sigorset(&set
, &set
, &cur_set
);
8353 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
8355 host_to_target_old_sigset(&target_set
, &oset
);
8361 #ifdef TARGET_NR_sigprocmask
8362 case TARGET_NR_sigprocmask
:
8364 #if defined(TARGET_ALPHA)
8365 sigset_t set
, oldset
;
8370 case TARGET_SIG_BLOCK
:
8373 case TARGET_SIG_UNBLOCK
:
8376 case TARGET_SIG_SETMASK
:
8380 ret
= -TARGET_EINVAL
;
8384 target_to_host_old_sigset(&set
, &mask
);
8386 ret
= do_sigprocmask(how
, &set
, &oldset
);
8387 if (!is_error(ret
)) {
8388 host_to_target_old_sigset(&mask
, &oldset
);
8390 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
8393 sigset_t set
, oldset
, *set_ptr
;
8398 case TARGET_SIG_BLOCK
:
8401 case TARGET_SIG_UNBLOCK
:
8404 case TARGET_SIG_SETMASK
:
8408 ret
= -TARGET_EINVAL
;
8411 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8413 target_to_host_old_sigset(&set
, p
);
8414 unlock_user(p
, arg2
, 0);
8420 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8421 if (!is_error(ret
) && arg3
) {
8422 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8424 host_to_target_old_sigset(p
, &oldset
);
8425 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8431 case TARGET_NR_rt_sigprocmask
:
8434 sigset_t set
, oldset
, *set_ptr
;
8436 if (arg4
!= sizeof(target_sigset_t
)) {
8437 ret
= -TARGET_EINVAL
;
8443 case TARGET_SIG_BLOCK
:
8446 case TARGET_SIG_UNBLOCK
:
8449 case TARGET_SIG_SETMASK
:
8453 ret
= -TARGET_EINVAL
;
8456 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8458 target_to_host_sigset(&set
, p
);
8459 unlock_user(p
, arg2
, 0);
8465 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8466 if (!is_error(ret
) && arg3
) {
8467 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8469 host_to_target_sigset(p
, &oldset
);
8470 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8474 #ifdef TARGET_NR_sigpending
8475 case TARGET_NR_sigpending
:
8478 ret
= get_errno(sigpending(&set
));
8479 if (!is_error(ret
)) {
8480 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8482 host_to_target_old_sigset(p
, &set
);
8483 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8488 case TARGET_NR_rt_sigpending
:
8492 /* Yes, this check is >, not != like most. We follow the kernel's
8493 * logic and it does it like this because it implements
8494 * NR_sigpending through the same code path, and in that case
8495 * the old_sigset_t is smaller in size.
8497 if (arg2
> sizeof(target_sigset_t
)) {
8498 ret
= -TARGET_EINVAL
;
8502 ret
= get_errno(sigpending(&set
));
8503 if (!is_error(ret
)) {
8504 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8506 host_to_target_sigset(p
, &set
);
8507 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8511 #ifdef TARGET_NR_sigsuspend
8512 case TARGET_NR_sigsuspend
:
8514 TaskState
*ts
= cpu
->opaque
;
8515 #if defined(TARGET_ALPHA)
8516 abi_ulong mask
= arg1
;
8517 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
8519 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8521 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
8522 unlock_user(p
, arg1
, 0);
8524 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8526 if (ret
!= -TARGET_ERESTARTSYS
) {
8527 ts
->in_sigsuspend
= 1;
8532 case TARGET_NR_rt_sigsuspend
:
8534 TaskState
*ts
= cpu
->opaque
;
8536 if (arg2
!= sizeof(target_sigset_t
)) {
8537 ret
= -TARGET_EINVAL
;
8540 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8542 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
8543 unlock_user(p
, arg1
, 0);
8544 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8546 if (ret
!= -TARGET_ERESTARTSYS
) {
8547 ts
->in_sigsuspend
= 1;
8551 case TARGET_NR_rt_sigtimedwait
:
8554 struct timespec uts
, *puts
;
8557 if (arg4
!= sizeof(target_sigset_t
)) {
8558 ret
= -TARGET_EINVAL
;
8562 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8564 target_to_host_sigset(&set
, p
);
8565 unlock_user(p
, arg1
, 0);
8568 target_to_host_timespec(puts
, arg3
);
8572 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
8574 if (!is_error(ret
)) {
8576 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
8581 host_to_target_siginfo(p
, &uinfo
);
8582 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
8584 ret
= host_to_target_signal(ret
);
8588 case TARGET_NR_rt_sigqueueinfo
:
8592 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
8596 target_to_host_siginfo(&uinfo
, p
);
8597 unlock_user(p
, arg1
, 0);
8598 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
8601 #ifdef TARGET_NR_sigreturn
8602 case TARGET_NR_sigreturn
:
8603 if (block_signals()) {
8604 ret
= -TARGET_ERESTARTSYS
;
8606 ret
= do_sigreturn(cpu_env
);
8610 case TARGET_NR_rt_sigreturn
:
8611 if (block_signals()) {
8612 ret
= -TARGET_ERESTARTSYS
;
8614 ret
= do_rt_sigreturn(cpu_env
);
8617 case TARGET_NR_sethostname
:
8618 if (!(p
= lock_user_string(arg1
)))
8620 ret
= get_errno(sethostname(p
, arg2
));
8621 unlock_user(p
, arg1
, 0);
8623 case TARGET_NR_setrlimit
:
8625 int resource
= target_to_host_resource(arg1
);
8626 struct target_rlimit
*target_rlim
;
8628 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
8630 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
8631 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
8632 unlock_user_struct(target_rlim
, arg2
, 0);
8633 ret
= get_errno(setrlimit(resource
, &rlim
));
8636 case TARGET_NR_getrlimit
:
8638 int resource
= target_to_host_resource(arg1
);
8639 struct target_rlimit
*target_rlim
;
8642 ret
= get_errno(getrlimit(resource
, &rlim
));
8643 if (!is_error(ret
)) {
8644 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
8646 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
8647 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
8648 unlock_user_struct(target_rlim
, arg2
, 1);
8652 case TARGET_NR_getrusage
:
8654 struct rusage rusage
;
8655 ret
= get_errno(getrusage(arg1
, &rusage
));
8656 if (!is_error(ret
)) {
8657 ret
= host_to_target_rusage(arg2
, &rusage
);
8661 case TARGET_NR_gettimeofday
:
8664 ret
= get_errno(gettimeofday(&tv
, NULL
));
8665 if (!is_error(ret
)) {
8666 if (copy_to_user_timeval(arg1
, &tv
))
8671 case TARGET_NR_settimeofday
:
8673 struct timeval tv
, *ptv
= NULL
;
8674 struct timezone tz
, *ptz
= NULL
;
8677 if (copy_from_user_timeval(&tv
, arg1
)) {
8684 if (copy_from_user_timezone(&tz
, arg2
)) {
8690 ret
= get_errno(settimeofday(ptv
, ptz
));
8693 #if defined(TARGET_NR_select)
8694 case TARGET_NR_select
:
8695 #if defined(TARGET_WANT_NI_OLD_SELECT)
8696 /* some architectures used to have old_select here
8697 * but now ENOSYS it.
8699 ret
= -TARGET_ENOSYS
;
8700 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8701 ret
= do_old_select(arg1
);
8703 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
8707 #ifdef TARGET_NR_pselect6
8708 case TARGET_NR_pselect6
:
8710 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
8711 fd_set rfds
, wfds
, efds
;
8712 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
8713 struct timespec ts
, *ts_ptr
;
8716 * The 6th arg is actually two args smashed together,
8717 * so we cannot use the C library.
8725 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
8726 target_sigset_t
*target_sigset
;
8734 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
8738 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
8742 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
8748 * This takes a timespec, and not a timeval, so we cannot
8749 * use the do_select() helper ...
8752 if (target_to_host_timespec(&ts
, ts_addr
)) {
8760 /* Extract the two packed args for the sigset */
8763 sig
.size
= SIGSET_T_SIZE
;
8765 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
8769 arg_sigset
= tswapal(arg7
[0]);
8770 arg_sigsize
= tswapal(arg7
[1]);
8771 unlock_user(arg7
, arg6
, 0);
8775 if (arg_sigsize
!= sizeof(*target_sigset
)) {
8776 /* Like the kernel, we enforce correct size sigsets */
8777 ret
= -TARGET_EINVAL
;
8780 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
8781 sizeof(*target_sigset
), 1);
8782 if (!target_sigset
) {
8785 target_to_host_sigset(&set
, target_sigset
);
8786 unlock_user(target_sigset
, arg_sigset
, 0);
8794 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
8797 if (!is_error(ret
)) {
8798 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
8800 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
8802 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
8805 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
8811 #ifdef TARGET_NR_symlink
8812 case TARGET_NR_symlink
:
8815 p
= lock_user_string(arg1
);
8816 p2
= lock_user_string(arg2
);
8818 ret
= -TARGET_EFAULT
;
8820 ret
= get_errno(symlink(p
, p2
));
8821 unlock_user(p2
, arg2
, 0);
8822 unlock_user(p
, arg1
, 0);
8826 #if defined(TARGET_NR_symlinkat)
8827 case TARGET_NR_symlinkat
:
8830 p
= lock_user_string(arg1
);
8831 p2
= lock_user_string(arg3
);
8833 ret
= -TARGET_EFAULT
;
8835 ret
= get_errno(symlinkat(p
, arg2
, p2
));
8836 unlock_user(p2
, arg3
, 0);
8837 unlock_user(p
, arg1
, 0);
8841 #ifdef TARGET_NR_oldlstat
8842 case TARGET_NR_oldlstat
:
8845 #ifdef TARGET_NR_readlink
8846 case TARGET_NR_readlink
:
8849 p
= lock_user_string(arg1
);
8850 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8852 ret
= -TARGET_EFAULT
;
8854 /* Short circuit this for the magic exe check. */
8855 ret
= -TARGET_EINVAL
;
8856 } else if (is_proc_myself((const char *)p
, "exe")) {
8857 char real
[PATH_MAX
], *temp
;
8858 temp
= realpath(exec_path
, real
);
8859 /* Return value is # of bytes that we wrote to the buffer. */
8861 ret
= get_errno(-1);
8863 /* Don't worry about sign mismatch as earlier mapping
8864 * logic would have thrown a bad address error. */
8865 ret
= MIN(strlen(real
), arg3
);
8866 /* We cannot NUL terminate the string. */
8867 memcpy(p2
, real
, ret
);
8870 ret
= get_errno(readlink(path(p
), p2
, arg3
));
8872 unlock_user(p2
, arg2
, ret
);
8873 unlock_user(p
, arg1
, 0);
8877 #if defined(TARGET_NR_readlinkat)
8878 case TARGET_NR_readlinkat
:
8881 p
= lock_user_string(arg2
);
8882 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8884 ret
= -TARGET_EFAULT
;
8885 } else if (is_proc_myself((const char *)p
, "exe")) {
8886 char real
[PATH_MAX
], *temp
;
8887 temp
= realpath(exec_path
, real
);
8888 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
8889 snprintf((char *)p2
, arg4
, "%s", real
);
8891 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
8893 unlock_user(p2
, arg3
, ret
);
8894 unlock_user(p
, arg2
, 0);
8898 #ifdef TARGET_NR_uselib
8899 case TARGET_NR_uselib
:
8902 #ifdef TARGET_NR_swapon
8903 case TARGET_NR_swapon
:
8904 if (!(p
= lock_user_string(arg1
)))
8906 ret
= get_errno(swapon(p
, arg2
));
8907 unlock_user(p
, arg1
, 0);
8910 case TARGET_NR_reboot
:
8911 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
8912 /* arg4 must be ignored in all other cases */
8913 p
= lock_user_string(arg4
);
8917 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
8918 unlock_user(p
, arg4
, 0);
8920 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
8923 #ifdef TARGET_NR_readdir
8924 case TARGET_NR_readdir
:
8927 #ifdef TARGET_NR_mmap
8928 case TARGET_NR_mmap
:
8929 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8930 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8931 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8932 || defined(TARGET_S390X)
8935 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
8936 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
8944 unlock_user(v
, arg1
, 0);
8945 ret
= get_errno(target_mmap(v1
, v2
, v3
,
8946 target_to_host_bitmask(v4
, mmap_flags_tbl
),
8950 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
8951 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8957 #ifdef TARGET_NR_mmap2
8958 case TARGET_NR_mmap2
:
8960 #define MMAP_SHIFT 12
8962 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
8963 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8965 arg6
<< MMAP_SHIFT
));
8968 case TARGET_NR_munmap
:
8969 ret
= get_errno(target_munmap(arg1
, arg2
));
8971 case TARGET_NR_mprotect
:
8973 TaskState
*ts
= cpu
->opaque
;
8974 /* Special hack to detect libc making the stack executable. */
8975 if ((arg3
& PROT_GROWSDOWN
)
8976 && arg1
>= ts
->info
->stack_limit
8977 && arg1
<= ts
->info
->start_stack
) {
8978 arg3
&= ~PROT_GROWSDOWN
;
8979 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
8980 arg1
= ts
->info
->stack_limit
;
8983 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
8985 #ifdef TARGET_NR_mremap
8986 case TARGET_NR_mremap
:
8987 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
8990 /* ??? msync/mlock/munlock are broken for softmmu. */
8991 #ifdef TARGET_NR_msync
8992 case TARGET_NR_msync
:
8993 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
8996 #ifdef TARGET_NR_mlock
8997 case TARGET_NR_mlock
:
8998 ret
= get_errno(mlock(g2h(arg1
), arg2
));
9001 #ifdef TARGET_NR_munlock
9002 case TARGET_NR_munlock
:
9003 ret
= get_errno(munlock(g2h(arg1
), arg2
));
9006 #ifdef TARGET_NR_mlockall
9007 case TARGET_NR_mlockall
:
9008 ret
= get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
9011 #ifdef TARGET_NR_munlockall
9012 case TARGET_NR_munlockall
:
9013 ret
= get_errno(munlockall());
9016 case TARGET_NR_truncate
:
9017 if (!(p
= lock_user_string(arg1
)))
9019 ret
= get_errno(truncate(p
, arg2
));
9020 unlock_user(p
, arg1
, 0);
9022 case TARGET_NR_ftruncate
:
9023 ret
= get_errno(ftruncate(arg1
, arg2
));
9025 case TARGET_NR_fchmod
:
9026 ret
= get_errno(fchmod(arg1
, arg2
));
9028 #if defined(TARGET_NR_fchmodat)
9029 case TARGET_NR_fchmodat
:
9030 if (!(p
= lock_user_string(arg2
)))
9032 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
9033 unlock_user(p
, arg2
, 0);
9036 case TARGET_NR_getpriority
:
9037 /* Note that negative values are valid for getpriority, so we must
9038 differentiate based on errno settings. */
9040 ret
= getpriority(arg1
, arg2
);
9041 if (ret
== -1 && errno
!= 0) {
9042 ret
= -host_to_target_errno(errno
);
9046 /* Return value is the unbiased priority. Signal no error. */
9047 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
9049 /* Return value is a biased priority to avoid negative numbers. */
9053 case TARGET_NR_setpriority
:
9054 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
9056 #ifdef TARGET_NR_profil
9057 case TARGET_NR_profil
:
9060 case TARGET_NR_statfs
:
9061 if (!(p
= lock_user_string(arg1
)))
9063 ret
= get_errno(statfs(path(p
), &stfs
));
9064 unlock_user(p
, arg1
, 0);
9066 if (!is_error(ret
)) {
9067 struct target_statfs
*target_stfs
;
9069 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
9071 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9072 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9073 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9074 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9075 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9076 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9077 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9078 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9079 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9080 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9081 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9082 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9083 unlock_user_struct(target_stfs
, arg2
, 1);
9086 case TARGET_NR_fstatfs
:
9087 ret
= get_errno(fstatfs(arg1
, &stfs
));
9088 goto convert_statfs
;
9089 #ifdef TARGET_NR_statfs64
9090 case TARGET_NR_statfs64
:
9091 if (!(p
= lock_user_string(arg1
)))
9093 ret
= get_errno(statfs(path(p
), &stfs
));
9094 unlock_user(p
, arg1
, 0);
9096 if (!is_error(ret
)) {
9097 struct target_statfs64
*target_stfs
;
9099 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
9101 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9102 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9103 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9104 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9105 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9106 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9107 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9108 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9109 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9110 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9111 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9112 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9113 unlock_user_struct(target_stfs
, arg3
, 1);
9116 case TARGET_NR_fstatfs64
:
9117 ret
= get_errno(fstatfs(arg1
, &stfs
));
9118 goto convert_statfs64
;
9120 #ifdef TARGET_NR_ioperm
9121 case TARGET_NR_ioperm
:
9124 #ifdef TARGET_NR_socketcall
9125 case TARGET_NR_socketcall
:
9126 ret
= do_socketcall(arg1
, arg2
);
9129 #ifdef TARGET_NR_accept
9130 case TARGET_NR_accept
:
9131 ret
= do_accept4(arg1
, arg2
, arg3
, 0);
9134 #ifdef TARGET_NR_accept4
9135 case TARGET_NR_accept4
:
9136 ret
= do_accept4(arg1
, arg2
, arg3
, arg4
);
9139 #ifdef TARGET_NR_bind
9140 case TARGET_NR_bind
:
9141 ret
= do_bind(arg1
, arg2
, arg3
);
9144 #ifdef TARGET_NR_connect
9145 case TARGET_NR_connect
:
9146 ret
= do_connect(arg1
, arg2
, arg3
);
9149 #ifdef TARGET_NR_getpeername
9150 case TARGET_NR_getpeername
:
9151 ret
= do_getpeername(arg1
, arg2
, arg3
);
9154 #ifdef TARGET_NR_getsockname
9155 case TARGET_NR_getsockname
:
9156 ret
= do_getsockname(arg1
, arg2
, arg3
);
9159 #ifdef TARGET_NR_getsockopt
9160 case TARGET_NR_getsockopt
:
9161 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
9164 #ifdef TARGET_NR_listen
9165 case TARGET_NR_listen
:
9166 ret
= get_errno(listen(arg1
, arg2
));
9169 #ifdef TARGET_NR_recv
9170 case TARGET_NR_recv
:
9171 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
9174 #ifdef TARGET_NR_recvfrom
9175 case TARGET_NR_recvfrom
:
9176 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9179 #ifdef TARGET_NR_recvmsg
9180 case TARGET_NR_recvmsg
:
9181 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
9184 #ifdef TARGET_NR_send
9185 case TARGET_NR_send
:
9186 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
9189 #ifdef TARGET_NR_sendmsg
9190 case TARGET_NR_sendmsg
:
9191 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
9194 #ifdef TARGET_NR_sendmmsg
9195 case TARGET_NR_sendmmsg
:
9196 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
9198 case TARGET_NR_recvmmsg
:
9199 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
9202 #ifdef TARGET_NR_sendto
9203 case TARGET_NR_sendto
:
9204 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9207 #ifdef TARGET_NR_shutdown
9208 case TARGET_NR_shutdown
:
9209 ret
= get_errno(shutdown(arg1
, arg2
));
9212 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9213 case TARGET_NR_getrandom
:
9214 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
9218 ret
= get_errno(getrandom(p
, arg2
, arg3
));
9219 unlock_user(p
, arg1
, ret
);
9222 #ifdef TARGET_NR_socket
9223 case TARGET_NR_socket
:
9224 ret
= do_socket(arg1
, arg2
, arg3
);
9225 fd_trans_unregister(ret
);
9228 #ifdef TARGET_NR_socketpair
9229 case TARGET_NR_socketpair
:
9230 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
9233 #ifdef TARGET_NR_setsockopt
9234 case TARGET_NR_setsockopt
:
9235 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
9239 case TARGET_NR_syslog
:
9240 if (!(p
= lock_user_string(arg2
)))
9242 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
9243 unlock_user(p
, arg2
, 0);
9246 case TARGET_NR_setitimer
:
9248 struct itimerval value
, ovalue
, *pvalue
;
9252 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
9253 || copy_from_user_timeval(&pvalue
->it_value
,
9254 arg2
+ sizeof(struct target_timeval
)))
9259 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
9260 if (!is_error(ret
) && arg3
) {
9261 if (copy_to_user_timeval(arg3
,
9262 &ovalue
.it_interval
)
9263 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
9269 case TARGET_NR_getitimer
:
9271 struct itimerval value
;
9273 ret
= get_errno(getitimer(arg1
, &value
));
9274 if (!is_error(ret
) && arg2
) {
9275 if (copy_to_user_timeval(arg2
,
9277 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
9283 #ifdef TARGET_NR_stat
9284 case TARGET_NR_stat
:
9285 if (!(p
= lock_user_string(arg1
)))
9287 ret
= get_errno(stat(path(p
), &st
));
9288 unlock_user(p
, arg1
, 0);
9291 #ifdef TARGET_NR_lstat
9292 case TARGET_NR_lstat
:
9293 if (!(p
= lock_user_string(arg1
)))
9295 ret
= get_errno(lstat(path(p
), &st
));
9296 unlock_user(p
, arg1
, 0);
9299 case TARGET_NR_fstat
:
9301 ret
= get_errno(fstat(arg1
, &st
));
9302 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9305 if (!is_error(ret
)) {
9306 struct target_stat
*target_st
;
9308 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
9310 memset(target_st
, 0, sizeof(*target_st
));
9311 __put_user(st
.st_dev
, &target_st
->st_dev
);
9312 __put_user(st
.st_ino
, &target_st
->st_ino
);
9313 __put_user(st
.st_mode
, &target_st
->st_mode
);
9314 __put_user(st
.st_uid
, &target_st
->st_uid
);
9315 __put_user(st
.st_gid
, &target_st
->st_gid
);
9316 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
9317 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
9318 __put_user(st
.st_size
, &target_st
->st_size
);
9319 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
9320 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
9321 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
9322 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
9323 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
9324 unlock_user_struct(target_st
, arg2
, 1);
9328 #ifdef TARGET_NR_olduname
9329 case TARGET_NR_olduname
:
9332 #ifdef TARGET_NR_iopl
9333 case TARGET_NR_iopl
:
9336 case TARGET_NR_vhangup
:
9337 ret
= get_errno(vhangup());
9339 #ifdef TARGET_NR_idle
9340 case TARGET_NR_idle
:
9343 #ifdef TARGET_NR_syscall
9344 case TARGET_NR_syscall
:
9345 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
9346 arg6
, arg7
, arg8
, 0);
9349 case TARGET_NR_wait4
:
9352 abi_long status_ptr
= arg2
;
9353 struct rusage rusage
, *rusage_ptr
;
9354 abi_ulong target_rusage
= arg4
;
9355 abi_long rusage_err
;
9357 rusage_ptr
= &rusage
;
9360 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
9361 if (!is_error(ret
)) {
9362 if (status_ptr
&& ret
) {
9363 status
= host_to_target_waitstatus(status
);
9364 if (put_user_s32(status
, status_ptr
))
9367 if (target_rusage
) {
9368 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
9376 #ifdef TARGET_NR_swapoff
9377 case TARGET_NR_swapoff
:
9378 if (!(p
= lock_user_string(arg1
)))
9380 ret
= get_errno(swapoff(p
));
9381 unlock_user(p
, arg1
, 0);
9384 case TARGET_NR_sysinfo
:
9386 struct target_sysinfo
*target_value
;
9387 struct sysinfo value
;
9388 ret
= get_errno(sysinfo(&value
));
9389 if (!is_error(ret
) && arg1
)
9391 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
9393 __put_user(value
.uptime
, &target_value
->uptime
);
9394 __put_user(value
.loads
[0], &target_value
->loads
[0]);
9395 __put_user(value
.loads
[1], &target_value
->loads
[1]);
9396 __put_user(value
.loads
[2], &target_value
->loads
[2]);
9397 __put_user(value
.totalram
, &target_value
->totalram
);
9398 __put_user(value
.freeram
, &target_value
->freeram
);
9399 __put_user(value
.sharedram
, &target_value
->sharedram
);
9400 __put_user(value
.bufferram
, &target_value
->bufferram
);
9401 __put_user(value
.totalswap
, &target_value
->totalswap
);
9402 __put_user(value
.freeswap
, &target_value
->freeswap
);
9403 __put_user(value
.procs
, &target_value
->procs
);
9404 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
9405 __put_user(value
.freehigh
, &target_value
->freehigh
);
9406 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
9407 unlock_user_struct(target_value
, arg1
, 1);
9411 #ifdef TARGET_NR_ipc
9413 ret
= do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9416 #ifdef TARGET_NR_semget
9417 case TARGET_NR_semget
:
9418 ret
= get_errno(semget(arg1
, arg2
, arg3
));
9421 #ifdef TARGET_NR_semop
9422 case TARGET_NR_semop
:
9423 ret
= do_semop(arg1
, arg2
, arg3
);
9426 #ifdef TARGET_NR_semctl
9427 case TARGET_NR_semctl
:
9428 ret
= do_semctl(arg1
, arg2
, arg3
, arg4
);
9431 #ifdef TARGET_NR_msgctl
9432 case TARGET_NR_msgctl
:
9433 ret
= do_msgctl(arg1
, arg2
, arg3
);
9436 #ifdef TARGET_NR_msgget
9437 case TARGET_NR_msgget
:
9438 ret
= get_errno(msgget(arg1
, arg2
));
9441 #ifdef TARGET_NR_msgrcv
9442 case TARGET_NR_msgrcv
:
9443 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
9446 #ifdef TARGET_NR_msgsnd
9447 case TARGET_NR_msgsnd
:
9448 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
9451 #ifdef TARGET_NR_shmget
9452 case TARGET_NR_shmget
:
9453 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
9456 #ifdef TARGET_NR_shmctl
9457 case TARGET_NR_shmctl
:
9458 ret
= do_shmctl(arg1
, arg2
, arg3
);
9461 #ifdef TARGET_NR_shmat
9462 case TARGET_NR_shmat
:
9463 ret
= do_shmat(cpu_env
, arg1
, arg2
, arg3
);
9466 #ifdef TARGET_NR_shmdt
9467 case TARGET_NR_shmdt
:
9468 ret
= do_shmdt(arg1
);
9471 case TARGET_NR_fsync
:
9472 ret
= get_errno(fsync(arg1
));
9474 case TARGET_NR_clone
:
9475 /* Linux manages to have three different orderings for its
9476 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9477 * match the kernel's CONFIG_CLONE_* settings.
9478 * Microblaze is further special in that it uses a sixth
9479 * implicit argument to clone for the TLS pointer.
9481 #if defined(TARGET_MICROBLAZE)
9482 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
9483 #elif defined(TARGET_CLONE_BACKWARDS)
9484 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
9485 #elif defined(TARGET_CLONE_BACKWARDS2)
9486 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
9488 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
9491 #ifdef __NR_exit_group
9492 /* new thread calls */
9493 case TARGET_NR_exit_group
:
9497 gdb_exit(cpu_env
, arg1
);
9498 ret
= get_errno(exit_group(arg1
));
9501 case TARGET_NR_setdomainname
:
9502 if (!(p
= lock_user_string(arg1
)))
9504 ret
= get_errno(setdomainname(p
, arg2
));
9505 unlock_user(p
, arg1
, 0);
9507 case TARGET_NR_uname
:
9508 /* no need to transcode because we use the linux syscall */
9510 struct new_utsname
* buf
;
9512 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
9514 ret
= get_errno(sys_uname(buf
));
9515 if (!is_error(ret
)) {
9516 /* Overwrite the native machine name with whatever is being
9518 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
9519 /* Allow the user to override the reported release. */
9520 if (qemu_uname_release
&& *qemu_uname_release
) {
9521 g_strlcpy(buf
->release
, qemu_uname_release
,
9522 sizeof(buf
->release
));
9525 unlock_user_struct(buf
, arg1
, 1);
9529 case TARGET_NR_modify_ldt
:
9530 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
9532 #if !defined(TARGET_X86_64)
9533 case TARGET_NR_vm86old
:
9535 case TARGET_NR_vm86
:
9536 ret
= do_vm86(cpu_env
, arg1
, arg2
);
9540 case TARGET_NR_adjtimex
:
9542 #ifdef TARGET_NR_create_module
9543 case TARGET_NR_create_module
:
9545 case TARGET_NR_init_module
:
9546 case TARGET_NR_delete_module
:
9547 #ifdef TARGET_NR_get_kernel_syms
9548 case TARGET_NR_get_kernel_syms
:
9551 case TARGET_NR_quotactl
:
9553 case TARGET_NR_getpgid
:
9554 ret
= get_errno(getpgid(arg1
));
9556 case TARGET_NR_fchdir
:
9557 ret
= get_errno(fchdir(arg1
));
9559 #ifdef TARGET_NR_bdflush /* not on x86_64 */
9560 case TARGET_NR_bdflush
:
9563 #ifdef TARGET_NR_sysfs
9564 case TARGET_NR_sysfs
:
9567 case TARGET_NR_personality
:
9568 ret
= get_errno(personality(arg1
));
9570 #ifdef TARGET_NR_afs_syscall
9571 case TARGET_NR_afs_syscall
:
9574 #ifdef TARGET_NR__llseek /* Not on alpha */
9575 case TARGET_NR__llseek
:
9578 #if !defined(__NR_llseek)
9579 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
9581 ret
= get_errno(res
);
9586 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
9588 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
9594 #ifdef TARGET_NR_getdents
9595 case TARGET_NR_getdents
:
9596 #ifdef __NR_getdents
9597 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9599 struct target_dirent
*target_dirp
;
9600 struct linux_dirent
*dirp
;
9601 abi_long count
= arg3
;
9603 dirp
= g_try_malloc(count
);
9605 ret
= -TARGET_ENOMEM
;
9609 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9610 if (!is_error(ret
)) {
9611 struct linux_dirent
*de
;
9612 struct target_dirent
*tde
;
9614 int reclen
, treclen
;
9615 int count1
, tnamelen
;
9619 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9623 reclen
= de
->d_reclen
;
9624 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
9625 assert(tnamelen
>= 0);
9626 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
9627 assert(count1
+ treclen
<= count
);
9628 tde
->d_reclen
= tswap16(treclen
);
9629 tde
->d_ino
= tswapal(de
->d_ino
);
9630 tde
->d_off
= tswapal(de
->d_off
);
9631 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
9632 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9634 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9638 unlock_user(target_dirp
, arg2
, ret
);
9644 struct linux_dirent
*dirp
;
9645 abi_long count
= arg3
;
9647 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9649 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9650 if (!is_error(ret
)) {
9651 struct linux_dirent
*de
;
9656 reclen
= de
->d_reclen
;
9659 de
->d_reclen
= tswap16(reclen
);
9660 tswapls(&de
->d_ino
);
9661 tswapls(&de
->d_off
);
9662 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9666 unlock_user(dirp
, arg2
, ret
);
9670 /* Implement getdents in terms of getdents64 */
9672 struct linux_dirent64
*dirp
;
9673 abi_long count
= arg3
;
9675 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
9679 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9680 if (!is_error(ret
)) {
9681 /* Convert the dirent64 structs to target dirent. We do this
9682 * in-place, since we can guarantee that a target_dirent is no
9683 * larger than a dirent64; however this means we have to be
9684 * careful to read everything before writing in the new format.
9686 struct linux_dirent64
*de
;
9687 struct target_dirent
*tde
;
9692 tde
= (struct target_dirent
*)dirp
;
9694 int namelen
, treclen
;
9695 int reclen
= de
->d_reclen
;
9696 uint64_t ino
= de
->d_ino
;
9697 int64_t off
= de
->d_off
;
9698 uint8_t type
= de
->d_type
;
9700 namelen
= strlen(de
->d_name
);
9701 treclen
= offsetof(struct target_dirent
, d_name
)
9703 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
9705 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
9706 tde
->d_ino
= tswapal(ino
);
9707 tde
->d_off
= tswapal(off
);
9708 tde
->d_reclen
= tswap16(treclen
);
9709 /* The target_dirent type is in what was formerly a padding
9710 * byte at the end of the structure:
9712 *(((char *)tde
) + treclen
- 1) = type
;
9714 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9715 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9721 unlock_user(dirp
, arg2
, ret
);
9725 #endif /* TARGET_NR_getdents */
9726 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9727 case TARGET_NR_getdents64
:
9729 struct linux_dirent64
*dirp
;
9730 abi_long count
= arg3
;
9731 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9733 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9734 if (!is_error(ret
)) {
9735 struct linux_dirent64
*de
;
9740 reclen
= de
->d_reclen
;
9743 de
->d_reclen
= tswap16(reclen
);
9744 tswap64s((uint64_t *)&de
->d_ino
);
9745 tswap64s((uint64_t *)&de
->d_off
);
9746 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9750 unlock_user(dirp
, arg2
, ret
);
9753 #endif /* TARGET_NR_getdents64 */
9754 #if defined(TARGET_NR__newselect)
9755 case TARGET_NR__newselect
:
9756 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9759 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9760 # ifdef TARGET_NR_poll
9761 case TARGET_NR_poll
:
9763 # ifdef TARGET_NR_ppoll
9764 case TARGET_NR_ppoll
:
9767 struct target_pollfd
*target_pfd
;
9768 unsigned int nfds
= arg2
;
9775 if (nfds
> (INT_MAX
/ sizeof(struct target_pollfd
))) {
9776 ret
= -TARGET_EINVAL
;
9780 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
9781 sizeof(struct target_pollfd
) * nfds
, 1);
9786 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
9787 for (i
= 0; i
< nfds
; i
++) {
9788 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
9789 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
9794 # ifdef TARGET_NR_ppoll
9795 case TARGET_NR_ppoll
:
9797 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
9798 target_sigset_t
*target_set
;
9799 sigset_t _set
, *set
= &_set
;
9802 if (target_to_host_timespec(timeout_ts
, arg3
)) {
9803 unlock_user(target_pfd
, arg1
, 0);
9811 if (arg5
!= sizeof(target_sigset_t
)) {
9812 unlock_user(target_pfd
, arg1
, 0);
9813 ret
= -TARGET_EINVAL
;
9817 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
9819 unlock_user(target_pfd
, arg1
, 0);
9822 target_to_host_sigset(set
, target_set
);
9827 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
9828 set
, SIGSET_T_SIZE
));
9830 if (!is_error(ret
) && arg3
) {
9831 host_to_target_timespec(arg3
, timeout_ts
);
9834 unlock_user(target_set
, arg4
, 0);
9839 # ifdef TARGET_NR_poll
9840 case TARGET_NR_poll
:
9842 struct timespec ts
, *pts
;
9845 /* Convert ms to secs, ns */
9846 ts
.tv_sec
= arg3
/ 1000;
9847 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
9850 /* -ve poll() timeout means "infinite" */
9853 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
9858 g_assert_not_reached();
9861 if (!is_error(ret
)) {
9862 for(i
= 0; i
< nfds
; i
++) {
9863 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
9866 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
9870 case TARGET_NR_flock
:
9871 /* NOTE: the flock constant seems to be the same for every
9873 ret
= get_errno(safe_flock(arg1
, arg2
));
9875 case TARGET_NR_readv
:
9877 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
9879 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
9880 unlock_iovec(vec
, arg2
, arg3
, 1);
9882 ret
= -host_to_target_errno(errno
);
9886 case TARGET_NR_writev
:
9888 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
9890 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
9891 unlock_iovec(vec
, arg2
, arg3
, 0);
9893 ret
= -host_to_target_errno(errno
);
9897 case TARGET_NR_getsid
:
9898 ret
= get_errno(getsid(arg1
));
9900 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9901 case TARGET_NR_fdatasync
:
9902 ret
= get_errno(fdatasync(arg1
));
9905 #ifdef TARGET_NR__sysctl
9906 case TARGET_NR__sysctl
:
9907 /* We don't implement this, but ENOTDIR is always a safe
9909 ret
= -TARGET_ENOTDIR
;
9912 case TARGET_NR_sched_getaffinity
:
9914 unsigned int mask_size
;
9915 unsigned long *mask
;
9918 * sched_getaffinity needs multiples of ulong, so need to take
9919 * care of mismatches between target ulong and host ulong sizes.
9921 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9922 ret
= -TARGET_EINVAL
;
9925 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9927 mask
= alloca(mask_size
);
9928 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
9930 if (!is_error(ret
)) {
9932 /* More data returned than the caller's buffer will fit.
9933 * This only happens if sizeof(abi_long) < sizeof(long)
9934 * and the caller passed us a buffer holding an odd number
9935 * of abi_longs. If the host kernel is actually using the
9936 * extra 4 bytes then fail EINVAL; otherwise we can just
9937 * ignore them and only copy the interesting part.
9939 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
9940 if (numcpus
> arg2
* 8) {
9941 ret
= -TARGET_EINVAL
;
9947 if (copy_to_user(arg3
, mask
, ret
)) {
9953 case TARGET_NR_sched_setaffinity
:
9955 unsigned int mask_size
;
9956 unsigned long *mask
;
9959 * sched_setaffinity needs multiples of ulong, so need to take
9960 * care of mismatches between target ulong and host ulong sizes.
9962 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9963 ret
= -TARGET_EINVAL
;
9966 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9968 mask
= alloca(mask_size
);
9969 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
9972 memcpy(mask
, p
, arg2
);
9973 unlock_user_struct(p
, arg2
, 0);
9975 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
9978 case TARGET_NR_sched_setparam
:
9980 struct sched_param
*target_schp
;
9981 struct sched_param schp
;
9984 return -TARGET_EINVAL
;
9986 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
9988 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
9989 unlock_user_struct(target_schp
, arg2
, 0);
9990 ret
= get_errno(sched_setparam(arg1
, &schp
));
9993 case TARGET_NR_sched_getparam
:
9995 struct sched_param
*target_schp
;
9996 struct sched_param schp
;
9999 return -TARGET_EINVAL
;
10001 ret
= get_errno(sched_getparam(arg1
, &schp
));
10002 if (!is_error(ret
)) {
10003 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
10005 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
10006 unlock_user_struct(target_schp
, arg2
, 1);
10010 case TARGET_NR_sched_setscheduler
:
10012 struct sched_param
*target_schp
;
10013 struct sched_param schp
;
10015 return -TARGET_EINVAL
;
10017 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
10019 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10020 unlock_user_struct(target_schp
, arg3
, 0);
10021 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
10024 case TARGET_NR_sched_getscheduler
:
10025 ret
= get_errno(sched_getscheduler(arg1
));
10027 case TARGET_NR_sched_yield
:
10028 ret
= get_errno(sched_yield());
10030 case TARGET_NR_sched_get_priority_max
:
10031 ret
= get_errno(sched_get_priority_max(arg1
));
10033 case TARGET_NR_sched_get_priority_min
:
10034 ret
= get_errno(sched_get_priority_min(arg1
));
10036 case TARGET_NR_sched_rr_get_interval
:
10038 struct timespec ts
;
10039 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10040 if (!is_error(ret
)) {
10041 ret
= host_to_target_timespec(arg2
, &ts
);
10045 case TARGET_NR_nanosleep
:
10047 struct timespec req
, rem
;
10048 target_to_host_timespec(&req
, arg1
);
10049 ret
= get_errno(safe_nanosleep(&req
, &rem
));
10050 if (is_error(ret
) && arg2
) {
10051 host_to_target_timespec(arg2
, &rem
);
10055 #ifdef TARGET_NR_query_module
10056 case TARGET_NR_query_module
:
10057 goto unimplemented
;
10059 #ifdef TARGET_NR_nfsservctl
10060 case TARGET_NR_nfsservctl
:
10061 goto unimplemented
;
10063 case TARGET_NR_prctl
:
10065 case PR_GET_PDEATHSIG
:
10068 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
10069 if (!is_error(ret
) && arg2
10070 && put_user_ual(deathsig
, arg2
)) {
10078 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
10082 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10083 arg3
, arg4
, arg5
));
10084 unlock_user(name
, arg2
, 16);
10089 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
10093 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10094 arg3
, arg4
, arg5
));
10095 unlock_user(name
, arg2
, 0);
10100 /* Most prctl options have no pointer arguments */
10101 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
10105 #ifdef TARGET_NR_arch_prctl
10106 case TARGET_NR_arch_prctl
:
10107 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10108 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
10111 goto unimplemented
;
10114 #ifdef TARGET_NR_pread64
10115 case TARGET_NR_pread64
:
10116 if (regpairs_aligned(cpu_env
)) {
10120 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
10122 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10123 unlock_user(p
, arg2
, ret
);
10125 case TARGET_NR_pwrite64
:
10126 if (regpairs_aligned(cpu_env
)) {
10130 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
10132 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10133 unlock_user(p
, arg2
, 0);
10136 case TARGET_NR_getcwd
:
10137 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
10139 ret
= get_errno(sys_getcwd1(p
, arg2
));
10140 unlock_user(p
, arg1
, ret
);
10142 case TARGET_NR_capget
:
10143 case TARGET_NR_capset
:
10145 struct target_user_cap_header
*target_header
;
10146 struct target_user_cap_data
*target_data
= NULL
;
10147 struct __user_cap_header_struct header
;
10148 struct __user_cap_data_struct data
[2];
10149 struct __user_cap_data_struct
*dataptr
= NULL
;
10150 int i
, target_datalen
;
10151 int data_items
= 1;
10153 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
10156 header
.version
= tswap32(target_header
->version
);
10157 header
.pid
= tswap32(target_header
->pid
);
10159 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
10160 /* Version 2 and up takes pointer to two user_data structs */
10164 target_datalen
= sizeof(*target_data
) * data_items
;
10167 if (num
== TARGET_NR_capget
) {
10168 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
10170 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
10172 if (!target_data
) {
10173 unlock_user_struct(target_header
, arg1
, 0);
10177 if (num
== TARGET_NR_capset
) {
10178 for (i
= 0; i
< data_items
; i
++) {
10179 data
[i
].effective
= tswap32(target_data
[i
].effective
);
10180 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
10181 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
10188 if (num
== TARGET_NR_capget
) {
10189 ret
= get_errno(capget(&header
, dataptr
));
10191 ret
= get_errno(capset(&header
, dataptr
));
10194 /* The kernel always updates version for both capget and capset */
10195 target_header
->version
= tswap32(header
.version
);
10196 unlock_user_struct(target_header
, arg1
, 1);
10199 if (num
== TARGET_NR_capget
) {
10200 for (i
= 0; i
< data_items
; i
++) {
10201 target_data
[i
].effective
= tswap32(data
[i
].effective
);
10202 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
10203 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
10205 unlock_user(target_data
, arg2
, target_datalen
);
10207 unlock_user(target_data
, arg2
, 0);
10212 case TARGET_NR_sigaltstack
:
10213 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
10216 #ifdef CONFIG_SENDFILE
10217 case TARGET_NR_sendfile
:
10219 off_t
*offp
= NULL
;
10222 ret
= get_user_sal(off
, arg3
);
10223 if (is_error(ret
)) {
10228 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10229 if (!is_error(ret
) && arg3
) {
10230 abi_long ret2
= put_user_sal(off
, arg3
);
10231 if (is_error(ret2
)) {
10237 #ifdef TARGET_NR_sendfile64
10238 case TARGET_NR_sendfile64
:
10240 off_t
*offp
= NULL
;
10243 ret
= get_user_s64(off
, arg3
);
10244 if (is_error(ret
)) {
10249 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10250 if (!is_error(ret
) && arg3
) {
10251 abi_long ret2
= put_user_s64(off
, arg3
);
10252 if (is_error(ret2
)) {
10260 case TARGET_NR_sendfile
:
10261 #ifdef TARGET_NR_sendfile64
10262 case TARGET_NR_sendfile64
:
10264 goto unimplemented
;
10267 #ifdef TARGET_NR_getpmsg
10268 case TARGET_NR_getpmsg
:
10269 goto unimplemented
;
10271 #ifdef TARGET_NR_putpmsg
10272 case TARGET_NR_putpmsg
:
10273 goto unimplemented
;
10275 #ifdef TARGET_NR_vfork
10276 case TARGET_NR_vfork
:
10277 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
10281 #ifdef TARGET_NR_ugetrlimit
10282 case TARGET_NR_ugetrlimit
:
10284 struct rlimit rlim
;
10285 int resource
= target_to_host_resource(arg1
);
10286 ret
= get_errno(getrlimit(resource
, &rlim
));
10287 if (!is_error(ret
)) {
10288 struct target_rlimit
*target_rlim
;
10289 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
10291 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
10292 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
10293 unlock_user_struct(target_rlim
, arg2
, 1);
10298 #ifdef TARGET_NR_truncate64
10299 case TARGET_NR_truncate64
:
10300 if (!(p
= lock_user_string(arg1
)))
10302 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
10303 unlock_user(p
, arg1
, 0);
10306 #ifdef TARGET_NR_ftruncate64
10307 case TARGET_NR_ftruncate64
:
10308 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
10311 #ifdef TARGET_NR_stat64
10312 case TARGET_NR_stat64
:
10313 if (!(p
= lock_user_string(arg1
)))
10315 ret
= get_errno(stat(path(p
), &st
));
10316 unlock_user(p
, arg1
, 0);
10317 if (!is_error(ret
))
10318 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10321 #ifdef TARGET_NR_lstat64
10322 case TARGET_NR_lstat64
:
10323 if (!(p
= lock_user_string(arg1
)))
10325 ret
= get_errno(lstat(path(p
), &st
));
10326 unlock_user(p
, arg1
, 0);
10327 if (!is_error(ret
))
10328 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10331 #ifdef TARGET_NR_fstat64
10332 case TARGET_NR_fstat64
:
10333 ret
= get_errno(fstat(arg1
, &st
));
10334 if (!is_error(ret
))
10335 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10338 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10339 #ifdef TARGET_NR_fstatat64
10340 case TARGET_NR_fstatat64
:
10342 #ifdef TARGET_NR_newfstatat
10343 case TARGET_NR_newfstatat
:
10345 if (!(p
= lock_user_string(arg2
)))
10347 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
10348 if (!is_error(ret
))
10349 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
10352 #ifdef TARGET_NR_lchown
10353 case TARGET_NR_lchown
:
10354 if (!(p
= lock_user_string(arg1
)))
10356 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10357 unlock_user(p
, arg1
, 0);
10360 #ifdef TARGET_NR_getuid
10361 case TARGET_NR_getuid
:
10362 ret
= get_errno(high2lowuid(getuid()));
10365 #ifdef TARGET_NR_getgid
10366 case TARGET_NR_getgid
:
10367 ret
= get_errno(high2lowgid(getgid()));
10370 #ifdef TARGET_NR_geteuid
10371 case TARGET_NR_geteuid
:
10372 ret
= get_errno(high2lowuid(geteuid()));
10375 #ifdef TARGET_NR_getegid
10376 case TARGET_NR_getegid
:
10377 ret
= get_errno(high2lowgid(getegid()));
10380 case TARGET_NR_setreuid
:
10381 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
10383 case TARGET_NR_setregid
:
10384 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
10386 case TARGET_NR_getgroups
:
10388 int gidsetsize
= arg1
;
10389 target_id
*target_grouplist
;
10393 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10394 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10395 if (gidsetsize
== 0)
10397 if (!is_error(ret
)) {
10398 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
10399 if (!target_grouplist
)
10401 for(i
= 0;i
< ret
; i
++)
10402 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
10403 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
10407 case TARGET_NR_setgroups
:
10409 int gidsetsize
= arg1
;
10410 target_id
*target_grouplist
;
10411 gid_t
*grouplist
= NULL
;
10414 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10415 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
10416 if (!target_grouplist
) {
10417 ret
= -TARGET_EFAULT
;
10420 for (i
= 0; i
< gidsetsize
; i
++) {
10421 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
10423 unlock_user(target_grouplist
, arg2
, 0);
10425 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
10428 case TARGET_NR_fchown
:
10429 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
10431 #if defined(TARGET_NR_fchownat)
10432 case TARGET_NR_fchownat
:
10433 if (!(p
= lock_user_string(arg2
)))
10435 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
10436 low2highgid(arg4
), arg5
));
10437 unlock_user(p
, arg2
, 0);
10440 #ifdef TARGET_NR_setresuid
10441 case TARGET_NR_setresuid
:
10442 ret
= get_errno(sys_setresuid(low2highuid(arg1
),
10444 low2highuid(arg3
)));
10447 #ifdef TARGET_NR_getresuid
10448 case TARGET_NR_getresuid
:
10450 uid_t ruid
, euid
, suid
;
10451 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10452 if (!is_error(ret
)) {
10453 if (put_user_id(high2lowuid(ruid
), arg1
)
10454 || put_user_id(high2lowuid(euid
), arg2
)
10455 || put_user_id(high2lowuid(suid
), arg3
))
10461 #ifdef TARGET_NR_getresgid
10462 case TARGET_NR_setresgid
:
10463 ret
= get_errno(sys_setresgid(low2highgid(arg1
),
10465 low2highgid(arg3
)));
10468 #ifdef TARGET_NR_getresgid
10469 case TARGET_NR_getresgid
:
10471 gid_t rgid
, egid
, sgid
;
10472 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10473 if (!is_error(ret
)) {
10474 if (put_user_id(high2lowgid(rgid
), arg1
)
10475 || put_user_id(high2lowgid(egid
), arg2
)
10476 || put_user_id(high2lowgid(sgid
), arg3
))
10482 #ifdef TARGET_NR_chown
10483 case TARGET_NR_chown
:
10484 if (!(p
= lock_user_string(arg1
)))
10486 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10487 unlock_user(p
, arg1
, 0);
10490 case TARGET_NR_setuid
:
10491 ret
= get_errno(sys_setuid(low2highuid(arg1
)));
10493 case TARGET_NR_setgid
:
10494 ret
= get_errno(sys_setgid(low2highgid(arg1
)));
10496 case TARGET_NR_setfsuid
:
10497 ret
= get_errno(setfsuid(arg1
));
10499 case TARGET_NR_setfsgid
:
10500 ret
= get_errno(setfsgid(arg1
));
10503 #ifdef TARGET_NR_lchown32
10504 case TARGET_NR_lchown32
:
10505 if (!(p
= lock_user_string(arg1
)))
10507 ret
= get_errno(lchown(p
, arg2
, arg3
));
10508 unlock_user(p
, arg1
, 0);
10511 #ifdef TARGET_NR_getuid32
10512 case TARGET_NR_getuid32
:
10513 ret
= get_errno(getuid());
10517 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10518 /* Alpha specific */
10519 case TARGET_NR_getxuid
:
10523 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
10525 ret
= get_errno(getuid());
10528 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10529 /* Alpha specific */
10530 case TARGET_NR_getxgid
:
10534 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
10536 ret
= get_errno(getgid());
10539 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10540 /* Alpha specific */
10541 case TARGET_NR_osf_getsysinfo
:
10542 ret
= -TARGET_EOPNOTSUPP
;
10544 case TARGET_GSI_IEEE_FP_CONTROL
:
10546 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
10548 /* Copied from linux ieee_fpcr_to_swcr. */
10549 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
10550 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
10551 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
10552 | SWCR_TRAP_ENABLE_DZE
10553 | SWCR_TRAP_ENABLE_OVF
);
10554 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
10555 | SWCR_TRAP_ENABLE_INE
);
10556 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
10557 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
10559 if (put_user_u64 (swcr
, arg2
))
10565 /* case GSI_IEEE_STATE_AT_SIGNAL:
10566 -- Not implemented in linux kernel.
10568 -- Retrieves current unaligned access state; not much used.
10569 case GSI_PROC_TYPE:
10570 -- Retrieves implver information; surely not used.
10571 case GSI_GET_HWRPB:
10572 -- Grabs a copy of the HWRPB; surely not used.
10577 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10578 /* Alpha specific */
10579 case TARGET_NR_osf_setsysinfo
:
10580 ret
= -TARGET_EOPNOTSUPP
;
10582 case TARGET_SSI_IEEE_FP_CONTROL
:
10584 uint64_t swcr
, fpcr
, orig_fpcr
;
10586 if (get_user_u64 (swcr
, arg2
)) {
10589 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10590 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
10592 /* Copied from linux ieee_swcr_to_fpcr. */
10593 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
10594 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
10595 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
10596 | SWCR_TRAP_ENABLE_DZE
10597 | SWCR_TRAP_ENABLE_OVF
)) << 48;
10598 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
10599 | SWCR_TRAP_ENABLE_INE
)) << 57;
10600 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
10601 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
10603 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10608 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
10610 uint64_t exc
, fpcr
, orig_fpcr
;
10613 if (get_user_u64(exc
, arg2
)) {
10617 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10619 /* We only add to the exception status here. */
10620 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
10622 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10625 /* Old exceptions are not signaled. */
10626 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
10628 /* If any exceptions set by this call,
10629 and are unmasked, send a signal. */
10631 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
10632 si_code
= TARGET_FPE_FLTRES
;
10634 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
10635 si_code
= TARGET_FPE_FLTUND
;
10637 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
10638 si_code
= TARGET_FPE_FLTOVF
;
10640 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
10641 si_code
= TARGET_FPE_FLTDIV
;
10643 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
10644 si_code
= TARGET_FPE_FLTINV
;
10646 if (si_code
!= 0) {
10647 target_siginfo_t info
;
10648 info
.si_signo
= SIGFPE
;
10650 info
.si_code
= si_code
;
10651 info
._sifields
._sigfault
._addr
10652 = ((CPUArchState
*)cpu_env
)->pc
;
10653 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
10654 QEMU_SI_FAULT
, &info
);
10659 /* case SSI_NVPAIRS:
10660 -- Used with SSIN_UACPROC to enable unaligned accesses.
10661 case SSI_IEEE_STATE_AT_SIGNAL:
10662 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10663 -- Not implemented in linux kernel
10668 #ifdef TARGET_NR_osf_sigprocmask
10669 /* Alpha specific. */
10670 case TARGET_NR_osf_sigprocmask
:
10674 sigset_t set
, oldset
;
10677 case TARGET_SIG_BLOCK
:
10680 case TARGET_SIG_UNBLOCK
:
10683 case TARGET_SIG_SETMASK
:
10687 ret
= -TARGET_EINVAL
;
10691 target_to_host_old_sigset(&set
, &mask
);
10692 ret
= do_sigprocmask(how
, &set
, &oldset
);
10694 host_to_target_old_sigset(&mask
, &oldset
);
10701 #ifdef TARGET_NR_getgid32
10702 case TARGET_NR_getgid32
:
10703 ret
= get_errno(getgid());
10706 #ifdef TARGET_NR_geteuid32
10707 case TARGET_NR_geteuid32
:
10708 ret
= get_errno(geteuid());
10711 #ifdef TARGET_NR_getegid32
10712 case TARGET_NR_getegid32
:
10713 ret
= get_errno(getegid());
10716 #ifdef TARGET_NR_setreuid32
10717 case TARGET_NR_setreuid32
:
10718 ret
= get_errno(setreuid(arg1
, arg2
));
10721 #ifdef TARGET_NR_setregid32
10722 case TARGET_NR_setregid32
:
10723 ret
= get_errno(setregid(arg1
, arg2
));
10726 #ifdef TARGET_NR_getgroups32
10727 case TARGET_NR_getgroups32
:
10729 int gidsetsize
= arg1
;
10730 uint32_t *target_grouplist
;
10734 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10735 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10736 if (gidsetsize
== 0)
10738 if (!is_error(ret
)) {
10739 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
10740 if (!target_grouplist
) {
10741 ret
= -TARGET_EFAULT
;
10744 for(i
= 0;i
< ret
; i
++)
10745 target_grouplist
[i
] = tswap32(grouplist
[i
]);
10746 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
10751 #ifdef TARGET_NR_setgroups32
10752 case TARGET_NR_setgroups32
:
10754 int gidsetsize
= arg1
;
10755 uint32_t *target_grouplist
;
10759 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10760 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
10761 if (!target_grouplist
) {
10762 ret
= -TARGET_EFAULT
;
10765 for(i
= 0;i
< gidsetsize
; i
++)
10766 grouplist
[i
] = tswap32(target_grouplist
[i
]);
10767 unlock_user(target_grouplist
, arg2
, 0);
10768 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
10772 #ifdef TARGET_NR_fchown32
10773 case TARGET_NR_fchown32
:
10774 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
10777 #ifdef TARGET_NR_setresuid32
10778 case TARGET_NR_setresuid32
:
10779 ret
= get_errno(sys_setresuid(arg1
, arg2
, arg3
));
10782 #ifdef TARGET_NR_getresuid32
10783 case TARGET_NR_getresuid32
:
10785 uid_t ruid
, euid
, suid
;
10786 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10787 if (!is_error(ret
)) {
10788 if (put_user_u32(ruid
, arg1
)
10789 || put_user_u32(euid
, arg2
)
10790 || put_user_u32(suid
, arg3
))
10796 #ifdef TARGET_NR_setresgid32
10797 case TARGET_NR_setresgid32
:
10798 ret
= get_errno(sys_setresgid(arg1
, arg2
, arg3
));
10801 #ifdef TARGET_NR_getresgid32
10802 case TARGET_NR_getresgid32
:
10804 gid_t rgid
, egid
, sgid
;
10805 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10806 if (!is_error(ret
)) {
10807 if (put_user_u32(rgid
, arg1
)
10808 || put_user_u32(egid
, arg2
)
10809 || put_user_u32(sgid
, arg3
))
10815 #ifdef TARGET_NR_chown32
10816 case TARGET_NR_chown32
:
10817 if (!(p
= lock_user_string(arg1
)))
10819 ret
= get_errno(chown(p
, arg2
, arg3
));
10820 unlock_user(p
, arg1
, 0);
10823 #ifdef TARGET_NR_setuid32
10824 case TARGET_NR_setuid32
:
10825 ret
= get_errno(sys_setuid(arg1
));
10828 #ifdef TARGET_NR_setgid32
10829 case TARGET_NR_setgid32
:
10830 ret
= get_errno(sys_setgid(arg1
));
10833 #ifdef TARGET_NR_setfsuid32
10834 case TARGET_NR_setfsuid32
:
10835 ret
= get_errno(setfsuid(arg1
));
10838 #ifdef TARGET_NR_setfsgid32
10839 case TARGET_NR_setfsgid32
:
10840 ret
= get_errno(setfsgid(arg1
));
10844 case TARGET_NR_pivot_root
:
10845 goto unimplemented
;
10846 #ifdef TARGET_NR_mincore
10847 case TARGET_NR_mincore
:
10850 ret
= -TARGET_EFAULT
;
10851 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
10853 if (!(p
= lock_user_string(arg3
)))
10855 ret
= get_errno(mincore(a
, arg2
, p
));
10856 unlock_user(p
, arg3
, ret
);
10858 unlock_user(a
, arg1
, 0);
10862 #ifdef TARGET_NR_arm_fadvise64_64
10863 case TARGET_NR_arm_fadvise64_64
:
10864 /* arm_fadvise64_64 looks like fadvise64_64 but
10865 * with different argument order: fd, advice, offset, len
10866 * rather than the usual fd, offset, len, advice.
10867 * Note that offset and len are both 64-bit so appear as
10868 * pairs of 32-bit registers.
10870 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
10871 target_offset64(arg5
, arg6
), arg2
);
10872 ret
= -host_to_target_errno(ret
);
10876 #if TARGET_ABI_BITS == 32
10878 #ifdef TARGET_NR_fadvise64_64
10879 case TARGET_NR_fadvise64_64
:
10880 /* 6 args: fd, offset (high, low), len (high, low), advice */
10881 if (regpairs_aligned(cpu_env
)) {
10882 /* offset is in (3,4), len in (5,6) and advice in 7 */
10889 ret
= -host_to_target_errno(posix_fadvise(arg1
,
10890 target_offset64(arg2
, arg3
),
10891 target_offset64(arg4
, arg5
),
10896 #ifdef TARGET_NR_fadvise64
10897 case TARGET_NR_fadvise64
:
10898 /* 5 args: fd, offset (high, low), len, advice */
10899 if (regpairs_aligned(cpu_env
)) {
10900 /* offset is in (3,4), len in 5 and advice in 6 */
10906 ret
= -host_to_target_errno(posix_fadvise(arg1
,
10907 target_offset64(arg2
, arg3
),
10912 #else /* not a 32-bit ABI */
10913 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10914 #ifdef TARGET_NR_fadvise64_64
10915 case TARGET_NR_fadvise64_64
:
10917 #ifdef TARGET_NR_fadvise64
10918 case TARGET_NR_fadvise64
:
10920 #ifdef TARGET_S390X
10922 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
10923 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
10924 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
10925 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
10929 ret
= -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
10932 #endif /* end of 64-bit ABI fadvise handling */
10934 #ifdef TARGET_NR_madvise
10935 case TARGET_NR_madvise
:
10936 /* A straight passthrough may not be safe because qemu sometimes
10937 turns private file-backed mappings into anonymous mappings.
10938 This will break MADV_DONTNEED.
10939 This is a hint, so ignoring and returning success is ok. */
10940 ret
= get_errno(0);
10943 #if TARGET_ABI_BITS == 32
10944 case TARGET_NR_fcntl64
:
10948 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
10949 to_flock64_fn
*copyto
= copy_to_user_flock64
;
10952 if (((CPUARMState
*)cpu_env
)->eabi
) {
10953 copyfrom
= copy_from_user_eabi_flock64
;
10954 copyto
= copy_to_user_eabi_flock64
;
10958 cmd
= target_to_host_fcntl_cmd(arg2
);
10959 if (cmd
== -TARGET_EINVAL
) {
10965 case TARGET_F_GETLK64
:
10966 ret
= copyfrom(&fl
, arg3
);
10970 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
10972 ret
= copyto(arg3
, &fl
);
10976 case TARGET_F_SETLK64
:
10977 case TARGET_F_SETLKW64
:
10978 ret
= copyfrom(&fl
, arg3
);
10982 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
10985 ret
= do_fcntl(arg1
, arg2
, arg3
);
10991 #ifdef TARGET_NR_cacheflush
10992 case TARGET_NR_cacheflush
:
10993 /* self-modifying code is handled automatically, so nothing needed */
10997 #ifdef TARGET_NR_security
10998 case TARGET_NR_security
:
10999 goto unimplemented
;
11001 #ifdef TARGET_NR_getpagesize
11002 case TARGET_NR_getpagesize
:
11003 ret
= TARGET_PAGE_SIZE
;
11006 case TARGET_NR_gettid
:
11007 ret
= get_errno(gettid());
11009 #ifdef TARGET_NR_readahead
11010 case TARGET_NR_readahead
:
11011 #if TARGET_ABI_BITS == 32
11012 if (regpairs_aligned(cpu_env
)) {
11017 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
11019 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
11024 #ifdef TARGET_NR_setxattr
11025 case TARGET_NR_listxattr
:
11026 case TARGET_NR_llistxattr
:
11030 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11032 ret
= -TARGET_EFAULT
;
11036 p
= lock_user_string(arg1
);
11038 if (num
== TARGET_NR_listxattr
) {
11039 ret
= get_errno(listxattr(p
, b
, arg3
));
11041 ret
= get_errno(llistxattr(p
, b
, arg3
));
11044 ret
= -TARGET_EFAULT
;
11046 unlock_user(p
, arg1
, 0);
11047 unlock_user(b
, arg2
, arg3
);
11050 case TARGET_NR_flistxattr
:
11054 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11056 ret
= -TARGET_EFAULT
;
11060 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
11061 unlock_user(b
, arg2
, arg3
);
11064 case TARGET_NR_setxattr
:
11065 case TARGET_NR_lsetxattr
:
11067 void *p
, *n
, *v
= 0;
11069 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11071 ret
= -TARGET_EFAULT
;
11075 p
= lock_user_string(arg1
);
11076 n
= lock_user_string(arg2
);
11078 if (num
== TARGET_NR_setxattr
) {
11079 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
11081 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
11084 ret
= -TARGET_EFAULT
;
11086 unlock_user(p
, arg1
, 0);
11087 unlock_user(n
, arg2
, 0);
11088 unlock_user(v
, arg3
, 0);
11091 case TARGET_NR_fsetxattr
:
11095 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11097 ret
= -TARGET_EFAULT
;
11101 n
= lock_user_string(arg2
);
11103 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
11105 ret
= -TARGET_EFAULT
;
11107 unlock_user(n
, arg2
, 0);
11108 unlock_user(v
, arg3
, 0);
11111 case TARGET_NR_getxattr
:
11112 case TARGET_NR_lgetxattr
:
11114 void *p
, *n
, *v
= 0;
11116 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11118 ret
= -TARGET_EFAULT
;
11122 p
= lock_user_string(arg1
);
11123 n
= lock_user_string(arg2
);
11125 if (num
== TARGET_NR_getxattr
) {
11126 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
11128 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
11131 ret
= -TARGET_EFAULT
;
11133 unlock_user(p
, arg1
, 0);
11134 unlock_user(n
, arg2
, 0);
11135 unlock_user(v
, arg3
, arg4
);
11138 case TARGET_NR_fgetxattr
:
11142 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11144 ret
= -TARGET_EFAULT
;
11148 n
= lock_user_string(arg2
);
11150 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
11152 ret
= -TARGET_EFAULT
;
11154 unlock_user(n
, arg2
, 0);
11155 unlock_user(v
, arg3
, arg4
);
11158 case TARGET_NR_removexattr
:
11159 case TARGET_NR_lremovexattr
:
11162 p
= lock_user_string(arg1
);
11163 n
= lock_user_string(arg2
);
11165 if (num
== TARGET_NR_removexattr
) {
11166 ret
= get_errno(removexattr(p
, n
));
11168 ret
= get_errno(lremovexattr(p
, n
));
11171 ret
= -TARGET_EFAULT
;
11173 unlock_user(p
, arg1
, 0);
11174 unlock_user(n
, arg2
, 0);
11177 case TARGET_NR_fremovexattr
:
11180 n
= lock_user_string(arg2
);
11182 ret
= get_errno(fremovexattr(arg1
, n
));
11184 ret
= -TARGET_EFAULT
;
11186 unlock_user(n
, arg2
, 0);
11190 #endif /* CONFIG_ATTR */
11191 #ifdef TARGET_NR_set_thread_area
11192 case TARGET_NR_set_thread_area
:
11193 #if defined(TARGET_MIPS)
11194 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
11197 #elif defined(TARGET_CRIS)
11199 ret
= -TARGET_EINVAL
;
11201 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
11205 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11206 ret
= do_set_thread_area(cpu_env
, arg1
);
11208 #elif defined(TARGET_M68K)
11210 TaskState
*ts
= cpu
->opaque
;
11211 ts
->tp_value
= arg1
;
11216 goto unimplemented_nowarn
;
11219 #ifdef TARGET_NR_get_thread_area
11220 case TARGET_NR_get_thread_area
:
11221 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11222 ret
= do_get_thread_area(cpu_env
, arg1
);
11224 #elif defined(TARGET_M68K)
11226 TaskState
*ts
= cpu
->opaque
;
11227 ret
= ts
->tp_value
;
11231 goto unimplemented_nowarn
;
11234 #ifdef TARGET_NR_getdomainname
11235 case TARGET_NR_getdomainname
:
11236 goto unimplemented_nowarn
;
11239 #ifdef TARGET_NR_clock_gettime
11240 case TARGET_NR_clock_gettime
:
11242 struct timespec ts
;
11243 ret
= get_errno(clock_gettime(arg1
, &ts
));
11244 if (!is_error(ret
)) {
11245 host_to_target_timespec(arg2
, &ts
);
11250 #ifdef TARGET_NR_clock_getres
11251 case TARGET_NR_clock_getres
:
11253 struct timespec ts
;
11254 ret
= get_errno(clock_getres(arg1
, &ts
));
11255 if (!is_error(ret
)) {
11256 host_to_target_timespec(arg2
, &ts
);
11261 #ifdef TARGET_NR_clock_nanosleep
11262 case TARGET_NR_clock_nanosleep
:
11264 struct timespec ts
;
11265 target_to_host_timespec(&ts
, arg3
);
11266 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
11267 &ts
, arg4
? &ts
: NULL
));
11269 host_to_target_timespec(arg4
, &ts
);
11271 #if defined(TARGET_PPC)
11272 /* clock_nanosleep is odd in that it returns positive errno values.
11273 * On PPC, CR0 bit 3 should be set in such a situation. */
11274 if (ret
&& ret
!= -TARGET_ERESTARTSYS
) {
11275 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
11282 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11283 case TARGET_NR_set_tid_address
:
11284 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
11288 case TARGET_NR_tkill
:
11289 ret
= get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
11292 case TARGET_NR_tgkill
:
11293 ret
= get_errno(safe_tgkill((int)arg1
, (int)arg2
,
11294 target_to_host_signal(arg3
)));
11297 #ifdef TARGET_NR_set_robust_list
11298 case TARGET_NR_set_robust_list
:
11299 case TARGET_NR_get_robust_list
:
11300 /* The ABI for supporting robust futexes has userspace pass
11301 * the kernel a pointer to a linked list which is updated by
11302 * userspace after the syscall; the list is walked by the kernel
11303 * when the thread exits. Since the linked list in QEMU guest
11304 * memory isn't a valid linked list for the host and we have
11305 * no way to reliably intercept the thread-death event, we can't
11306 * support these. Silently return ENOSYS so that guest userspace
11307 * falls back to a non-robust futex implementation (which should
11308 * be OK except in the corner case of the guest crashing while
11309 * holding a mutex that is shared with another process via
11312 goto unimplemented_nowarn
;
11315 #if defined(TARGET_NR_utimensat)
11316 case TARGET_NR_utimensat
:
11318 struct timespec
*tsp
, ts
[2];
11322 target_to_host_timespec(ts
, arg3
);
11323 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
11327 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
11329 if (!(p
= lock_user_string(arg2
))) {
11330 ret
= -TARGET_EFAULT
;
11333 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
11334 unlock_user(p
, arg2
, 0);
11339 case TARGET_NR_futex
:
11340 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11342 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11343 case TARGET_NR_inotify_init
:
11344 ret
= get_errno(sys_inotify_init());
11347 #ifdef CONFIG_INOTIFY1
11348 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11349 case TARGET_NR_inotify_init1
:
11350 ret
= get_errno(sys_inotify_init1(arg1
));
11354 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11355 case TARGET_NR_inotify_add_watch
:
11356 p
= lock_user_string(arg2
);
11357 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
11358 unlock_user(p
, arg2
, 0);
11361 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11362 case TARGET_NR_inotify_rm_watch
:
11363 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
11367 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11368 case TARGET_NR_mq_open
:
11370 struct mq_attr posix_mq_attr
, *attrp
;
11372 p
= lock_user_string(arg1
- 1);
11374 copy_from_user_mq_attr (&posix_mq_attr
, arg4
);
11375 attrp
= &posix_mq_attr
;
11379 ret
= get_errno(mq_open(p
, arg2
, arg3
, attrp
));
11380 unlock_user (p
, arg1
, 0);
11384 case TARGET_NR_mq_unlink
:
11385 p
= lock_user_string(arg1
- 1);
11387 ret
= -TARGET_EFAULT
;
11390 ret
= get_errno(mq_unlink(p
));
11391 unlock_user (p
, arg1
, 0);
11394 case TARGET_NR_mq_timedsend
:
11396 struct timespec ts
;
11398 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11400 target_to_host_timespec(&ts
, arg5
);
11401 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
11402 host_to_target_timespec(arg5
, &ts
);
11404 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
11406 unlock_user (p
, arg2
, arg3
);
11410 case TARGET_NR_mq_timedreceive
:
11412 struct timespec ts
;
11415 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11417 target_to_host_timespec(&ts
, arg5
);
11418 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11420 host_to_target_timespec(arg5
, &ts
);
11422 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11425 unlock_user (p
, arg2
, arg3
);
11427 put_user_u32(prio
, arg4
);
11431 /* Not implemented for now... */
11432 /* case TARGET_NR_mq_notify: */
11435 case TARGET_NR_mq_getsetattr
:
11437 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
11440 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
11441 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
11444 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
11445 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
11452 #ifdef CONFIG_SPLICE
11453 #ifdef TARGET_NR_tee
11454 case TARGET_NR_tee
:
11456 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
11460 #ifdef TARGET_NR_splice
11461 case TARGET_NR_splice
:
11463 loff_t loff_in
, loff_out
;
11464 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
11466 if (get_user_u64(loff_in
, arg2
)) {
11469 ploff_in
= &loff_in
;
11472 if (get_user_u64(loff_out
, arg4
)) {
11475 ploff_out
= &loff_out
;
11477 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
11479 if (put_user_u64(loff_in
, arg2
)) {
11484 if (put_user_u64(loff_out
, arg4
)) {
11491 #ifdef TARGET_NR_vmsplice
11492 case TARGET_NR_vmsplice
:
11494 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
11496 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
11497 unlock_iovec(vec
, arg2
, arg3
, 0);
11499 ret
= -host_to_target_errno(errno
);
11504 #endif /* CONFIG_SPLICE */
11505 #ifdef CONFIG_EVENTFD
11506 #if defined(TARGET_NR_eventfd)
11507 case TARGET_NR_eventfd
:
11508 ret
= get_errno(eventfd(arg1
, 0));
11509 fd_trans_unregister(ret
);
11512 #if defined(TARGET_NR_eventfd2)
11513 case TARGET_NR_eventfd2
:
11515 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
11516 if (arg2
& TARGET_O_NONBLOCK
) {
11517 host_flags
|= O_NONBLOCK
;
11519 if (arg2
& TARGET_O_CLOEXEC
) {
11520 host_flags
|= O_CLOEXEC
;
11522 ret
= get_errno(eventfd(arg1
, host_flags
));
11523 fd_trans_unregister(ret
);
11527 #endif /* CONFIG_EVENTFD */
11528 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11529 case TARGET_NR_fallocate
:
11530 #if TARGET_ABI_BITS == 32
11531 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
11532 target_offset64(arg5
, arg6
)));
11534 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
11538 #if defined(CONFIG_SYNC_FILE_RANGE)
11539 #if defined(TARGET_NR_sync_file_range)
11540 case TARGET_NR_sync_file_range
:
11541 #if TARGET_ABI_BITS == 32
11542 #if defined(TARGET_MIPS)
11543 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11544 target_offset64(arg5
, arg6
), arg7
));
11546 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
11547 target_offset64(arg4
, arg5
), arg6
));
11548 #endif /* !TARGET_MIPS */
11550 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
11554 #if defined(TARGET_NR_sync_file_range2)
11555 case TARGET_NR_sync_file_range2
:
11556 /* This is like sync_file_range but the arguments are reordered */
11557 #if TARGET_ABI_BITS == 32
11558 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11559 target_offset64(arg5
, arg6
), arg2
));
11561 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
11566 #if defined(TARGET_NR_signalfd4)
11567 case TARGET_NR_signalfd4
:
11568 ret
= do_signalfd4(arg1
, arg2
, arg4
);
11571 #if defined(TARGET_NR_signalfd)
11572 case TARGET_NR_signalfd
:
11573 ret
= do_signalfd4(arg1
, arg2
, 0);
11576 #if defined(CONFIG_EPOLL)
11577 #if defined(TARGET_NR_epoll_create)
11578 case TARGET_NR_epoll_create
:
11579 ret
= get_errno(epoll_create(arg1
));
11582 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11583 case TARGET_NR_epoll_create1
:
11584 ret
= get_errno(epoll_create1(arg1
));
11587 #if defined(TARGET_NR_epoll_ctl)
11588 case TARGET_NR_epoll_ctl
:
11590 struct epoll_event ep
;
11591 struct epoll_event
*epp
= 0;
11593 struct target_epoll_event
*target_ep
;
11594 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
11597 ep
.events
= tswap32(target_ep
->events
);
11598 /* The epoll_data_t union is just opaque data to the kernel,
11599 * so we transfer all 64 bits across and need not worry what
11600 * actual data type it is.
11602 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
11603 unlock_user_struct(target_ep
, arg4
, 0);
11606 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
11611 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11612 #if defined(TARGET_NR_epoll_wait)
11613 case TARGET_NR_epoll_wait
:
11615 #if defined(TARGET_NR_epoll_pwait)
11616 case TARGET_NR_epoll_pwait
:
11619 struct target_epoll_event
*target_ep
;
11620 struct epoll_event
*ep
;
11622 int maxevents
= arg3
;
11623 int timeout
= arg4
;
11625 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
11626 ret
= -TARGET_EINVAL
;
11630 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
11631 maxevents
* sizeof(struct target_epoll_event
), 1);
11636 ep
= alloca(maxevents
* sizeof(struct epoll_event
));
11639 #if defined(TARGET_NR_epoll_pwait)
11640 case TARGET_NR_epoll_pwait
:
11642 target_sigset_t
*target_set
;
11643 sigset_t _set
, *set
= &_set
;
11646 if (arg6
!= sizeof(target_sigset_t
)) {
11647 ret
= -TARGET_EINVAL
;
11651 target_set
= lock_user(VERIFY_READ
, arg5
,
11652 sizeof(target_sigset_t
), 1);
11654 unlock_user(target_ep
, arg2
, 0);
11657 target_to_host_sigset(set
, target_set
);
11658 unlock_user(target_set
, arg5
, 0);
11663 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
11664 set
, SIGSET_T_SIZE
));
11668 #if defined(TARGET_NR_epoll_wait)
11669 case TARGET_NR_epoll_wait
:
11670 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
11675 ret
= -TARGET_ENOSYS
;
11677 if (!is_error(ret
)) {
11679 for (i
= 0; i
< ret
; i
++) {
11680 target_ep
[i
].events
= tswap32(ep
[i
].events
);
11681 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
11684 unlock_user(target_ep
, arg2
, ret
* sizeof(struct target_epoll_event
));
11689 #ifdef TARGET_NR_prlimit64
11690 case TARGET_NR_prlimit64
:
11692 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11693 struct target_rlimit64
*target_rnew
, *target_rold
;
11694 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
11695 int resource
= target_to_host_resource(arg2
);
11697 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
11700 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
11701 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
11702 unlock_user_struct(target_rnew
, arg3
, 0);
11706 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
11707 if (!is_error(ret
) && arg4
) {
11708 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
11711 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
11712 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
11713 unlock_user_struct(target_rold
, arg4
, 1);
11718 #ifdef TARGET_NR_gethostname
11719 case TARGET_NR_gethostname
:
11721 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
11723 ret
= get_errno(gethostname(name
, arg2
));
11724 unlock_user(name
, arg1
, arg2
);
11726 ret
= -TARGET_EFAULT
;
11731 #ifdef TARGET_NR_atomic_cmpxchg_32
11732 case TARGET_NR_atomic_cmpxchg_32
:
11734 /* should use start_exclusive from main.c */
11735 abi_ulong mem_value
;
11736 if (get_user_u32(mem_value
, arg6
)) {
11737 target_siginfo_t info
;
11738 info
.si_signo
= SIGSEGV
;
11740 info
.si_code
= TARGET_SEGV_MAPERR
;
11741 info
._sifields
._sigfault
._addr
= arg6
;
11742 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
11743 QEMU_SI_FAULT
, &info
);
11747 if (mem_value
== arg2
)
11748 put_user_u32(arg1
, arg6
);
11753 #ifdef TARGET_NR_atomic_barrier
11754 case TARGET_NR_atomic_barrier
:
11756 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
11762 #ifdef TARGET_NR_timer_create
11763 case TARGET_NR_timer_create
:
11765 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11767 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
11770 int timer_index
= next_free_host_timer();
11772 if (timer_index
< 0) {
11773 ret
= -TARGET_EAGAIN
;
11775 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
11778 phost_sevp
= &host_sevp
;
11779 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
11785 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
11789 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
11798 #ifdef TARGET_NR_timer_settime
11799 case TARGET_NR_timer_settime
:
11801 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11802 * struct itimerspec * old_value */
11803 target_timer_t timerid
= get_timer_id(arg1
);
11807 } else if (arg3
== 0) {
11808 ret
= -TARGET_EINVAL
;
11810 timer_t htimer
= g_posix_timers
[timerid
];
11811 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
11813 target_to_host_itimerspec(&hspec_new
, arg3
);
11815 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
11816 host_to_target_itimerspec(arg2
, &hspec_old
);
11822 #ifdef TARGET_NR_timer_gettime
11823 case TARGET_NR_timer_gettime
:
11825 /* args: timer_t timerid, struct itimerspec *curr_value */
11826 target_timer_t timerid
= get_timer_id(arg1
);
11830 } else if (!arg2
) {
11831 ret
= -TARGET_EFAULT
;
11833 timer_t htimer
= g_posix_timers
[timerid
];
11834 struct itimerspec hspec
;
11835 ret
= get_errno(timer_gettime(htimer
, &hspec
));
11837 if (host_to_target_itimerspec(arg2
, &hspec
)) {
11838 ret
= -TARGET_EFAULT
;
11845 #ifdef TARGET_NR_timer_getoverrun
11846 case TARGET_NR_timer_getoverrun
:
11848 /* args: timer_t timerid */
11849 target_timer_t timerid
= get_timer_id(arg1
);
11854 timer_t htimer
= g_posix_timers
[timerid
];
11855 ret
= get_errno(timer_getoverrun(htimer
));
11857 fd_trans_unregister(ret
);
11862 #ifdef TARGET_NR_timer_delete
11863 case TARGET_NR_timer_delete
:
11865 /* args: timer_t timerid */
11866 target_timer_t timerid
= get_timer_id(arg1
);
11871 timer_t htimer
= g_posix_timers
[timerid
];
11872 ret
= get_errno(timer_delete(htimer
));
11873 g_posix_timers
[timerid
] = 0;
11879 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11880 case TARGET_NR_timerfd_create
:
11881 ret
= get_errno(timerfd_create(arg1
,
11882 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
11886 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11887 case TARGET_NR_timerfd_gettime
:
11889 struct itimerspec its_curr
;
11891 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
11893 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
11900 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11901 case TARGET_NR_timerfd_settime
:
11903 struct itimerspec its_new
, its_old
, *p_new
;
11906 if (target_to_host_itimerspec(&its_new
, arg3
)) {
11914 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
11916 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
11923 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11924 case TARGET_NR_ioprio_get
:
11925 ret
= get_errno(ioprio_get(arg1
, arg2
));
11929 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11930 case TARGET_NR_ioprio_set
:
11931 ret
= get_errno(ioprio_set(arg1
, arg2
, arg3
));
11935 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11936 case TARGET_NR_setns
:
11937 ret
= get_errno(setns(arg1
, arg2
));
11940 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11941 case TARGET_NR_unshare
:
11942 ret
= get_errno(unshare(arg1
));
11948 gemu_log("qemu: Unsupported syscall: %d\n", num
);
11949 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
11950 unimplemented_nowarn
:
11952 ret
= -TARGET_ENOSYS
;
11957 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
11960 print_syscall_ret(num
, ret
);
11961 trace_guest_user_syscall_ret(cpu
, num
, ret
);
11964 ret
= -TARGET_EFAULT
;