4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
36 #include <linux/capability.h>
39 int __clone2(int (*fn
)(void *), void *child_stack_base
,
40 size_t stack_size
, int flags
, void *arg
, ...);
42 #include <sys/socket.h>
46 #include <sys/times.h>
49 #include <sys/statfs.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include "qemu-common.h"
60 #include <sys/timerfd.h>
66 #include <sys/eventfd.h>
69 #include <sys/epoll.h>
72 #include "qemu/xattr.h"
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
91 #include <linux/mtio.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
107 #include <linux/if_bridge.h>
109 #include <linux/audit.h>
110 #include "linux_loop.h"
116 #define CLONE_IO 0x80000000 /* Clone io context */
119 /* We can't directly call the host clone syscall, because this will
120 * badly confuse libc (breaking mutexes, for example). So we must
121 * divide clone flags into:
122 * * flag combinations that look like pthread_create()
123 * * flag combinations that look like fork()
124 * * flags we can implement within QEMU itself
125 * * flags we can't support and will return an error for
127 /* For thread creation, all these flags must be present; for
128 * fork, none must be present.
130 #define CLONE_THREAD_FLAGS \
131 (CLONE_VM | CLONE_FS | CLONE_FILES | \
132 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
134 /* These flags are ignored:
135 * CLONE_DETACHED is now ignored by the kernel;
136 * CLONE_IO is just an optimisation hint to the I/O scheduler
138 #define CLONE_IGNORED_FLAGS \
139 (CLONE_DETACHED | CLONE_IO)
141 /* Flags for fork which we can implement within QEMU itself */
142 #define CLONE_OPTIONAL_FORK_FLAGS \
143 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
144 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
146 /* Flags for thread creation which we can implement within QEMU itself */
147 #define CLONE_OPTIONAL_THREAD_FLAGS \
148 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
149 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
151 #define CLONE_INVALID_FORK_FLAGS \
152 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
154 #define CLONE_INVALID_THREAD_FLAGS \
155 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
156 CLONE_IGNORED_FLAGS))
158 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
159 * have almost all been allocated. We cannot support any of
160 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
161 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
162 * The checks against the invalid thread masks above will catch these.
163 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
167 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
168 * once. This exercises the codepaths for restart.
170 //#define DEBUG_ERESTARTSYS
172 //#include <linux/msdos_fs.h>
173 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
174 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
184 #define _syscall0(type,name) \
185 static type name (void) \
187 return syscall(__NR_##name); \
190 #define _syscall1(type,name,type1,arg1) \
191 static type name (type1 arg1) \
193 return syscall(__NR_##name, arg1); \
196 #define _syscall2(type,name,type1,arg1,type2,arg2) \
197 static type name (type1 arg1,type2 arg2) \
199 return syscall(__NR_##name, arg1, arg2); \
202 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
203 static type name (type1 arg1,type2 arg2,type3 arg3) \
205 return syscall(__NR_##name, arg1, arg2, arg3); \
208 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
209 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
211 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
214 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
216 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
218 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
222 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
223 type5,arg5,type6,arg6) \
224 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
227 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
231 #define __NR_sys_uname __NR_uname
232 #define __NR_sys_getcwd1 __NR_getcwd
233 #define __NR_sys_getdents __NR_getdents
234 #define __NR_sys_getdents64 __NR_getdents64
235 #define __NR_sys_getpriority __NR_getpriority
236 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
237 #define __NR_sys_syslog __NR_syslog
238 #define __NR_sys_futex __NR_futex
239 #define __NR_sys_inotify_init __NR_inotify_init
240 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
241 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
243 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
245 #define __NR__llseek __NR_lseek
248 /* Newer kernel ports have llseek() instead of _llseek() */
249 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
250 #define TARGET_NR__llseek TARGET_NR_llseek
254 _syscall0(int, gettid
)
256 /* This is a replacement for the host gettid() and must return a host
258 static int gettid(void) {
262 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
263 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
265 #if !defined(__NR_getdents) || \
266 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
267 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
269 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
270 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
271 loff_t
*, res
, uint
, wh
);
273 _syscall3(int,sys_rt_sigqueueinfo
,int,pid
,int,sig
,siginfo_t
*,uinfo
)
274 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
275 #ifdef __NR_exit_group
276 _syscall1(int,exit_group
,int,error_code
)
278 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
279 _syscall1(int,set_tid_address
,int *,tidptr
)
281 #if defined(TARGET_NR_futex) && defined(__NR_futex)
282 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
283 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
285 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
286 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
287 unsigned long *, user_mask_ptr
);
288 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
289 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
290 unsigned long *, user_mask_ptr
);
291 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
293 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
294 struct __user_cap_data_struct
*, data
);
295 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
296 struct __user_cap_data_struct
*, data
);
297 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
298 _syscall2(int, ioprio_get
, int, which
, int, who
)
300 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
301 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
303 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
304 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
307 static bitmask_transtbl fcntl_flags_tbl
[] = {
308 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
309 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
310 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
311 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
312 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
313 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
314 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
315 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
316 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
317 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
318 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
319 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
320 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
321 #if defined(O_DIRECT)
322 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
324 #if defined(O_NOATIME)
325 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
327 #if defined(O_CLOEXEC)
328 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
331 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
333 /* Don't terminate the list prematurely on 64-bit host+guest. */
334 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
335 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
342 QEMU_IFLA_BR_FORWARD_DELAY
,
343 QEMU_IFLA_BR_HELLO_TIME
,
344 QEMU_IFLA_BR_MAX_AGE
,
345 QEMU_IFLA_BR_AGEING_TIME
,
346 QEMU_IFLA_BR_STP_STATE
,
347 QEMU_IFLA_BR_PRIORITY
,
348 QEMU_IFLA_BR_VLAN_FILTERING
,
349 QEMU_IFLA_BR_VLAN_PROTOCOL
,
350 QEMU_IFLA_BR_GROUP_FWD_MASK
,
351 QEMU_IFLA_BR_ROOT_ID
,
352 QEMU_IFLA_BR_BRIDGE_ID
,
353 QEMU_IFLA_BR_ROOT_PORT
,
354 QEMU_IFLA_BR_ROOT_PATH_COST
,
355 QEMU_IFLA_BR_TOPOLOGY_CHANGE
,
356 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED
,
357 QEMU_IFLA_BR_HELLO_TIMER
,
358 QEMU_IFLA_BR_TCN_TIMER
,
359 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER
,
360 QEMU_IFLA_BR_GC_TIMER
,
361 QEMU_IFLA_BR_GROUP_ADDR
,
362 QEMU_IFLA_BR_FDB_FLUSH
,
363 QEMU_IFLA_BR_MCAST_ROUTER
,
364 QEMU_IFLA_BR_MCAST_SNOOPING
,
365 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR
,
366 QEMU_IFLA_BR_MCAST_QUERIER
,
367 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY
,
368 QEMU_IFLA_BR_MCAST_HASH_MAX
,
369 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT
,
370 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT
,
371 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL
,
372 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL
,
373 QEMU_IFLA_BR_MCAST_QUERIER_INTVL
,
374 QEMU_IFLA_BR_MCAST_QUERY_INTVL
,
375 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
,
376 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL
,
377 QEMU_IFLA_BR_NF_CALL_IPTABLES
,
378 QEMU_IFLA_BR_NF_CALL_IP6TABLES
,
379 QEMU_IFLA_BR_NF_CALL_ARPTABLES
,
380 QEMU_IFLA_BR_VLAN_DEFAULT_PVID
,
382 QEMU_IFLA_BR_VLAN_STATS_ENABLED
,
383 QEMU_IFLA_BR_MCAST_STATS_ENABLED
,
407 QEMU_IFLA_NET_NS_PID
,
410 QEMU_IFLA_VFINFO_LIST
,
418 QEMU_IFLA_PROMISCUITY
,
419 QEMU_IFLA_NUM_TX_QUEUES
,
420 QEMU_IFLA_NUM_RX_QUEUES
,
422 QEMU_IFLA_PHYS_PORT_ID
,
423 QEMU_IFLA_CARRIER_CHANGES
,
424 QEMU_IFLA_PHYS_SWITCH_ID
,
425 QEMU_IFLA_LINK_NETNSID
,
426 QEMU_IFLA_PHYS_PORT_NAME
,
427 QEMU_IFLA_PROTO_DOWN
,
428 QEMU_IFLA_GSO_MAX_SEGS
,
429 QEMU_IFLA_GSO_MAX_SIZE
,
436 QEMU_IFLA_BRPORT_UNSPEC
,
437 QEMU_IFLA_BRPORT_STATE
,
438 QEMU_IFLA_BRPORT_PRIORITY
,
439 QEMU_IFLA_BRPORT_COST
,
440 QEMU_IFLA_BRPORT_MODE
,
441 QEMU_IFLA_BRPORT_GUARD
,
442 QEMU_IFLA_BRPORT_PROTECT
,
443 QEMU_IFLA_BRPORT_FAST_LEAVE
,
444 QEMU_IFLA_BRPORT_LEARNING
,
445 QEMU_IFLA_BRPORT_UNICAST_FLOOD
,
446 QEMU_IFLA_BRPORT_PROXYARP
,
447 QEMU_IFLA_BRPORT_LEARNING_SYNC
,
448 QEMU_IFLA_BRPORT_PROXYARP_WIFI
,
449 QEMU_IFLA_BRPORT_ROOT_ID
,
450 QEMU_IFLA_BRPORT_BRIDGE_ID
,
451 QEMU_IFLA_BRPORT_DESIGNATED_PORT
,
452 QEMU_IFLA_BRPORT_DESIGNATED_COST
,
455 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
,
456 QEMU_IFLA_BRPORT_CONFIG_PENDING
,
457 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER
,
458 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER
,
459 QEMU_IFLA_BRPORT_HOLD_TIMER
,
460 QEMU_IFLA_BRPORT_FLUSH
,
461 QEMU_IFLA_BRPORT_MULTICAST_ROUTER
,
462 QEMU_IFLA_BRPORT_PAD
,
463 QEMU___IFLA_BRPORT_MAX
467 QEMU_IFLA_INFO_UNSPEC
,
470 QEMU_IFLA_INFO_XSTATS
,
471 QEMU_IFLA_INFO_SLAVE_KIND
,
472 QEMU_IFLA_INFO_SLAVE_DATA
,
473 QEMU___IFLA_INFO_MAX
,
477 QEMU_IFLA_INET_UNSPEC
,
479 QEMU___IFLA_INET_MAX
,
483 QEMU_IFLA_INET6_UNSPEC
,
484 QEMU_IFLA_INET6_FLAGS
,
485 QEMU_IFLA_INET6_CONF
,
486 QEMU_IFLA_INET6_STATS
,
487 QEMU_IFLA_INET6_MCAST
,
488 QEMU_IFLA_INET6_CACHEINFO
,
489 QEMU_IFLA_INET6_ICMP6STATS
,
490 QEMU_IFLA_INET6_TOKEN
,
491 QEMU_IFLA_INET6_ADDR_GEN_MODE
,
492 QEMU___IFLA_INET6_MAX
495 typedef abi_long (*TargetFdDataFunc
)(void *, size_t);
496 typedef abi_long (*TargetFdAddrFunc
)(void *, abi_ulong
, socklen_t
);
497 typedef struct TargetFdTrans
{
498 TargetFdDataFunc host_to_target_data
;
499 TargetFdDataFunc target_to_host_data
;
500 TargetFdAddrFunc target_to_host_addr
;
503 static TargetFdTrans
**target_fd_trans
;
505 static unsigned int target_fd_max
;
507 static TargetFdDataFunc
fd_trans_target_to_host_data(int fd
)
509 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
510 return target_fd_trans
[fd
]->target_to_host_data
;
515 static TargetFdDataFunc
fd_trans_host_to_target_data(int fd
)
517 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
518 return target_fd_trans
[fd
]->host_to_target_data
;
523 static TargetFdAddrFunc
fd_trans_target_to_host_addr(int fd
)
525 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
526 return target_fd_trans
[fd
]->target_to_host_addr
;
531 static void fd_trans_register(int fd
, TargetFdTrans
*trans
)
535 if (fd
>= target_fd_max
) {
536 oldmax
= target_fd_max
;
537 target_fd_max
= ((fd
>> 6) + 1) << 6; /* by slice of 64 entries */
538 target_fd_trans
= g_renew(TargetFdTrans
*,
539 target_fd_trans
, target_fd_max
);
540 memset((void *)(target_fd_trans
+ oldmax
), 0,
541 (target_fd_max
- oldmax
) * sizeof(TargetFdTrans
*));
543 target_fd_trans
[fd
] = trans
;
546 static void fd_trans_unregister(int fd
)
548 if (fd
>= 0 && fd
< target_fd_max
) {
549 target_fd_trans
[fd
] = NULL
;
553 static void fd_trans_dup(int oldfd
, int newfd
)
555 fd_trans_unregister(newfd
);
556 if (oldfd
< target_fd_max
&& target_fd_trans
[oldfd
]) {
557 fd_trans_register(newfd
, target_fd_trans
[oldfd
]);
/*
 * getcwd()-style helper used by the getcwd syscall emulation.
 * Fills @buf with the current working directory and returns the
 * number of bytes used including the trailing NUL, or -1 on failure
 * (errno is left as set by getcwd()).
 */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return (-1);
    }
    return strlen(buf) + 1;
}
570 #ifdef TARGET_NR_utimensat
571 #if defined(__NR_utimensat)
572 #define __NR_sys_utimensat __NR_utimensat
573 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
574 const struct timespec
*,tsp
,int,flags
)
576 static int sys_utimensat(int dirfd
, const char *pathname
,
577 const struct timespec times
[2], int flags
)
583 #endif /* TARGET_NR_utimensat */
585 #ifdef CONFIG_INOTIFY
586 #include <sys/inotify.h>
588 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper so the emulation layer can call the libc inotify_init(). */
static int sys_inotify_init(void)
{
    return inotify_init();
}
594 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Thin wrapper around the libc inotify_add_watch(). */
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return inotify_add_watch(fd, pathname, mask);
}
600 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Thin wrapper around the libc inotify_rm_watch(). */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return inotify_rm_watch(fd, wd);
}
606 #ifdef CONFIG_INOTIFY1
607 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Thin wrapper around the libc inotify_init1() (flags variant). */
static int sys_inotify_init1(int flags)
{
    return inotify_init1(flags);
}
615 /* Userspace can usually survive runtime without inotify */
616 #undef TARGET_NR_inotify_init
617 #undef TARGET_NR_inotify_init1
618 #undef TARGET_NR_inotify_add_watch
619 #undef TARGET_NR_inotify_rm_watch
620 #endif /* CONFIG_INOTIFY */
622 #if defined(TARGET_NR_prlimit64)
623 #ifndef __NR_prlimit64
624 # define __NR_prlimit64 -1
626 #define __NR_sys_prlimit64 __NR_prlimit64
627 /* The glibc rlimit structure may not be that used by the underlying syscall */
628 struct host_rlimit64
{
632 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
633 const struct host_rlimit64
*, new_limit
,
634 struct host_rlimit64
*, old_limit
)
638 #if defined(TARGET_NR_timer_create)
639 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
640 static timer_t g_posix_timers
[32] = { 0, } ;
642 static inline int next_free_host_timer(void)
645 /* FIXME: Does finding the next free slot require a lock? */
646 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
647 if (g_posix_timers
[k
] == 0) {
648 g_posix_timers
[k
] = (timer_t
) 1;
656 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
658 static inline int regpairs_aligned(void *cpu_env
) {
659 return ((((CPUARMState
*)cpu_env
)->eabi
) == 1) ;
661 #elif defined(TARGET_MIPS)
662 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
663 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
664 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
665 * of registers which translates to the same as ARM/MIPS, because we start with
667 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
669 static inline int regpairs_aligned(void *cpu_env
) { return 0; }
672 #define ERRNO_TABLE_SIZE 1200
674 /* target_to_host_errno_table[] is initialized from
675 * host_to_target_errno_table[] in syscall_init(). */
676 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
680 * This list is the union of errno values overridden in asm-<arch>/errno.h
681 * minus the errnos that are not actually generic to all archs.
683 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
684 [EAGAIN
] = TARGET_EAGAIN
,
685 [EIDRM
] = TARGET_EIDRM
,
686 [ECHRNG
] = TARGET_ECHRNG
,
687 [EL2NSYNC
] = TARGET_EL2NSYNC
,
688 [EL3HLT
] = TARGET_EL3HLT
,
689 [EL3RST
] = TARGET_EL3RST
,
690 [ELNRNG
] = TARGET_ELNRNG
,
691 [EUNATCH
] = TARGET_EUNATCH
,
692 [ENOCSI
] = TARGET_ENOCSI
,
693 [EL2HLT
] = TARGET_EL2HLT
,
694 [EDEADLK
] = TARGET_EDEADLK
,
695 [ENOLCK
] = TARGET_ENOLCK
,
696 [EBADE
] = TARGET_EBADE
,
697 [EBADR
] = TARGET_EBADR
,
698 [EXFULL
] = TARGET_EXFULL
,
699 [ENOANO
] = TARGET_ENOANO
,
700 [EBADRQC
] = TARGET_EBADRQC
,
701 [EBADSLT
] = TARGET_EBADSLT
,
702 [EBFONT
] = TARGET_EBFONT
,
703 [ENOSTR
] = TARGET_ENOSTR
,
704 [ENODATA
] = TARGET_ENODATA
,
705 [ETIME
] = TARGET_ETIME
,
706 [ENOSR
] = TARGET_ENOSR
,
707 [ENONET
] = TARGET_ENONET
,
708 [ENOPKG
] = TARGET_ENOPKG
,
709 [EREMOTE
] = TARGET_EREMOTE
,
710 [ENOLINK
] = TARGET_ENOLINK
,
711 [EADV
] = TARGET_EADV
,
712 [ESRMNT
] = TARGET_ESRMNT
,
713 [ECOMM
] = TARGET_ECOMM
,
714 [EPROTO
] = TARGET_EPROTO
,
715 [EDOTDOT
] = TARGET_EDOTDOT
,
716 [EMULTIHOP
] = TARGET_EMULTIHOP
,
717 [EBADMSG
] = TARGET_EBADMSG
,
718 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
719 [EOVERFLOW
] = TARGET_EOVERFLOW
,
720 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
721 [EBADFD
] = TARGET_EBADFD
,
722 [EREMCHG
] = TARGET_EREMCHG
,
723 [ELIBACC
] = TARGET_ELIBACC
,
724 [ELIBBAD
] = TARGET_ELIBBAD
,
725 [ELIBSCN
] = TARGET_ELIBSCN
,
726 [ELIBMAX
] = TARGET_ELIBMAX
,
727 [ELIBEXEC
] = TARGET_ELIBEXEC
,
728 [EILSEQ
] = TARGET_EILSEQ
,
729 [ENOSYS
] = TARGET_ENOSYS
,
730 [ELOOP
] = TARGET_ELOOP
,
731 [ERESTART
] = TARGET_ERESTART
,
732 [ESTRPIPE
] = TARGET_ESTRPIPE
,
733 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
734 [EUSERS
] = TARGET_EUSERS
,
735 [ENOTSOCK
] = TARGET_ENOTSOCK
,
736 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
737 [EMSGSIZE
] = TARGET_EMSGSIZE
,
738 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
739 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
740 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
741 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
742 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
743 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
744 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
745 [EADDRINUSE
] = TARGET_EADDRINUSE
,
746 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
747 [ENETDOWN
] = TARGET_ENETDOWN
,
748 [ENETUNREACH
] = TARGET_ENETUNREACH
,
749 [ENETRESET
] = TARGET_ENETRESET
,
750 [ECONNABORTED
] = TARGET_ECONNABORTED
,
751 [ECONNRESET
] = TARGET_ECONNRESET
,
752 [ENOBUFS
] = TARGET_ENOBUFS
,
753 [EISCONN
] = TARGET_EISCONN
,
754 [ENOTCONN
] = TARGET_ENOTCONN
,
755 [EUCLEAN
] = TARGET_EUCLEAN
,
756 [ENOTNAM
] = TARGET_ENOTNAM
,
757 [ENAVAIL
] = TARGET_ENAVAIL
,
758 [EISNAM
] = TARGET_EISNAM
,
759 [EREMOTEIO
] = TARGET_EREMOTEIO
,
760 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
761 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
762 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
763 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
764 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
765 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
766 [EALREADY
] = TARGET_EALREADY
,
767 [EINPROGRESS
] = TARGET_EINPROGRESS
,
768 [ESTALE
] = TARGET_ESTALE
,
769 [ECANCELED
] = TARGET_ECANCELED
,
770 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
771 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
773 [ENOKEY
] = TARGET_ENOKEY
,
776 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
779 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
782 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
785 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
787 #ifdef ENOTRECOVERABLE
788 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
792 static inline int host_to_target_errno(int err
)
794 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
795 host_to_target_errno_table
[err
]) {
796 return host_to_target_errno_table
[err
];
801 static inline int target_to_host_errno(int err
)
803 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
804 target_to_host_errno_table
[err
]) {
805 return target_to_host_errno_table
[err
];
810 static inline abi_long
get_errno(abi_long ret
)
813 return -host_to_target_errno(errno
);
818 static inline int is_error(abi_long ret
)
820 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
823 const char *target_strerror(int err
)
825 if (err
== TARGET_ERESTARTSYS
) {
826 return "To be restarted";
828 if (err
== TARGET_QEMU_ESIGRETURN
) {
829 return "Successful exit from sigreturn";
832 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
835 return strerror(target_to_host_errno(err
));
838 #define safe_syscall0(type, name) \
839 static type safe_##name(void) \
841 return safe_syscall(__NR_##name); \
844 #define safe_syscall1(type, name, type1, arg1) \
845 static type safe_##name(type1 arg1) \
847 return safe_syscall(__NR_##name, arg1); \
850 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
851 static type safe_##name(type1 arg1, type2 arg2) \
853 return safe_syscall(__NR_##name, arg1, arg2); \
856 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
857 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
859 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
862 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
864 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
866 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
869 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
870 type4, arg4, type5, arg5) \
871 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
874 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
877 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
878 type4, arg4, type5, arg5, type6, arg6) \
879 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
880 type5 arg5, type6 arg6) \
882 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
885 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
886 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
887 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
888 int, flags
, mode_t
, mode
)
889 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
890 struct rusage
*, rusage
)
891 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
892 int, options
, struct rusage
*, rusage
)
893 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
894 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
895 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
896 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
897 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
899 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
900 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
902 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
903 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
904 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
905 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
906 safe_syscall2(int, tkill
, int, tid
, int, sig
)
907 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
908 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
909 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
910 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
912 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
913 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
914 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
915 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
916 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
917 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
918 safe_syscall2(int, flock
, int, fd
, int, operation
)
919 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
920 const struct timespec
*, uts
, size_t, sigsetsize
)
921 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
923 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
924 struct timespec
*, rem
)
925 #ifdef TARGET_NR_clock_nanosleep
926 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
927 const struct timespec
*, req
, struct timespec
*, rem
)
930 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
932 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
933 long, msgtype
, int, flags
)
934 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
935 unsigned, nsops
, const struct timespec
*, timeout
)
937 /* This host kernel architecture uses a single ipc syscall; fake up
938 * wrappers for the sub-operations to hide this implementation detail.
939 * Annoyingly we can't include linux/ipc.h to get the constant definitions
940 * for the call parameter because some structs in there conflict with the
941 * sys/ipc.h ones. So we just define them here, and rely on them being
942 * the same for all host architectures.
944 #define Q_SEMTIMEDOP 4
947 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
949 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
950 void *, ptr
, long, fifth
)
951 static int safe_msgsnd(int msgid
, const void *msgp
, size_t sz
, int flags
)
953 return safe_ipc(Q_IPCCALL(0, Q_MSGSND
), msgid
, sz
, flags
, (void *)msgp
, 0);
955 static int safe_msgrcv(int msgid
, void *msgp
, size_t sz
, long type
, int flags
)
957 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV
), msgid
, sz
, flags
, msgp
, type
);
959 static int safe_semtimedop(int semid
, struct sembuf
*tsops
, unsigned nsops
,
960 const struct timespec
*timeout
)
962 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP
), semid
, nsops
, 0, tsops
,
966 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
967 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
968 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
969 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
970 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
972 /* We do ioctl like this rather than via safe_syscall3 to preserve the
973 * "third argument might be integer or pointer or not present" behaviour of
976 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
977 /* Similarly for fcntl. Note that callers must always:
978 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
979 * use the flock64 struct rather than unsuffixed flock
980 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
983 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
985 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
988 static inline int host_to_target_sock_type(int host_type
)
992 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
994 target_type
= TARGET_SOCK_DGRAM
;
997 target_type
= TARGET_SOCK_STREAM
;
1000 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
1004 #if defined(SOCK_CLOEXEC)
1005 if (host_type
& SOCK_CLOEXEC
) {
1006 target_type
|= TARGET_SOCK_CLOEXEC
;
1010 #if defined(SOCK_NONBLOCK)
1011 if (host_type
& SOCK_NONBLOCK
) {
1012 target_type
|= TARGET_SOCK_NONBLOCK
;
1019 static abi_ulong target_brk
;
1020 static abi_ulong target_original_brk
;
1021 static abi_ulong brk_page
;
1023 void target_set_brk(abi_ulong new_brk
)
1025 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
1026 brk_page
= HOST_PAGE_ALIGN(target_brk
);
1029 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
1030 #define DEBUGF_BRK(message, args...)
1032 /* do_brk() must return target values and target errnos. */
1033 abi_long
do_brk(abi_ulong new_brk
)
1035 abi_long mapped_addr
;
1036 abi_ulong new_alloc_size
;
1038 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
1041 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
1044 if (new_brk
< target_original_brk
) {
1045 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
1050 /* If the new brk is less than the highest page reserved to the
1051 * target heap allocation, set it and we're almost done... */
1052 if (new_brk
<= brk_page
) {
1053 /* Heap contents are initialized to zero, as for anonymous
1055 if (new_brk
> target_brk
) {
1056 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
1058 target_brk
= new_brk
;
1059 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
1063 /* We need to allocate more memory after the brk... Note that
1064 * we don't use MAP_FIXED because that will map over the top of
1065 * any existing mapping (like the one with the host libc or qemu
1066 * itself); instead we treat "mapped but at wrong address" as
1067 * a failure and unmap again.
1069 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
1070 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
1071 PROT_READ
|PROT_WRITE
,
1072 MAP_ANON
|MAP_PRIVATE
, 0, 0));
1074 if (mapped_addr
== brk_page
) {
1075 /* Heap contents are initialized to zero, as for anonymous
1076 * mapped pages. Technically the new pages are already
1077 * initialized to zero since they *are* anonymous mapped
1078 * pages, however we have to take care with the contents that
1079 * come from the remaining part of the previous page: it may
1080 * contains garbage data due to a previous heap usage (grown
1081 * then shrunken). */
1082 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
1084 target_brk
= new_brk
;
1085 brk_page
= HOST_PAGE_ALIGN(target_brk
);
1086 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
1089 } else if (mapped_addr
!= -1) {
1090 /* Mapped but at wrong address, meaning there wasn't actually
1091 * enough space for this brk.
1093 target_munmap(mapped_addr
, new_alloc_size
);
1095 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
1098 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
1101 #if defined(TARGET_ALPHA)
1102 /* We (partially) emulate OSF/1 on Alpha, which requires we
1103 return a proper errno, not an unchanged brk value. */
1104 return -TARGET_ENOMEM
;
1106 /* For everything else, return the previous break. */
1110 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
1111 abi_ulong target_fds_addr
,
1115 abi_ulong b
, *target_fds
;
1117 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1118 if (!(target_fds
= lock_user(VERIFY_READ
,
1120 sizeof(abi_ulong
) * nw
,
1122 return -TARGET_EFAULT
;
1126 for (i
= 0; i
< nw
; i
++) {
1127 /* grab the abi_ulong */
1128 __get_user(b
, &target_fds
[i
]);
1129 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1130 /* check the bit inside the abi_ulong */
1137 unlock_user(target_fds
, target_fds_addr
, 0);
1142 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
1143 abi_ulong target_fds_addr
,
1146 if (target_fds_addr
) {
1147 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
1148 return -TARGET_EFAULT
;
1156 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
1162 abi_ulong
*target_fds
;
1164 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1165 if (!(target_fds
= lock_user(VERIFY_WRITE
,
1167 sizeof(abi_ulong
) * nw
,
1169 return -TARGET_EFAULT
;
1172 for (i
= 0; i
< nw
; i
++) {
1174 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1175 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
1178 __put_user(v
, &target_fds
[i
]);
1181 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
1186 #if defined(__alpha__)
1187 #define HOST_HZ 1024
1192 static inline abi_long
host_to_target_clock_t(long ticks
)
1194 #if HOST_HZ == TARGET_HZ
1197 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1201 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1202 const struct rusage
*rusage
)
1204 struct target_rusage
*target_rusage
;
1206 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1207 return -TARGET_EFAULT
;
1208 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1209 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1210 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1211 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1212 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1213 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1214 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1215 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1216 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1217 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1218 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1219 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1220 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1221 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1222 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1223 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1224 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1225 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1226 unlock_user_struct(target_rusage
, target_addr
, 1);
1231 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1233 abi_ulong target_rlim_swap
;
1236 target_rlim_swap
= tswapal(target_rlim
);
1237 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1238 return RLIM_INFINITY
;
1240 result
= target_rlim_swap
;
1241 if (target_rlim_swap
!= (rlim_t
)result
)
1242 return RLIM_INFINITY
;
1247 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1249 abi_ulong target_rlim_swap
;
1252 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1253 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1255 target_rlim_swap
= rlim
;
1256 result
= tswapal(target_rlim_swap
);
1261 static inline int target_to_host_resource(int code
)
1264 case TARGET_RLIMIT_AS
:
1266 case TARGET_RLIMIT_CORE
:
1268 case TARGET_RLIMIT_CPU
:
1270 case TARGET_RLIMIT_DATA
:
1272 case TARGET_RLIMIT_FSIZE
:
1273 return RLIMIT_FSIZE
;
1274 case TARGET_RLIMIT_LOCKS
:
1275 return RLIMIT_LOCKS
;
1276 case TARGET_RLIMIT_MEMLOCK
:
1277 return RLIMIT_MEMLOCK
;
1278 case TARGET_RLIMIT_MSGQUEUE
:
1279 return RLIMIT_MSGQUEUE
;
1280 case TARGET_RLIMIT_NICE
:
1282 case TARGET_RLIMIT_NOFILE
:
1283 return RLIMIT_NOFILE
;
1284 case TARGET_RLIMIT_NPROC
:
1285 return RLIMIT_NPROC
;
1286 case TARGET_RLIMIT_RSS
:
1288 case TARGET_RLIMIT_RTPRIO
:
1289 return RLIMIT_RTPRIO
;
1290 case TARGET_RLIMIT_SIGPENDING
:
1291 return RLIMIT_SIGPENDING
;
1292 case TARGET_RLIMIT_STACK
:
1293 return RLIMIT_STACK
;
1299 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1300 abi_ulong target_tv_addr
)
1302 struct target_timeval
*target_tv
;
1304 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
1305 return -TARGET_EFAULT
;
1307 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1308 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1310 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1315 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1316 const struct timeval
*tv
)
1318 struct target_timeval
*target_tv
;
1320 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
1321 return -TARGET_EFAULT
;
1323 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1324 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1326 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1331 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1332 abi_ulong target_tz_addr
)
1334 struct target_timezone
*target_tz
;
1336 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1337 return -TARGET_EFAULT
;
1340 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1341 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1343 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1348 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1351 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1352 abi_ulong target_mq_attr_addr
)
1354 struct target_mq_attr
*target_mq_attr
;
1356 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1357 target_mq_attr_addr
, 1))
1358 return -TARGET_EFAULT
;
1360 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1361 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1362 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1363 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1365 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1370 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1371 const struct mq_attr
*attr
)
1373 struct target_mq_attr
*target_mq_attr
;
1375 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1376 target_mq_attr_addr
, 0))
1377 return -TARGET_EFAULT
;
1379 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1380 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1381 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1382 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1384 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos.
 *
 * Implements select() on top of the host's pselect6, converting the
 * guest fd sets and timeout in, and back out on success.  A zero
 * address for any fd set or the timeout means "not supplied".
 */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        /* pselect6 takes a timespec; convert microseconds to nanoseconds. */
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        /* Linux select() updates the timeout with the time not slept. */
        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
#endif
#if defined(TARGET_WANT_OLD_SYS_SELECT)
/* Old-style select: a single guest pointer to a struct bundling all
 * five arguments.  Unpack it and forward to do_select().
 */
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
1472 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1475 return pipe2(host_pipe
, flags
);
1481 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1482 int flags
, int is_pipe2
)
1486 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1489 return get_errno(ret
);
1491 /* Several targets have special calling conventions for the original
1492 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1494 #if defined(TARGET_ALPHA)
1495 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1496 return host_pipe
[0];
1497 #elif defined(TARGET_MIPS)
1498 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1499 return host_pipe
[0];
1500 #elif defined(TARGET_SH4)
1501 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1502 return host_pipe
[0];
1503 #elif defined(TARGET_SPARC)
1504 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1505 return host_pipe
[0];
1509 if (put_user_s32(host_pipe
[0], pipedes
)
1510 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1511 return -TARGET_EFAULT
;
1512 return get_errno(ret
);
1515 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1516 abi_ulong target_addr
,
1519 struct target_ip_mreqn
*target_smreqn
;
1521 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1523 return -TARGET_EFAULT
;
1524 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1525 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1526 if (len
== sizeof(struct target_ip_mreqn
))
1527 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1528 unlock_user(target_smreqn
, target_addr
, 0);
1533 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1534 abi_ulong target_addr
,
1537 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1538 sa_family_t sa_family
;
1539 struct target_sockaddr
*target_saddr
;
1541 if (fd_trans_target_to_host_addr(fd
)) {
1542 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1545 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1547 return -TARGET_EFAULT
;
1549 sa_family
= tswap16(target_saddr
->sa_family
);
1551 /* Oops. The caller might send a incomplete sun_path; sun_path
1552 * must be terminated by \0 (see the manual page), but
1553 * unfortunately it is quite common to specify sockaddr_un
1554 * length as "strlen(x->sun_path)" while it should be
1555 * "strlen(...) + 1". We'll fix that here if needed.
1556 * Linux kernel has a similar feature.
1559 if (sa_family
== AF_UNIX
) {
1560 if (len
< unix_maxlen
&& len
> 0) {
1561 char *cp
= (char*)target_saddr
;
1563 if ( cp
[len
-1] && !cp
[len
] )
1566 if (len
> unix_maxlen
)
1570 memcpy(addr
, target_saddr
, len
);
1571 addr
->sa_family
= sa_family
;
1572 if (sa_family
== AF_NETLINK
) {
1573 struct sockaddr_nl
*nladdr
;
1575 nladdr
= (struct sockaddr_nl
*)addr
;
1576 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1577 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1578 } else if (sa_family
== AF_PACKET
) {
1579 struct target_sockaddr_ll
*lladdr
;
1581 lladdr
= (struct target_sockaddr_ll
*)addr
;
1582 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1583 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1585 unlock_user(target_saddr
, target_addr
, 0);
1590 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1591 struct sockaddr
*addr
,
1594 struct target_sockaddr
*target_saddr
;
1600 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1602 return -TARGET_EFAULT
;
1603 memcpy(target_saddr
, addr
, len
);
1604 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1605 sizeof(target_saddr
->sa_family
)) {
1606 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1608 if (addr
->sa_family
== AF_NETLINK
&& len
>= sizeof(struct sockaddr_nl
)) {
1609 struct sockaddr_nl
*target_nl
= (struct sockaddr_nl
*)target_saddr
;
1610 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1611 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1612 } else if (addr
->sa_family
== AF_PACKET
) {
1613 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1614 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1615 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1617 unlock_user(target_saddr
, target_addr
, len
);
1622 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1623 struct target_msghdr
*target_msgh
)
1625 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1626 abi_long msg_controllen
;
1627 abi_ulong target_cmsg_addr
;
1628 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1629 socklen_t space
= 0;
1631 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1632 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1634 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1635 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1636 target_cmsg_start
= target_cmsg
;
1638 return -TARGET_EFAULT
;
1640 while (cmsg
&& target_cmsg
) {
1641 void *data
= CMSG_DATA(cmsg
);
1642 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1644 int len
= tswapal(target_cmsg
->cmsg_len
)
1645 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1647 space
+= CMSG_SPACE(len
);
1648 if (space
> msgh
->msg_controllen
) {
1649 space
-= CMSG_SPACE(len
);
1650 /* This is a QEMU bug, since we allocated the payload
1651 * area ourselves (unlike overflow in host-to-target
1652 * conversion, which is just the guest giving us a buffer
1653 * that's too small). It can't happen for the payload types
1654 * we currently support; if it becomes an issue in future
1655 * we would need to improve our allocation strategy to
1656 * something more intelligent than "twice the size of the
1657 * target buffer we're reading from".
1659 gemu_log("Host cmsg overflow\n");
1663 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1664 cmsg
->cmsg_level
= SOL_SOCKET
;
1666 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1668 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1669 cmsg
->cmsg_len
= CMSG_LEN(len
);
1671 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1672 int *fd
= (int *)data
;
1673 int *target_fd
= (int *)target_data
;
1674 int i
, numfds
= len
/ sizeof(int);
1676 for (i
= 0; i
< numfds
; i
++) {
1677 __get_user(fd
[i
], target_fd
+ i
);
1679 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1680 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1681 struct ucred
*cred
= (struct ucred
*)data
;
1682 struct target_ucred
*target_cred
=
1683 (struct target_ucred
*)target_data
;
1685 __get_user(cred
->pid
, &target_cred
->pid
);
1686 __get_user(cred
->uid
, &target_cred
->uid
);
1687 __get_user(cred
->gid
, &target_cred
->gid
);
1689 gemu_log("Unsupported ancillary data: %d/%d\n",
1690 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1691 memcpy(data
, target_data
, len
);
1694 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1695 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1698 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1700 msgh
->msg_controllen
= space
;
1704 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1705 struct msghdr
*msgh
)
1707 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1708 abi_long msg_controllen
;
1709 abi_ulong target_cmsg_addr
;
1710 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1711 socklen_t space
= 0;
1713 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1714 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1716 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1717 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1718 target_cmsg_start
= target_cmsg
;
1720 return -TARGET_EFAULT
;
1722 while (cmsg
&& target_cmsg
) {
1723 void *data
= CMSG_DATA(cmsg
);
1724 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1726 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1727 int tgt_len
, tgt_space
;
1729 /* We never copy a half-header but may copy half-data;
1730 * this is Linux's behaviour in put_cmsg(). Note that
1731 * truncation here is a guest problem (which we report
1732 * to the guest via the CTRUNC bit), unlike truncation
1733 * in target_to_host_cmsg, which is a QEMU bug.
1735 if (msg_controllen
< sizeof(struct cmsghdr
)) {
1736 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1740 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1741 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1743 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1745 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1747 tgt_len
= TARGET_CMSG_LEN(len
);
1749 /* Payload types which need a different size of payload on
1750 * the target must adjust tgt_len here.
1752 switch (cmsg
->cmsg_level
) {
1754 switch (cmsg
->cmsg_type
) {
1756 tgt_len
= sizeof(struct target_timeval
);
1765 if (msg_controllen
< tgt_len
) {
1766 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1767 tgt_len
= msg_controllen
;
1770 /* We must now copy-and-convert len bytes of payload
1771 * into tgt_len bytes of destination space. Bear in mind
1772 * that in both source and destination we may be dealing
1773 * with a truncated value!
1775 switch (cmsg
->cmsg_level
) {
1777 switch (cmsg
->cmsg_type
) {
1780 int *fd
= (int *)data
;
1781 int *target_fd
= (int *)target_data
;
1782 int i
, numfds
= tgt_len
/ sizeof(int);
1784 for (i
= 0; i
< numfds
; i
++) {
1785 __put_user(fd
[i
], target_fd
+ i
);
1791 struct timeval
*tv
= (struct timeval
*)data
;
1792 struct target_timeval
*target_tv
=
1793 (struct target_timeval
*)target_data
;
1795 if (len
!= sizeof(struct timeval
) ||
1796 tgt_len
!= sizeof(struct target_timeval
)) {
1800 /* copy struct timeval to target */
1801 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1802 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1805 case SCM_CREDENTIALS
:
1807 struct ucred
*cred
= (struct ucred
*)data
;
1808 struct target_ucred
*target_cred
=
1809 (struct target_ucred
*)target_data
;
1811 __put_user(cred
->pid
, &target_cred
->pid
);
1812 __put_user(cred
->uid
, &target_cred
->uid
);
1813 __put_user(cred
->gid
, &target_cred
->gid
);
1823 gemu_log("Unsupported ancillary data: %d/%d\n",
1824 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1825 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1826 if (tgt_len
> len
) {
1827 memset(target_data
+ len
, 0, tgt_len
- len
);
1831 target_cmsg
->cmsg_len
= tswapal(tgt_len
);
1832 tgt_space
= TARGET_CMSG_SPACE(len
);
1833 if (msg_controllen
< tgt_space
) {
1834 tgt_space
= msg_controllen
;
1836 msg_controllen
-= tgt_space
;
1838 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1839 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1842 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1844 target_msgh
->msg_controllen
= tswapal(space
);
/* Byte swap every field of a netlink message header in place. */
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
    nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
    nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
}
1857 static abi_long
host_to_target_for_each_nlmsg(struct nlmsghdr
*nlh
,
1859 abi_long (*host_to_target_nlmsg
)
1860 (struct nlmsghdr
*))
1865 while (len
> sizeof(struct nlmsghdr
)) {
1867 nlmsg_len
= nlh
->nlmsg_len
;
1868 if (nlmsg_len
< sizeof(struct nlmsghdr
) ||
1873 switch (nlh
->nlmsg_type
) {
1875 tswap_nlmsghdr(nlh
);
1881 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
1882 e
->error
= tswap32(e
->error
);
1883 tswap_nlmsghdr(&e
->msg
);
1884 tswap_nlmsghdr(nlh
);
1888 ret
= host_to_target_nlmsg(nlh
);
1890 tswap_nlmsghdr(nlh
);
1895 tswap_nlmsghdr(nlh
);
1896 len
-= NLMSG_ALIGN(nlmsg_len
);
1897 nlh
= (struct nlmsghdr
*)(((char*)nlh
) + NLMSG_ALIGN(nlmsg_len
));
1902 static abi_long
target_to_host_for_each_nlmsg(struct nlmsghdr
*nlh
,
1904 abi_long (*target_to_host_nlmsg
)
1905 (struct nlmsghdr
*))
1909 while (len
> sizeof(struct nlmsghdr
)) {
1910 if (tswap32(nlh
->nlmsg_len
) < sizeof(struct nlmsghdr
) ||
1911 tswap32(nlh
->nlmsg_len
) > len
) {
1914 tswap_nlmsghdr(nlh
);
1915 switch (nlh
->nlmsg_type
) {
1922 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
1923 e
->error
= tswap32(e
->error
);
1924 tswap_nlmsghdr(&e
->msg
);
1928 ret
= target_to_host_nlmsg(nlh
);
1933 len
-= NLMSG_ALIGN(nlh
->nlmsg_len
);
1934 nlh
= (struct nlmsghdr
*)(((char *)nlh
) + NLMSG_ALIGN(nlh
->nlmsg_len
));
1939 #ifdef CONFIG_RTNETLINK
1940 static abi_long
host_to_target_for_each_nlattr(struct nlattr
*nlattr
,
1941 size_t len
, void *context
,
1942 abi_long (*host_to_target_nlattr
)
1946 unsigned short nla_len
;
1949 while (len
> sizeof(struct nlattr
)) {
1950 nla_len
= nlattr
->nla_len
;
1951 if (nla_len
< sizeof(struct nlattr
) ||
1955 ret
= host_to_target_nlattr(nlattr
, context
);
1956 nlattr
->nla_len
= tswap16(nlattr
->nla_len
);
1957 nlattr
->nla_type
= tswap16(nlattr
->nla_type
);
1961 len
-= NLA_ALIGN(nla_len
);
1962 nlattr
= (struct nlattr
*)(((char *)nlattr
) + NLA_ALIGN(nla_len
));
1967 static abi_long
host_to_target_for_each_rtattr(struct rtattr
*rtattr
,
1969 abi_long (*host_to_target_rtattr
)
1972 unsigned short rta_len
;
1975 while (len
> sizeof(struct rtattr
)) {
1976 rta_len
= rtattr
->rta_len
;
1977 if (rta_len
< sizeof(struct rtattr
) ||
1981 ret
= host_to_target_rtattr(rtattr
);
1982 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
1983 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
1987 len
-= RTA_ALIGN(rta_len
);
1988 rtattr
= (struct rtattr
*)(((char *)rtattr
) + RTA_ALIGN(rta_len
));
/* Pointer to the payload that follows a struct nlattr header.
 * The offset is applied to the char * cast (not the void *) so the
 * macro is standard C rather than relying on GCC's void-pointer
 * arithmetic extension; the resulting address is identical.
 */
#define NLA_DATA(nla) ((void *)((char *)(nla) + NLA_HDRLEN))
1995 static abi_long
host_to_target_data_bridge_nlattr(struct nlattr
*nlattr
,
2002 switch (nlattr
->nla_type
) {
2004 case QEMU_IFLA_BR_FDB_FLUSH
:
2007 case QEMU_IFLA_BR_GROUP_ADDR
:
2010 case QEMU_IFLA_BR_VLAN_FILTERING
:
2011 case QEMU_IFLA_BR_TOPOLOGY_CHANGE
:
2012 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED
:
2013 case QEMU_IFLA_BR_MCAST_ROUTER
:
2014 case QEMU_IFLA_BR_MCAST_SNOOPING
:
2015 case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR
:
2016 case QEMU_IFLA_BR_MCAST_QUERIER
:
2017 case QEMU_IFLA_BR_NF_CALL_IPTABLES
:
2018 case QEMU_IFLA_BR_NF_CALL_IP6TABLES
:
2019 case QEMU_IFLA_BR_NF_CALL_ARPTABLES
:
2022 case QEMU_IFLA_BR_PRIORITY
:
2023 case QEMU_IFLA_BR_VLAN_PROTOCOL
:
2024 case QEMU_IFLA_BR_GROUP_FWD_MASK
:
2025 case QEMU_IFLA_BR_ROOT_PORT
:
2026 case QEMU_IFLA_BR_VLAN_DEFAULT_PVID
:
2027 u16
= NLA_DATA(nlattr
);
2028 *u16
= tswap16(*u16
);
2031 case QEMU_IFLA_BR_FORWARD_DELAY
:
2032 case QEMU_IFLA_BR_HELLO_TIME
:
2033 case QEMU_IFLA_BR_MAX_AGE
:
2034 case QEMU_IFLA_BR_AGEING_TIME
:
2035 case QEMU_IFLA_BR_STP_STATE
:
2036 case QEMU_IFLA_BR_ROOT_PATH_COST
:
2037 case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY
:
2038 case QEMU_IFLA_BR_MCAST_HASH_MAX
:
2039 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT
:
2040 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT
:
2041 u32
= NLA_DATA(nlattr
);
2042 *u32
= tswap32(*u32
);
2045 case QEMU_IFLA_BR_HELLO_TIMER
:
2046 case QEMU_IFLA_BR_TCN_TIMER
:
2047 case QEMU_IFLA_BR_GC_TIMER
:
2048 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER
:
2049 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL
:
2050 case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL
:
2051 case QEMU_IFLA_BR_MCAST_QUERIER_INTVL
:
2052 case QEMU_IFLA_BR_MCAST_QUERY_INTVL
:
2053 case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
:
2054 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL
:
2055 u64
= NLA_DATA(nlattr
);
2056 *u64
= tswap64(*u64
);
2058 /* ifla_bridge_id: uin8_t[] */
2059 case QEMU_IFLA_BR_ROOT_ID
:
2060 case QEMU_IFLA_BR_BRIDGE_ID
:
2063 gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr
->nla_type
);
2069 static abi_long
host_to_target_slave_data_bridge_nlattr(struct nlattr
*nlattr
,
2076 switch (nlattr
->nla_type
) {
2078 case QEMU_IFLA_BRPORT_STATE
:
2079 case QEMU_IFLA_BRPORT_MODE
:
2080 case QEMU_IFLA_BRPORT_GUARD
:
2081 case QEMU_IFLA_BRPORT_PROTECT
:
2082 case QEMU_IFLA_BRPORT_FAST_LEAVE
:
2083 case QEMU_IFLA_BRPORT_LEARNING
:
2084 case QEMU_IFLA_BRPORT_UNICAST_FLOOD
:
2085 case QEMU_IFLA_BRPORT_PROXYARP
:
2086 case QEMU_IFLA_BRPORT_LEARNING_SYNC
:
2087 case QEMU_IFLA_BRPORT_PROXYARP_WIFI
:
2088 case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
:
2089 case QEMU_IFLA_BRPORT_CONFIG_PENDING
:
2090 case QEMU_IFLA_BRPORT_MULTICAST_ROUTER
:
2093 case QEMU_IFLA_BRPORT_PRIORITY
:
2094 case QEMU_IFLA_BRPORT_DESIGNATED_PORT
:
2095 case QEMU_IFLA_BRPORT_DESIGNATED_COST
:
2096 case QEMU_IFLA_BRPORT_ID
:
2097 case QEMU_IFLA_BRPORT_NO
:
2098 u16
= NLA_DATA(nlattr
);
2099 *u16
= tswap16(*u16
);
2102 case QEMU_IFLA_BRPORT_COST
:
2103 u32
= NLA_DATA(nlattr
);
2104 *u32
= tswap32(*u32
);
2107 case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER
:
2108 case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER
:
2109 case QEMU_IFLA_BRPORT_HOLD_TIMER
:
2110 u64
= NLA_DATA(nlattr
);
2111 *u64
= tswap64(*u64
);
2113 /* ifla_bridge_id: uint8_t[] */
2114 case QEMU_IFLA_BRPORT_ROOT_ID
:
2115 case QEMU_IFLA_BRPORT_BRIDGE_ID
:
2118 gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr
->nla_type
);
/* Carries the interface kind strings seen in IFLA_INFO_KIND /
 * IFLA_INFO_SLAVE_KIND so that later nested IFLA_INFO_DATA attributes
 * can be dispatched to the right converter.
 * NOTE(review): field list reconstructed — the struct body was lost in
 * extraction; confirm against upstream.
 */
struct linkinfo_context {
    int len;
    char *name;
    int slave_len;
    char *slave_name;
};
2131 static abi_long
host_to_target_data_linkinfo_nlattr(struct nlattr
*nlattr
,
2134 struct linkinfo_context
*li_context
= context
;
2136 switch (nlattr
->nla_type
) {
2138 case QEMU_IFLA_INFO_KIND
:
2139 li_context
->name
= NLA_DATA(nlattr
);
2140 li_context
->len
= nlattr
->nla_len
- NLA_HDRLEN
;
2142 case QEMU_IFLA_INFO_SLAVE_KIND
:
2143 li_context
->slave_name
= NLA_DATA(nlattr
);
2144 li_context
->slave_len
= nlattr
->nla_len
- NLA_HDRLEN
;
2147 case QEMU_IFLA_INFO_XSTATS
:
2148 /* FIXME: only used by CAN */
2151 case QEMU_IFLA_INFO_DATA
:
2152 if (strncmp(li_context
->name
, "bridge",
2153 li_context
->len
) == 0) {
2154 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
2157 host_to_target_data_bridge_nlattr
);
2159 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context
->name
);
2162 case QEMU_IFLA_INFO_SLAVE_DATA
:
2163 if (strncmp(li_context
->slave_name
, "bridge",
2164 li_context
->slave_len
) == 0) {
2165 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
2168 host_to_target_slave_data_bridge_nlattr
);
2170 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2171 li_context
->slave_name
);
2175 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr
->nla_type
);
2182 static abi_long
host_to_target_data_inet_nlattr(struct nlattr
*nlattr
,
2188 switch (nlattr
->nla_type
) {
2189 case QEMU_IFLA_INET_CONF
:
2190 u32
= NLA_DATA(nlattr
);
2191 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u32
);
2193 u32
[i
] = tswap32(u32
[i
]);
2197 gemu_log("Unknown host AF_INET type: %d\n", nlattr
->nla_type
);
2202 static abi_long
host_to_target_data_inet6_nlattr(struct nlattr
*nlattr
,
2207 struct ifla_cacheinfo
*ci
;
2210 switch (nlattr
->nla_type
) {
2212 case QEMU_IFLA_INET6_TOKEN
:
2215 case QEMU_IFLA_INET6_ADDR_GEN_MODE
:
2218 case QEMU_IFLA_INET6_FLAGS
:
2219 u32
= NLA_DATA(nlattr
);
2220 *u32
= tswap32(*u32
);
2223 case QEMU_IFLA_INET6_CONF
:
2224 u32
= NLA_DATA(nlattr
);
2225 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u32
);
2227 u32
[i
] = tswap32(u32
[i
]);
2230 /* ifla_cacheinfo */
2231 case QEMU_IFLA_INET6_CACHEINFO
:
2232 ci
= NLA_DATA(nlattr
);
2233 ci
->max_reasm_len
= tswap32(ci
->max_reasm_len
);
2234 ci
->tstamp
= tswap32(ci
->tstamp
);
2235 ci
->reachable_time
= tswap32(ci
->reachable_time
);
2236 ci
->retrans_time
= tswap32(ci
->retrans_time
);
2239 case QEMU_IFLA_INET6_STATS
:
2240 case QEMU_IFLA_INET6_ICMP6STATS
:
2241 u64
= NLA_DATA(nlattr
);
2242 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u64
);
2244 u64
[i
] = tswap64(u64
[i
]);
2248 gemu_log("Unknown host AF_INET6 type: %d\n", nlattr
->nla_type
);
2253 static abi_long
host_to_target_data_spec_nlattr(struct nlattr
*nlattr
,
2256 switch (nlattr
->nla_type
) {
2258 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2260 host_to_target_data_inet_nlattr
);
2262 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2264 host_to_target_data_inet6_nlattr
);
2266 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr
->nla_type
);
2272 static abi_long
host_to_target_data_link_rtattr(struct rtattr
*rtattr
)
2275 struct rtnl_link_stats
*st
;
2276 struct rtnl_link_stats64
*st64
;
2277 struct rtnl_link_ifmap
*map
;
2278 struct linkinfo_context li_context
;
2280 switch (rtattr
->rta_type
) {
2282 case QEMU_IFLA_ADDRESS
:
2283 case QEMU_IFLA_BROADCAST
:
2285 case QEMU_IFLA_IFNAME
:
2286 case QEMU_IFLA_QDISC
:
2289 case QEMU_IFLA_OPERSTATE
:
2290 case QEMU_IFLA_LINKMODE
:
2291 case QEMU_IFLA_CARRIER
:
2292 case QEMU_IFLA_PROTO_DOWN
:
2296 case QEMU_IFLA_LINK
:
2297 case QEMU_IFLA_WEIGHT
:
2298 case QEMU_IFLA_TXQLEN
:
2299 case QEMU_IFLA_CARRIER_CHANGES
:
2300 case QEMU_IFLA_NUM_RX_QUEUES
:
2301 case QEMU_IFLA_NUM_TX_QUEUES
:
2302 case QEMU_IFLA_PROMISCUITY
:
2303 case QEMU_IFLA_EXT_MASK
:
2304 case QEMU_IFLA_LINK_NETNSID
:
2305 case QEMU_IFLA_GROUP
:
2306 case QEMU_IFLA_MASTER
:
2307 case QEMU_IFLA_NUM_VF
:
2308 u32
= RTA_DATA(rtattr
);
2309 *u32
= tswap32(*u32
);
2311 /* struct rtnl_link_stats */
2312 case QEMU_IFLA_STATS
:
2313 st
= RTA_DATA(rtattr
);
2314 st
->rx_packets
= tswap32(st
->rx_packets
);
2315 st
->tx_packets
= tswap32(st
->tx_packets
);
2316 st
->rx_bytes
= tswap32(st
->rx_bytes
);
2317 st
->tx_bytes
= tswap32(st
->tx_bytes
);
2318 st
->rx_errors
= tswap32(st
->rx_errors
);
2319 st
->tx_errors
= tswap32(st
->tx_errors
);
2320 st
->rx_dropped
= tswap32(st
->rx_dropped
);
2321 st
->tx_dropped
= tswap32(st
->tx_dropped
);
2322 st
->multicast
= tswap32(st
->multicast
);
2323 st
->collisions
= tswap32(st
->collisions
);
2325 /* detailed rx_errors: */
2326 st
->rx_length_errors
= tswap32(st
->rx_length_errors
);
2327 st
->rx_over_errors
= tswap32(st
->rx_over_errors
);
2328 st
->rx_crc_errors
= tswap32(st
->rx_crc_errors
);
2329 st
->rx_frame_errors
= tswap32(st
->rx_frame_errors
);
2330 st
->rx_fifo_errors
= tswap32(st
->rx_fifo_errors
);
2331 st
->rx_missed_errors
= tswap32(st
->rx_missed_errors
);
2333 /* detailed tx_errors */
2334 st
->tx_aborted_errors
= tswap32(st
->tx_aborted_errors
);
2335 st
->tx_carrier_errors
= tswap32(st
->tx_carrier_errors
);
2336 st
->tx_fifo_errors
= tswap32(st
->tx_fifo_errors
);
2337 st
->tx_heartbeat_errors
= tswap32(st
->tx_heartbeat_errors
);
2338 st
->tx_window_errors
= tswap32(st
->tx_window_errors
);
2341 st
->rx_compressed
= tswap32(st
->rx_compressed
);
2342 st
->tx_compressed
= tswap32(st
->tx_compressed
);
2344 /* struct rtnl_link_stats64 */
2345 case QEMU_IFLA_STATS64
:
2346 st64
= RTA_DATA(rtattr
);
2347 st64
->rx_packets
= tswap64(st64
->rx_packets
);
2348 st64
->tx_packets
= tswap64(st64
->tx_packets
);
2349 st64
->rx_bytes
= tswap64(st64
->rx_bytes
);
2350 st64
->tx_bytes
= tswap64(st64
->tx_bytes
);
2351 st64
->rx_errors
= tswap64(st64
->rx_errors
);
2352 st64
->tx_errors
= tswap64(st64
->tx_errors
);
2353 st64
->rx_dropped
= tswap64(st64
->rx_dropped
);
2354 st64
->tx_dropped
= tswap64(st64
->tx_dropped
);
2355 st64
->multicast
= tswap64(st64
->multicast
);
2356 st64
->collisions
= tswap64(st64
->collisions
);
2358 /* detailed rx_errors: */
2359 st64
->rx_length_errors
= tswap64(st64
->rx_length_errors
);
2360 st64
->rx_over_errors
= tswap64(st64
->rx_over_errors
);
2361 st64
->rx_crc_errors
= tswap64(st64
->rx_crc_errors
);
2362 st64
->rx_frame_errors
= tswap64(st64
->rx_frame_errors
);
2363 st64
->rx_fifo_errors
= tswap64(st64
->rx_fifo_errors
);
2364 st64
->rx_missed_errors
= tswap64(st64
->rx_missed_errors
);
2366 /* detailed tx_errors */
2367 st64
->tx_aborted_errors
= tswap64(st64
->tx_aborted_errors
);
2368 st64
->tx_carrier_errors
= tswap64(st64
->tx_carrier_errors
);
2369 st64
->tx_fifo_errors
= tswap64(st64
->tx_fifo_errors
);
2370 st64
->tx_heartbeat_errors
= tswap64(st64
->tx_heartbeat_errors
);
2371 st64
->tx_window_errors
= tswap64(st64
->tx_window_errors
);
2374 st64
->rx_compressed
= tswap64(st64
->rx_compressed
);
2375 st64
->tx_compressed
= tswap64(st64
->tx_compressed
);
2377 /* struct rtnl_link_ifmap */
2379 map
= RTA_DATA(rtattr
);
2380 map
->mem_start
= tswap64(map
->mem_start
);
2381 map
->mem_end
= tswap64(map
->mem_end
);
2382 map
->base_addr
= tswap64(map
->base_addr
);
2383 map
->irq
= tswap16(map
->irq
);
2386 case QEMU_IFLA_LINKINFO
:
2387 memset(&li_context
, 0, sizeof(li_context
));
2388 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2390 host_to_target_data_linkinfo_nlattr
);
2391 case QEMU_IFLA_AF_SPEC
:
2392 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2394 host_to_target_data_spec_nlattr
);
2396 gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr
->rta_type
);
2402 static abi_long
host_to_target_data_addr_rtattr(struct rtattr
*rtattr
)
2405 struct ifa_cacheinfo
*ci
;
2407 switch (rtattr
->rta_type
) {
2408 /* binary: depends on family type */
2418 u32
= RTA_DATA(rtattr
);
2419 *u32
= tswap32(*u32
);
2421 /* struct ifa_cacheinfo */
2423 ci
= RTA_DATA(rtattr
);
2424 ci
->ifa_prefered
= tswap32(ci
->ifa_prefered
);
2425 ci
->ifa_valid
= tswap32(ci
->ifa_valid
);
2426 ci
->cstamp
= tswap32(ci
->cstamp
);
2427 ci
->tstamp
= tswap32(ci
->tstamp
);
2430 gemu_log("Unknown host IFA type: %d\n", rtattr
->rta_type
);
2436 static abi_long
host_to_target_data_route_rtattr(struct rtattr
*rtattr
)
2439 switch (rtattr
->rta_type
) {
2440 /* binary: depends on family type */
2449 u32
= RTA_DATA(rtattr
);
2450 *u32
= tswap32(*u32
);
2453 gemu_log("Unknown host RTA type: %d\n", rtattr
->rta_type
);
2459 static abi_long
host_to_target_link_rtattr(struct rtattr
*rtattr
,
2460 uint32_t rtattr_len
)
2462 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2463 host_to_target_data_link_rtattr
);
2466 static abi_long
host_to_target_addr_rtattr(struct rtattr
*rtattr
,
2467 uint32_t rtattr_len
)
2469 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2470 host_to_target_data_addr_rtattr
);
2473 static abi_long
host_to_target_route_rtattr(struct rtattr
*rtattr
,
2474 uint32_t rtattr_len
)
2476 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2477 host_to_target_data_route_rtattr
);
2480 static abi_long
host_to_target_data_route(struct nlmsghdr
*nlh
)
2483 struct ifinfomsg
*ifi
;
2484 struct ifaddrmsg
*ifa
;
2487 nlmsg_len
= nlh
->nlmsg_len
;
2488 switch (nlh
->nlmsg_type
) {
2492 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2493 ifi
= NLMSG_DATA(nlh
);
2494 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2495 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2496 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2497 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2498 host_to_target_link_rtattr(IFLA_RTA(ifi
),
2499 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifi
)));
2505 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2506 ifa
= NLMSG_DATA(nlh
);
2507 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2508 host_to_target_addr_rtattr(IFA_RTA(ifa
),
2509 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifa
)));
2515 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2516 rtm
= NLMSG_DATA(nlh
);
2517 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2518 host_to_target_route_rtattr(RTM_RTA(rtm
),
2519 nlmsg_len
- NLMSG_LENGTH(sizeof(*rtm
)));
2523 return -TARGET_EINVAL
;
2528 static inline abi_long
host_to_target_nlmsg_route(struct nlmsghdr
*nlh
,
2531 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_route
);
2534 static abi_long
target_to_host_for_each_rtattr(struct rtattr
*rtattr
,
2536 abi_long (*target_to_host_rtattr
)
2541 while (len
>= sizeof(struct rtattr
)) {
2542 if (tswap16(rtattr
->rta_len
) < sizeof(struct rtattr
) ||
2543 tswap16(rtattr
->rta_len
) > len
) {
2546 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
2547 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
2548 ret
= target_to_host_rtattr(rtattr
);
2552 len
-= RTA_ALIGN(rtattr
->rta_len
);
2553 rtattr
= (struct rtattr
*)(((char *)rtattr
) +
2554 RTA_ALIGN(rtattr
->rta_len
));
2559 static abi_long
target_to_host_data_link_rtattr(struct rtattr
*rtattr
)
2561 switch (rtattr
->rta_type
) {
2563 gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr
->rta_type
);
2569 static abi_long
target_to_host_data_addr_rtattr(struct rtattr
*rtattr
)
2571 switch (rtattr
->rta_type
) {
2572 /* binary: depends on family type */
2577 gemu_log("Unknown target IFA type: %d\n", rtattr
->rta_type
);
2583 static abi_long
target_to_host_data_route_rtattr(struct rtattr
*rtattr
)
2586 switch (rtattr
->rta_type
) {
2587 /* binary: depends on family type */
2594 u32
= RTA_DATA(rtattr
);
2595 *u32
= tswap32(*u32
);
2598 gemu_log("Unknown target RTA type: %d\n", rtattr
->rta_type
);
2604 static void target_to_host_link_rtattr(struct rtattr
*rtattr
,
2605 uint32_t rtattr_len
)
2607 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2608 target_to_host_data_link_rtattr
);
2611 static void target_to_host_addr_rtattr(struct rtattr
*rtattr
,
2612 uint32_t rtattr_len
)
2614 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2615 target_to_host_data_addr_rtattr
);
2618 static void target_to_host_route_rtattr(struct rtattr
*rtattr
,
2619 uint32_t rtattr_len
)
2621 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2622 target_to_host_data_route_rtattr
);
2625 static abi_long
target_to_host_data_route(struct nlmsghdr
*nlh
)
2627 struct ifinfomsg
*ifi
;
2628 struct ifaddrmsg
*ifa
;
2631 switch (nlh
->nlmsg_type
) {
2636 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2637 ifi
= NLMSG_DATA(nlh
);
2638 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2639 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2640 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2641 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2642 target_to_host_link_rtattr(IFLA_RTA(ifi
), nlh
->nlmsg_len
-
2643 NLMSG_LENGTH(sizeof(*ifi
)));
2649 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2650 ifa
= NLMSG_DATA(nlh
);
2651 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2652 target_to_host_addr_rtattr(IFA_RTA(ifa
), nlh
->nlmsg_len
-
2653 NLMSG_LENGTH(sizeof(*ifa
)));
2660 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2661 rtm
= NLMSG_DATA(nlh
);
2662 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2663 target_to_host_route_rtattr(RTM_RTA(rtm
), nlh
->nlmsg_len
-
2664 NLMSG_LENGTH(sizeof(*rtm
)));
2668 return -TARGET_EOPNOTSUPP
;
2673 static abi_long
target_to_host_nlmsg_route(struct nlmsghdr
*nlh
, size_t len
)
2675 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_route
);
2677 #endif /* CONFIG_RTNETLINK */
2679 static abi_long
host_to_target_data_audit(struct nlmsghdr
*nlh
)
2681 switch (nlh
->nlmsg_type
) {
2683 gemu_log("Unknown host audit message type %d\n",
2685 return -TARGET_EINVAL
;
2690 static inline abi_long
host_to_target_nlmsg_audit(struct nlmsghdr
*nlh
,
2693 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_audit
);
2696 static abi_long
target_to_host_data_audit(struct nlmsghdr
*nlh
)
2698 switch (nlh
->nlmsg_type
) {
2700 case AUDIT_FIRST_USER_MSG
... AUDIT_LAST_USER_MSG
:
2701 case AUDIT_FIRST_USER_MSG2
... AUDIT_LAST_USER_MSG2
:
2704 gemu_log("Unknown target audit message type %d\n",
2706 return -TARGET_EINVAL
;
2712 static abi_long
target_to_host_nlmsg_audit(struct nlmsghdr
*nlh
, size_t len
)
2714 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_audit
);
2717 /* do_setsockopt() Must return target values and target errnos. */
2718 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
2719 abi_ulong optval_addr
, socklen_t optlen
)
2723 struct ip_mreqn
*ip_mreq
;
2724 struct ip_mreq_source
*ip_mreq_source
;
2728 /* TCP options all take an 'int' value. */
2729 if (optlen
< sizeof(uint32_t))
2730 return -TARGET_EINVAL
;
2732 if (get_user_u32(val
, optval_addr
))
2733 return -TARGET_EFAULT
;
2734 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2741 case IP_ROUTER_ALERT
:
2745 case IP_MTU_DISCOVER
:
2751 case IP_MULTICAST_TTL
:
2752 case IP_MULTICAST_LOOP
:
2754 if (optlen
>= sizeof(uint32_t)) {
2755 if (get_user_u32(val
, optval_addr
))
2756 return -TARGET_EFAULT
;
2757 } else if (optlen
>= 1) {
2758 if (get_user_u8(val
, optval_addr
))
2759 return -TARGET_EFAULT
;
2761 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2763 case IP_ADD_MEMBERSHIP
:
2764 case IP_DROP_MEMBERSHIP
:
2765 if (optlen
< sizeof (struct target_ip_mreq
) ||
2766 optlen
> sizeof (struct target_ip_mreqn
))
2767 return -TARGET_EINVAL
;
2769 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
2770 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
2771 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
2774 case IP_BLOCK_SOURCE
:
2775 case IP_UNBLOCK_SOURCE
:
2776 case IP_ADD_SOURCE_MEMBERSHIP
:
2777 case IP_DROP_SOURCE_MEMBERSHIP
:
2778 if (optlen
!= sizeof (struct target_ip_mreq_source
))
2779 return -TARGET_EINVAL
;
2781 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2782 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2783 unlock_user (ip_mreq_source
, optval_addr
, 0);
2792 case IPV6_MTU_DISCOVER
:
2795 case IPV6_RECVPKTINFO
:
2797 if (optlen
< sizeof(uint32_t)) {
2798 return -TARGET_EINVAL
;
2800 if (get_user_u32(val
, optval_addr
)) {
2801 return -TARGET_EFAULT
;
2803 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2804 &val
, sizeof(val
)));
2813 /* struct icmp_filter takes an u32 value */
2814 if (optlen
< sizeof(uint32_t)) {
2815 return -TARGET_EINVAL
;
2818 if (get_user_u32(val
, optval_addr
)) {
2819 return -TARGET_EFAULT
;
2821 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2822 &val
, sizeof(val
)));
2829 case TARGET_SOL_SOCKET
:
2831 case TARGET_SO_RCVTIMEO
:
2835 optname
= SO_RCVTIMEO
;
2838 if (optlen
!= sizeof(struct target_timeval
)) {
2839 return -TARGET_EINVAL
;
2842 if (copy_from_user_timeval(&tv
, optval_addr
)) {
2843 return -TARGET_EFAULT
;
2846 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2850 case TARGET_SO_SNDTIMEO
:
2851 optname
= SO_SNDTIMEO
;
2853 case TARGET_SO_ATTACH_FILTER
:
2855 struct target_sock_fprog
*tfprog
;
2856 struct target_sock_filter
*tfilter
;
2857 struct sock_fprog fprog
;
2858 struct sock_filter
*filter
;
2861 if (optlen
!= sizeof(*tfprog
)) {
2862 return -TARGET_EINVAL
;
2864 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
2865 return -TARGET_EFAULT
;
2867 if (!lock_user_struct(VERIFY_READ
, tfilter
,
2868 tswapal(tfprog
->filter
), 0)) {
2869 unlock_user_struct(tfprog
, optval_addr
, 1);
2870 return -TARGET_EFAULT
;
2873 fprog
.len
= tswap16(tfprog
->len
);
2874 filter
= g_try_new(struct sock_filter
, fprog
.len
);
2875 if (filter
== NULL
) {
2876 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2877 unlock_user_struct(tfprog
, optval_addr
, 1);
2878 return -TARGET_ENOMEM
;
2880 for (i
= 0; i
< fprog
.len
; i
++) {
2881 filter
[i
].code
= tswap16(tfilter
[i
].code
);
2882 filter
[i
].jt
= tfilter
[i
].jt
;
2883 filter
[i
].jf
= tfilter
[i
].jf
;
2884 filter
[i
].k
= tswap32(tfilter
[i
].k
);
2886 fprog
.filter
= filter
;
2888 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
2889 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
2892 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
2893 unlock_user_struct(tfprog
, optval_addr
, 1);
2896 case TARGET_SO_BINDTODEVICE
:
2898 char *dev_ifname
, *addr_ifname
;
2900 if (optlen
> IFNAMSIZ
- 1) {
2901 optlen
= IFNAMSIZ
- 1;
2903 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2905 return -TARGET_EFAULT
;
2907 optname
= SO_BINDTODEVICE
;
2908 addr_ifname
= alloca(IFNAMSIZ
);
2909 memcpy(addr_ifname
, dev_ifname
, optlen
);
2910 addr_ifname
[optlen
] = 0;
2911 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
2912 addr_ifname
, optlen
));
2913 unlock_user (dev_ifname
, optval_addr
, 0);
2916 /* Options with 'int' argument. */
2917 case TARGET_SO_DEBUG
:
2920 case TARGET_SO_REUSEADDR
:
2921 optname
= SO_REUSEADDR
;
2923 case TARGET_SO_TYPE
:
2926 case TARGET_SO_ERROR
:
2929 case TARGET_SO_DONTROUTE
:
2930 optname
= SO_DONTROUTE
;
2932 case TARGET_SO_BROADCAST
:
2933 optname
= SO_BROADCAST
;
2935 case TARGET_SO_SNDBUF
:
2936 optname
= SO_SNDBUF
;
2938 case TARGET_SO_SNDBUFFORCE
:
2939 optname
= SO_SNDBUFFORCE
;
2941 case TARGET_SO_RCVBUF
:
2942 optname
= SO_RCVBUF
;
2944 case TARGET_SO_RCVBUFFORCE
:
2945 optname
= SO_RCVBUFFORCE
;
2947 case TARGET_SO_KEEPALIVE
:
2948 optname
= SO_KEEPALIVE
;
2950 case TARGET_SO_OOBINLINE
:
2951 optname
= SO_OOBINLINE
;
2953 case TARGET_SO_NO_CHECK
:
2954 optname
= SO_NO_CHECK
;
2956 case TARGET_SO_PRIORITY
:
2957 optname
= SO_PRIORITY
;
2960 case TARGET_SO_BSDCOMPAT
:
2961 optname
= SO_BSDCOMPAT
;
2964 case TARGET_SO_PASSCRED
:
2965 optname
= SO_PASSCRED
;
2967 case TARGET_SO_PASSSEC
:
2968 optname
= SO_PASSSEC
;
2970 case TARGET_SO_TIMESTAMP
:
2971 optname
= SO_TIMESTAMP
;
2973 case TARGET_SO_RCVLOWAT
:
2974 optname
= SO_RCVLOWAT
;
2980 if (optlen
< sizeof(uint32_t))
2981 return -TARGET_EINVAL
;
2983 if (get_user_u32(val
, optval_addr
))
2984 return -TARGET_EFAULT
;
2985 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
2989 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
2990 ret
= -TARGET_ENOPROTOOPT
;
2995 /* do_getsockopt() Must return target values and target errnos. */
2996 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
2997 abi_ulong optval_addr
, abi_ulong optlen
)
3004 case TARGET_SOL_SOCKET
:
3007 /* These don't just return a single integer */
3008 case TARGET_SO_LINGER
:
3009 case TARGET_SO_RCVTIMEO
:
3010 case TARGET_SO_SNDTIMEO
:
3011 case TARGET_SO_PEERNAME
:
3013 case TARGET_SO_PEERCRED
: {
3016 struct target_ucred
*tcr
;
3018 if (get_user_u32(len
, optlen
)) {
3019 return -TARGET_EFAULT
;
3022 return -TARGET_EINVAL
;
3026 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
3034 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
3035 return -TARGET_EFAULT
;
3037 __put_user(cr
.pid
, &tcr
->pid
);
3038 __put_user(cr
.uid
, &tcr
->uid
);
3039 __put_user(cr
.gid
, &tcr
->gid
);
3040 unlock_user_struct(tcr
, optval_addr
, 1);
3041 if (put_user_u32(len
, optlen
)) {
3042 return -TARGET_EFAULT
;
3046 /* Options with 'int' argument. */
3047 case TARGET_SO_DEBUG
:
3050 case TARGET_SO_REUSEADDR
:
3051 optname
= SO_REUSEADDR
;
3053 case TARGET_SO_TYPE
:
3056 case TARGET_SO_ERROR
:
3059 case TARGET_SO_DONTROUTE
:
3060 optname
= SO_DONTROUTE
;
3062 case TARGET_SO_BROADCAST
:
3063 optname
= SO_BROADCAST
;
3065 case TARGET_SO_SNDBUF
:
3066 optname
= SO_SNDBUF
;
3068 case TARGET_SO_RCVBUF
:
3069 optname
= SO_RCVBUF
;
3071 case TARGET_SO_KEEPALIVE
:
3072 optname
= SO_KEEPALIVE
;
3074 case TARGET_SO_OOBINLINE
:
3075 optname
= SO_OOBINLINE
;
3077 case TARGET_SO_NO_CHECK
:
3078 optname
= SO_NO_CHECK
;
3080 case TARGET_SO_PRIORITY
:
3081 optname
= SO_PRIORITY
;
3084 case TARGET_SO_BSDCOMPAT
:
3085 optname
= SO_BSDCOMPAT
;
3088 case TARGET_SO_PASSCRED
:
3089 optname
= SO_PASSCRED
;
3091 case TARGET_SO_TIMESTAMP
:
3092 optname
= SO_TIMESTAMP
;
3094 case TARGET_SO_RCVLOWAT
:
3095 optname
= SO_RCVLOWAT
;
3097 case TARGET_SO_ACCEPTCONN
:
3098 optname
= SO_ACCEPTCONN
;
3105 /* TCP options all take an 'int' value. */
3107 if (get_user_u32(len
, optlen
))
3108 return -TARGET_EFAULT
;
3110 return -TARGET_EINVAL
;
3112 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3115 if (optname
== SO_TYPE
) {
3116 val
= host_to_target_sock_type(val
);
3121 if (put_user_u32(val
, optval_addr
))
3122 return -TARGET_EFAULT
;
3124 if (put_user_u8(val
, optval_addr
))
3125 return -TARGET_EFAULT
;
3127 if (put_user_u32(len
, optlen
))
3128 return -TARGET_EFAULT
;
3135 case IP_ROUTER_ALERT
:
3139 case IP_MTU_DISCOVER
:
3145 case IP_MULTICAST_TTL
:
3146 case IP_MULTICAST_LOOP
:
3147 if (get_user_u32(len
, optlen
))
3148 return -TARGET_EFAULT
;
3150 return -TARGET_EINVAL
;
3152 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3155 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
3157 if (put_user_u32(len
, optlen
)
3158 || put_user_u8(val
, optval_addr
))
3159 return -TARGET_EFAULT
;
3161 if (len
> sizeof(int))
3163 if (put_user_u32(len
, optlen
)
3164 || put_user_u32(val
, optval_addr
))
3165 return -TARGET_EFAULT
;
3169 ret
= -TARGET_ENOPROTOOPT
;
3175 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
3177 ret
= -TARGET_EOPNOTSUPP
;
3183 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
3184 abi_ulong count
, int copy
)
3186 struct target_iovec
*target_vec
;
3188 abi_ulong total_len
, max_len
;
3191 bool bad_address
= false;
3197 if (count
> IOV_MAX
) {
3202 vec
= g_try_new0(struct iovec
, count
);
3208 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3209 count
* sizeof(struct target_iovec
), 1);
3210 if (target_vec
== NULL
) {
3215 /* ??? If host page size > target page size, this will result in a
3216 value larger than what we can actually support. */
3217 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
3220 for (i
= 0; i
< count
; i
++) {
3221 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3222 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3227 } else if (len
== 0) {
3228 /* Zero length pointer is ignored. */
3229 vec
[i
].iov_base
= 0;
3231 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
3232 /* If the first buffer pointer is bad, this is a fault. But
3233 * subsequent bad buffers will result in a partial write; this
3234 * is realized by filling the vector with null pointers and
3236 if (!vec
[i
].iov_base
) {
3247 if (len
> max_len
- total_len
) {
3248 len
= max_len
- total_len
;
3251 vec
[i
].iov_len
= len
;
3255 unlock_user(target_vec
, target_addr
, 0);
3260 if (tswapal(target_vec
[i
].iov_len
) > 0) {
3261 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
3264 unlock_user(target_vec
, target_addr
, 0);
3271 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
3272 abi_ulong count
, int copy
)
3274 struct target_iovec
*target_vec
;
3277 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3278 count
* sizeof(struct target_iovec
), 1);
3280 for (i
= 0; i
< count
; i
++) {
3281 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3282 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3286 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
3288 unlock_user(target_vec
, target_addr
, 0);
3294 static inline int target_to_host_sock_type(int *type
)
3297 int target_type
= *type
;
3299 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
3300 case TARGET_SOCK_DGRAM
:
3301 host_type
= SOCK_DGRAM
;
3303 case TARGET_SOCK_STREAM
:
3304 host_type
= SOCK_STREAM
;
3307 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
3310 if (target_type
& TARGET_SOCK_CLOEXEC
) {
3311 #if defined(SOCK_CLOEXEC)
3312 host_type
|= SOCK_CLOEXEC
;
3314 return -TARGET_EINVAL
;
3317 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3318 #if defined(SOCK_NONBLOCK)
3319 host_type
|= SOCK_NONBLOCK
;
3320 #elif !defined(O_NONBLOCK)
3321 return -TARGET_EINVAL
;
3328 /* Try to emulate socket type flags after socket creation. */
3329 static int sock_flags_fixup(int fd
, int target_type
)
3331 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3332 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3333 int flags
= fcntl(fd
, F_GETFL
);
3334 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
3336 return -TARGET_EINVAL
;
3343 static abi_long
packet_target_to_host_sockaddr(void *host_addr
,
3344 abi_ulong target_addr
,
3347 struct sockaddr
*addr
= host_addr
;
3348 struct target_sockaddr
*target_saddr
;
3350 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
3351 if (!target_saddr
) {
3352 return -TARGET_EFAULT
;
3355 memcpy(addr
, target_saddr
, len
);
3356 addr
->sa_family
= tswap16(target_saddr
->sa_family
);
3357 /* spkt_protocol is big-endian */
3359 unlock_user(target_saddr
, target_addr
, 0);
3363 static TargetFdTrans target_packet_trans
= {
3364 .target_to_host_addr
= packet_target_to_host_sockaddr
,
3367 #ifdef CONFIG_RTNETLINK
3368 static abi_long
netlink_route_target_to_host(void *buf
, size_t len
)
3372 ret
= target_to_host_nlmsg_route(buf
, len
);
3380 static abi_long
netlink_route_host_to_target(void *buf
, size_t len
)
3384 ret
= host_to_target_nlmsg_route(buf
, len
);
3392 static TargetFdTrans target_netlink_route_trans
= {
3393 .target_to_host_data
= netlink_route_target_to_host
,
3394 .host_to_target_data
= netlink_route_host_to_target
,
3396 #endif /* CONFIG_RTNETLINK */
3398 static abi_long
netlink_audit_target_to_host(void *buf
, size_t len
)
3402 ret
= target_to_host_nlmsg_audit(buf
, len
);
3410 static abi_long
netlink_audit_host_to_target(void *buf
, size_t len
)
3414 ret
= host_to_target_nlmsg_audit(buf
, len
);
3422 static TargetFdTrans target_netlink_audit_trans
= {
3423 .target_to_host_data
= netlink_audit_target_to_host
,
3424 .host_to_target_data
= netlink_audit_host_to_target
,
3427 /* do_socket() Must return target values and target errnos. */
3428 static abi_long
do_socket(int domain
, int type
, int protocol
)
3430 int target_type
= type
;
3433 ret
= target_to_host_sock_type(&type
);
3438 if (domain
== PF_NETLINK
&& !(
3439 #ifdef CONFIG_RTNETLINK
3440 protocol
== NETLINK_ROUTE
||
3442 protocol
== NETLINK_KOBJECT_UEVENT
||
3443 protocol
== NETLINK_AUDIT
)) {
3444 return -EPFNOSUPPORT
;
3447 if (domain
== AF_PACKET
||
3448 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
3449 protocol
= tswap16(protocol
);
3452 ret
= get_errno(socket(domain
, type
, protocol
));
3454 ret
= sock_flags_fixup(ret
, target_type
);
3455 if (type
== SOCK_PACKET
) {
3456 /* Manage an obsolete case :
3457 * if socket type is SOCK_PACKET, bind by name
3459 fd_trans_register(ret
, &target_packet_trans
);
3460 } else if (domain
== PF_NETLINK
) {
3462 #ifdef CONFIG_RTNETLINK
3464 fd_trans_register(ret
, &target_netlink_route_trans
);
3467 case NETLINK_KOBJECT_UEVENT
:
3468 /* nothing to do: messages are strings */
3471 fd_trans_register(ret
, &target_netlink_audit_trans
);
3474 g_assert_not_reached();
3481 /* do_bind() Must return target values and target errnos. */
3482 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
3488 if ((int)addrlen
< 0) {
3489 return -TARGET_EINVAL
;
3492 addr
= alloca(addrlen
+1);
3494 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3498 return get_errno(bind(sockfd
, addr
, addrlen
));
3501 /* do_connect() Must return target values and target errnos. */
3502 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
3508 if ((int)addrlen
< 0) {
3509 return -TARGET_EINVAL
;
3512 addr
= alloca(addrlen
+1);
3514 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3518 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
3521 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3522 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
3523 int flags
, int send
)
3529 abi_ulong target_vec
;
3531 if (msgp
->msg_name
) {
3532 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
3533 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
3534 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
3535 tswapal(msgp
->msg_name
),
3537 if (ret
== -TARGET_EFAULT
) {
3538 /* For connected sockets msg_name and msg_namelen must
3539 * be ignored, so returning EFAULT immediately is wrong.
3540 * Instead, pass a bad msg_name to the host kernel, and
3541 * let it decide whether to return EFAULT or not.
3543 msg
.msg_name
= (void *)-1;
3548 msg
.msg_name
= NULL
;
3549 msg
.msg_namelen
= 0;
3551 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3552 msg
.msg_control
= alloca(msg
.msg_controllen
);
3553 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3555 count
= tswapal(msgp
->msg_iovlen
);
3556 target_vec
= tswapal(msgp
->msg_iov
);
3558 if (count
> IOV_MAX
) {
3559 /* sendrcvmsg returns a different errno for this condition than
3560 * readv/writev, so we must catch it here before lock_iovec() does.
3562 ret
= -TARGET_EMSGSIZE
;
3566 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3567 target_vec
, count
, send
);
3569 ret
= -host_to_target_errno(errno
);
3572 msg
.msg_iovlen
= count
;
3576 if (fd_trans_target_to_host_data(fd
)) {
3579 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3580 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3581 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3582 msg
.msg_iov
->iov_len
);
3584 msg
.msg_iov
->iov_base
= host_msg
;
3585 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3589 ret
= target_to_host_cmsg(&msg
, msgp
);
3591 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3595 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3596 if (!is_error(ret
)) {
3598 if (fd_trans_host_to_target_data(fd
)) {
3599 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3602 ret
= host_to_target_cmsg(msgp
, &msg
);
3604 if (!is_error(ret
)) {
3605 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3606 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
3607 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3608 msg
.msg_name
, msg
.msg_namelen
);
3620 unlock_iovec(vec
, target_vec
, count
, !send
);
3625 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3626 int flags
, int send
)
3629 struct target_msghdr
*msgp
;
3631 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3635 return -TARGET_EFAULT
;
3637 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3638 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3642 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3643 * so it might not have this *mmsg-specific flag either.
3645 #ifndef MSG_WAITFORONE
3646 #define MSG_WAITFORONE 0x10000
3649 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3650 unsigned int vlen
, unsigned int flags
,
3653 struct target_mmsghdr
*mmsgp
;
3657 if (vlen
> UIO_MAXIOV
) {
3661 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3663 return -TARGET_EFAULT
;
3666 for (i
= 0; i
< vlen
; i
++) {
3667 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3668 if (is_error(ret
)) {
3671 mmsgp
[i
].msg_len
= tswap32(ret
);
3672 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3673 if (flags
& MSG_WAITFORONE
) {
3674 flags
|= MSG_DONTWAIT
;
3678 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3680 /* Return number of datagrams sent if we sent any at all;
3681 * otherwise return the error.
3689 /* do_accept4() Must return target values and target errnos. */
3690 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3691 abi_ulong target_addrlen_addr
, int flags
)
3698 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3700 if (target_addr
== 0) {
3701 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3704 /* linux returns EINVAL if addrlen pointer is invalid */
3705 if (get_user_u32(addrlen
, target_addrlen_addr
))
3706 return -TARGET_EINVAL
;
3708 if ((int)addrlen
< 0) {
3709 return -TARGET_EINVAL
;
3712 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3713 return -TARGET_EINVAL
;
3715 addr
= alloca(addrlen
);
3717 ret
= get_errno(safe_accept4(fd
, addr
, &addrlen
, host_flags
));
3718 if (!is_error(ret
)) {
3719 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3720 if (put_user_u32(addrlen
, target_addrlen_addr
))
3721 ret
= -TARGET_EFAULT
;
3726 /* do_getpeername() Must return target values and target errnos. */
3727 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3728 abi_ulong target_addrlen_addr
)
3734 if (get_user_u32(addrlen
, target_addrlen_addr
))
3735 return -TARGET_EFAULT
;
3737 if ((int)addrlen
< 0) {
3738 return -TARGET_EINVAL
;
3741 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3742 return -TARGET_EFAULT
;
3744 addr
= alloca(addrlen
);
3746 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
3747 if (!is_error(ret
)) {
3748 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3749 if (put_user_u32(addrlen
, target_addrlen_addr
))
3750 ret
= -TARGET_EFAULT
;
3755 /* do_getsockname() Must return target values and target errnos. */
3756 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3757 abi_ulong target_addrlen_addr
)
3763 if (get_user_u32(addrlen
, target_addrlen_addr
))
3764 return -TARGET_EFAULT
;
3766 if ((int)addrlen
< 0) {
3767 return -TARGET_EINVAL
;
3770 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3771 return -TARGET_EFAULT
;
3773 addr
= alloca(addrlen
);
3775 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
3776 if (!is_error(ret
)) {
3777 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3778 if (put_user_u32(addrlen
, target_addrlen_addr
))
3779 ret
= -TARGET_EFAULT
;
3784 /* do_socketpair() Must return target values and target errnos. */
3785 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3786 abi_ulong target_tab_addr
)
3791 target_to_host_sock_type(&type
);
3793 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3794 if (!is_error(ret
)) {
3795 if (put_user_s32(tab
[0], target_tab_addr
)
3796 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3797 ret
= -TARGET_EFAULT
;
3802 /* do_sendto() Must return target values and target errnos. */
3803 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3804 abi_ulong target_addr
, socklen_t addrlen
)
3808 void *copy_msg
= NULL
;
3811 if ((int)addrlen
< 0) {
3812 return -TARGET_EINVAL
;
3815 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3817 return -TARGET_EFAULT
;
3818 if (fd_trans_target_to_host_data(fd
)) {
3819 copy_msg
= host_msg
;
3820 host_msg
= g_malloc(len
);
3821 memcpy(host_msg
, copy_msg
, len
);
3822 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3828 addr
= alloca(addrlen
+1);
3829 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3833 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3835 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3840 host_msg
= copy_msg
;
3842 unlock_user(host_msg
, msg
, 0);
3846 /* do_recvfrom() Must return target values and target errnos. */
3847 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
3848 abi_ulong target_addr
,
3849 abi_ulong target_addrlen
)
3856 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
3858 return -TARGET_EFAULT
;
3860 if (get_user_u32(addrlen
, target_addrlen
)) {
3861 ret
= -TARGET_EFAULT
;
3864 if ((int)addrlen
< 0) {
3865 ret
= -TARGET_EINVAL
;
3868 addr
= alloca(addrlen
);
3869 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
3872 addr
= NULL
; /* To keep compiler quiet. */
3873 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
3875 if (!is_error(ret
)) {
3876 if (fd_trans_host_to_target_data(fd
)) {
3877 ret
= fd_trans_host_to_target_data(fd
)(host_msg
, ret
);
3880 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3881 if (put_user_u32(addrlen
, target_addrlen
)) {
3882 ret
= -TARGET_EFAULT
;
3886 unlock_user(host_msg
, msg
, len
);
3889 unlock_user(host_msg
, msg
, 0);
#ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned ac[] = { /* number of arguments per call */
        [SOCKOP_socket] = 3,      /* domain, type, protocol */
        [SOCKOP_bind] = 3,        /* sockfd, addr, addrlen */
        [SOCKOP_connect] = 3,     /* sockfd, addr, addrlen */
        [SOCKOP_listen] = 2,      /* sockfd, backlog */
        [SOCKOP_accept] = 3,      /* sockfd, addr, addrlen */
        [SOCKOP_accept4] = 4,     /* sockfd, addr, addrlen, flags */
        [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_socketpair] = 4,  /* domain, type, protocol, tab */
        [SOCKOP_send] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_recv] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_sendto] = 6,      /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_recvfrom] = 6,    /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_shutdown] = 2,    /* sockfd, how */
        [SOCKOP_sendmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_recvmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_sendmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_recvmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_setsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
        [SOCKOP_getsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* first, collect the arguments in a[] according to ac[] */
    if (num >= 0 && num < ARRAY_SIZE(ac)) {
        assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
        for (i = 0; i < ac[num]; ++i) {
            if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
                return -TARGET_EFAULT;
            }
        }
    }

    /* now when we have the args, actually handle the call */
    switch (num) {
    case SOCKOP_socket: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case SOCKOP_bind: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case SOCKOP_connect: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case SOCKOP_listen: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case SOCKOP_accept: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case SOCKOP_getsockname: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case SOCKOP_getpeername: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case SOCKOP_socketpair: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case SOCKOP_send: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_recv: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_shutdown: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case SOCKOP_sendmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case SOCKOP_recvmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case SOCKOP_sendmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    case SOCKOP_recvmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_ENOSYS;
    }
}
#endif
3982 #define N_SHM_REGIONS 32
3984 static struct shm_region
{
3988 } shm_regions
[N_SHM_REGIONS
];
3990 #ifndef TARGET_SEMID64_DS
3991 /* asm-generic version of this struct */
3992 struct target_semid64_ds
3994 struct target_ipc_perm sem_perm
;
3995 abi_ulong sem_otime
;
3996 #if TARGET_ABI_BITS == 32
3997 abi_ulong __unused1
;
3999 abi_ulong sem_ctime
;
4000 #if TARGET_ABI_BITS == 32
4001 abi_ulong __unused2
;
4003 abi_ulong sem_nsems
;
4004 abi_ulong __unused3
;
4005 abi_ulong __unused4
;
4009 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
4010 abi_ulong target_addr
)
4012 struct target_ipc_perm
*target_ip
;
4013 struct target_semid64_ds
*target_sd
;
4015 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4016 return -TARGET_EFAULT
;
4017 target_ip
= &(target_sd
->sem_perm
);
4018 host_ip
->__key
= tswap32(target_ip
->__key
);
4019 host_ip
->uid
= tswap32(target_ip
->uid
);
4020 host_ip
->gid
= tswap32(target_ip
->gid
);
4021 host_ip
->cuid
= tswap32(target_ip
->cuid
);
4022 host_ip
->cgid
= tswap32(target_ip
->cgid
);
4023 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4024 host_ip
->mode
= tswap32(target_ip
->mode
);
4026 host_ip
->mode
= tswap16(target_ip
->mode
);
4028 #if defined(TARGET_PPC)
4029 host_ip
->__seq
= tswap32(target_ip
->__seq
);
4031 host_ip
->__seq
= tswap16(target_ip
->__seq
);
4033 unlock_user_struct(target_sd
, target_addr
, 0);
4037 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
4038 struct ipc_perm
*host_ip
)
4040 struct target_ipc_perm
*target_ip
;
4041 struct target_semid64_ds
*target_sd
;
4043 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4044 return -TARGET_EFAULT
;
4045 target_ip
= &(target_sd
->sem_perm
);
4046 target_ip
->__key
= tswap32(host_ip
->__key
);
4047 target_ip
->uid
= tswap32(host_ip
->uid
);
4048 target_ip
->gid
= tswap32(host_ip
->gid
);
4049 target_ip
->cuid
= tswap32(host_ip
->cuid
);
4050 target_ip
->cgid
= tswap32(host_ip
->cgid
);
4051 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4052 target_ip
->mode
= tswap32(host_ip
->mode
);
4054 target_ip
->mode
= tswap16(host_ip
->mode
);
4056 #if defined(TARGET_PPC)
4057 target_ip
->__seq
= tswap32(host_ip
->__seq
);
4059 target_ip
->__seq
= tswap16(host_ip
->__seq
);
4061 unlock_user_struct(target_sd
, target_addr
, 1);
4065 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
4066 abi_ulong target_addr
)
4068 struct target_semid64_ds
*target_sd
;
4070 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4071 return -TARGET_EFAULT
;
4072 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
4073 return -TARGET_EFAULT
;
4074 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
4075 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
4076 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
4077 unlock_user_struct(target_sd
, target_addr
, 0);
4081 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
4082 struct semid_ds
*host_sd
)
4084 struct target_semid64_ds
*target_sd
;
4086 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4087 return -TARGET_EFAULT
;
4088 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
4089 return -TARGET_EFAULT
;
4090 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
4091 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
4092 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
4093 unlock_user_struct(target_sd
, target_addr
, 1);
/* Guest-layout mirror of struct seminfo (IPC_INFO/SEM_INFO). */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
4110 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
4111 struct seminfo
*host_seminfo
)
4113 struct target_seminfo
*target_seminfo
;
4114 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
4115 return -TARGET_EFAULT
;
4116 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
4117 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
4118 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
4119 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
4120 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
4121 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
4122 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
4123 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
4124 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
4125 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
4126 unlock_user_struct(target_seminfo
, target_addr
, 1);
4132 struct semid_ds
*buf
;
4133 unsigned short *array
;
4134 struct seminfo
*__buf
;
4137 union target_semun
{
4144 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
4145 abi_ulong target_addr
)
4148 unsigned short *array
;
4150 struct semid_ds semid_ds
;
4153 semun
.buf
= &semid_ds
;
4155 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4157 return get_errno(ret
);
4159 nsems
= semid_ds
.sem_nsems
;
4161 *host_array
= g_try_new(unsigned short, nsems
);
4163 return -TARGET_ENOMEM
;
4165 array
= lock_user(VERIFY_READ
, target_addr
,
4166 nsems
*sizeof(unsigned short), 1);
4168 g_free(*host_array
);
4169 return -TARGET_EFAULT
;
4172 for(i
=0; i
<nsems
; i
++) {
4173 __get_user((*host_array
)[i
], &array
[i
]);
4175 unlock_user(array
, target_addr
, 0);
4180 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
4181 unsigned short **host_array
)
4184 unsigned short *array
;
4186 struct semid_ds semid_ds
;
4189 semun
.buf
= &semid_ds
;
4191 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4193 return get_errno(ret
);
4195 nsems
= semid_ds
.sem_nsems
;
4197 array
= lock_user(VERIFY_WRITE
, target_addr
,
4198 nsems
*sizeof(unsigned short), 0);
4200 return -TARGET_EFAULT
;
4202 for(i
=0; i
<nsems
; i
++) {
4203 __put_user((*host_array
)[i
], &array
[i
]);
4205 g_free(*host_array
);
4206 unlock_user(array
, target_addr
, 1);
4211 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
4212 abi_ulong target_arg
)
4214 union target_semun target_su
= { .buf
= target_arg
};
4216 struct semid_ds dsarg
;
4217 unsigned short *array
= NULL
;
4218 struct seminfo seminfo
;
4219 abi_long ret
= -TARGET_EINVAL
;
4226 /* In 64 bit cross-endian situations, we will erroneously pick up
4227 * the wrong half of the union for the "val" element. To rectify
4228 * this, the entire 8-byte structure is byteswapped, followed by
4229 * a swap of the 4 byte val field. In other cases, the data is
4230 * already in proper host byte order. */
4231 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
4232 target_su
.buf
= tswapal(target_su
.buf
);
4233 arg
.val
= tswap32(target_su
.val
);
4235 arg
.val
= target_su
.val
;
4237 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4241 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
4245 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4246 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
4253 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
4257 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4258 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
4264 arg
.__buf
= &seminfo
;
4265 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4266 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
4274 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
/* Guest-layout mirror of struct sembuf (semop operations). */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};
4287 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
4288 abi_ulong target_addr
,
4291 struct target_sembuf
*target_sembuf
;
4294 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
4295 nsops
*sizeof(struct target_sembuf
), 1);
4297 return -TARGET_EFAULT
;
4299 for(i
=0; i
<nsops
; i
++) {
4300 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
4301 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
4302 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
4305 unlock_user(target_sembuf
, target_addr
, 0);
4310 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
4312 struct sembuf sops
[nsops
];
4314 if (target_to_host_sembuf(sops
, ptr
, nsops
))
4315 return -TARGET_EFAULT
;
4317 return get_errno(safe_semtimedop(semid
, sops
, nsops
, NULL
));
4320 struct target_msqid_ds
4322 struct target_ipc_perm msg_perm
;
4323 abi_ulong msg_stime
;
4324 #if TARGET_ABI_BITS == 32
4325 abi_ulong __unused1
;
4327 abi_ulong msg_rtime
;
4328 #if TARGET_ABI_BITS == 32
4329 abi_ulong __unused2
;
4331 abi_ulong msg_ctime
;
4332 #if TARGET_ABI_BITS == 32
4333 abi_ulong __unused3
;
4335 abi_ulong __msg_cbytes
;
4337 abi_ulong msg_qbytes
;
4338 abi_ulong msg_lspid
;
4339 abi_ulong msg_lrpid
;
4340 abi_ulong __unused4
;
4341 abi_ulong __unused5
;
4344 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
4345 abi_ulong target_addr
)
4347 struct target_msqid_ds
*target_md
;
4349 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
4350 return -TARGET_EFAULT
;
4351 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
4352 return -TARGET_EFAULT
;
4353 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
4354 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
4355 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
4356 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
4357 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
4358 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
4359 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
4360 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
4361 unlock_user_struct(target_md
, target_addr
, 0);
4365 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
4366 struct msqid_ds
*host_md
)
4368 struct target_msqid_ds
*target_md
;
4370 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
4371 return -TARGET_EFAULT
;
4372 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
4373 return -TARGET_EFAULT
;
4374 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
4375 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
4376 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
4377 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
4378 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
4379 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
4380 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
4381 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
4382 unlock_user_struct(target_md
, target_addr
, 1);
/* Guest-layout mirror of struct msginfo (IPC_INFO/MSG_INFO). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4397 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
4398 struct msginfo
*host_msginfo
)
4400 struct target_msginfo
*target_msginfo
;
4401 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
4402 return -TARGET_EFAULT
;
4403 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
4404 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
4405 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
4406 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
4407 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
4408 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
4409 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
4410 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
4411 unlock_user_struct(target_msginfo
, target_addr
, 1);
4415 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
4417 struct msqid_ds dsarg
;
4418 struct msginfo msginfo
;
4419 abi_long ret
= -TARGET_EINVAL
;
4427 if (target_to_host_msqid_ds(&dsarg
,ptr
))
4428 return -TARGET_EFAULT
;
4429 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
4430 if (host_to_target_msqid_ds(ptr
,&dsarg
))
4431 return -TARGET_EFAULT
;
4434 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
4438 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
4439 if (host_to_target_msginfo(ptr
, &msginfo
))
4440 return -TARGET_EFAULT
;
4447 struct target_msgbuf
{
4452 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
4453 ssize_t msgsz
, int msgflg
)
4455 struct target_msgbuf
*target_mb
;
4456 struct msgbuf
*host_mb
;
4460 return -TARGET_EINVAL
;
4463 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
4464 return -TARGET_EFAULT
;
4465 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4467 unlock_user_struct(target_mb
, msgp
, 0);
4468 return -TARGET_ENOMEM
;
4470 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
4471 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
4472 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
4474 unlock_user_struct(target_mb
, msgp
, 0);
4479 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
4480 ssize_t msgsz
, abi_long msgtyp
,
4483 struct target_msgbuf
*target_mb
;
4485 struct msgbuf
*host_mb
;
4489 return -TARGET_EINVAL
;
4492 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
4493 return -TARGET_EFAULT
;
4495 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4497 ret
= -TARGET_ENOMEM
;
4500 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
4503 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
4504 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
4505 if (!target_mtext
) {
4506 ret
= -TARGET_EFAULT
;
4509 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
4510 unlock_user(target_mtext
, target_mtext_addr
, ret
);
4513 target_mb
->mtype
= tswapal(host_mb
->mtype
);
4517 unlock_user_struct(target_mb
, msgp
, 1);
4522 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
4523 abi_ulong target_addr
)
4525 struct target_shmid_ds
*target_sd
;
4527 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4528 return -TARGET_EFAULT
;
4529 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
4530 return -TARGET_EFAULT
;
4531 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4532 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4533 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4534 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4535 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4536 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4537 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4538 unlock_user_struct(target_sd
, target_addr
, 0);
4542 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
4543 struct shmid_ds
*host_sd
)
4545 struct target_shmid_ds
*target_sd
;
4547 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4548 return -TARGET_EFAULT
;
4549 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
4550 return -TARGET_EFAULT
;
4551 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4552 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4553 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4554 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4555 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4556 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4557 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4558 unlock_user_struct(target_sd
, target_addr
, 1);
4562 struct target_shminfo
{
4570 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
4571 struct shminfo
*host_shminfo
)
4573 struct target_shminfo
*target_shminfo
;
4574 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
4575 return -TARGET_EFAULT
;
4576 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
4577 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
4578 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
4579 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
4580 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
4581 unlock_user_struct(target_shminfo
, target_addr
, 1);
4585 struct target_shm_info
{
4590 abi_ulong swap_attempts
;
4591 abi_ulong swap_successes
;
4594 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
4595 struct shm_info
*host_shm_info
)
4597 struct target_shm_info
*target_shm_info
;
4598 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
4599 return -TARGET_EFAULT
;
4600 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
4601 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
4602 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
4603 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
4604 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
4605 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
4606 unlock_user_struct(target_shm_info
, target_addr
, 1);
4610 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
4612 struct shmid_ds dsarg
;
4613 struct shminfo shminfo
;
4614 struct shm_info shm_info
;
4615 abi_long ret
= -TARGET_EINVAL
;
4623 if (target_to_host_shmid_ds(&dsarg
, buf
))
4624 return -TARGET_EFAULT
;
4625 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
4626 if (host_to_target_shmid_ds(buf
, &dsarg
))
4627 return -TARGET_EFAULT
;
4630 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
4631 if (host_to_target_shminfo(buf
, &shminfo
))
4632 return -TARGET_EFAULT
;
4635 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
4636 if (host_to_target_shm_info(buf
, &shm_info
))
4637 return -TARGET_EFAULT
;
4642 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
4649 #ifndef TARGET_FORCE_SHMLBA
4650 /* For most architectures, SHMLBA is the same as the page size;
4651 * some architectures have larger values, in which case they should
4652 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4653 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4654 * and defining its own value for SHMLBA.
4656 * The kernel also permits SHMLBA to be set by the architecture to a
4657 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4658 * this means that addresses are rounded to the large size if
4659 * SHM_RND is set but addresses not aligned to that size are not rejected
4660 * as long as they are at least page-aligned. Since the only architecture
4661 * which uses this is ia64 this code doesn't provide for that oddity.
4663 static inline abi_ulong
target_shmlba(CPUArchState
*cpu_env
)
4665 return TARGET_PAGE_SIZE
;
4669 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
4670 int shmid
, abi_ulong shmaddr
, int shmflg
)
4674 struct shmid_ds shm_info
;
4678 /* find out the length of the shared memory segment */
4679 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4680 if (is_error(ret
)) {
4681 /* can't get length, bail out */
4685 shmlba
= target_shmlba(cpu_env
);
4687 if (shmaddr
& (shmlba
- 1)) {
4688 if (shmflg
& SHM_RND
) {
4689 shmaddr
&= ~(shmlba
- 1);
4691 return -TARGET_EINVAL
;
4698 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
4700 abi_ulong mmap_start
;
4702 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
4704 if (mmap_start
== -1) {
4706 host_raddr
= (void *)-1;
4708 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
4711 if (host_raddr
== (void *)-1) {
4713 return get_errno((long)host_raddr
);
4715 raddr
=h2g((unsigned long)host_raddr
);
4717 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
4718 PAGE_VALID
| PAGE_READ
|
4719 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
4721 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4722 if (!shm_regions
[i
].in_use
) {
4723 shm_regions
[i
].in_use
= true;
4724 shm_regions
[i
].start
= raddr
;
4725 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4735 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4739 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4740 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4741 shm_regions
[i
].in_use
= false;
4742 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
4747 return get_errno(shmdt(g2h(shmaddr
)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos.  */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old-style call packs msgp/msgtyp in a kludge struct. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
4857 /* kernel structure types definitions */
4859 #define STRUCT(name, ...) STRUCT_ ## name,
4860 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4862 #include "syscall_types.h"
4866 #undef STRUCT_SPECIAL
4868 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4869 #define STRUCT_SPECIAL(name)
4870 #include "syscall_types.h"
4872 #undef STRUCT_SPECIAL
4874 typedef struct IOCTLEntry IOCTLEntry
;
4876 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4877 int fd
, int cmd
, abi_long arg
);
4881 unsigned int host_cmd
;
4884 do_ioctl_fn
*do_ioctl
;
4885 const argtype arg_type
[5];
4888 #define IOC_R 0x0001
4889 #define IOC_W 0x0002
4890 #define IOC_RW (IOC_R | IOC_W)
4892 #define MAX_STRUCT_SIZE 4096
4894 #ifdef CONFIG_FIEMAP
4895 /* So fiemap access checks don't overflow on 32 bit systems.
4896 * This is very slightly smaller than the limit imposed by
4897 * the underlying kernel.
4899 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4900 / sizeof(struct fiemap_extent))
4902 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4903 int fd
, int cmd
, abi_long arg
)
4905 /* The parameter for this ioctl is a struct fiemap followed
4906 * by an array of struct fiemap_extent whose size is set
4907 * in fiemap->fm_extent_count. The array is filled in by the
4910 int target_size_in
, target_size_out
;
4912 const argtype
*arg_type
= ie
->arg_type
;
4913 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
4916 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
4920 assert(arg_type
[0] == TYPE_PTR
);
4921 assert(ie
->access
== IOC_RW
);
4923 target_size_in
= thunk_type_size(arg_type
, 0);
4924 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
4926 return -TARGET_EFAULT
;
4928 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
4929 unlock_user(argptr
, arg
, 0);
4930 fm
= (struct fiemap
*)buf_temp
;
4931 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
4932 return -TARGET_EINVAL
;
4935 outbufsz
= sizeof (*fm
) +
4936 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
4938 if (outbufsz
> MAX_STRUCT_SIZE
) {
4939 /* We can't fit all the extents into the fixed size buffer.
4940 * Allocate one that is large enough and use it instead.
4942 fm
= g_try_malloc(outbufsz
);
4944 return -TARGET_ENOMEM
;
4946 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
4949 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, fm
));
4950 if (!is_error(ret
)) {
4951 target_size_out
= target_size_in
;
4952 /* An extent_count of 0 means we were only counting the extents
4953 * so there are no structs to copy
4955 if (fm
->fm_extent_count
!= 0) {
4956 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
4958 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
4960 ret
= -TARGET_EFAULT
;
4962 /* Convert the struct fiemap */
4963 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
4964 if (fm
->fm_extent_count
!= 0) {
4965 p
= argptr
+ target_size_in
;
4966 /* ...and then all the struct fiemap_extents */
4967 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
4968 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
4973 unlock_user(argptr
, arg
, target_size_out
);
4983 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
4984 int fd
, int cmd
, abi_long arg
)
4986 const argtype
*arg_type
= ie
->arg_type
;
4990 struct ifconf
*host_ifconf
;
4992 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
4993 int target_ifreq_size
;
4998 abi_long target_ifc_buf
;
5002 assert(arg_type
[0] == TYPE_PTR
);
5003 assert(ie
->access
== IOC_RW
);
5006 target_size
= thunk_type_size(arg_type
, 0);
5008 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5010 return -TARGET_EFAULT
;
5011 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5012 unlock_user(argptr
, arg
, 0);
5014 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
5015 target_ifc_len
= host_ifconf
->ifc_len
;
5016 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
5018 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
5019 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
5020 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
5022 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
5023 if (outbufsz
> MAX_STRUCT_SIZE
) {
5024 /* We can't fit all the extents into the fixed size buffer.
5025 * Allocate one that is large enough and use it instead.
5027 host_ifconf
= malloc(outbufsz
);
5029 return -TARGET_ENOMEM
;
5031 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
5034 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
5036 host_ifconf
->ifc_len
= host_ifc_len
;
5037 host_ifconf
->ifc_buf
= host_ifc_buf
;
5039 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
5040 if (!is_error(ret
)) {
5041 /* convert host ifc_len to target ifc_len */
5043 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
5044 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
5045 host_ifconf
->ifc_len
= target_ifc_len
;
5047 /* restore target ifc_buf */
5049 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
5051 /* copy struct ifconf to target user */
5053 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5055 return -TARGET_EFAULT
;
5056 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
5057 unlock_user(argptr
, arg
, target_size
);
5059 /* copy ifreq[] to target user */
5061 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
5062 for (i
= 0; i
< nb_ifreq
; i
++) {
5063 thunk_convert(argptr
+ i
* target_ifreq_size
,
5064 host_ifc_buf
+ i
* sizeof(struct ifreq
),
5065 ifreq_arg_type
, THUNK_TARGET
);
5067 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
5077 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5078 int cmd
, abi_long arg
)
5081 struct dm_ioctl
*host_dm
;
5082 abi_long guest_data
;
5083 uint32_t guest_data_size
;
5085 const argtype
*arg_type
= ie
->arg_type
;
5087 void *big_buf
= NULL
;
5091 target_size
= thunk_type_size(arg_type
, 0);
5092 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5094 ret
= -TARGET_EFAULT
;
5097 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5098 unlock_user(argptr
, arg
, 0);
5100 /* buf_temp is too small, so fetch things into a bigger buffer */
5101 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
5102 memcpy(big_buf
, buf_temp
, target_size
);
5106 guest_data
= arg
+ host_dm
->data_start
;
5107 if ((guest_data
- arg
) < 0) {
5108 ret
= -TARGET_EINVAL
;
5111 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5112 host_data
= (char*)host_dm
+ host_dm
->data_start
;
5114 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
5116 ret
= -TARGET_EFAULT
;
5120 switch (ie
->host_cmd
) {
5122 case DM_LIST_DEVICES
:
5125 case DM_DEV_SUSPEND
:
5128 case DM_TABLE_STATUS
:
5129 case DM_TABLE_CLEAR
:
5131 case DM_LIST_VERSIONS
:
5135 case DM_DEV_SET_GEOMETRY
:
5136 /* data contains only strings */
5137 memcpy(host_data
, argptr
, guest_data_size
);
5140 memcpy(host_data
, argptr
, guest_data_size
);
5141 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
5145 void *gspec
= argptr
;
5146 void *cur_data
= host_data
;
5147 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5148 int spec_size
= thunk_type_size(arg_type
, 0);
5151 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5152 struct dm_target_spec
*spec
= cur_data
;
5156 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
5157 slen
= strlen((char*)gspec
+ spec_size
) + 1;
5159 spec
->next
= sizeof(*spec
) + slen
;
5160 strcpy((char*)&spec
[1], gspec
+ spec_size
);
5162 cur_data
+= spec
->next
;
5167 ret
= -TARGET_EINVAL
;
5168 unlock_user(argptr
, guest_data
, 0);
5171 unlock_user(argptr
, guest_data
, 0);
5173 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5174 if (!is_error(ret
)) {
5175 guest_data
= arg
+ host_dm
->data_start
;
5176 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5177 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
5178 switch (ie
->host_cmd
) {
5183 case DM_DEV_SUSPEND
:
5186 case DM_TABLE_CLEAR
:
5188 case DM_DEV_SET_GEOMETRY
:
5189 /* no return data */
5191 case DM_LIST_DEVICES
:
5193 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
5194 uint32_t remaining_data
= guest_data_size
;
5195 void *cur_data
= argptr
;
5196 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
5197 int nl_size
= 12; /* can't use thunk_size due to alignment */
5200 uint32_t next
= nl
->next
;
5202 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
5204 if (remaining_data
< nl
->next
) {
5205 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5208 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
5209 strcpy(cur_data
+ nl_size
, nl
->name
);
5210 cur_data
+= nl
->next
;
5211 remaining_data
-= nl
->next
;
5215 nl
= (void*)nl
+ next
;
5220 case DM_TABLE_STATUS
:
5222 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
5223 void *cur_data
= argptr
;
5224 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5225 int spec_size
= thunk_type_size(arg_type
, 0);
5228 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5229 uint32_t next
= spec
->next
;
5230 int slen
= strlen((char*)&spec
[1]) + 1;
5231 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
5232 if (guest_data_size
< spec
->next
) {
5233 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5236 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
5237 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
5238 cur_data
= argptr
+ spec
->next
;
5239 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
5245 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
5246 int count
= *(uint32_t*)hdata
;
5247 uint64_t *hdev
= hdata
+ 8;
5248 uint64_t *gdev
= argptr
+ 8;
5251 *(uint32_t*)argptr
= tswap32(count
);
5252 for (i
= 0; i
< count
; i
++) {
5253 *gdev
= tswap64(*hdev
);
5259 case DM_LIST_VERSIONS
:
5261 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
5262 uint32_t remaining_data
= guest_data_size
;
5263 void *cur_data
= argptr
;
5264 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
5265 int vers_size
= thunk_type_size(arg_type
, 0);
5268 uint32_t next
= vers
->next
;
5270 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
5272 if (remaining_data
< vers
->next
) {
5273 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5276 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
5277 strcpy(cur_data
+ vers_size
, vers
->name
);
5278 cur_data
+= vers
->next
;
5279 remaining_data
-= vers
->next
;
5283 vers
= (void*)vers
+ next
;
5288 unlock_user(argptr
, guest_data
, 0);
5289 ret
= -TARGET_EINVAL
;
5292 unlock_user(argptr
, guest_data
, guest_data_size
);
5294 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5296 ret
= -TARGET_EFAULT
;
5299 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5300 unlock_user(argptr
, arg
, target_size
);
5307 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5308 int cmd
, abi_long arg
)
5312 const argtype
*arg_type
= ie
->arg_type
;
5313 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
5316 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
5317 struct blkpg_partition host_part
;
5319 /* Read and convert blkpg */
5321 target_size
= thunk_type_size(arg_type
, 0);
5322 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5324 ret
= -TARGET_EFAULT
;
5327 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5328 unlock_user(argptr
, arg
, 0);
5330 switch (host_blkpg
->op
) {
5331 case BLKPG_ADD_PARTITION
:
5332 case BLKPG_DEL_PARTITION
:
5333 /* payload is struct blkpg_partition */
5336 /* Unknown opcode */
5337 ret
= -TARGET_EINVAL
;
5341 /* Read and convert blkpg->data */
5342 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
5343 target_size
= thunk_type_size(part_arg_type
, 0);
5344 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5346 ret
= -TARGET_EFAULT
;
5349 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
5350 unlock_user(argptr
, arg
, 0);
5352 /* Swizzle the data pointer to our local copy and call! */
5353 host_blkpg
->data
= &host_part
;
5354 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
5360 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5361 int fd
, int cmd
, abi_long arg
)
5363 const argtype
*arg_type
= ie
->arg_type
;
5364 const StructEntry
*se
;
5365 const argtype
*field_types
;
5366 const int *dst_offsets
, *src_offsets
;
5369 abi_ulong
*target_rt_dev_ptr
;
5370 unsigned long *host_rt_dev_ptr
;
5374 assert(ie
->access
== IOC_W
);
5375 assert(*arg_type
== TYPE_PTR
);
5377 assert(*arg_type
== TYPE_STRUCT
);
5378 target_size
= thunk_type_size(arg_type
, 0);
5379 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5381 return -TARGET_EFAULT
;
5384 assert(*arg_type
== (int)STRUCT_rtentry
);
5385 se
= struct_entries
+ *arg_type
++;
5386 assert(se
->convert
[0] == NULL
);
5387 /* convert struct here to be able to catch rt_dev string */
5388 field_types
= se
->field_types
;
5389 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
5390 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
5391 for (i
= 0; i
< se
->nb_fields
; i
++) {
5392 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
5393 assert(*field_types
== TYPE_PTRVOID
);
5394 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
5395 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
5396 if (*target_rt_dev_ptr
!= 0) {
5397 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
5398 tswapal(*target_rt_dev_ptr
));
5399 if (!*host_rt_dev_ptr
) {
5400 unlock_user(argptr
, arg
, 0);
5401 return -TARGET_EFAULT
;
5404 *host_rt_dev_ptr
= 0;
5409 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
5410 argptr
+ src_offsets
[i
],
5411 field_types
, THUNK_HOST
);
5413 unlock_user(argptr
, arg
, 0);
5415 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5416 if (*host_rt_dev_ptr
!= 0) {
5417 unlock_user((void *)*host_rt_dev_ptr
,
5418 *target_rt_dev_ptr
, 0);
5423 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5424 int fd
, int cmd
, abi_long arg
)
5426 int sig
= target_to_host_signal(arg
);
5427 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
/* Table of all emulated ioctls.  Each entry maps a target command number to
 * the host command, records the access mode, and optionally names a custom
 * marshalling function (the IOCTL_SPECIAL form).  The entries themselves are
 * expanded from ioctls.h via these two macros. */
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5439 /* ??? Implement proper locking for ioctls. */
5440 /* do_ioctl() Must return target values and target errnos. */
5441 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
5443 const IOCTLEntry
*ie
;
5444 const argtype
*arg_type
;
5446 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
5452 if (ie
->target_cmd
== 0) {
5453 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
5454 return -TARGET_ENOSYS
;
5456 if (ie
->target_cmd
== cmd
)
5460 arg_type
= ie
->arg_type
;
5462 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
5465 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
5468 switch(arg_type
[0]) {
5471 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
5475 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
5479 target_size
= thunk_type_size(arg_type
, 0);
5480 switch(ie
->access
) {
5482 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5483 if (!is_error(ret
)) {
5484 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5486 return -TARGET_EFAULT
;
5487 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5488 unlock_user(argptr
, arg
, target_size
);
5492 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5494 return -TARGET_EFAULT
;
5495 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5496 unlock_user(argptr
, arg
, 0);
5497 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5501 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5503 return -TARGET_EFAULT
;
5504 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5505 unlock_user(argptr
, arg
, 0);
5506 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5507 if (!is_error(ret
)) {
5508 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5510 return -TARGET_EFAULT
;
5511 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5512 unlock_user(argptr
, arg
, target_size
);
5518 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5519 (long)cmd
, arg_type
[0]);
5520 ret
= -TARGET_ENOSYS
;
5526 static const bitmask_transtbl iflag_tbl
[] = {
5527 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
5528 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
5529 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
5530 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
5531 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
5532 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
5533 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
5534 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
5535 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
5536 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
5537 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
5538 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
5539 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
5540 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
5544 static const bitmask_transtbl oflag_tbl
[] = {
5545 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
5546 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
5547 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
5548 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
5549 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
5550 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
5551 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
5552 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
5553 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
5554 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
5555 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
5556 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
5557 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
5558 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
5559 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
5560 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
5561 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
5562 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
5563 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
5564 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
5565 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
5566 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
5567 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
5568 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
5572 static const bitmask_transtbl cflag_tbl
[] = {
5573 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
5574 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
5575 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
5576 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
5577 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
5578 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
5579 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
5580 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
5581 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
5582 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
5583 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
5584 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
5585 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
5586 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
5587 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
5588 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
5589 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
5590 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
5591 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
5592 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
5593 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
5594 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
5595 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
5596 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
5597 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
5598 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
5599 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
5600 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
5601 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
5602 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
5603 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
5607 static const bitmask_transtbl lflag_tbl
[] = {
5608 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
5609 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
5610 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
5611 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
5612 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
5613 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
5614 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
5615 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
5616 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
5617 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
5618 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
5619 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
5620 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
5621 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
5622 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5626 static void target_to_host_termios (void *dst
, const void *src
)
5628 struct host_termios
*host
= dst
;
5629 const struct target_termios
*target
= src
;
5632 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
5634 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
5636 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
5638 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
5639 host
->c_line
= target
->c_line
;
5641 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
5642 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
5643 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
5644 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
5645 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
5646 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
5647 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
5648 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
5649 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
5650 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
5651 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
5652 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
5653 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
5654 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
5655 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
5656 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
5657 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
5658 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
5661 static void host_to_target_termios (void *dst
, const void *src
)
5663 struct target_termios
*target
= dst
;
5664 const struct host_termios
*host
= src
;
5667 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
5669 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
5671 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
5673 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
5674 target
->c_line
= host
->c_line
;
5676 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
5677 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
5678 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
5679 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
5680 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
5681 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
5682 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
5683 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
5684 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
5685 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
5686 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
5687 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
5688 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
5689 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
5690 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
5691 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
5692 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
5693 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
/* Thunk descriptor for struct termios: names the two conversion callbacks
 * (index 0 = to target, index 1 = to host) and records the size/alignment
 * of the target and host layouts for the generic thunk machinery. */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};
5702 static bitmask_transtbl mmap_flags_tbl
[] = {
5703 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
5704 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
5705 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
5706 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
5707 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
5708 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
5709 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
5710 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
5711 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
, MAP_NORESERVE
,
5716 #if defined(TARGET_I386)
5718 /* NOTE: there is really one LDT for all the threads */
5719 static uint8_t *ldt_table
;
5721 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
5728 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
5729 if (size
> bytecount
)
5731 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
5733 return -TARGET_EFAULT
;
5734 /* ??? Should this by byteswapped? */
5735 memcpy(p
, ldt_table
, size
);
5736 unlock_user(p
, ptr
, size
);
5740 /* XXX: add locking support */
5741 static abi_long
write_ldt(CPUX86State
*env
,
5742 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
5744 struct target_modify_ldt_ldt_s ldt_info
;
5745 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5746 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5747 int seg_not_present
, useable
, lm
;
5748 uint32_t *lp
, entry_1
, entry_2
;
5750 if (bytecount
!= sizeof(ldt_info
))
5751 return -TARGET_EINVAL
;
5752 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
5753 return -TARGET_EFAULT
;
5754 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5755 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5756 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5757 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5758 unlock_user_struct(target_ldt_info
, ptr
, 0);
5760 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
5761 return -TARGET_EINVAL
;
5762 seg_32bit
= ldt_info
.flags
& 1;
5763 contents
= (ldt_info
.flags
>> 1) & 3;
5764 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5765 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5766 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5767 useable
= (ldt_info
.flags
>> 6) & 1;
5771 lm
= (ldt_info
.flags
>> 7) & 1;
5773 if (contents
== 3) {
5775 return -TARGET_EINVAL
;
5776 if (seg_not_present
== 0)
5777 return -TARGET_EINVAL
;
5779 /* allocate the LDT */
5781 env
->ldt
.base
= target_mmap(0,
5782 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
5783 PROT_READ
|PROT_WRITE
,
5784 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
5785 if (env
->ldt
.base
== -1)
5786 return -TARGET_ENOMEM
;
5787 memset(g2h(env
->ldt
.base
), 0,
5788 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
5789 env
->ldt
.limit
= 0xffff;
5790 ldt_table
= g2h(env
->ldt
.base
);
5793 /* NOTE: same code as Linux kernel */
5794 /* Allow LDTs to be cleared by the user. */
5795 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5798 read_exec_only
== 1 &&
5800 limit_in_pages
== 0 &&
5801 seg_not_present
== 1 &&
5809 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5810 (ldt_info
.limit
& 0x0ffff);
5811 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5812 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5813 (ldt_info
.limit
& 0xf0000) |
5814 ((read_exec_only
^ 1) << 9) |
5816 ((seg_not_present
^ 1) << 15) |
5818 (limit_in_pages
<< 23) |
5822 entry_2
|= (useable
<< 20);
5824 /* Install the new entry ... */
5826 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
5827 lp
[0] = tswap32(entry_1
);
5828 lp
[1] = tswap32(entry_2
);
5832 /* specific and weird i386 syscalls */
5833 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
5834 unsigned long bytecount
)
5840 ret
= read_ldt(ptr
, bytecount
);
5843 ret
= write_ldt(env
, ptr
, bytecount
, 1);
5846 ret
= write_ldt(env
, ptr
, bytecount
, 0);
5849 ret
= -TARGET_ENOSYS
;
5855 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5856 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5858 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5859 struct target_modify_ldt_ldt_s ldt_info
;
5860 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5861 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5862 int seg_not_present
, useable
, lm
;
5863 uint32_t *lp
, entry_1
, entry_2
;
5866 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5867 if (!target_ldt_info
)
5868 return -TARGET_EFAULT
;
5869 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5870 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5871 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5872 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5873 if (ldt_info
.entry_number
== -1) {
5874 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
5875 if (gdt_table
[i
] == 0) {
5876 ldt_info
.entry_number
= i
;
5877 target_ldt_info
->entry_number
= tswap32(i
);
5882 unlock_user_struct(target_ldt_info
, ptr
, 1);
5884 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
5885 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
5886 return -TARGET_EINVAL
;
5887 seg_32bit
= ldt_info
.flags
& 1;
5888 contents
= (ldt_info
.flags
>> 1) & 3;
5889 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5890 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5891 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5892 useable
= (ldt_info
.flags
>> 6) & 1;
5896 lm
= (ldt_info
.flags
>> 7) & 1;
5899 if (contents
== 3) {
5900 if (seg_not_present
== 0)
5901 return -TARGET_EINVAL
;
5904 /* NOTE: same code as Linux kernel */
5905 /* Allow LDTs to be cleared by the user. */
5906 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5907 if ((contents
== 0 &&
5908 read_exec_only
== 1 &&
5910 limit_in_pages
== 0 &&
5911 seg_not_present
== 1 &&
5919 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5920 (ldt_info
.limit
& 0x0ffff);
5921 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5922 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5923 (ldt_info
.limit
& 0xf0000) |
5924 ((read_exec_only
^ 1) << 9) |
5926 ((seg_not_present
^ 1) << 15) |
5928 (limit_in_pages
<< 23) |
5933 /* Install the new entry ... */
5935 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
5936 lp
[0] = tswap32(entry_1
);
5937 lp
[1] = tswap32(entry_2
);
5941 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
5943 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5944 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
5945 uint32_t base_addr
, limit
, flags
;
5946 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
5947 int seg_not_present
, useable
, lm
;
5948 uint32_t *lp
, entry_1
, entry_2
;
5950 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
5951 if (!target_ldt_info
)
5952 return -TARGET_EFAULT
;
5953 idx
= tswap32(target_ldt_info
->entry_number
);
5954 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
5955 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
5956 unlock_user_struct(target_ldt_info
, ptr
, 1);
5957 return -TARGET_EINVAL
;
5959 lp
= (uint32_t *)(gdt_table
+ idx
);
5960 entry_1
= tswap32(lp
[0]);
5961 entry_2
= tswap32(lp
[1]);
5963 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
5964 contents
= (entry_2
>> 10) & 3;
5965 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
5966 seg_32bit
= (entry_2
>> 22) & 1;
5967 limit_in_pages
= (entry_2
>> 23) & 1;
5968 useable
= (entry_2
>> 20) & 1;
5972 lm
= (entry_2
>> 21) & 1;
5974 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
5975 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
5976 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
5977 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
5978 base_addr
= (entry_1
>> 16) |
5979 (entry_2
& 0xff000000) |
5980 ((entry_2
& 0xff) << 16);
5981 target_ldt_info
->base_addr
= tswapal(base_addr
);
5982 target_ldt_info
->limit
= tswap32(limit
);
5983 target_ldt_info
->flags
= tswap32(flags
);
5984 unlock_user_struct(target_ldt_info
, ptr
, 1);
5987 #endif /* TARGET_I386 && TARGET_ABI32 */
5989 #ifndef TARGET_ABI32
5990 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
5997 case TARGET_ARCH_SET_GS
:
5998 case TARGET_ARCH_SET_FS
:
5999 if (code
== TARGET_ARCH_SET_GS
)
6003 cpu_x86_load_seg(env
, idx
, 0);
6004 env
->segs
[idx
].base
= addr
;
6006 case TARGET_ARCH_GET_GS
:
6007 case TARGET_ARCH_GET_FS
:
6008 if (code
== TARGET_ARCH_GET_GS
)
6012 val
= env
->segs
[idx
].base
;
6013 if (put_user(val
, addr
, abi_ulong
))
6014 ret
= -TARGET_EFAULT
;
6017 ret
= -TARGET_EINVAL
;
6024 #endif /* defined(TARGET_I386) */
6026 #define NEW_STACK_SIZE 0x40000
6029 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
6032 pthread_mutex_t mutex
;
6033 pthread_cond_t cond
;
6036 abi_ulong child_tidptr
;
6037 abi_ulong parent_tidptr
;
6041 static void *clone_func(void *arg
)
6043 new_thread_info
*info
= arg
;
6048 rcu_register_thread();
6050 cpu
= ENV_GET_CPU(env
);
6052 ts
= (TaskState
*)cpu
->opaque
;
6053 info
->tid
= gettid();
6054 cpu
->host_tid
= info
->tid
;
6056 if (info
->child_tidptr
)
6057 put_user_u32(info
->tid
, info
->child_tidptr
);
6058 if (info
->parent_tidptr
)
6059 put_user_u32(info
->tid
, info
->parent_tidptr
);
6060 /* Enable signals. */
6061 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
6062 /* Signal to the parent that we're ready. */
6063 pthread_mutex_lock(&info
->mutex
);
6064 pthread_cond_broadcast(&info
->cond
);
6065 pthread_mutex_unlock(&info
->mutex
);
6066 /* Wait until the parent has finshed initializing the tls state. */
6067 pthread_mutex_lock(&clone_lock
);
6068 pthread_mutex_unlock(&clone_lock
);
6074 /* do_fork() Must return host values and target errnos (unlike most
6075 do_*() functions). */
6076 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
6077 abi_ulong parent_tidptr
, target_ulong newtls
,
6078 abi_ulong child_tidptr
)
6080 CPUState
*cpu
= ENV_GET_CPU(env
);
6084 CPUArchState
*new_env
;
6087 flags
&= ~CLONE_IGNORED_FLAGS
;
6089 /* Emulate vfork() with fork() */
6090 if (flags
& CLONE_VFORK
)
6091 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
6093 if (flags
& CLONE_VM
) {
6094 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
6095 new_thread_info info
;
6096 pthread_attr_t attr
;
6098 if (((flags
& CLONE_THREAD_FLAGS
) != CLONE_THREAD_FLAGS
) ||
6099 (flags
& CLONE_INVALID_THREAD_FLAGS
)) {
6100 return -TARGET_EINVAL
;
6103 ts
= g_new0(TaskState
, 1);
6104 init_task_state(ts
);
6105 /* we create a new CPU instance. */
6106 new_env
= cpu_copy(env
);
6107 /* Init regs that differ from the parent. */
6108 cpu_clone_regs(new_env
, newsp
);
6109 new_cpu
= ENV_GET_CPU(new_env
);
6110 new_cpu
->opaque
= ts
;
6111 ts
->bprm
= parent_ts
->bprm
;
6112 ts
->info
= parent_ts
->info
;
6113 ts
->signal_mask
= parent_ts
->signal_mask
;
6115 if (flags
& CLONE_CHILD_CLEARTID
) {
6116 ts
->child_tidptr
= child_tidptr
;
6119 if (flags
& CLONE_SETTLS
) {
6120 cpu_set_tls (new_env
, newtls
);
6123 /* Grab a mutex so that thread setup appears atomic. */
6124 pthread_mutex_lock(&clone_lock
);
6126 memset(&info
, 0, sizeof(info
));
6127 pthread_mutex_init(&info
.mutex
, NULL
);
6128 pthread_mutex_lock(&info
.mutex
);
6129 pthread_cond_init(&info
.cond
, NULL
);
6131 if (flags
& CLONE_CHILD_SETTID
) {
6132 info
.child_tidptr
= child_tidptr
;
6134 if (flags
& CLONE_PARENT_SETTID
) {
6135 info
.parent_tidptr
= parent_tidptr
;
6138 ret
= pthread_attr_init(&attr
);
6139 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
6140 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
6141 /* It is not safe to deliver signals until the child has finished
6142 initializing, so temporarily block all signals. */
6143 sigfillset(&sigmask
);
6144 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
6146 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
6147 /* TODO: Free new CPU state if thread creation failed. */
6149 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
6150 pthread_attr_destroy(&attr
);
6152 /* Wait for the child to initialize. */
6153 pthread_cond_wait(&info
.cond
, &info
.mutex
);
6158 pthread_mutex_unlock(&info
.mutex
);
6159 pthread_cond_destroy(&info
.cond
);
6160 pthread_mutex_destroy(&info
.mutex
);
6161 pthread_mutex_unlock(&clone_lock
);
6163 /* if no CLONE_VM, we consider it is a fork */
6164 if (flags
& CLONE_INVALID_FORK_FLAGS
) {
6165 return -TARGET_EINVAL
;
6168 /* We can't support custom termination signals */
6169 if ((flags
& CSIGNAL
) != TARGET_SIGCHLD
) {
6170 return -TARGET_EINVAL
;
6173 if (block_signals()) {
6174 return -TARGET_ERESTARTSYS
;
6180 /* Child Process. */
6182 cpu_clone_regs(env
, newsp
);
6184 /* There is a race condition here. The parent process could
6185 theoretically read the TID in the child process before the child
6186 tid is set. This would require using either ptrace
6187 (not implemented) or having *_tidptr to point at a shared memory
6188 mapping. We can't repeat the spinlock hack used above because
6189 the child process gets its own copy of the lock. */
6190 if (flags
& CLONE_CHILD_SETTID
)
6191 put_user_u32(gettid(), child_tidptr
);
6192 if (flags
& CLONE_PARENT_SETTID
)
6193 put_user_u32(gettid(), parent_tidptr
);
6194 ts
= (TaskState
*)cpu
->opaque
;
6195 if (flags
& CLONE_SETTLS
)
6196 cpu_set_tls (env
, newtls
);
6197 if (flags
& CLONE_CHILD_CLEARTID
)
6198 ts
->child_tidptr
= child_tidptr
;
6206 /* warning : doesn't handle linux specific flags... */
6207 static int target_to_host_fcntl_cmd(int cmd
)
6210 case TARGET_F_DUPFD
:
6211 case TARGET_F_GETFD
:
6212 case TARGET_F_SETFD
:
6213 case TARGET_F_GETFL
:
6214 case TARGET_F_SETFL
:
6216 case TARGET_F_GETLK
:
6218 case TARGET_F_SETLK
:
6220 case TARGET_F_SETLKW
:
6222 case TARGET_F_GETOWN
:
6224 case TARGET_F_SETOWN
:
6226 case TARGET_F_GETSIG
:
6228 case TARGET_F_SETSIG
:
6230 #if TARGET_ABI_BITS == 32
6231 case TARGET_F_GETLK64
:
6233 case TARGET_F_SETLK64
:
6235 case TARGET_F_SETLKW64
:
6238 case TARGET_F_SETLEASE
:
6240 case TARGET_F_GETLEASE
:
6242 #ifdef F_DUPFD_CLOEXEC
6243 case TARGET_F_DUPFD_CLOEXEC
:
6244 return F_DUPFD_CLOEXEC
;
6246 case TARGET_F_NOTIFY
:
6249 case TARGET_F_GETOWN_EX
:
6253 case TARGET_F_SETOWN_EX
:
6257 case TARGET_F_SETPIPE_SZ
:
6258 return F_SETPIPE_SZ
;
6259 case TARGET_F_GETPIPE_SZ
:
6260 return F_GETPIPE_SZ
;
6263 return -TARGET_EINVAL
;
6265 return -TARGET_EINVAL
;
6268 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
6269 static const bitmask_transtbl flock_tbl
[] = {
6270 TRANSTBL_CONVERT(F_RDLCK
),
6271 TRANSTBL_CONVERT(F_WRLCK
),
6272 TRANSTBL_CONVERT(F_UNLCK
),
6273 TRANSTBL_CONVERT(F_EXLCK
),
6274 TRANSTBL_CONVERT(F_SHLCK
),
6278 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
6279 abi_ulong target_flock_addr
)
6281 struct target_flock
*target_fl
;
6284 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6285 return -TARGET_EFAULT
;
6288 __get_user(l_type
, &target_fl
->l_type
);
6289 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6290 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6291 __get_user(fl
->l_start
, &target_fl
->l_start
);
6292 __get_user(fl
->l_len
, &target_fl
->l_len
);
6293 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6294 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6298 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
6299 const struct flock64
*fl
)
6301 struct target_flock
*target_fl
;
6304 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6305 return -TARGET_EFAULT
;
6308 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6309 __put_user(l_type
, &target_fl
->l_type
);
6310 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6311 __put_user(fl
->l_start
, &target_fl
->l_start
);
6312 __put_user(fl
->l_len
, &target_fl
->l_len
);
6313 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6314 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6318 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
6319 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
6321 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6322 static inline abi_long
copy_from_user_eabi_flock64(struct flock64
*fl
,
6323 abi_ulong target_flock_addr
)
6325 struct target_eabi_flock64
*target_fl
;
6328 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6329 return -TARGET_EFAULT
;
6332 __get_user(l_type
, &target_fl
->l_type
);
6333 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6334 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6335 __get_user(fl
->l_start
, &target_fl
->l_start
);
6336 __get_user(fl
->l_len
, &target_fl
->l_len
);
6337 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6338 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6342 static inline abi_long
copy_to_user_eabi_flock64(abi_ulong target_flock_addr
,
6343 const struct flock64
*fl
)
6345 struct target_eabi_flock64
*target_fl
;
6348 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6349 return -TARGET_EFAULT
;
6352 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6353 __put_user(l_type
, &target_fl
->l_type
);
6354 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6355 __put_user(fl
->l_start
, &target_fl
->l_start
);
6356 __put_user(fl
->l_len
, &target_fl
->l_len
);
6357 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6358 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6363 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
6364 abi_ulong target_flock_addr
)
6366 struct target_flock64
*target_fl
;
6369 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6370 return -TARGET_EFAULT
;
6373 __get_user(l_type
, &target_fl
->l_type
);
6374 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6375 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6376 __get_user(fl
->l_start
, &target_fl
->l_start
);
6377 __get_user(fl
->l_len
, &target_fl
->l_len
);
6378 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6379 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6383 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
6384 const struct flock64
*fl
)
6386 struct target_flock64
*target_fl
;
6389 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6390 return -TARGET_EFAULT
;
6393 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6394 __put_user(l_type
, &target_fl
->l_type
);
6395 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6396 __put_user(fl
->l_start
, &target_fl
->l_start
);
6397 __put_user(fl
->l_len
, &target_fl
->l_len
);
6398 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6399 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6403 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
6405 struct flock64 fl64
;
6407 struct f_owner_ex fox
;
6408 struct target_f_owner_ex
*target_fox
;
6411 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
6413 if (host_cmd
== -TARGET_EINVAL
)
6417 case TARGET_F_GETLK
:
6418 ret
= copy_from_user_flock(&fl64
, arg
);
6422 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6424 ret
= copy_to_user_flock(arg
, &fl64
);
6428 case TARGET_F_SETLK
:
6429 case TARGET_F_SETLKW
:
6430 ret
= copy_from_user_flock(&fl64
, arg
);
6434 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6437 case TARGET_F_GETLK64
:
6438 ret
= copy_from_user_flock64(&fl64
, arg
);
6442 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6444 ret
= copy_to_user_flock64(arg
, &fl64
);
6447 case TARGET_F_SETLK64
:
6448 case TARGET_F_SETLKW64
:
6449 ret
= copy_from_user_flock64(&fl64
, arg
);
6453 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6456 case TARGET_F_GETFL
:
6457 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6459 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
6463 case TARGET_F_SETFL
:
6464 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
6465 target_to_host_bitmask(arg
,
6470 case TARGET_F_GETOWN_EX
:
6471 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6473 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
6474 return -TARGET_EFAULT
;
6475 target_fox
->type
= tswap32(fox
.type
);
6476 target_fox
->pid
= tswap32(fox
.pid
);
6477 unlock_user_struct(target_fox
, arg
, 1);
6483 case TARGET_F_SETOWN_EX
:
6484 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
6485 return -TARGET_EFAULT
;
6486 fox
.type
= tswap32(target_fox
->type
);
6487 fox
.pid
= tswap32(target_fox
->pid
);
6488 unlock_user_struct(target_fox
, arg
, 0);
6489 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6493 case TARGET_F_SETOWN
:
6494 case TARGET_F_GETOWN
:
6495 case TARGET_F_SETSIG
:
6496 case TARGET_F_GETSIG
:
6497 case TARGET_F_SETLEASE
:
6498 case TARGET_F_GETLEASE
:
6499 case TARGET_F_SETPIPE_SZ
:
6500 case TARGET_F_GETPIPE_SZ
:
6501 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6505 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
#ifdef USE_UID16
/* Clamp 32-bit IDs into the 16-bit range used by old-ABI targets;
 * -1 (the "no change" sentinel) is preserved when widening.
 */
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}
static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}
static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}
static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */
/* 32-bit ID ABI: conversions are the identity. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
6577 /* We must do direct syscalls for setting UID/GID, because we want to
6578 * implement the Linux system call semantics of "change only for this thread",
6579 * not the libc/POSIX semantics of "change for all threads in process".
6580 * (See http://ewontfix.com/17/ for more details.)
6581 * We use the 32-bit version of the syscalls if present; if it is not
6582 * then either the host architecture supports 32-bit UIDs natively with
6583 * the standard syscall, or the 16-bit UID is the best we can do.
6585 #ifdef __NR_setuid32
6586 #define __NR_sys_setuid __NR_setuid32
6588 #define __NR_sys_setuid __NR_setuid
6590 #ifdef __NR_setgid32
6591 #define __NR_sys_setgid __NR_setgid32
6593 #define __NR_sys_setgid __NR_setgid
6595 #ifdef __NR_setresuid32
6596 #define __NR_sys_setresuid __NR_setresuid32
6598 #define __NR_sys_setresuid __NR_setresuid
6600 #ifdef __NR_setresgid32
6601 #define __NR_sys_setresgid __NR_setresgid32
6603 #define __NR_sys_setresgid __NR_setresgid
6606 _syscall1(int, sys_setuid
, uid_t
, uid
)
6607 _syscall1(int, sys_setgid
, gid_t
, gid
)
6608 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
6609 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
6611 void syscall_init(void)
6614 const argtype
*arg_type
;
6618 thunk_init(STRUCT_MAX
);
6620 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6621 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6622 #include "syscall_types.h"
6624 #undef STRUCT_SPECIAL
6626 /* Build target_to_host_errno_table[] table from
6627 * host_to_target_errno_table[]. */
6628 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
6629 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
6632 /* we patch the ioctl size if necessary. We rely on the fact that
6633 no ioctl has all the bits at '1' in the size field */
6635 while (ie
->target_cmd
!= 0) {
6636 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
6637 TARGET_IOC_SIZEMASK
) {
6638 arg_type
= ie
->arg_type
;
6639 if (arg_type
[0] != TYPE_PTR
) {
6640 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
6645 size
= thunk_type_size(arg_type
, 0);
6646 ie
->target_cmd
= (ie
->target_cmd
&
6647 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
6648 (size
<< TARGET_IOC_SIZESHIFT
);
6651 /* automatic consistency check if same arch */
6652 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6653 (defined(__x86_64__) && defined(TARGET_X86_64))
6654 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
6655 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6656 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#if TARGET_ABI_BITS == 32
/* Combine two 32-bit register halves into a 64-bit offset, honouring the
 * target's word order.
 */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABI: the offset already arrives whole in the first argument. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* truncate64: the 64-bit length arrives in two registers; on ABIs that
 * require an even register pair the argument registers shift up by one.
 */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair alignment handling as truncate64. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
6707 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
6708 abi_ulong target_addr
)
6710 struct target_timespec
*target_ts
;
6712 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
6713 return -TARGET_EFAULT
;
6714 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6715 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6716 unlock_user_struct(target_ts
, target_addr
, 0);
6720 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
6721 struct timespec
*host_ts
)
6723 struct target_timespec
*target_ts
;
6725 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
6726 return -TARGET_EFAULT
;
6727 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6728 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6729 unlock_user_struct(target_ts
, target_addr
, 1);
6733 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
6734 abi_ulong target_addr
)
6736 struct target_itimerspec
*target_itspec
;
6738 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
6739 return -TARGET_EFAULT
;
6742 host_itspec
->it_interval
.tv_sec
=
6743 tswapal(target_itspec
->it_interval
.tv_sec
);
6744 host_itspec
->it_interval
.tv_nsec
=
6745 tswapal(target_itspec
->it_interval
.tv_nsec
);
6746 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
6747 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
6749 unlock_user_struct(target_itspec
, target_addr
, 1);
6753 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
6754 struct itimerspec
*host_its
)
6756 struct target_itimerspec
*target_itspec
;
6758 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
6759 return -TARGET_EFAULT
;
6762 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
6763 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
6765 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
6766 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
6768 unlock_user_struct(target_itspec
, target_addr
, 0);
6772 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
6773 abi_ulong target_addr
)
6775 struct target_sigevent
*target_sevp
;
6777 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
6778 return -TARGET_EFAULT
;
6781 /* This union is awkward on 64 bit systems because it has a 32 bit
6782 * integer and a pointer in it; we follow the conversion approach
6783 * used for handling sigval types in signal.c so the guest should get
6784 * the correct value back even if we did a 64 bit byteswap and it's
6785 * using the 32 bit integer.
6787 host_sevp
->sigev_value
.sival_ptr
=
6788 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
6789 host_sevp
->sigev_signo
=
6790 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
6791 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
6792 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
6794 unlock_user_struct(target_sevp
, target_addr
, 1);
#if defined(TARGET_NR_mlockall)
/* Translate the guest's mlockall() flag bits into host MCL_* bits. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }

    return result;
}
#endif
6813 static inline abi_long
host_to_target_stat64(void *cpu_env
,
6814 abi_ulong target_addr
,
6815 struct stat
*host_st
)
6817 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
6818 if (((CPUARMState
*)cpu_env
)->eabi
) {
6819 struct target_eabi_stat64
*target_st
;
6821 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
6822 return -TARGET_EFAULT
;
6823 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
6824 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
6825 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
6826 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6827 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
6829 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
6830 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
6831 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
6832 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
6833 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
6834 __put_user(host_st
->st_size
, &target_st
->st_size
);
6835 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
6836 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
6837 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
6838 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
6839 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
6840 unlock_user_struct(target_st
, target_addr
, 1);
6844 #if defined(TARGET_HAS_STRUCT_STAT64)
6845 struct target_stat64
*target_st
;
6847 struct target_stat
*target_st
;
6850 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
6851 return -TARGET_EFAULT
;
6852 memset(target_st
, 0, sizeof(*target_st
));
6853 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
6854 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
6855 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
6856 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
6858 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
6859 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
6860 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
6861 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
6862 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
6863 /* XXX: better use of kernel struct */
6864 __put_user(host_st
->st_size
, &target_st
->st_size
);
6865 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
6866 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
6867 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
6868 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
6869 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
6870 unlock_user_struct(target_st
, target_addr
, 1);
6876 /* ??? Using host futex calls even when target atomic operations
6877 are not really atomic probably breaks things. However implementing
6878 futexes locally would make futexes shared between multiple processes
6879 tricky. However they're probably useless because guest atomic
6880 operations won't work either. */
6881 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
6882 target_ulong uaddr2
, int val3
)
6884 struct timespec ts
, *pts
;
6887 /* ??? We assume FUTEX_* constants are the same on both host
6889 #ifdef FUTEX_CMD_MASK
6890 base_op
= op
& FUTEX_CMD_MASK
;
6896 case FUTEX_WAIT_BITSET
:
6899 target_to_host_timespec(pts
, timeout
);
6903 return get_errno(safe_futex(g2h(uaddr
), op
, tswap32(val
),
6906 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6908 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
6910 case FUTEX_CMP_REQUEUE
:
6912 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6913 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6914 But the prototype takes a `struct timespec *'; insert casts
6915 to satisfy the compiler. We do not need to tswap TIMEOUT
6916 since it's not compared to guest memory. */
6917 pts
= (struct timespec
*)(uintptr_t) timeout
;
6918 return get_errno(safe_futex(g2h(uaddr
), op
, val
, pts
,
6920 (base_op
== FUTEX_CMP_REQUEUE
6924 return -TARGET_ENOSYS
;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate name_to_handle_at(2): allocate a host file_handle of the size
 * the guest advertised, perform the call, then copy the (opaque) handle
 * back with the header fields byteswapped.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* First field of the guest struct is the caller-provided buffer size. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    struct file_handle *target_fh =
        lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    struct file_handle *fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate open_by_handle_at(2): duplicate the guest's file_handle into
 * host byte order and open flags, then perform the host call.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     int flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* First field of the guest struct is the handle size. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    fd_trans_unregister(ret);

    return ret;
}
#endif
7015 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
/* signalfd siginfo conversion */

/* Convert one signalfd_siginfo from host to target byte order in place.
 * NOTE: the only caller passes the same buffer for tinfo and info, so the
 * conversion is effectively in-place; the MCEERR check below reads
 * tinfo's fields before they are byteswapped, which is correct under
 * that aliasing.
 */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */
#ifdef BUS_MCEERR_AO
    if (tinfo->ssi_signo == SIGBUS &&
        (tinfo->ssi_code == BUS_MCEERR_AR ||
         tinfo->ssi_code == BUS_MCEERR_AO)) {
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    /* Fix: read ssi_errno from the source struct like every other field
     * (the old code read it from tinfo; identical only because the
     * caller aliases the two pointers).
     */
    tinfo->ssi_errno = tswap32(info->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
7057 static abi_long
host_to_target_data_signalfd(void *buf
, size_t len
)
7061 for (i
= 0; i
< len
; i
+= sizeof(struct signalfd_siginfo
)) {
7062 host_to_target_signalfd_siginfo(buf
+ i
, buf
+ i
);
7068 static TargetFdTrans target_signalfd_trans
= {
7069 .host_to_target_data
= host_to_target_data_signalfd
,
7072 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
7075 target_sigset_t
*target_mask
;
7079 if (flags
& ~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
)) {
7080 return -TARGET_EINVAL
;
7082 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
7083 return -TARGET_EFAULT
;
7086 target_to_host_sigset(&host_mask
, target_mask
);
7088 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
7090 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
7092 fd_trans_register(ret
, &target_signalfd_trans
);
7095 unlock_user_struct(target_mask
, mask
, 0);
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
/* Emulate reads of /proc/self/cmdline: stream the host file into fd,
 * dropping the first NUL-terminated string (the qemu binary path) so the
 * guest sees its own argv[0] first.
 */
static int open_self_cmdline(void *cpu_env, int fd)
{
    int fd_orig = -1;
    bool word_skipped = false;

    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;
    }

    while (true) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            int e = errno;
            fd_orig = close(fd_orig);
            errno = e;
            return -1;
        } else if (nb_read == 0) {
            break;
        }

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command. */
            cp_buf = memchr(buf, 0, nb_read);
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;
            }
        }

        if (word_skipped) {
            if (write(fd, cp_buf, nb_read) != nb_read) {
                int e = errno;
                close(fd_orig);
                errno = e;
                return -1;
            }
        }
    }

    return close(fd_orig);
}
7165 static int open_self_maps(void *cpu_env
, int fd
)
7167 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7168 TaskState
*ts
= cpu
->opaque
;
7174 fp
= fopen("/proc/self/maps", "r");
7179 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7180 int fields
, dev_maj
, dev_min
, inode
;
7181 uint64_t min
, max
, offset
;
7182 char flag_r
, flag_w
, flag_x
, flag_p
;
7183 char path
[512] = "";
7184 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
7185 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
7186 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
7188 if ((fields
< 10) || (fields
> 11)) {
7191 if (h2g_valid(min
)) {
7192 int flags
= page_get_flags(h2g(min
));
7193 max
= h2g_valid(max
- 1) ? max
: (uintptr_t)g2h(GUEST_ADDR_MAX
);
7194 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
7197 if (h2g(min
) == ts
->info
->stack_limit
) {
7198 pstrcpy(path
, sizeof(path
), " [stack]");
7200 dprintf(fd
, TARGET_ABI_FMT_lx
"-" TARGET_ABI_FMT_lx
7201 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
7202 h2g(min
), h2g(max
- 1) + 1, flag_r
, flag_w
,
7203 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
7204 path
[0] ? " " : "", path
);
7214 static int open_self_stat(void *cpu_env
, int fd
)
7216 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7217 TaskState
*ts
= cpu
->opaque
;
7218 abi_ulong start_stack
= ts
->info
->start_stack
;
7221 for (i
= 0; i
< 44; i
++) {
7229 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
7230 } else if (i
== 1) {
7232 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
7233 } else if (i
== 27) {
7236 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
7238 /* for the rest, there is MasterCard */
7239 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
7243 if (write(fd
, buf
, len
) != len
) {
7251 static int open_self_auxv(void *cpu_env
, int fd
)
7253 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7254 TaskState
*ts
= cpu
->opaque
;
7255 abi_ulong auxv
= ts
->info
->saved_auxv
;
7256 abi_ulong len
= ts
->info
->auxv_len
;
7260 * Auxiliary vector is stored in target process stack.
7261 * read in whole auxv vector and copy it to file
7263 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
7267 r
= write(fd
, ptr
, len
);
7274 lseek(fd
, 0, SEEK_SET
);
7275 unlock_user(ptr
, auxv
, len
);
/* Return 1 if filename refers to this process's own /proc entry named
 * 'entry' — either "/proc/self/<entry>" or "/proc/<our-pid>/<entry>" —
 * and 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
        filename += strlen("/proc/");
        if (!strncmp(filename, "self/", strlen("self/"))) {
            filename += strlen("self/");
        } else if (*filename >= '1' && *filename <= '9') {
            char myself[80];
            snprintf(myself, sizeof(myself), "%d/", getpid());
            if (!strncmp(filename, myself, strlen(myself))) {
                filename += strlen(myself);
            } else {
                return 0;
            }
        } else {
            return 0;
        }
        if (!strcmp(filename, entry)) {
            return 1;
        }
    }
    return 0;
}
7305 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-path matcher used for fixed /proc entries (e.g. /proc/net/route). */
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}
7311 static int open_net_route(void *cpu_env
, int fd
)
7318 fp
= fopen("/proc/net/route", "r");
7325 read
= getline(&line
, &len
, fp
);
7326 dprintf(fd
, "%s", line
);
7330 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7332 uint32_t dest
, gw
, mask
;
7333 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
7334 sscanf(line
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7335 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
7336 &mask
, &mtu
, &window
, &irtt
);
7337 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7338 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
7339 metric
, tswap32(mask
), mtu
, window
, irtt
);
7349 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
7352 const char *filename
;
7353 int (*fill
)(void *cpu_env
, int fd
);
7354 int (*cmp
)(const char *s1
, const char *s2
);
7356 const struct fake_open
*fake_open
;
7357 static const struct fake_open fakes
[] = {
7358 { "maps", open_self_maps
, is_proc_myself
},
7359 { "stat", open_self_stat
, is_proc_myself
},
7360 { "auxv", open_self_auxv
, is_proc_myself
},
7361 { "cmdline", open_self_cmdline
, is_proc_myself
},
7362 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7363 { "/proc/net/route", open_net_route
, is_proc
},
7365 { NULL
, NULL
, NULL
}
7368 if (is_proc_myself(pathname
, "exe")) {
7369 int execfd
= qemu_getauxval(AT_EXECFD
);
7370 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
7373 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
7374 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
7379 if (fake_open
->filename
) {
7381 char filename
[PATH_MAX
];
7384 /* create temporary file to map stat to */
7385 tmpdir
= getenv("TMPDIR");
7388 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
7389 fd
= mkstemp(filename
);
7395 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
7401 lseek(fd
, 0, SEEK_SET
);
7406 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
7409 #define TIMER_MAGIC 0x0caf0000
7410 #define TIMER_MAGIC_MASK 0xffff0000
7412 /* Convert QEMU provided timer ID back to internal 16bit index format */
7413 static target_timer_t
get_timer_id(abi_long arg
)
7415 target_timer_t timerid
= arg
;
7417 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
7418 return -TARGET_EINVAL
;
7423 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
7424 return -TARGET_EINVAL
;
7430 /* do_syscall() should always have a single exit point at the end so
7431 that actions, such as logging of syscall results, can be performed.
7432 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7433 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
7434 abi_long arg2
, abi_long arg3
, abi_long arg4
,
7435 abi_long arg5
, abi_long arg6
, abi_long arg7
,
7438 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
7444 #if defined(DEBUG_ERESTARTSYS)
7445 /* Debug-only code for exercising the syscall-restart code paths
7446 * in the per-architecture cpu main loops: restart every syscall
7447 * the guest makes once before letting it through.
7454 return -TARGET_ERESTARTSYS
;
7460 gemu_log("syscall %d", num
);
7462 trace_guest_user_syscall(cpu
, num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
7464 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7467 case TARGET_NR_exit
:
7468 /* In old applications this may be used to implement _exit(2).
7469 However in threaded applictions it is used for thread termination,
7470 and _exit_group is used for application termination.
7471 Do thread termination if we have more then one thread. */
7473 if (block_signals()) {
7474 ret
= -TARGET_ERESTARTSYS
;
7478 if (CPU_NEXT(first_cpu
)) {
7482 /* Remove the CPU from the list. */
7483 QTAILQ_REMOVE(&cpus
, cpu
, node
);
7486 if (ts
->child_tidptr
) {
7487 put_user_u32(0, ts
->child_tidptr
);
7488 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
7492 object_unref(OBJECT(cpu
));
7494 rcu_unregister_thread();
7500 gdb_exit(cpu_env
, arg1
);
7502 ret
= 0; /* avoid warning */
7504 case TARGET_NR_read
:
7508 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7510 ret
= get_errno(safe_read(arg1
, p
, arg3
));
7512 fd_trans_host_to_target_data(arg1
)) {
7513 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
7515 unlock_user(p
, arg2
, ret
);
7518 case TARGET_NR_write
:
7519 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
7521 ret
= get_errno(safe_write(arg1
, p
, arg3
));
7522 unlock_user(p
, arg2
, 0);
7524 #ifdef TARGET_NR_open
7525 case TARGET_NR_open
:
7526 if (!(p
= lock_user_string(arg1
)))
7528 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
7529 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
7531 fd_trans_unregister(ret
);
7532 unlock_user(p
, arg1
, 0);
7535 case TARGET_NR_openat
:
7536 if (!(p
= lock_user_string(arg2
)))
7538 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
7539 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
7541 fd_trans_unregister(ret
);
7542 unlock_user(p
, arg2
, 0);
7544 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7545 case TARGET_NR_name_to_handle_at
:
7546 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
7549 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7550 case TARGET_NR_open_by_handle_at
:
7551 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
7552 fd_trans_unregister(ret
);
7555 case TARGET_NR_close
:
7556 fd_trans_unregister(arg1
);
7557 ret
= get_errno(close(arg1
));
7562 #ifdef TARGET_NR_fork
7563 case TARGET_NR_fork
:
7564 ret
= get_errno(do_fork(cpu_env
, SIGCHLD
, 0, 0, 0, 0));
7567 #ifdef TARGET_NR_waitpid
7568 case TARGET_NR_waitpid
:
7571 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
7572 if (!is_error(ret
) && arg2
&& ret
7573 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
7578 #ifdef TARGET_NR_waitid
7579 case TARGET_NR_waitid
:
7583 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
7584 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
7585 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
7587 host_to_target_siginfo(p
, &info
);
7588 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
7593 #ifdef TARGET_NR_creat /* not on alpha */
7594 case TARGET_NR_creat
:
7595 if (!(p
= lock_user_string(arg1
)))
7597 ret
= get_errno(creat(p
, arg2
));
7598 fd_trans_unregister(ret
);
7599 unlock_user(p
, arg1
, 0);
7602 #ifdef TARGET_NR_link
7603 case TARGET_NR_link
:
7606 p
= lock_user_string(arg1
);
7607 p2
= lock_user_string(arg2
);
7609 ret
= -TARGET_EFAULT
;
7611 ret
= get_errno(link(p
, p2
));
7612 unlock_user(p2
, arg2
, 0);
7613 unlock_user(p
, arg1
, 0);
7617 #if defined(TARGET_NR_linkat)
7618 case TARGET_NR_linkat
:
7623 p
= lock_user_string(arg2
);
7624 p2
= lock_user_string(arg4
);
7626 ret
= -TARGET_EFAULT
;
7628 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
7629 unlock_user(p
, arg2
, 0);
7630 unlock_user(p2
, arg4
, 0);
7634 #ifdef TARGET_NR_unlink
7635 case TARGET_NR_unlink
:
7636 if (!(p
= lock_user_string(arg1
)))
7638 ret
= get_errno(unlink(p
));
7639 unlock_user(p
, arg1
, 0);
7642 #if defined(TARGET_NR_unlinkat)
7643 case TARGET_NR_unlinkat
:
7644 if (!(p
= lock_user_string(arg2
)))
7646 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
7647 unlock_user(p
, arg2
, 0);
7650 case TARGET_NR_execve
:
7652 char **argp
, **envp
;
7655 abi_ulong guest_argp
;
7656 abi_ulong guest_envp
;
7663 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
7664 if (get_user_ual(addr
, gp
))
7672 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
7673 if (get_user_ual(addr
, gp
))
7680 argp
= alloca((argc
+ 1) * sizeof(void *));
7681 envp
= alloca((envc
+ 1) * sizeof(void *));
7683 for (gp
= guest_argp
, q
= argp
; gp
;
7684 gp
+= sizeof(abi_ulong
), q
++) {
7685 if (get_user_ual(addr
, gp
))
7689 if (!(*q
= lock_user_string(addr
)))
7691 total_size
+= strlen(*q
) + 1;
7695 for (gp
= guest_envp
, q
= envp
; gp
;
7696 gp
+= sizeof(abi_ulong
), q
++) {
7697 if (get_user_ual(addr
, gp
))
7701 if (!(*q
= lock_user_string(addr
)))
7703 total_size
+= strlen(*q
) + 1;
7707 if (!(p
= lock_user_string(arg1
)))
7709 /* Although execve() is not an interruptible syscall it is
7710 * a special case where we must use the safe_syscall wrapper:
7711 * if we allow a signal to happen before we make the host
7712 * syscall then we will 'lose' it, because at the point of
7713 * execve the process leaves QEMU's control. So we use the
7714 * safe syscall wrapper to ensure that we either take the
7715 * signal as a guest signal, or else it does not happen
7716 * before the execve completes and makes it the other
7717 * program's problem.
7719 ret
= get_errno(safe_execve(p
, argp
, envp
));
7720 unlock_user(p
, arg1
, 0);
7725 ret
= -TARGET_EFAULT
;
7728 for (gp
= guest_argp
, q
= argp
; *q
;
7729 gp
+= sizeof(abi_ulong
), q
++) {
7730 if (get_user_ual(addr
, gp
)
7733 unlock_user(*q
, addr
, 0);
7735 for (gp
= guest_envp
, q
= envp
; *q
;
7736 gp
+= sizeof(abi_ulong
), q
++) {
7737 if (get_user_ual(addr
, gp
)
7740 unlock_user(*q
, addr
, 0);
7744 case TARGET_NR_chdir
:
7745 if (!(p
= lock_user_string(arg1
)))
7747 ret
= get_errno(chdir(p
));
7748 unlock_user(p
, arg1
, 0);
7750 #ifdef TARGET_NR_time
7751 case TARGET_NR_time
:
7754 ret
= get_errno(time(&host_time
));
7757 && put_user_sal(host_time
, arg1
))
7762 #ifdef TARGET_NR_mknod
7763 case TARGET_NR_mknod
:
7764 if (!(p
= lock_user_string(arg1
)))
7766 ret
= get_errno(mknod(p
, arg2
, arg3
));
7767 unlock_user(p
, arg1
, 0);
7770 #if defined(TARGET_NR_mknodat)
7771 case TARGET_NR_mknodat
:
7772 if (!(p
= lock_user_string(arg2
)))
7774 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
7775 unlock_user(p
, arg2
, 0);
7778 #ifdef TARGET_NR_chmod
7779 case TARGET_NR_chmod
:
7780 if (!(p
= lock_user_string(arg1
)))
7782 ret
= get_errno(chmod(p
, arg2
));
7783 unlock_user(p
, arg1
, 0);
7786 #ifdef TARGET_NR_break
7787 case TARGET_NR_break
:
7790 #ifdef TARGET_NR_oldstat
7791 case TARGET_NR_oldstat
:
7794 case TARGET_NR_lseek
:
7795 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
7797 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7798 /* Alpha specific */
7799 case TARGET_NR_getxpid
:
7800 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
7801 ret
= get_errno(getpid());
7804 #ifdef TARGET_NR_getpid
7805 case TARGET_NR_getpid
:
7806 ret
= get_errno(getpid());
7809 case TARGET_NR_mount
:
7811 /* need to look at the data field */
7815 p
= lock_user_string(arg1
);
7823 p2
= lock_user_string(arg2
);
7826 unlock_user(p
, arg1
, 0);
7832 p3
= lock_user_string(arg3
);
7835 unlock_user(p
, arg1
, 0);
7837 unlock_user(p2
, arg2
, 0);
7844 /* FIXME - arg5 should be locked, but it isn't clear how to
7845 * do that since it's not guaranteed to be a NULL-terminated
7849 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
7851 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
7853 ret
= get_errno(ret
);
7856 unlock_user(p
, arg1
, 0);
7858 unlock_user(p2
, arg2
, 0);
7860 unlock_user(p3
, arg3
, 0);
7864 #ifdef TARGET_NR_umount
7865 case TARGET_NR_umount
:
7866 if (!(p
= lock_user_string(arg1
)))
7868 ret
= get_errno(umount(p
));
7869 unlock_user(p
, arg1
, 0);
7872 #ifdef TARGET_NR_stime /* not on alpha */
7873 case TARGET_NR_stime
:
7876 if (get_user_sal(host_time
, arg1
))
7878 ret
= get_errno(stime(&host_time
));
7882 case TARGET_NR_ptrace
:
7884 #ifdef TARGET_NR_alarm /* not on alpha */
7885 case TARGET_NR_alarm
:
7889 #ifdef TARGET_NR_oldfstat
7890 case TARGET_NR_oldfstat
:
7893 #ifdef TARGET_NR_pause /* not on alpha */
7894 case TARGET_NR_pause
:
7895 if (!block_signals()) {
7896 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
7898 ret
= -TARGET_EINTR
;
7901 #ifdef TARGET_NR_utime
7902 case TARGET_NR_utime
:
7904 struct utimbuf tbuf
, *host_tbuf
;
7905 struct target_utimbuf
*target_tbuf
;
7907 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
7909 tbuf
.actime
= tswapal(target_tbuf
->actime
);
7910 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
7911 unlock_user_struct(target_tbuf
, arg2
, 0);
7916 if (!(p
= lock_user_string(arg1
)))
7918 ret
= get_errno(utime(p
, host_tbuf
));
7919 unlock_user(p
, arg1
, 0);
7923 #ifdef TARGET_NR_utimes
7924 case TARGET_NR_utimes
:
7926 struct timeval
*tvp
, tv
[2];
7928 if (copy_from_user_timeval(&tv
[0], arg2
)
7929 || copy_from_user_timeval(&tv
[1],
7930 arg2
+ sizeof(struct target_timeval
)))
7936 if (!(p
= lock_user_string(arg1
)))
7938 ret
= get_errno(utimes(p
, tvp
));
7939 unlock_user(p
, arg1
, 0);
7943 #if defined(TARGET_NR_futimesat)
7944 case TARGET_NR_futimesat
:
7946 struct timeval
*tvp
, tv
[2];
7948 if (copy_from_user_timeval(&tv
[0], arg3
)
7949 || copy_from_user_timeval(&tv
[1],
7950 arg3
+ sizeof(struct target_timeval
)))
7956 if (!(p
= lock_user_string(arg2
)))
7958 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
7959 unlock_user(p
, arg2
, 0);
7963 #ifdef TARGET_NR_stty
7964 case TARGET_NR_stty
:
7967 #ifdef TARGET_NR_gtty
7968 case TARGET_NR_gtty
:
7971 #ifdef TARGET_NR_access
7972 case TARGET_NR_access
:
7973 if (!(p
= lock_user_string(arg1
)))
7975 ret
= get_errno(access(path(p
), arg2
));
7976 unlock_user(p
, arg1
, 0);
7979 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7980 case TARGET_NR_faccessat
:
7981 if (!(p
= lock_user_string(arg2
)))
7983 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
7984 unlock_user(p
, arg2
, 0);
7987 #ifdef TARGET_NR_nice /* not on alpha */
7988 case TARGET_NR_nice
:
7989 ret
= get_errno(nice(arg1
));
7992 #ifdef TARGET_NR_ftime
7993 case TARGET_NR_ftime
:
7996 case TARGET_NR_sync
:
8000 case TARGET_NR_kill
:
8001 ret
= get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
8003 #ifdef TARGET_NR_rename
8004 case TARGET_NR_rename
:
8007 p
= lock_user_string(arg1
);
8008 p2
= lock_user_string(arg2
);
8010 ret
= -TARGET_EFAULT
;
8012 ret
= get_errno(rename(p
, p2
));
8013 unlock_user(p2
, arg2
, 0);
8014 unlock_user(p
, arg1
, 0);
8018 #if defined(TARGET_NR_renameat)
8019 case TARGET_NR_renameat
:
8022 p
= lock_user_string(arg2
);
8023 p2
= lock_user_string(arg4
);
8025 ret
= -TARGET_EFAULT
;
8027 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
8028 unlock_user(p2
, arg4
, 0);
8029 unlock_user(p
, arg2
, 0);
8033 #ifdef TARGET_NR_mkdir
8034 case TARGET_NR_mkdir
:
8035 if (!(p
= lock_user_string(arg1
)))
8037 ret
= get_errno(mkdir(p
, arg2
));
8038 unlock_user(p
, arg1
, 0);
8041 #if defined(TARGET_NR_mkdirat)
8042 case TARGET_NR_mkdirat
:
8043 if (!(p
= lock_user_string(arg2
)))
8045 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
8046 unlock_user(p
, arg2
, 0);
8049 #ifdef TARGET_NR_rmdir
8050 case TARGET_NR_rmdir
:
8051 if (!(p
= lock_user_string(arg1
)))
8053 ret
= get_errno(rmdir(p
));
8054 unlock_user(p
, arg1
, 0);
8058 ret
= get_errno(dup(arg1
));
8060 fd_trans_dup(arg1
, ret
);
8063 #ifdef TARGET_NR_pipe
8064 case TARGET_NR_pipe
:
8065 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
8068 #ifdef TARGET_NR_pipe2
8069 case TARGET_NR_pipe2
:
8070 ret
= do_pipe(cpu_env
, arg1
,
8071 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
8074 case TARGET_NR_times
:
8076 struct target_tms
*tmsp
;
8078 ret
= get_errno(times(&tms
));
8080 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
8083 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
8084 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
8085 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
8086 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
8089 ret
= host_to_target_clock_t(ret
);
8092 #ifdef TARGET_NR_prof
8093 case TARGET_NR_prof
:
8096 #ifdef TARGET_NR_signal
8097 case TARGET_NR_signal
:
8100 case TARGET_NR_acct
:
8102 ret
= get_errno(acct(NULL
));
8104 if (!(p
= lock_user_string(arg1
)))
8106 ret
= get_errno(acct(path(p
)));
8107 unlock_user(p
, arg1
, 0);
8110 #ifdef TARGET_NR_umount2
8111 case TARGET_NR_umount2
:
8112 if (!(p
= lock_user_string(arg1
)))
8114 ret
= get_errno(umount2(p
, arg2
));
8115 unlock_user(p
, arg1
, 0);
8118 #ifdef TARGET_NR_lock
8119 case TARGET_NR_lock
:
8122 case TARGET_NR_ioctl
:
8123 ret
= do_ioctl(arg1
, arg2
, arg3
);
8125 case TARGET_NR_fcntl
:
8126 ret
= do_fcntl(arg1
, arg2
, arg3
);
8128 #ifdef TARGET_NR_mpx
8132 case TARGET_NR_setpgid
:
8133 ret
= get_errno(setpgid(arg1
, arg2
));
8135 #ifdef TARGET_NR_ulimit
8136 case TARGET_NR_ulimit
:
8139 #ifdef TARGET_NR_oldolduname
8140 case TARGET_NR_oldolduname
:
8143 case TARGET_NR_umask
:
8144 ret
= get_errno(umask(arg1
));
8146 case TARGET_NR_chroot
:
8147 if (!(p
= lock_user_string(arg1
)))
8149 ret
= get_errno(chroot(p
));
8150 unlock_user(p
, arg1
, 0);
8152 #ifdef TARGET_NR_ustat
8153 case TARGET_NR_ustat
:
8156 #ifdef TARGET_NR_dup2
8157 case TARGET_NR_dup2
:
8158 ret
= get_errno(dup2(arg1
, arg2
));
8160 fd_trans_dup(arg1
, arg2
);
8164 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8165 case TARGET_NR_dup3
:
8166 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
8168 fd_trans_dup(arg1
, arg2
);
8172 #ifdef TARGET_NR_getppid /* not on alpha */
8173 case TARGET_NR_getppid
:
8174 ret
= get_errno(getppid());
8177 #ifdef TARGET_NR_getpgrp
8178 case TARGET_NR_getpgrp
:
8179 ret
= get_errno(getpgrp());
8182 case TARGET_NR_setsid
:
8183 ret
= get_errno(setsid());
8185 #ifdef TARGET_NR_sigaction
8186 case TARGET_NR_sigaction
:
8188 #if defined(TARGET_ALPHA)
8189 struct target_sigaction act
, oact
, *pact
= 0;
8190 struct target_old_sigaction
*old_act
;
8192 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8194 act
._sa_handler
= old_act
->_sa_handler
;
8195 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8196 act
.sa_flags
= old_act
->sa_flags
;
8197 act
.sa_restorer
= 0;
8198 unlock_user_struct(old_act
, arg2
, 0);
8201 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8202 if (!is_error(ret
) && arg3
) {
8203 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8205 old_act
->_sa_handler
= oact
._sa_handler
;
8206 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8207 old_act
->sa_flags
= oact
.sa_flags
;
8208 unlock_user_struct(old_act
, arg3
, 1);
8210 #elif defined(TARGET_MIPS)
8211 struct target_sigaction act
, oact
, *pact
, *old_act
;
8214 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8216 act
._sa_handler
= old_act
->_sa_handler
;
8217 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
8218 act
.sa_flags
= old_act
->sa_flags
;
8219 unlock_user_struct(old_act
, arg2
, 0);
8225 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8227 if (!is_error(ret
) && arg3
) {
8228 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8230 old_act
->_sa_handler
= oact
._sa_handler
;
8231 old_act
->sa_flags
= oact
.sa_flags
;
8232 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
8233 old_act
->sa_mask
.sig
[1] = 0;
8234 old_act
->sa_mask
.sig
[2] = 0;
8235 old_act
->sa_mask
.sig
[3] = 0;
8236 unlock_user_struct(old_act
, arg3
, 1);
8239 struct target_old_sigaction
*old_act
;
8240 struct target_sigaction act
, oact
, *pact
;
8242 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8244 act
._sa_handler
= old_act
->_sa_handler
;
8245 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8246 act
.sa_flags
= old_act
->sa_flags
;
8247 act
.sa_restorer
= old_act
->sa_restorer
;
8248 unlock_user_struct(old_act
, arg2
, 0);
8253 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8254 if (!is_error(ret
) && arg3
) {
8255 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8257 old_act
->_sa_handler
= oact
._sa_handler
;
8258 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8259 old_act
->sa_flags
= oact
.sa_flags
;
8260 old_act
->sa_restorer
= oact
.sa_restorer
;
8261 unlock_user_struct(old_act
, arg3
, 1);
8267 case TARGET_NR_rt_sigaction
:
8269 #if defined(TARGET_ALPHA)
8270 struct target_sigaction act
, oact
, *pact
= 0;
8271 struct target_rt_sigaction
*rt_act
;
8273 if (arg4
!= sizeof(target_sigset_t
)) {
8274 ret
= -TARGET_EINVAL
;
8278 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
8280 act
._sa_handler
= rt_act
->_sa_handler
;
8281 act
.sa_mask
= rt_act
->sa_mask
;
8282 act
.sa_flags
= rt_act
->sa_flags
;
8283 act
.sa_restorer
= arg5
;
8284 unlock_user_struct(rt_act
, arg2
, 0);
8287 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8288 if (!is_error(ret
) && arg3
) {
8289 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
8291 rt_act
->_sa_handler
= oact
._sa_handler
;
8292 rt_act
->sa_mask
= oact
.sa_mask
;
8293 rt_act
->sa_flags
= oact
.sa_flags
;
8294 unlock_user_struct(rt_act
, arg3
, 1);
8297 struct target_sigaction
*act
;
8298 struct target_sigaction
*oact
;
8300 if (arg4
!= sizeof(target_sigset_t
)) {
8301 ret
= -TARGET_EINVAL
;
8305 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
8310 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
8311 ret
= -TARGET_EFAULT
;
8312 goto rt_sigaction_fail
;
8316 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
8319 unlock_user_struct(act
, arg2
, 0);
8321 unlock_user_struct(oact
, arg3
, 1);
8325 #ifdef TARGET_NR_sgetmask /* not on alpha */
8326 case TARGET_NR_sgetmask
:
8329 abi_ulong target_set
;
8330 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8332 host_to_target_old_sigset(&target_set
, &cur_set
);
8338 #ifdef TARGET_NR_ssetmask /* not on alpha */
8339 case TARGET_NR_ssetmask
:
8341 sigset_t set
, oset
, cur_set
;
8342 abi_ulong target_set
= arg1
;
8343 /* We only have one word of the new mask so we must read
8344 * the rest of it with do_sigprocmask() and OR in this word.
8345 * We are guaranteed that a do_sigprocmask() that only queries
8346 * the signal mask will not fail.
8348 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8350 target_to_host_old_sigset(&set
, &target_set
);
8351 sigorset(&set
, &set
, &cur_set
);
8352 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
8354 host_to_target_old_sigset(&target_set
, &oset
);
8360 #ifdef TARGET_NR_sigprocmask
8361 case TARGET_NR_sigprocmask
:
8363 #if defined(TARGET_ALPHA)
8364 sigset_t set
, oldset
;
8369 case TARGET_SIG_BLOCK
:
8372 case TARGET_SIG_UNBLOCK
:
8375 case TARGET_SIG_SETMASK
:
8379 ret
= -TARGET_EINVAL
;
8383 target_to_host_old_sigset(&set
, &mask
);
8385 ret
= do_sigprocmask(how
, &set
, &oldset
);
8386 if (!is_error(ret
)) {
8387 host_to_target_old_sigset(&mask
, &oldset
);
8389 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
8392 sigset_t set
, oldset
, *set_ptr
;
8397 case TARGET_SIG_BLOCK
:
8400 case TARGET_SIG_UNBLOCK
:
8403 case TARGET_SIG_SETMASK
:
8407 ret
= -TARGET_EINVAL
;
8410 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8412 target_to_host_old_sigset(&set
, p
);
8413 unlock_user(p
, arg2
, 0);
8419 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8420 if (!is_error(ret
) && arg3
) {
8421 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8423 host_to_target_old_sigset(p
, &oldset
);
8424 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8430 case TARGET_NR_rt_sigprocmask
:
8433 sigset_t set
, oldset
, *set_ptr
;
8435 if (arg4
!= sizeof(target_sigset_t
)) {
8436 ret
= -TARGET_EINVAL
;
8442 case TARGET_SIG_BLOCK
:
8445 case TARGET_SIG_UNBLOCK
:
8448 case TARGET_SIG_SETMASK
:
8452 ret
= -TARGET_EINVAL
;
8455 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8457 target_to_host_sigset(&set
, p
);
8458 unlock_user(p
, arg2
, 0);
8464 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8465 if (!is_error(ret
) && arg3
) {
8466 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8468 host_to_target_sigset(p
, &oldset
);
8469 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8473 #ifdef TARGET_NR_sigpending
8474 case TARGET_NR_sigpending
:
8477 ret
= get_errno(sigpending(&set
));
8478 if (!is_error(ret
)) {
8479 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8481 host_to_target_old_sigset(p
, &set
);
8482 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8487 case TARGET_NR_rt_sigpending
:
8491 /* Yes, this check is >, not != like most. We follow the kernel's
8492 * logic and it does it like this because it implements
8493 * NR_sigpending through the same code path, and in that case
8494 * the old_sigset_t is smaller in size.
8496 if (arg2
> sizeof(target_sigset_t
)) {
8497 ret
= -TARGET_EINVAL
;
8501 ret
= get_errno(sigpending(&set
));
8502 if (!is_error(ret
)) {
8503 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8505 host_to_target_sigset(p
, &set
);
8506 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8510 #ifdef TARGET_NR_sigsuspend
8511 case TARGET_NR_sigsuspend
:
8513 TaskState
*ts
= cpu
->opaque
;
8514 #if defined(TARGET_ALPHA)
8515 abi_ulong mask
= arg1
;
8516 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
8518 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8520 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
8521 unlock_user(p
, arg1
, 0);
8523 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8525 if (ret
!= -TARGET_ERESTARTSYS
) {
8526 ts
->in_sigsuspend
= 1;
8531 case TARGET_NR_rt_sigsuspend
:
8533 TaskState
*ts
= cpu
->opaque
;
8535 if (arg2
!= sizeof(target_sigset_t
)) {
8536 ret
= -TARGET_EINVAL
;
8539 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8541 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
8542 unlock_user(p
, arg1
, 0);
8543 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8545 if (ret
!= -TARGET_ERESTARTSYS
) {
8546 ts
->in_sigsuspend
= 1;
8550 case TARGET_NR_rt_sigtimedwait
:
8553 struct timespec uts
, *puts
;
8556 if (arg4
!= sizeof(target_sigset_t
)) {
8557 ret
= -TARGET_EINVAL
;
8561 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8563 target_to_host_sigset(&set
, p
);
8564 unlock_user(p
, arg1
, 0);
8567 target_to_host_timespec(puts
, arg3
);
8571 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
8573 if (!is_error(ret
)) {
8575 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
8580 host_to_target_siginfo(p
, &uinfo
);
8581 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
8583 ret
= host_to_target_signal(ret
);
8587 case TARGET_NR_rt_sigqueueinfo
:
8591 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
8595 target_to_host_siginfo(&uinfo
, p
);
8596 unlock_user(p
, arg1
, 0);
8597 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
8600 #ifdef TARGET_NR_sigreturn
8601 case TARGET_NR_sigreturn
:
8602 if (block_signals()) {
8603 ret
= -TARGET_ERESTARTSYS
;
8605 ret
= do_sigreturn(cpu_env
);
8609 case TARGET_NR_rt_sigreturn
:
8610 if (block_signals()) {
8611 ret
= -TARGET_ERESTARTSYS
;
8613 ret
= do_rt_sigreturn(cpu_env
);
8616 case TARGET_NR_sethostname
:
8617 if (!(p
= lock_user_string(arg1
)))
8619 ret
= get_errno(sethostname(p
, arg2
));
8620 unlock_user(p
, arg1
, 0);
8622 case TARGET_NR_setrlimit
:
8624 int resource
= target_to_host_resource(arg1
);
8625 struct target_rlimit
*target_rlim
;
8627 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
8629 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
8630 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
8631 unlock_user_struct(target_rlim
, arg2
, 0);
8632 ret
= get_errno(setrlimit(resource
, &rlim
));
8635 case TARGET_NR_getrlimit
:
8637 int resource
= target_to_host_resource(arg1
);
8638 struct target_rlimit
*target_rlim
;
8641 ret
= get_errno(getrlimit(resource
, &rlim
));
8642 if (!is_error(ret
)) {
8643 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
8645 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
8646 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
8647 unlock_user_struct(target_rlim
, arg2
, 1);
8651 case TARGET_NR_getrusage
:
8653 struct rusage rusage
;
8654 ret
= get_errno(getrusage(arg1
, &rusage
));
8655 if (!is_error(ret
)) {
8656 ret
= host_to_target_rusage(arg2
, &rusage
);
8660 case TARGET_NR_gettimeofday
:
8663 ret
= get_errno(gettimeofday(&tv
, NULL
));
8664 if (!is_error(ret
)) {
8665 if (copy_to_user_timeval(arg1
, &tv
))
8670 case TARGET_NR_settimeofday
:
8672 struct timeval tv
, *ptv
= NULL
;
8673 struct timezone tz
, *ptz
= NULL
;
8676 if (copy_from_user_timeval(&tv
, arg1
)) {
8683 if (copy_from_user_timezone(&tz
, arg2
)) {
8689 ret
= get_errno(settimeofday(ptv
, ptz
));
8692 #if defined(TARGET_NR_select)
8693 case TARGET_NR_select
:
8694 #if defined(TARGET_WANT_NI_OLD_SELECT)
8695 /* some architectures used to have old_select here
8696 * but now ENOSYS it.
8698 ret
= -TARGET_ENOSYS
;
8699 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8700 ret
= do_old_select(arg1
);
8702 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
8706 #ifdef TARGET_NR_pselect6
8707 case TARGET_NR_pselect6
:
8709 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
8710 fd_set rfds
, wfds
, efds
;
8711 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
8712 struct timespec ts
, *ts_ptr
;
8715 * The 6th arg is actually two args smashed together,
8716 * so we cannot use the C library.
8724 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
8725 target_sigset_t
*target_sigset
;
8733 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
8737 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
8741 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
8747 * This takes a timespec, and not a timeval, so we cannot
8748 * use the do_select() helper ...
8751 if (target_to_host_timespec(&ts
, ts_addr
)) {
8759 /* Extract the two packed args for the sigset */
8762 sig
.size
= SIGSET_T_SIZE
;
8764 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
8768 arg_sigset
= tswapal(arg7
[0]);
8769 arg_sigsize
= tswapal(arg7
[1]);
8770 unlock_user(arg7
, arg6
, 0);
8774 if (arg_sigsize
!= sizeof(*target_sigset
)) {
8775 /* Like the kernel, we enforce correct size sigsets */
8776 ret
= -TARGET_EINVAL
;
8779 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
8780 sizeof(*target_sigset
), 1);
8781 if (!target_sigset
) {
8784 target_to_host_sigset(&set
, target_sigset
);
8785 unlock_user(target_sigset
, arg_sigset
, 0);
8793 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
8796 if (!is_error(ret
)) {
8797 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
8799 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
8801 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
8804 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
8810 #ifdef TARGET_NR_symlink
8811 case TARGET_NR_symlink
:
8814 p
= lock_user_string(arg1
);
8815 p2
= lock_user_string(arg2
);
8817 ret
= -TARGET_EFAULT
;
8819 ret
= get_errno(symlink(p
, p2
));
8820 unlock_user(p2
, arg2
, 0);
8821 unlock_user(p
, arg1
, 0);
8825 #if defined(TARGET_NR_symlinkat)
8826 case TARGET_NR_symlinkat
:
8829 p
= lock_user_string(arg1
);
8830 p2
= lock_user_string(arg3
);
8832 ret
= -TARGET_EFAULT
;
8834 ret
= get_errno(symlinkat(p
, arg2
, p2
));
8835 unlock_user(p2
, arg3
, 0);
8836 unlock_user(p
, arg1
, 0);
8840 #ifdef TARGET_NR_oldlstat
8841 case TARGET_NR_oldlstat
:
8844 #ifdef TARGET_NR_readlink
8845 case TARGET_NR_readlink
:
8848 p
= lock_user_string(arg1
);
8849 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
8851 ret
= -TARGET_EFAULT
;
8853 /* Short circuit this for the magic exe check. */
8854 ret
= -TARGET_EINVAL
;
8855 } else if (is_proc_myself((const char *)p
, "exe")) {
8856 char real
[PATH_MAX
], *temp
;
8857 temp
= realpath(exec_path
, real
);
8858 /* Return value is # of bytes that we wrote to the buffer. */
8860 ret
= get_errno(-1);
8862 /* Don't worry about sign mismatch as earlier mapping
8863 * logic would have thrown a bad address error. */
8864 ret
= MIN(strlen(real
), arg3
);
8865 /* We cannot NUL terminate the string. */
8866 memcpy(p2
, real
, ret
);
8869 ret
= get_errno(readlink(path(p
), p2
, arg3
));
8871 unlock_user(p2
, arg2
, ret
);
8872 unlock_user(p
, arg1
, 0);
8876 #if defined(TARGET_NR_readlinkat)
8877 case TARGET_NR_readlinkat
:
8880 p
= lock_user_string(arg2
);
8881 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
8883 ret
= -TARGET_EFAULT
;
8884 } else if (is_proc_myself((const char *)p
, "exe")) {
8885 char real
[PATH_MAX
], *temp
;
8886 temp
= realpath(exec_path
, real
);
8887 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
8888 snprintf((char *)p2
, arg4
, "%s", real
);
8890 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
8892 unlock_user(p2
, arg3
, ret
);
8893 unlock_user(p
, arg2
, 0);
8897 #ifdef TARGET_NR_uselib
8898 case TARGET_NR_uselib
:
8901 #ifdef TARGET_NR_swapon
8902 case TARGET_NR_swapon
:
8903 if (!(p
= lock_user_string(arg1
)))
8905 ret
= get_errno(swapon(p
, arg2
));
8906 unlock_user(p
, arg1
, 0);
8909 case TARGET_NR_reboot
:
8910 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
8911 /* arg4 must be ignored in all other cases */
8912 p
= lock_user_string(arg4
);
8916 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
8917 unlock_user(p
, arg4
, 0);
8919 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
8922 #ifdef TARGET_NR_readdir
8923 case TARGET_NR_readdir
:
8926 #ifdef TARGET_NR_mmap
8927 case TARGET_NR_mmap
:
8928 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8929 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8930 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8931 || defined(TARGET_S390X)
8934 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
8935 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
8943 unlock_user(v
, arg1
, 0);
8944 ret
= get_errno(target_mmap(v1
, v2
, v3
,
8945 target_to_host_bitmask(v4
, mmap_flags_tbl
),
8949 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
8950 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8956 #ifdef TARGET_NR_mmap2
8957 case TARGET_NR_mmap2
:
8959 #define MMAP_SHIFT 12
8961 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
8962 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
8964 arg6
<< MMAP_SHIFT
));
8967 case TARGET_NR_munmap
:
8968 ret
= get_errno(target_munmap(arg1
, arg2
));
8970 case TARGET_NR_mprotect
:
8972 TaskState
*ts
= cpu
->opaque
;
8973 /* Special hack to detect libc making the stack executable. */
8974 if ((arg3
& PROT_GROWSDOWN
)
8975 && arg1
>= ts
->info
->stack_limit
8976 && arg1
<= ts
->info
->start_stack
) {
8977 arg3
&= ~PROT_GROWSDOWN
;
8978 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
8979 arg1
= ts
->info
->stack_limit
;
8982 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
8984 #ifdef TARGET_NR_mremap
8985 case TARGET_NR_mremap
:
8986 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
8989 /* ??? msync/mlock/munlock are broken for softmmu. */
8990 #ifdef TARGET_NR_msync
8991 case TARGET_NR_msync
:
8992 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
8995 #ifdef TARGET_NR_mlock
8996 case TARGET_NR_mlock
:
8997 ret
= get_errno(mlock(g2h(arg1
), arg2
));
9000 #ifdef TARGET_NR_munlock
9001 case TARGET_NR_munlock
:
9002 ret
= get_errno(munlock(g2h(arg1
), arg2
));
9005 #ifdef TARGET_NR_mlockall
9006 case TARGET_NR_mlockall
:
9007 ret
= get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
9010 #ifdef TARGET_NR_munlockall
9011 case TARGET_NR_munlockall
:
9012 ret
= get_errno(munlockall());
9015 case TARGET_NR_truncate
:
9016 if (!(p
= lock_user_string(arg1
)))
9018 ret
= get_errno(truncate(p
, arg2
));
9019 unlock_user(p
, arg1
, 0);
9021 case TARGET_NR_ftruncate
:
9022 ret
= get_errno(ftruncate(arg1
, arg2
));
9024 case TARGET_NR_fchmod
:
9025 ret
= get_errno(fchmod(arg1
, arg2
));
9027 #if defined(TARGET_NR_fchmodat)
9028 case TARGET_NR_fchmodat
:
9029 if (!(p
= lock_user_string(arg2
)))
9031 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
9032 unlock_user(p
, arg2
, 0);
9035 case TARGET_NR_getpriority
:
9036 /* Note that negative values are valid for getpriority, so we must
9037 differentiate based on errno settings. */
9039 ret
= getpriority(arg1
, arg2
);
9040 if (ret
== -1 && errno
!= 0) {
9041 ret
= -host_to_target_errno(errno
);
9045 /* Return value is the unbiased priority. Signal no error. */
9046 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
9048 /* Return value is a biased priority to avoid negative numbers. */
9052 case TARGET_NR_setpriority
:
9053 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
9055 #ifdef TARGET_NR_profil
9056 case TARGET_NR_profil
:
9059 case TARGET_NR_statfs
:
9060 if (!(p
= lock_user_string(arg1
)))
9062 ret
= get_errno(statfs(path(p
), &stfs
));
9063 unlock_user(p
, arg1
, 0);
9065 if (!is_error(ret
)) {
9066 struct target_statfs
*target_stfs
;
9068 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
9070 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9071 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9072 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9073 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9074 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9075 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9076 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9077 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9078 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9079 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9080 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9081 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9082 unlock_user_struct(target_stfs
, arg2
, 1);
9085 case TARGET_NR_fstatfs
:
9086 ret
= get_errno(fstatfs(arg1
, &stfs
));
9087 goto convert_statfs
;
9088 #ifdef TARGET_NR_statfs64
9089 case TARGET_NR_statfs64
:
9090 if (!(p
= lock_user_string(arg1
)))
9092 ret
= get_errno(statfs(path(p
), &stfs
));
9093 unlock_user(p
, arg1
, 0);
9095 if (!is_error(ret
)) {
9096 struct target_statfs64
*target_stfs
;
9098 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
9100 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9101 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9102 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9103 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9104 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9105 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9106 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9107 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9108 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9109 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9110 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9111 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9112 unlock_user_struct(target_stfs
, arg3
, 1);
9115 case TARGET_NR_fstatfs64
:
9116 ret
= get_errno(fstatfs(arg1
, &stfs
));
9117 goto convert_statfs64
;
9119 #ifdef TARGET_NR_ioperm
9120 case TARGET_NR_ioperm
:
9123 #ifdef TARGET_NR_socketcall
9124 case TARGET_NR_socketcall
:
9125 ret
= do_socketcall(arg1
, arg2
);
9128 #ifdef TARGET_NR_accept
9129 case TARGET_NR_accept
:
9130 ret
= do_accept4(arg1
, arg2
, arg3
, 0);
9133 #ifdef TARGET_NR_accept4
9134 case TARGET_NR_accept4
:
9135 ret
= do_accept4(arg1
, arg2
, arg3
, arg4
);
9138 #ifdef TARGET_NR_bind
9139 case TARGET_NR_bind
:
9140 ret
= do_bind(arg1
, arg2
, arg3
);
9143 #ifdef TARGET_NR_connect
9144 case TARGET_NR_connect
:
9145 ret
= do_connect(arg1
, arg2
, arg3
);
9148 #ifdef TARGET_NR_getpeername
9149 case TARGET_NR_getpeername
:
9150 ret
= do_getpeername(arg1
, arg2
, arg3
);
9153 #ifdef TARGET_NR_getsockname
9154 case TARGET_NR_getsockname
:
9155 ret
= do_getsockname(arg1
, arg2
, arg3
);
9158 #ifdef TARGET_NR_getsockopt
9159 case TARGET_NR_getsockopt
:
9160 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
9163 #ifdef TARGET_NR_listen
9164 case TARGET_NR_listen
:
9165 ret
= get_errno(listen(arg1
, arg2
));
9168 #ifdef TARGET_NR_recv
9169 case TARGET_NR_recv
:
9170 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
9173 #ifdef TARGET_NR_recvfrom
9174 case TARGET_NR_recvfrom
:
9175 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9178 #ifdef TARGET_NR_recvmsg
9179 case TARGET_NR_recvmsg
:
9180 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
9183 #ifdef TARGET_NR_send
9184 case TARGET_NR_send
:
9185 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
9188 #ifdef TARGET_NR_sendmsg
9189 case TARGET_NR_sendmsg
:
9190 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
9193 #ifdef TARGET_NR_sendmmsg
9194 case TARGET_NR_sendmmsg
:
9195 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
9197 case TARGET_NR_recvmmsg
:
9198 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
9201 #ifdef TARGET_NR_sendto
9202 case TARGET_NR_sendto
:
9203 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9206 #ifdef TARGET_NR_shutdown
9207 case TARGET_NR_shutdown
:
9208 ret
= get_errno(shutdown(arg1
, arg2
));
9211 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9212 case TARGET_NR_getrandom
:
9213 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
9217 ret
= get_errno(getrandom(p
, arg2
, arg3
));
9218 unlock_user(p
, arg1
, ret
);
9221 #ifdef TARGET_NR_socket
9222 case TARGET_NR_socket
:
9223 ret
= do_socket(arg1
, arg2
, arg3
);
9224 fd_trans_unregister(ret
);
9227 #ifdef TARGET_NR_socketpair
9228 case TARGET_NR_socketpair
:
9229 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
9232 #ifdef TARGET_NR_setsockopt
9233 case TARGET_NR_setsockopt
:
9234 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
9238 case TARGET_NR_syslog
:
9239 if (!(p
= lock_user_string(arg2
)))
9241 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
9242 unlock_user(p
, arg2
, 0);
9245 case TARGET_NR_setitimer
:
9247 struct itimerval value
, ovalue
, *pvalue
;
9251 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
9252 || copy_from_user_timeval(&pvalue
->it_value
,
9253 arg2
+ sizeof(struct target_timeval
)))
9258 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
9259 if (!is_error(ret
) && arg3
) {
9260 if (copy_to_user_timeval(arg3
,
9261 &ovalue
.it_interval
)
9262 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
9268 case TARGET_NR_getitimer
:
9270 struct itimerval value
;
9272 ret
= get_errno(getitimer(arg1
, &value
));
9273 if (!is_error(ret
) && arg2
) {
9274 if (copy_to_user_timeval(arg2
,
9276 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
9282 #ifdef TARGET_NR_stat
9283 case TARGET_NR_stat
:
9284 if (!(p
= lock_user_string(arg1
)))
9286 ret
= get_errno(stat(path(p
), &st
));
9287 unlock_user(p
, arg1
, 0);
9290 #ifdef TARGET_NR_lstat
9291 case TARGET_NR_lstat
:
9292 if (!(p
= lock_user_string(arg1
)))
9294 ret
= get_errno(lstat(path(p
), &st
));
9295 unlock_user(p
, arg1
, 0);
9298 case TARGET_NR_fstat
:
9300 ret
= get_errno(fstat(arg1
, &st
));
9301 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9304 if (!is_error(ret
)) {
9305 struct target_stat
*target_st
;
9307 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
9309 memset(target_st
, 0, sizeof(*target_st
));
9310 __put_user(st
.st_dev
, &target_st
->st_dev
);
9311 __put_user(st
.st_ino
, &target_st
->st_ino
);
9312 __put_user(st
.st_mode
, &target_st
->st_mode
);
9313 __put_user(st
.st_uid
, &target_st
->st_uid
);
9314 __put_user(st
.st_gid
, &target_st
->st_gid
);
9315 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
9316 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
9317 __put_user(st
.st_size
, &target_st
->st_size
);
9318 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
9319 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
9320 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
9321 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
9322 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
9323 unlock_user_struct(target_st
, arg2
, 1);
9327 #ifdef TARGET_NR_olduname
9328 case TARGET_NR_olduname
:
9331 #ifdef TARGET_NR_iopl
9332 case TARGET_NR_iopl
:
9335 case TARGET_NR_vhangup
:
9336 ret
= get_errno(vhangup());
9338 #ifdef TARGET_NR_idle
9339 case TARGET_NR_idle
:
9342 #ifdef TARGET_NR_syscall
9343 case TARGET_NR_syscall
:
9344 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
9345 arg6
, arg7
, arg8
, 0);
9348 case TARGET_NR_wait4
:
9351 abi_long status_ptr
= arg2
;
9352 struct rusage rusage
, *rusage_ptr
;
9353 abi_ulong target_rusage
= arg4
;
9354 abi_long rusage_err
;
9356 rusage_ptr
= &rusage
;
9359 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
9360 if (!is_error(ret
)) {
9361 if (status_ptr
&& ret
) {
9362 status
= host_to_target_waitstatus(status
);
9363 if (put_user_s32(status
, status_ptr
))
9366 if (target_rusage
) {
9367 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
9375 #ifdef TARGET_NR_swapoff
9376 case TARGET_NR_swapoff
:
9377 if (!(p
= lock_user_string(arg1
)))
9379 ret
= get_errno(swapoff(p
));
9380 unlock_user(p
, arg1
, 0);
9383 case TARGET_NR_sysinfo
:
9385 struct target_sysinfo
*target_value
;
9386 struct sysinfo value
;
9387 ret
= get_errno(sysinfo(&value
));
9388 if (!is_error(ret
) && arg1
)
9390 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
9392 __put_user(value
.uptime
, &target_value
->uptime
);
9393 __put_user(value
.loads
[0], &target_value
->loads
[0]);
9394 __put_user(value
.loads
[1], &target_value
->loads
[1]);
9395 __put_user(value
.loads
[2], &target_value
->loads
[2]);
9396 __put_user(value
.totalram
, &target_value
->totalram
);
9397 __put_user(value
.freeram
, &target_value
->freeram
);
9398 __put_user(value
.sharedram
, &target_value
->sharedram
);
9399 __put_user(value
.bufferram
, &target_value
->bufferram
);
9400 __put_user(value
.totalswap
, &target_value
->totalswap
);
9401 __put_user(value
.freeswap
, &target_value
->freeswap
);
9402 __put_user(value
.procs
, &target_value
->procs
);
9403 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
9404 __put_user(value
.freehigh
, &target_value
->freehigh
);
9405 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
9406 unlock_user_struct(target_value
, arg1
, 1);
9410 #ifdef TARGET_NR_ipc
9412 ret
= do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9415 #ifdef TARGET_NR_semget
9416 case TARGET_NR_semget
:
9417 ret
= get_errno(semget(arg1
, arg2
, arg3
));
9420 #ifdef TARGET_NR_semop
9421 case TARGET_NR_semop
:
9422 ret
= do_semop(arg1
, arg2
, arg3
);
9425 #ifdef TARGET_NR_semctl
9426 case TARGET_NR_semctl
:
9427 ret
= do_semctl(arg1
, arg2
, arg3
, arg4
);
9430 #ifdef TARGET_NR_msgctl
9431 case TARGET_NR_msgctl
:
9432 ret
= do_msgctl(arg1
, arg2
, arg3
);
9435 #ifdef TARGET_NR_msgget
9436 case TARGET_NR_msgget
:
9437 ret
= get_errno(msgget(arg1
, arg2
));
9440 #ifdef TARGET_NR_msgrcv
9441 case TARGET_NR_msgrcv
:
9442 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
9445 #ifdef TARGET_NR_msgsnd
9446 case TARGET_NR_msgsnd
:
9447 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
9450 #ifdef TARGET_NR_shmget
9451 case TARGET_NR_shmget
:
9452 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
9455 #ifdef TARGET_NR_shmctl
9456 case TARGET_NR_shmctl
:
9457 ret
= do_shmctl(arg1
, arg2
, arg3
);
9460 #ifdef TARGET_NR_shmat
9461 case TARGET_NR_shmat
:
9462 ret
= do_shmat(cpu_env
, arg1
, arg2
, arg3
);
9465 #ifdef TARGET_NR_shmdt
9466 case TARGET_NR_shmdt
:
9467 ret
= do_shmdt(arg1
);
9470 case TARGET_NR_fsync
:
9471 ret
= get_errno(fsync(arg1
));
9473 case TARGET_NR_clone
:
9474 /* Linux manages to have three different orderings for its
9475 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9476 * match the kernel's CONFIG_CLONE_* settings.
9477 * Microblaze is further special in that it uses a sixth
9478 * implicit argument to clone for the TLS pointer.
9480 #if defined(TARGET_MICROBLAZE)
9481 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
9482 #elif defined(TARGET_CLONE_BACKWARDS)
9483 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
9484 #elif defined(TARGET_CLONE_BACKWARDS2)
9485 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
9487 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
9490 #ifdef __NR_exit_group
9491 /* new thread calls */
9492 case TARGET_NR_exit_group
:
9496 gdb_exit(cpu_env
, arg1
);
9497 ret
= get_errno(exit_group(arg1
));
9500 case TARGET_NR_setdomainname
:
9501 if (!(p
= lock_user_string(arg1
)))
9503 ret
= get_errno(setdomainname(p
, arg2
));
9504 unlock_user(p
, arg1
, 0);
9506 case TARGET_NR_uname
:
9507 /* no need to transcode because we use the linux syscall */
9509 struct new_utsname
* buf
;
9511 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
9513 ret
= get_errno(sys_uname(buf
));
9514 if (!is_error(ret
)) {
9515 /* Overwrite the native machine name with whatever is being
9517 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
9518 /* Allow the user to override the reported release. */
9519 if (qemu_uname_release
&& *qemu_uname_release
) {
9520 g_strlcpy(buf
->release
, qemu_uname_release
,
9521 sizeof(buf
->release
));
9524 unlock_user_struct(buf
, arg1
, 1);
9528 case TARGET_NR_modify_ldt
:
9529 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
9531 #if !defined(TARGET_X86_64)
9532 case TARGET_NR_vm86old
:
9534 case TARGET_NR_vm86
:
9535 ret
= do_vm86(cpu_env
, arg1
, arg2
);
9539 case TARGET_NR_adjtimex
:
9541 #ifdef TARGET_NR_create_module
9542 case TARGET_NR_create_module
:
9544 case TARGET_NR_init_module
:
9545 case TARGET_NR_delete_module
:
9546 #ifdef TARGET_NR_get_kernel_syms
9547 case TARGET_NR_get_kernel_syms
:
9550 case TARGET_NR_quotactl
:
9552 case TARGET_NR_getpgid
:
9553 ret
= get_errno(getpgid(arg1
));
9555 case TARGET_NR_fchdir
:
9556 ret
= get_errno(fchdir(arg1
));
9558 #ifdef TARGET_NR_bdflush /* not on x86_64 */
9559 case TARGET_NR_bdflush
:
9562 #ifdef TARGET_NR_sysfs
9563 case TARGET_NR_sysfs
:
9566 case TARGET_NR_personality
:
9567 ret
= get_errno(personality(arg1
));
9569 #ifdef TARGET_NR_afs_syscall
9570 case TARGET_NR_afs_syscall
:
9573 #ifdef TARGET_NR__llseek /* Not on alpha */
9574 case TARGET_NR__llseek
:
9577 #if !defined(__NR_llseek)
9578 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
9580 ret
= get_errno(res
);
9585 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
9587 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
9593 #ifdef TARGET_NR_getdents
9594 case TARGET_NR_getdents
:
9595 #ifdef __NR_getdents
9596 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9598 struct target_dirent
*target_dirp
;
9599 struct linux_dirent
*dirp
;
9600 abi_long count
= arg3
;
9602 dirp
= g_try_malloc(count
);
9604 ret
= -TARGET_ENOMEM
;
9608 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9609 if (!is_error(ret
)) {
9610 struct linux_dirent
*de
;
9611 struct target_dirent
*tde
;
9613 int reclen
, treclen
;
9614 int count1
, tnamelen
;
9618 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9622 reclen
= de
->d_reclen
;
9623 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
9624 assert(tnamelen
>= 0);
9625 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
9626 assert(count1
+ treclen
<= count
);
9627 tde
->d_reclen
= tswap16(treclen
);
9628 tde
->d_ino
= tswapal(de
->d_ino
);
9629 tde
->d_off
= tswapal(de
->d_off
);
9630 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
9631 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9633 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9637 unlock_user(target_dirp
, arg2
, ret
);
9643 struct linux_dirent
*dirp
;
9644 abi_long count
= arg3
;
9646 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9648 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9649 if (!is_error(ret
)) {
9650 struct linux_dirent
*de
;
9655 reclen
= de
->d_reclen
;
9658 de
->d_reclen
= tswap16(reclen
);
9659 tswapls(&de
->d_ino
);
9660 tswapls(&de
->d_off
);
9661 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9665 unlock_user(dirp
, arg2
, ret
);
9669 /* Implement getdents in terms of getdents64 */
9671 struct linux_dirent64
*dirp
;
9672 abi_long count
= arg3
;
9674 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
9678 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9679 if (!is_error(ret
)) {
9680 /* Convert the dirent64 structs to target dirent. We do this
9681 * in-place, since we can guarantee that a target_dirent is no
9682 * larger than a dirent64; however this means we have to be
9683 * careful to read everything before writing in the new format.
9685 struct linux_dirent64
*de
;
9686 struct target_dirent
*tde
;
9691 tde
= (struct target_dirent
*)dirp
;
9693 int namelen
, treclen
;
9694 int reclen
= de
->d_reclen
;
9695 uint64_t ino
= de
->d_ino
;
9696 int64_t off
= de
->d_off
;
9697 uint8_t type
= de
->d_type
;
9699 namelen
= strlen(de
->d_name
);
9700 treclen
= offsetof(struct target_dirent
, d_name
)
9702 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
9704 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
9705 tde
->d_ino
= tswapal(ino
);
9706 tde
->d_off
= tswapal(off
);
9707 tde
->d_reclen
= tswap16(treclen
);
9708 /* The target_dirent type is in what was formerly a padding
9709 * byte at the end of the structure:
9711 *(((char *)tde
) + treclen
- 1) = type
;
9713 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9714 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9720 unlock_user(dirp
, arg2
, ret
);
9724 #endif /* TARGET_NR_getdents */
9725 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9726 case TARGET_NR_getdents64
:
9728 struct linux_dirent64
*dirp
;
9729 abi_long count
= arg3
;
9730 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9732 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
9733 if (!is_error(ret
)) {
9734 struct linux_dirent64
*de
;
9739 reclen
= de
->d_reclen
;
9742 de
->d_reclen
= tswap16(reclen
);
9743 tswap64s((uint64_t *)&de
->d_ino
);
9744 tswap64s((uint64_t *)&de
->d_off
);
9745 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
9749 unlock_user(dirp
, arg2
, ret
);
9752 #endif /* TARGET_NR_getdents64 */
9753 #if defined(TARGET_NR__newselect)
9754 case TARGET_NR__newselect
:
9755 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9758 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9759 # ifdef TARGET_NR_poll
9760 case TARGET_NR_poll
:
9762 # ifdef TARGET_NR_ppoll
9763 case TARGET_NR_ppoll
:
9766 struct target_pollfd
*target_pfd
;
9767 unsigned int nfds
= arg2
;
9774 if (nfds
> (INT_MAX
/ sizeof(struct target_pollfd
))) {
9775 ret
= -TARGET_EINVAL
;
9779 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
9780 sizeof(struct target_pollfd
) * nfds
, 1);
9785 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
9786 for (i
= 0; i
< nfds
; i
++) {
9787 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
9788 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
9793 # ifdef TARGET_NR_ppoll
9794 case TARGET_NR_ppoll
:
9796 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
9797 target_sigset_t
*target_set
;
9798 sigset_t _set
, *set
= &_set
;
9801 if (target_to_host_timespec(timeout_ts
, arg3
)) {
9802 unlock_user(target_pfd
, arg1
, 0);
9810 if (arg5
!= sizeof(target_sigset_t
)) {
9811 unlock_user(target_pfd
, arg1
, 0);
9812 ret
= -TARGET_EINVAL
;
9816 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
9818 unlock_user(target_pfd
, arg1
, 0);
9821 target_to_host_sigset(set
, target_set
);
9826 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
9827 set
, SIGSET_T_SIZE
));
9829 if (!is_error(ret
) && arg3
) {
9830 host_to_target_timespec(arg3
, timeout_ts
);
9833 unlock_user(target_set
, arg4
, 0);
9838 # ifdef TARGET_NR_poll
9839 case TARGET_NR_poll
:
9841 struct timespec ts
, *pts
;
9844 /* Convert ms to secs, ns */
9845 ts
.tv_sec
= arg3
/ 1000;
9846 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
9849 /* -ve poll() timeout means "infinite" */
9852 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
9857 g_assert_not_reached();
9860 if (!is_error(ret
)) {
9861 for(i
= 0; i
< nfds
; i
++) {
9862 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
9865 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
9869 case TARGET_NR_flock
:
9870 /* NOTE: the flock constant seems to be the same for every
9872 ret
= get_errno(safe_flock(arg1
, arg2
));
9874 case TARGET_NR_readv
:
9876 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
9878 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
9879 unlock_iovec(vec
, arg2
, arg3
, 1);
9881 ret
= -host_to_target_errno(errno
);
9885 case TARGET_NR_writev
:
9887 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
9889 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
9890 unlock_iovec(vec
, arg2
, arg3
, 0);
9892 ret
= -host_to_target_errno(errno
);
9896 case TARGET_NR_getsid
:
9897 ret
= get_errno(getsid(arg1
));
9899 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9900 case TARGET_NR_fdatasync
:
9901 ret
= get_errno(fdatasync(arg1
));
9904 #ifdef TARGET_NR__sysctl
9905 case TARGET_NR__sysctl
:
9906 /* We don't implement this, but ENOTDIR is always a safe
9908 ret
= -TARGET_ENOTDIR
;
9911 case TARGET_NR_sched_getaffinity
:
9913 unsigned int mask_size
;
9914 unsigned long *mask
;
9917 * sched_getaffinity needs multiples of ulong, so need to take
9918 * care of mismatches between target ulong and host ulong sizes.
9920 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9921 ret
= -TARGET_EINVAL
;
9924 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9926 mask
= alloca(mask_size
);
9927 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
9929 if (!is_error(ret
)) {
9931 /* More data returned than the caller's buffer will fit.
9932 * This only happens if sizeof(abi_long) < sizeof(long)
9933 * and the caller passed us a buffer holding an odd number
9934 * of abi_longs. If the host kernel is actually using the
9935 * extra 4 bytes then fail EINVAL; otherwise we can just
9936 * ignore them and only copy the interesting part.
9938 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
9939 if (numcpus
> arg2
* 8) {
9940 ret
= -TARGET_EINVAL
;
9946 if (copy_to_user(arg3
, mask
, ret
)) {
9952 case TARGET_NR_sched_setaffinity
:
9954 unsigned int mask_size
;
9955 unsigned long *mask
;
9958 * sched_setaffinity needs multiples of ulong, so need to take
9959 * care of mismatches between target ulong and host ulong sizes.
9961 if (arg2
& (sizeof(abi_ulong
) - 1)) {
9962 ret
= -TARGET_EINVAL
;
9965 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
9967 mask
= alloca(mask_size
);
9968 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
9971 memcpy(mask
, p
, arg2
);
9972 unlock_user_struct(p
, arg2
, 0);
9974 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
9977 case TARGET_NR_sched_setparam
:
9979 struct sched_param
*target_schp
;
9980 struct sched_param schp
;
9983 return -TARGET_EINVAL
;
9985 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
9987 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
9988 unlock_user_struct(target_schp
, arg2
, 0);
9989 ret
= get_errno(sched_setparam(arg1
, &schp
));
9992 case TARGET_NR_sched_getparam
:
9994 struct sched_param
*target_schp
;
9995 struct sched_param schp
;
9998 return -TARGET_EINVAL
;
10000 ret
= get_errno(sched_getparam(arg1
, &schp
));
10001 if (!is_error(ret
)) {
10002 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
10004 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
10005 unlock_user_struct(target_schp
, arg2
, 1);
10009 case TARGET_NR_sched_setscheduler
:
10011 struct sched_param
*target_schp
;
10012 struct sched_param schp
;
10014 return -TARGET_EINVAL
;
10016 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
10018 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10019 unlock_user_struct(target_schp
, arg3
, 0);
10020 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
10023 case TARGET_NR_sched_getscheduler
:
10024 ret
= get_errno(sched_getscheduler(arg1
));
10026 case TARGET_NR_sched_yield
:
10027 ret
= get_errno(sched_yield());
10029 case TARGET_NR_sched_get_priority_max
:
10030 ret
= get_errno(sched_get_priority_max(arg1
));
10032 case TARGET_NR_sched_get_priority_min
:
10033 ret
= get_errno(sched_get_priority_min(arg1
));
10035 case TARGET_NR_sched_rr_get_interval
:
10037 struct timespec ts
;
10038 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10039 if (!is_error(ret
)) {
10040 ret
= host_to_target_timespec(arg2
, &ts
);
10044 case TARGET_NR_nanosleep
:
10046 struct timespec req
, rem
;
10047 target_to_host_timespec(&req
, arg1
);
10048 ret
= get_errno(safe_nanosleep(&req
, &rem
));
10049 if (is_error(ret
) && arg2
) {
10050 host_to_target_timespec(arg2
, &rem
);
10054 #ifdef TARGET_NR_query_module
10055 case TARGET_NR_query_module
:
10056 goto unimplemented
;
10058 #ifdef TARGET_NR_nfsservctl
10059 case TARGET_NR_nfsservctl
:
10060 goto unimplemented
;
10062 case TARGET_NR_prctl
:
10064 case PR_GET_PDEATHSIG
:
10067 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
10068 if (!is_error(ret
) && arg2
10069 && put_user_ual(deathsig
, arg2
)) {
10077 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
10081 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10082 arg3
, arg4
, arg5
));
10083 unlock_user(name
, arg2
, 16);
10088 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
10092 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10093 arg3
, arg4
, arg5
));
10094 unlock_user(name
, arg2
, 0);
10099 /* Most prctl options have no pointer arguments */
10100 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
10104 #ifdef TARGET_NR_arch_prctl
10105 case TARGET_NR_arch_prctl
:
10106 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10107 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
10110 goto unimplemented
;
10113 #ifdef TARGET_NR_pread64
10114 case TARGET_NR_pread64
:
10115 if (regpairs_aligned(cpu_env
)) {
10119 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
10121 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10122 unlock_user(p
, arg2
, ret
);
10124 case TARGET_NR_pwrite64
:
10125 if (regpairs_aligned(cpu_env
)) {
10129 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
10131 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10132 unlock_user(p
, arg2
, 0);
10135 case TARGET_NR_getcwd
:
10136 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
10138 ret
= get_errno(sys_getcwd1(p
, arg2
));
10139 unlock_user(p
, arg1
, ret
);
10141 case TARGET_NR_capget
:
10142 case TARGET_NR_capset
:
10144 struct target_user_cap_header
*target_header
;
10145 struct target_user_cap_data
*target_data
= NULL
;
10146 struct __user_cap_header_struct header
;
10147 struct __user_cap_data_struct data
[2];
10148 struct __user_cap_data_struct
*dataptr
= NULL
;
10149 int i
, target_datalen
;
10150 int data_items
= 1;
10152 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
10155 header
.version
= tswap32(target_header
->version
);
10156 header
.pid
= tswap32(target_header
->pid
);
10158 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
10159 /* Version 2 and up takes pointer to two user_data structs */
10163 target_datalen
= sizeof(*target_data
) * data_items
;
10166 if (num
== TARGET_NR_capget
) {
10167 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
10169 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
10171 if (!target_data
) {
10172 unlock_user_struct(target_header
, arg1
, 0);
10176 if (num
== TARGET_NR_capset
) {
10177 for (i
= 0; i
< data_items
; i
++) {
10178 data
[i
].effective
= tswap32(target_data
[i
].effective
);
10179 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
10180 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
10187 if (num
== TARGET_NR_capget
) {
10188 ret
= get_errno(capget(&header
, dataptr
));
10190 ret
= get_errno(capset(&header
, dataptr
));
10193 /* The kernel always updates version for both capget and capset */
10194 target_header
->version
= tswap32(header
.version
);
10195 unlock_user_struct(target_header
, arg1
, 1);
10198 if (num
== TARGET_NR_capget
) {
10199 for (i
= 0; i
< data_items
; i
++) {
10200 target_data
[i
].effective
= tswap32(data
[i
].effective
);
10201 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
10202 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
10204 unlock_user(target_data
, arg2
, target_datalen
);
10206 unlock_user(target_data
, arg2
, 0);
10211 case TARGET_NR_sigaltstack
:
10212 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
10215 #ifdef CONFIG_SENDFILE
10216 case TARGET_NR_sendfile
:
10218 off_t
*offp
= NULL
;
10221 ret
= get_user_sal(off
, arg3
);
10222 if (is_error(ret
)) {
10227 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10228 if (!is_error(ret
) && arg3
) {
10229 abi_long ret2
= put_user_sal(off
, arg3
);
10230 if (is_error(ret2
)) {
10236 #ifdef TARGET_NR_sendfile64
10237 case TARGET_NR_sendfile64
:
10239 off_t
*offp
= NULL
;
10242 ret
= get_user_s64(off
, arg3
);
10243 if (is_error(ret
)) {
10248 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10249 if (!is_error(ret
) && arg3
) {
10250 abi_long ret2
= put_user_s64(off
, arg3
);
10251 if (is_error(ret2
)) {
10259 case TARGET_NR_sendfile
:
10260 #ifdef TARGET_NR_sendfile64
10261 case TARGET_NR_sendfile64
:
10263 goto unimplemented
;
10266 #ifdef TARGET_NR_getpmsg
10267 case TARGET_NR_getpmsg
:
10268 goto unimplemented
;
10270 #ifdef TARGET_NR_putpmsg
10271 case TARGET_NR_putpmsg
:
10272 goto unimplemented
;
10274 #ifdef TARGET_NR_vfork
10275 case TARGET_NR_vfork
:
10276 ret
= get_errno(do_fork(cpu_env
, CLONE_VFORK
| CLONE_VM
| SIGCHLD
,
10280 #ifdef TARGET_NR_ugetrlimit
10281 case TARGET_NR_ugetrlimit
:
10283 struct rlimit rlim
;
10284 int resource
= target_to_host_resource(arg1
);
10285 ret
= get_errno(getrlimit(resource
, &rlim
));
10286 if (!is_error(ret
)) {
10287 struct target_rlimit
*target_rlim
;
10288 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
10290 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
10291 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
10292 unlock_user_struct(target_rlim
, arg2
, 1);
10297 #ifdef TARGET_NR_truncate64
10298 case TARGET_NR_truncate64
:
10299 if (!(p
= lock_user_string(arg1
)))
10301 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
10302 unlock_user(p
, arg1
, 0);
10305 #ifdef TARGET_NR_ftruncate64
10306 case TARGET_NR_ftruncate64
:
10307 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
10310 #ifdef TARGET_NR_stat64
10311 case TARGET_NR_stat64
:
10312 if (!(p
= lock_user_string(arg1
)))
10314 ret
= get_errno(stat(path(p
), &st
));
10315 unlock_user(p
, arg1
, 0);
10316 if (!is_error(ret
))
10317 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10320 #ifdef TARGET_NR_lstat64
10321 case TARGET_NR_lstat64
:
10322 if (!(p
= lock_user_string(arg1
)))
10324 ret
= get_errno(lstat(path(p
), &st
));
10325 unlock_user(p
, arg1
, 0);
10326 if (!is_error(ret
))
10327 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10330 #ifdef TARGET_NR_fstat64
10331 case TARGET_NR_fstat64
:
10332 ret
= get_errno(fstat(arg1
, &st
));
10333 if (!is_error(ret
))
10334 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10337 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10338 #ifdef TARGET_NR_fstatat64
10339 case TARGET_NR_fstatat64
:
10341 #ifdef TARGET_NR_newfstatat
10342 case TARGET_NR_newfstatat
:
10344 if (!(p
= lock_user_string(arg2
)))
10346 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
10347 if (!is_error(ret
))
10348 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
10351 #ifdef TARGET_NR_lchown
10352 case TARGET_NR_lchown
:
10353 if (!(p
= lock_user_string(arg1
)))
10355 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10356 unlock_user(p
, arg1
, 0);
10359 #ifdef TARGET_NR_getuid
10360 case TARGET_NR_getuid
:
10361 ret
= get_errno(high2lowuid(getuid()));
10364 #ifdef TARGET_NR_getgid
10365 case TARGET_NR_getgid
:
10366 ret
= get_errno(high2lowgid(getgid()));
10369 #ifdef TARGET_NR_geteuid
10370 case TARGET_NR_geteuid
:
10371 ret
= get_errno(high2lowuid(geteuid()));
10374 #ifdef TARGET_NR_getegid
10375 case TARGET_NR_getegid
:
10376 ret
= get_errno(high2lowgid(getegid()));
10379 case TARGET_NR_setreuid
:
10380 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
10382 case TARGET_NR_setregid
:
10383 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
10385 case TARGET_NR_getgroups
:
10387 int gidsetsize
= arg1
;
10388 target_id
*target_grouplist
;
10392 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10393 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10394 if (gidsetsize
== 0)
10396 if (!is_error(ret
)) {
10397 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
10398 if (!target_grouplist
)
10400 for(i
= 0;i
< ret
; i
++)
10401 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
10402 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
10406 case TARGET_NR_setgroups
:
10408 int gidsetsize
= arg1
;
10409 target_id
*target_grouplist
;
10410 gid_t
*grouplist
= NULL
;
10413 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10414 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
10415 if (!target_grouplist
) {
10416 ret
= -TARGET_EFAULT
;
10419 for (i
= 0; i
< gidsetsize
; i
++) {
10420 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
10422 unlock_user(target_grouplist
, arg2
, 0);
10424 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
10427 case TARGET_NR_fchown
:
10428 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
10430 #if defined(TARGET_NR_fchownat)
10431 case TARGET_NR_fchownat
:
10432 if (!(p
= lock_user_string(arg2
)))
10434 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
10435 low2highgid(arg4
), arg5
));
10436 unlock_user(p
, arg2
, 0);
10439 #ifdef TARGET_NR_setresuid
10440 case TARGET_NR_setresuid
:
10441 ret
= get_errno(sys_setresuid(low2highuid(arg1
),
10443 low2highuid(arg3
)));
10446 #ifdef TARGET_NR_getresuid
10447 case TARGET_NR_getresuid
:
10449 uid_t ruid
, euid
, suid
;
10450 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10451 if (!is_error(ret
)) {
10452 if (put_user_id(high2lowuid(ruid
), arg1
)
10453 || put_user_id(high2lowuid(euid
), arg2
)
10454 || put_user_id(high2lowuid(suid
), arg3
))
10460 #ifdef TARGET_NR_getresgid
10461 case TARGET_NR_setresgid
:
10462 ret
= get_errno(sys_setresgid(low2highgid(arg1
),
10464 low2highgid(arg3
)));
10467 #ifdef TARGET_NR_getresgid
10468 case TARGET_NR_getresgid
:
10470 gid_t rgid
, egid
, sgid
;
10471 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10472 if (!is_error(ret
)) {
10473 if (put_user_id(high2lowgid(rgid
), arg1
)
10474 || put_user_id(high2lowgid(egid
), arg2
)
10475 || put_user_id(high2lowgid(sgid
), arg3
))
10481 #ifdef TARGET_NR_chown
10482 case TARGET_NR_chown
:
10483 if (!(p
= lock_user_string(arg1
)))
10485 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10486 unlock_user(p
, arg1
, 0);
10489 case TARGET_NR_setuid
:
10490 ret
= get_errno(sys_setuid(low2highuid(arg1
)));
10492 case TARGET_NR_setgid
:
10493 ret
= get_errno(sys_setgid(low2highgid(arg1
)));
10495 case TARGET_NR_setfsuid
:
10496 ret
= get_errno(setfsuid(arg1
));
10498 case TARGET_NR_setfsgid
:
10499 ret
= get_errno(setfsgid(arg1
));
10502 #ifdef TARGET_NR_lchown32
10503 case TARGET_NR_lchown32
:
10504 if (!(p
= lock_user_string(arg1
)))
10506 ret
= get_errno(lchown(p
, arg2
, arg3
));
10507 unlock_user(p
, arg1
, 0);
10510 #ifdef TARGET_NR_getuid32
10511 case TARGET_NR_getuid32
:
10512 ret
= get_errno(getuid());
10516 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10517 /* Alpha specific */
10518 case TARGET_NR_getxuid
:
10522 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
10524 ret
= get_errno(getuid());
10527 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10528 /* Alpha specific */
10529 case TARGET_NR_getxgid
:
10533 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
10535 ret
= get_errno(getgid());
10538 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10539 /* Alpha specific */
10540 case TARGET_NR_osf_getsysinfo
:
10541 ret
= -TARGET_EOPNOTSUPP
;
10543 case TARGET_GSI_IEEE_FP_CONTROL
:
10545 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
10547 /* Copied from linux ieee_fpcr_to_swcr. */
10548 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
10549 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
10550 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
10551 | SWCR_TRAP_ENABLE_DZE
10552 | SWCR_TRAP_ENABLE_OVF
);
10553 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
10554 | SWCR_TRAP_ENABLE_INE
);
10555 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
10556 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
10558 if (put_user_u64 (swcr
, arg2
))
10564 /* case GSI_IEEE_STATE_AT_SIGNAL:
10565 -- Not implemented in linux kernel.
10567 -- Retrieves current unaligned access state; not much used.
10568 case GSI_PROC_TYPE:
10569 -- Retrieves implver information; surely not used.
10570 case GSI_GET_HWRPB:
10571 -- Grabs a copy of the HWRPB; surely not used.
10576 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10577 /* Alpha specific */
10578 case TARGET_NR_osf_setsysinfo
:
10579 ret
= -TARGET_EOPNOTSUPP
;
10581 case TARGET_SSI_IEEE_FP_CONTROL
:
10583 uint64_t swcr
, fpcr
, orig_fpcr
;
10585 if (get_user_u64 (swcr
, arg2
)) {
10588 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10589 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
10591 /* Copied from linux ieee_swcr_to_fpcr. */
10592 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
10593 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
10594 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
10595 | SWCR_TRAP_ENABLE_DZE
10596 | SWCR_TRAP_ENABLE_OVF
)) << 48;
10597 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
10598 | SWCR_TRAP_ENABLE_INE
)) << 57;
10599 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
10600 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
10602 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10607 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
10609 uint64_t exc
, fpcr
, orig_fpcr
;
10612 if (get_user_u64(exc
, arg2
)) {
10616 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10618 /* We only add to the exception status here. */
10619 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
10621 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10624 /* Old exceptions are not signaled. */
10625 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
10627 /* If any exceptions set by this call,
10628 and are unmasked, send a signal. */
10630 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
10631 si_code
= TARGET_FPE_FLTRES
;
10633 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
10634 si_code
= TARGET_FPE_FLTUND
;
10636 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
10637 si_code
= TARGET_FPE_FLTOVF
;
10639 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
10640 si_code
= TARGET_FPE_FLTDIV
;
10642 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
10643 si_code
= TARGET_FPE_FLTINV
;
10645 if (si_code
!= 0) {
10646 target_siginfo_t info
;
10647 info
.si_signo
= SIGFPE
;
10649 info
.si_code
= si_code
;
10650 info
._sifields
._sigfault
._addr
10651 = ((CPUArchState
*)cpu_env
)->pc
;
10652 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
10653 QEMU_SI_FAULT
, &info
);
10658 /* case SSI_NVPAIRS:
10659 -- Used with SSIN_UACPROC to enable unaligned accesses.
10660 case SSI_IEEE_STATE_AT_SIGNAL:
10661 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10662 -- Not implemented in linux kernel
10667 #ifdef TARGET_NR_osf_sigprocmask
10668 /* Alpha specific. */
10669 case TARGET_NR_osf_sigprocmask
:
10673 sigset_t set
, oldset
;
10676 case TARGET_SIG_BLOCK
:
10679 case TARGET_SIG_UNBLOCK
:
10682 case TARGET_SIG_SETMASK
:
10686 ret
= -TARGET_EINVAL
;
10690 target_to_host_old_sigset(&set
, &mask
);
10691 ret
= do_sigprocmask(how
, &set
, &oldset
);
10693 host_to_target_old_sigset(&mask
, &oldset
);
10700 #ifdef TARGET_NR_getgid32
10701 case TARGET_NR_getgid32
:
10702 ret
= get_errno(getgid());
10705 #ifdef TARGET_NR_geteuid32
10706 case TARGET_NR_geteuid32
:
10707 ret
= get_errno(geteuid());
10710 #ifdef TARGET_NR_getegid32
10711 case TARGET_NR_getegid32
:
10712 ret
= get_errno(getegid());
10715 #ifdef TARGET_NR_setreuid32
10716 case TARGET_NR_setreuid32
:
10717 ret
= get_errno(setreuid(arg1
, arg2
));
10720 #ifdef TARGET_NR_setregid32
10721 case TARGET_NR_setregid32
:
10722 ret
= get_errno(setregid(arg1
, arg2
));
10725 #ifdef TARGET_NR_getgroups32
10726 case TARGET_NR_getgroups32
:
10728 int gidsetsize
= arg1
;
10729 uint32_t *target_grouplist
;
10733 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10734 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10735 if (gidsetsize
== 0)
10737 if (!is_error(ret
)) {
10738 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
10739 if (!target_grouplist
) {
10740 ret
= -TARGET_EFAULT
;
10743 for(i
= 0;i
< ret
; i
++)
10744 target_grouplist
[i
] = tswap32(grouplist
[i
]);
10745 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
10750 #ifdef TARGET_NR_setgroups32
10751 case TARGET_NR_setgroups32
:
10753 int gidsetsize
= arg1
;
10754 uint32_t *target_grouplist
;
10758 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10759 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
10760 if (!target_grouplist
) {
10761 ret
= -TARGET_EFAULT
;
10764 for(i
= 0;i
< gidsetsize
; i
++)
10765 grouplist
[i
] = tswap32(target_grouplist
[i
]);
10766 unlock_user(target_grouplist
, arg2
, 0);
10767 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
10771 #ifdef TARGET_NR_fchown32
10772 case TARGET_NR_fchown32
:
10773 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
10776 #ifdef TARGET_NR_setresuid32
10777 case TARGET_NR_setresuid32
:
10778 ret
= get_errno(sys_setresuid(arg1
, arg2
, arg3
));
10781 #ifdef TARGET_NR_getresuid32
10782 case TARGET_NR_getresuid32
:
10784 uid_t ruid
, euid
, suid
;
10785 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10786 if (!is_error(ret
)) {
10787 if (put_user_u32(ruid
, arg1
)
10788 || put_user_u32(euid
, arg2
)
10789 || put_user_u32(suid
, arg3
))
10795 #ifdef TARGET_NR_setresgid32
10796 case TARGET_NR_setresgid32
:
10797 ret
= get_errno(sys_setresgid(arg1
, arg2
, arg3
));
10800 #ifdef TARGET_NR_getresgid32
10801 case TARGET_NR_getresgid32
:
10803 gid_t rgid
, egid
, sgid
;
10804 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10805 if (!is_error(ret
)) {
10806 if (put_user_u32(rgid
, arg1
)
10807 || put_user_u32(egid
, arg2
)
10808 || put_user_u32(sgid
, arg3
))
10814 #ifdef TARGET_NR_chown32
10815 case TARGET_NR_chown32
:
10816 if (!(p
= lock_user_string(arg1
)))
10818 ret
= get_errno(chown(p
, arg2
, arg3
));
10819 unlock_user(p
, arg1
, 0);
10822 #ifdef TARGET_NR_setuid32
10823 case TARGET_NR_setuid32
:
10824 ret
= get_errno(sys_setuid(arg1
));
10827 #ifdef TARGET_NR_setgid32
10828 case TARGET_NR_setgid32
:
10829 ret
= get_errno(sys_setgid(arg1
));
10832 #ifdef TARGET_NR_setfsuid32
10833 case TARGET_NR_setfsuid32
:
10834 ret
= get_errno(setfsuid(arg1
));
10837 #ifdef TARGET_NR_setfsgid32
10838 case TARGET_NR_setfsgid32
:
10839 ret
= get_errno(setfsgid(arg1
));
10843 case TARGET_NR_pivot_root
:
10844 goto unimplemented
;
10845 #ifdef TARGET_NR_mincore
10846 case TARGET_NR_mincore
:
10849 ret
= -TARGET_EFAULT
;
10850 if (!(a
= lock_user(VERIFY_READ
, arg1
,arg2
, 0)))
10852 if (!(p
= lock_user_string(arg3
)))
10854 ret
= get_errno(mincore(a
, arg2
, p
));
10855 unlock_user(p
, arg3
, ret
);
10857 unlock_user(a
, arg1
, 0);
10861 #ifdef TARGET_NR_arm_fadvise64_64
10862 case TARGET_NR_arm_fadvise64_64
:
10863 /* arm_fadvise64_64 looks like fadvise64_64 but
10864 * with different argument order: fd, advice, offset, len
10865 * rather than the usual fd, offset, len, advice.
10866 * Note that offset and len are both 64-bit so appear as
10867 * pairs of 32-bit registers.
10869 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
10870 target_offset64(arg5
, arg6
), arg2
);
10871 ret
= -host_to_target_errno(ret
);
10875 #if TARGET_ABI_BITS == 32
10877 #ifdef TARGET_NR_fadvise64_64
10878 case TARGET_NR_fadvise64_64
:
10879 /* 6 args: fd, offset (high, low), len (high, low), advice */
10880 if (regpairs_aligned(cpu_env
)) {
10881 /* offset is in (3,4), len in (5,6) and advice in 7 */
10888 ret
= -host_to_target_errno(posix_fadvise(arg1
,
10889 target_offset64(arg2
, arg3
),
10890 target_offset64(arg4
, arg5
),
10895 #ifdef TARGET_NR_fadvise64
10896 case TARGET_NR_fadvise64
:
10897 /* 5 args: fd, offset (high, low), len, advice */
10898 if (regpairs_aligned(cpu_env
)) {
10899 /* offset is in (3,4), len in 5 and advice in 6 */
10905 ret
= -host_to_target_errno(posix_fadvise(arg1
,
10906 target_offset64(arg2
, arg3
),
10911 #else /* not a 32-bit ABI */
10912 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10913 #ifdef TARGET_NR_fadvise64_64
10914 case TARGET_NR_fadvise64_64
:
10916 #ifdef TARGET_NR_fadvise64
10917 case TARGET_NR_fadvise64
:
10919 #ifdef TARGET_S390X
10921 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
10922 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
10923 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
10924 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
10928 ret
= -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
10931 #endif /* end of 64-bit ABI fadvise handling */
10933 #ifdef TARGET_NR_madvise
10934 case TARGET_NR_madvise
:
10935 /* A straight passthrough may not be safe because qemu sometimes
10936 turns private file-backed mappings into anonymous mappings.
10937 This will break MADV_DONTNEED.
10938 This is a hint, so ignoring and returning success is ok. */
10939 ret
= get_errno(0);
10942 #if TARGET_ABI_BITS == 32
10943 case TARGET_NR_fcntl64
:
10947 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
10948 to_flock64_fn
*copyto
= copy_to_user_flock64
;
10951 if (((CPUARMState
*)cpu_env
)->eabi
) {
10952 copyfrom
= copy_from_user_eabi_flock64
;
10953 copyto
= copy_to_user_eabi_flock64
;
10957 cmd
= target_to_host_fcntl_cmd(arg2
);
10958 if (cmd
== -TARGET_EINVAL
) {
10964 case TARGET_F_GETLK64
:
10965 ret
= copyfrom(&fl
, arg3
);
10969 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
10971 ret
= copyto(arg3
, &fl
);
10975 case TARGET_F_SETLK64
:
10976 case TARGET_F_SETLKW64
:
10977 ret
= copyfrom(&fl
, arg3
);
10981 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
10984 ret
= do_fcntl(arg1
, arg2
, arg3
);
10990 #ifdef TARGET_NR_cacheflush
10991 case TARGET_NR_cacheflush
:
10992 /* self-modifying code is handled automatically, so nothing needed */
10996 #ifdef TARGET_NR_security
10997 case TARGET_NR_security
:
10998 goto unimplemented
;
11000 #ifdef TARGET_NR_getpagesize
11001 case TARGET_NR_getpagesize
:
11002 ret
= TARGET_PAGE_SIZE
;
11005 case TARGET_NR_gettid
:
11006 ret
= get_errno(gettid());
11008 #ifdef TARGET_NR_readahead
11009 case TARGET_NR_readahead
:
11010 #if TARGET_ABI_BITS == 32
11011 if (regpairs_aligned(cpu_env
)) {
11016 ret
= get_errno(readahead(arg1
, ((off64_t
)arg3
<< 32) | arg2
, arg4
));
11018 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
11023 #ifdef TARGET_NR_setxattr
11024 case TARGET_NR_listxattr
:
11025 case TARGET_NR_llistxattr
:
11029 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11031 ret
= -TARGET_EFAULT
;
11035 p
= lock_user_string(arg1
);
11037 if (num
== TARGET_NR_listxattr
) {
11038 ret
= get_errno(listxattr(p
, b
, arg3
));
11040 ret
= get_errno(llistxattr(p
, b
, arg3
));
11043 ret
= -TARGET_EFAULT
;
11045 unlock_user(p
, arg1
, 0);
11046 unlock_user(b
, arg2
, arg3
);
11049 case TARGET_NR_flistxattr
:
11053 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11055 ret
= -TARGET_EFAULT
;
11059 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
11060 unlock_user(b
, arg2
, arg3
);
11063 case TARGET_NR_setxattr
:
11064 case TARGET_NR_lsetxattr
:
11066 void *p
, *n
, *v
= 0;
11068 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11070 ret
= -TARGET_EFAULT
;
11074 p
= lock_user_string(arg1
);
11075 n
= lock_user_string(arg2
);
11077 if (num
== TARGET_NR_setxattr
) {
11078 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
11080 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
11083 ret
= -TARGET_EFAULT
;
11085 unlock_user(p
, arg1
, 0);
11086 unlock_user(n
, arg2
, 0);
11087 unlock_user(v
, arg3
, 0);
11090 case TARGET_NR_fsetxattr
:
11094 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11096 ret
= -TARGET_EFAULT
;
11100 n
= lock_user_string(arg2
);
11102 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
11104 ret
= -TARGET_EFAULT
;
11106 unlock_user(n
, arg2
, 0);
11107 unlock_user(v
, arg3
, 0);
11110 case TARGET_NR_getxattr
:
11111 case TARGET_NR_lgetxattr
:
11113 void *p
, *n
, *v
= 0;
11115 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11117 ret
= -TARGET_EFAULT
;
11121 p
= lock_user_string(arg1
);
11122 n
= lock_user_string(arg2
);
11124 if (num
== TARGET_NR_getxattr
) {
11125 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
11127 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
11130 ret
= -TARGET_EFAULT
;
11132 unlock_user(p
, arg1
, 0);
11133 unlock_user(n
, arg2
, 0);
11134 unlock_user(v
, arg3
, arg4
);
11137 case TARGET_NR_fgetxattr
:
11141 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11143 ret
= -TARGET_EFAULT
;
11147 n
= lock_user_string(arg2
);
11149 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
11151 ret
= -TARGET_EFAULT
;
11153 unlock_user(n
, arg2
, 0);
11154 unlock_user(v
, arg3
, arg4
);
11157 case TARGET_NR_removexattr
:
11158 case TARGET_NR_lremovexattr
:
11161 p
= lock_user_string(arg1
);
11162 n
= lock_user_string(arg2
);
11164 if (num
== TARGET_NR_removexattr
) {
11165 ret
= get_errno(removexattr(p
, n
));
11167 ret
= get_errno(lremovexattr(p
, n
));
11170 ret
= -TARGET_EFAULT
;
11172 unlock_user(p
, arg1
, 0);
11173 unlock_user(n
, arg2
, 0);
11176 case TARGET_NR_fremovexattr
:
11179 n
= lock_user_string(arg2
);
11181 ret
= get_errno(fremovexattr(arg1
, n
));
11183 ret
= -TARGET_EFAULT
;
11185 unlock_user(n
, arg2
, 0);
11189 #endif /* CONFIG_ATTR */
11190 #ifdef TARGET_NR_set_thread_area
11191 case TARGET_NR_set_thread_area
:
11192 #if defined(TARGET_MIPS)
11193 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
11196 #elif defined(TARGET_CRIS)
11198 ret
= -TARGET_EINVAL
;
11200 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
11204 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11205 ret
= do_set_thread_area(cpu_env
, arg1
);
11207 #elif defined(TARGET_M68K)
11209 TaskState
*ts
= cpu
->opaque
;
11210 ts
->tp_value
= arg1
;
11215 goto unimplemented_nowarn
;
11218 #ifdef TARGET_NR_get_thread_area
11219 case TARGET_NR_get_thread_area
:
11220 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11221 ret
= do_get_thread_area(cpu_env
, arg1
);
11223 #elif defined(TARGET_M68K)
11225 TaskState
*ts
= cpu
->opaque
;
11226 ret
= ts
->tp_value
;
11230 goto unimplemented_nowarn
;
11233 #ifdef TARGET_NR_getdomainname
11234 case TARGET_NR_getdomainname
:
11235 goto unimplemented_nowarn
;
11238 #ifdef TARGET_NR_clock_gettime
11239 case TARGET_NR_clock_gettime
:
11241 struct timespec ts
;
11242 ret
= get_errno(clock_gettime(arg1
, &ts
));
11243 if (!is_error(ret
)) {
11244 host_to_target_timespec(arg2
, &ts
);
11249 #ifdef TARGET_NR_clock_getres
11250 case TARGET_NR_clock_getres
:
11252 struct timespec ts
;
11253 ret
= get_errno(clock_getres(arg1
, &ts
));
11254 if (!is_error(ret
)) {
11255 host_to_target_timespec(arg2
, &ts
);
11260 #ifdef TARGET_NR_clock_nanosleep
11261 case TARGET_NR_clock_nanosleep
:
11263 struct timespec ts
;
11264 target_to_host_timespec(&ts
, arg3
);
11265 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
11266 &ts
, arg4
? &ts
: NULL
));
11268 host_to_target_timespec(arg4
, &ts
);
11270 #if defined(TARGET_PPC)
11271 /* clock_nanosleep is odd in that it returns positive errno values.
11272 * On PPC, CR0 bit 3 should be set in such a situation. */
11273 if (ret
&& ret
!= -TARGET_ERESTARTSYS
) {
11274 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
11281 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11282 case TARGET_NR_set_tid_address
:
11283 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
11287 case TARGET_NR_tkill
:
11288 ret
= get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
11291 case TARGET_NR_tgkill
:
11292 ret
= get_errno(safe_tgkill((int)arg1
, (int)arg2
,
11293 target_to_host_signal(arg3
)));
11296 #ifdef TARGET_NR_set_robust_list
11297 case TARGET_NR_set_robust_list
:
11298 case TARGET_NR_get_robust_list
:
11299 /* The ABI for supporting robust futexes has userspace pass
11300 * the kernel a pointer to a linked list which is updated by
11301 * userspace after the syscall; the list is walked by the kernel
11302 * when the thread exits. Since the linked list in QEMU guest
11303 * memory isn't a valid linked list for the host and we have
11304 * no way to reliably intercept the thread-death event, we can't
11305 * support these. Silently return ENOSYS so that guest userspace
11306 * falls back to a non-robust futex implementation (which should
11307 * be OK except in the corner case of the guest crashing while
11308 * holding a mutex that is shared with another process via
11311 goto unimplemented_nowarn
;
11314 #if defined(TARGET_NR_utimensat)
11315 case TARGET_NR_utimensat
:
11317 struct timespec
*tsp
, ts
[2];
11321 target_to_host_timespec(ts
, arg3
);
11322 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
11326 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
11328 if (!(p
= lock_user_string(arg2
))) {
11329 ret
= -TARGET_EFAULT
;
11332 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
11333 unlock_user(p
, arg2
, 0);
11338 case TARGET_NR_futex
:
11339 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11341 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11342 case TARGET_NR_inotify_init
:
11343 ret
= get_errno(sys_inotify_init());
11346 #ifdef CONFIG_INOTIFY1
11347 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11348 case TARGET_NR_inotify_init1
:
11349 ret
= get_errno(sys_inotify_init1(arg1
));
11353 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11354 case TARGET_NR_inotify_add_watch
:
11355 p
= lock_user_string(arg2
);
11356 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
11357 unlock_user(p
, arg2
, 0);
11360 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11361 case TARGET_NR_inotify_rm_watch
:
11362 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
11366 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11367 case TARGET_NR_mq_open
:
11369 struct mq_attr posix_mq_attr
, *attrp
;
11371 p
= lock_user_string(arg1
- 1);
11373 copy_from_user_mq_attr (&posix_mq_attr
, arg4
);
11374 attrp
= &posix_mq_attr
;
11378 ret
= get_errno(mq_open(p
, arg2
, arg3
, attrp
));
11379 unlock_user (p
, arg1
, 0);
11383 case TARGET_NR_mq_unlink
:
11384 p
= lock_user_string(arg1
- 1);
11386 ret
= -TARGET_EFAULT
;
11389 ret
= get_errno(mq_unlink(p
));
11390 unlock_user (p
, arg1
, 0);
11393 case TARGET_NR_mq_timedsend
:
11395 struct timespec ts
;
11397 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11399 target_to_host_timespec(&ts
, arg5
);
11400 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
11401 host_to_target_timespec(arg5
, &ts
);
11403 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
11405 unlock_user (p
, arg2
, arg3
);
11409 case TARGET_NR_mq_timedreceive
:
11411 struct timespec ts
;
11414 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11416 target_to_host_timespec(&ts
, arg5
);
11417 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11419 host_to_target_timespec(arg5
, &ts
);
11421 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11424 unlock_user (p
, arg2
, arg3
);
11426 put_user_u32(prio
, arg4
);
11430 /* Not implemented for now... */
11431 /* case TARGET_NR_mq_notify: */
11434 case TARGET_NR_mq_getsetattr
:
11436 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
11439 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
11440 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
11443 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
11444 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
11451 #ifdef CONFIG_SPLICE
11452 #ifdef TARGET_NR_tee
11453 case TARGET_NR_tee
:
11455 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
11459 #ifdef TARGET_NR_splice
11460 case TARGET_NR_splice
:
11462 loff_t loff_in
, loff_out
;
11463 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
11465 if (get_user_u64(loff_in
, arg2
)) {
11468 ploff_in
= &loff_in
;
11471 if (get_user_u64(loff_out
, arg4
)) {
11474 ploff_out
= &loff_out
;
11476 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
11478 if (put_user_u64(loff_in
, arg2
)) {
11483 if (put_user_u64(loff_out
, arg4
)) {
11490 #ifdef TARGET_NR_vmsplice
11491 case TARGET_NR_vmsplice
:
11493 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
11495 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
11496 unlock_iovec(vec
, arg2
, arg3
, 0);
11498 ret
= -host_to_target_errno(errno
);
11503 #endif /* CONFIG_SPLICE */
11504 #ifdef CONFIG_EVENTFD
11505 #if defined(TARGET_NR_eventfd)
11506 case TARGET_NR_eventfd
:
11507 ret
= get_errno(eventfd(arg1
, 0));
11508 fd_trans_unregister(ret
);
11511 #if defined(TARGET_NR_eventfd2)
11512 case TARGET_NR_eventfd2
:
11514 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
11515 if (arg2
& TARGET_O_NONBLOCK
) {
11516 host_flags
|= O_NONBLOCK
;
11518 if (arg2
& TARGET_O_CLOEXEC
) {
11519 host_flags
|= O_CLOEXEC
;
11521 ret
= get_errno(eventfd(arg1
, host_flags
));
11522 fd_trans_unregister(ret
);
11526 #endif /* CONFIG_EVENTFD */
11527 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11528 case TARGET_NR_fallocate
:
11529 #if TARGET_ABI_BITS == 32
11530 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
11531 target_offset64(arg5
, arg6
)));
11533 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
11537 #if defined(CONFIG_SYNC_FILE_RANGE)
11538 #if defined(TARGET_NR_sync_file_range)
11539 case TARGET_NR_sync_file_range
:
11540 #if TARGET_ABI_BITS == 32
11541 #if defined(TARGET_MIPS)
11542 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11543 target_offset64(arg5
, arg6
), arg7
));
11545 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
11546 target_offset64(arg4
, arg5
), arg6
));
11547 #endif /* !TARGET_MIPS */
11549 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
11553 #if defined(TARGET_NR_sync_file_range2)
11554 case TARGET_NR_sync_file_range2
:
11555 /* This is like sync_file_range but the arguments are reordered */
11556 #if TARGET_ABI_BITS == 32
11557 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11558 target_offset64(arg5
, arg6
), arg2
));
11560 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
11565 #if defined(TARGET_NR_signalfd4)
11566 case TARGET_NR_signalfd4
:
11567 ret
= do_signalfd4(arg1
, arg2
, arg4
);
11570 #if defined(TARGET_NR_signalfd)
11571 case TARGET_NR_signalfd
:
11572 ret
= do_signalfd4(arg1
, arg2
, 0);
11575 #if defined(CONFIG_EPOLL)
11576 #if defined(TARGET_NR_epoll_create)
11577 case TARGET_NR_epoll_create
:
11578 ret
= get_errno(epoll_create(arg1
));
11581 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11582 case TARGET_NR_epoll_create1
:
11583 ret
= get_errno(epoll_create1(arg1
));
11586 #if defined(TARGET_NR_epoll_ctl)
11587 case TARGET_NR_epoll_ctl
:
11589 struct epoll_event ep
;
11590 struct epoll_event
*epp
= 0;
11592 struct target_epoll_event
*target_ep
;
11593 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
11596 ep
.events
= tswap32(target_ep
->events
);
11597 /* The epoll_data_t union is just opaque data to the kernel,
11598 * so we transfer all 64 bits across and need not worry what
11599 * actual data type it is.
11601 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
11602 unlock_user_struct(target_ep
, arg4
, 0);
11605 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
11610 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11611 #if defined(TARGET_NR_epoll_wait)
11612 case TARGET_NR_epoll_wait
:
11614 #if defined(TARGET_NR_epoll_pwait)
11615 case TARGET_NR_epoll_pwait
:
11618 struct target_epoll_event
*target_ep
;
11619 struct epoll_event
*ep
;
11621 int maxevents
= arg3
;
11622 int timeout
= arg4
;
11624 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
11625 ret
= -TARGET_EINVAL
;
11629 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
11630 maxevents
* sizeof(struct target_epoll_event
), 1);
11635 ep
= alloca(maxevents
* sizeof(struct epoll_event
));
11638 #if defined(TARGET_NR_epoll_pwait)
11639 case TARGET_NR_epoll_pwait
:
11641 target_sigset_t
*target_set
;
11642 sigset_t _set
, *set
= &_set
;
11645 if (arg6
!= sizeof(target_sigset_t
)) {
11646 ret
= -TARGET_EINVAL
;
11650 target_set
= lock_user(VERIFY_READ
, arg5
,
11651 sizeof(target_sigset_t
), 1);
11653 unlock_user(target_ep
, arg2
, 0);
11656 target_to_host_sigset(set
, target_set
);
11657 unlock_user(target_set
, arg5
, 0);
11662 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
11663 set
, SIGSET_T_SIZE
));
11667 #if defined(TARGET_NR_epoll_wait)
11668 case TARGET_NR_epoll_wait
:
11669 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
11674 ret
= -TARGET_ENOSYS
;
11676 if (!is_error(ret
)) {
11678 for (i
= 0; i
< ret
; i
++) {
11679 target_ep
[i
].events
= tswap32(ep
[i
].events
);
11680 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
11683 unlock_user(target_ep
, arg2
, ret
* sizeof(struct target_epoll_event
));
11688 #ifdef TARGET_NR_prlimit64
11689 case TARGET_NR_prlimit64
:
11691 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11692 struct target_rlimit64
*target_rnew
, *target_rold
;
11693 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
11694 int resource
= target_to_host_resource(arg2
);
11696 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
11699 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
11700 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
11701 unlock_user_struct(target_rnew
, arg3
, 0);
11705 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
11706 if (!is_error(ret
) && arg4
) {
11707 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
11710 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
11711 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
11712 unlock_user_struct(target_rold
, arg4
, 1);
11717 #ifdef TARGET_NR_gethostname
11718 case TARGET_NR_gethostname
:
11720 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
11722 ret
= get_errno(gethostname(name
, arg2
));
11723 unlock_user(name
, arg1
, arg2
);
11725 ret
= -TARGET_EFAULT
;
11730 #ifdef TARGET_NR_atomic_cmpxchg_32
11731 case TARGET_NR_atomic_cmpxchg_32
:
11733 /* should use start_exclusive from main.c */
11734 abi_ulong mem_value
;
11735 if (get_user_u32(mem_value
, arg6
)) {
11736 target_siginfo_t info
;
11737 info
.si_signo
= SIGSEGV
;
11739 info
.si_code
= TARGET_SEGV_MAPERR
;
11740 info
._sifields
._sigfault
._addr
= arg6
;
11741 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
11742 QEMU_SI_FAULT
, &info
);
11746 if (mem_value
== arg2
)
11747 put_user_u32(arg1
, arg6
);
11752 #ifdef TARGET_NR_atomic_barrier
11753 case TARGET_NR_atomic_barrier
:
11755 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
11761 #ifdef TARGET_NR_timer_create
11762 case TARGET_NR_timer_create
:
11764 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11766 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
11769 int timer_index
= next_free_host_timer();
11771 if (timer_index
< 0) {
11772 ret
= -TARGET_EAGAIN
;
11774 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
11777 phost_sevp
= &host_sevp
;
11778 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
11784 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
11788 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
11797 #ifdef TARGET_NR_timer_settime
11798 case TARGET_NR_timer_settime
:
11800 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11801 * struct itimerspec * old_value */
11802 target_timer_t timerid
= get_timer_id(arg1
);
11806 } else if (arg3
== 0) {
11807 ret
= -TARGET_EINVAL
;
11809 timer_t htimer
= g_posix_timers
[timerid
];
11810 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
11812 target_to_host_itimerspec(&hspec_new
, arg3
);
11814 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
11815 host_to_target_itimerspec(arg2
, &hspec_old
);
11821 #ifdef TARGET_NR_timer_gettime
11822 case TARGET_NR_timer_gettime
:
11824 /* args: timer_t timerid, struct itimerspec *curr_value */
11825 target_timer_t timerid
= get_timer_id(arg1
);
11829 } else if (!arg2
) {
11830 ret
= -TARGET_EFAULT
;
11832 timer_t htimer
= g_posix_timers
[timerid
];
11833 struct itimerspec hspec
;
11834 ret
= get_errno(timer_gettime(htimer
, &hspec
));
11836 if (host_to_target_itimerspec(arg2
, &hspec
)) {
11837 ret
= -TARGET_EFAULT
;
11844 #ifdef TARGET_NR_timer_getoverrun
11845 case TARGET_NR_timer_getoverrun
:
11847 /* args: timer_t timerid */
11848 target_timer_t timerid
= get_timer_id(arg1
);
11853 timer_t htimer
= g_posix_timers
[timerid
];
11854 ret
= get_errno(timer_getoverrun(htimer
));
11856 fd_trans_unregister(ret
);
11861 #ifdef TARGET_NR_timer_delete
11862 case TARGET_NR_timer_delete
:
11864 /* args: timer_t timerid */
11865 target_timer_t timerid
= get_timer_id(arg1
);
11870 timer_t htimer
= g_posix_timers
[timerid
];
11871 ret
= get_errno(timer_delete(htimer
));
11872 g_posix_timers
[timerid
] = 0;
11878 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11879 case TARGET_NR_timerfd_create
:
11880 ret
= get_errno(timerfd_create(arg1
,
11881 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
11885 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11886 case TARGET_NR_timerfd_gettime
:
11888 struct itimerspec its_curr
;
11890 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
11892 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
11899 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11900 case TARGET_NR_timerfd_settime
:
11902 struct itimerspec its_new
, its_old
, *p_new
;
11905 if (target_to_host_itimerspec(&its_new
, arg3
)) {
11913 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
11915 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
11922 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11923 case TARGET_NR_ioprio_get
:
11924 ret
= get_errno(ioprio_get(arg1
, arg2
));
11928 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11929 case TARGET_NR_ioprio_set
:
11930 ret
= get_errno(ioprio_set(arg1
, arg2
, arg3
));
11934 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11935 case TARGET_NR_setns
:
11936 ret
= get_errno(setns(arg1
, arg2
));
11939 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11940 case TARGET_NR_unshare
:
11941 ret
= get_errno(unshare(arg1
));
11947 gemu_log("qemu: Unsupported syscall: %d\n", num
);
11948 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
11949 unimplemented_nowarn
:
11951 ret
= -TARGET_ENOSYS
;
11956 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
11959 print_syscall_ret(num
, ret
);
11960 trace_guest_user_syscall_ret(cpu
, num
, ret
);
11963 ret
= -TARGET_EFAULT
;