/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
36 #include <linux/capability.h>
38 #include <sys/timex.h>
#ifdef __ia64__
/* ia64's clone entry point takes the stack base and size separately;
 * declare it here since libc does not always expose a prototype. */
int __clone2(int (*fn)(void *), void *child_stack_base,
             size_t stack_size, int flags, void *arg, ...);
#endif
43 #include <sys/socket.h>
47 #include <sys/times.h>
50 #include <sys/statfs.h>
53 #include <sys/sysinfo.h>
54 #include <sys/signalfd.h>
55 //#include <sys/user.h>
56 #include <netinet/ip.h>
57 #include <netinet/tcp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/errqueue.h>
62 #include "qemu-common.h"
64 #include <sys/timerfd.h>
70 #include <sys/eventfd.h>
73 #include <sys/epoll.h>
76 #include "qemu/xattr.h"
78 #ifdef CONFIG_SENDFILE
79 #include <sys/sendfile.h>
82 #define termios host_termios
83 #define winsize host_winsize
84 #define termio host_termio
85 #define sgttyb host_sgttyb /* same as target */
86 #define tchars host_tchars /* same as target */
87 #define ltchars host_ltchars /* same as target */
89 #include <linux/termios.h>
90 #include <linux/unistd.h>
91 #include <linux/cdrom.h>
92 #include <linux/hdreg.h>
93 #include <linux/soundcard.h>
95 #include <linux/mtio.h>
97 #if defined(CONFIG_FIEMAP)
98 #include <linux/fiemap.h>
100 #include <linux/fb.h>
101 #include <linux/vt.h>
102 #include <linux/dm-ioctl.h>
103 #include <linux/reboot.h>
104 #include <linux/route.h>
105 #include <linux/filter.h>
106 #include <linux/blkpg.h>
107 #include <netpacket/packet.h>
108 #include <linux/netlink.h>
109 #ifdef CONFIG_RTNETLINK
110 #include <linux/rtnetlink.h>
111 #include <linux/if_bridge.h>
113 #include <linux/audit.h>
114 #include "linux_loop.h"
120 #define CLONE_IO 0x80000000 /* Clone io context */
123 /* We can't directly call the host clone syscall, because this will
124 * badly confuse libc (breaking mutexes, for example). So we must
125 * divide clone flags into:
126 * * flag combinations that look like pthread_create()
127 * * flag combinations that look like fork()
128 * * flags we can implement within QEMU itself
129 * * flags we can't support and will return an error for
131 /* For thread creation, all these flags must be present; for
132 * fork, none must be present.
134 #define CLONE_THREAD_FLAGS \
135 (CLONE_VM | CLONE_FS | CLONE_FILES | \
136 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
138 /* These flags are ignored:
139 * CLONE_DETACHED is now ignored by the kernel;
140 * CLONE_IO is just an optimisation hint to the I/O scheduler
142 #define CLONE_IGNORED_FLAGS \
143 (CLONE_DETACHED | CLONE_IO)
145 /* Flags for fork which we can implement within QEMU itself */
146 #define CLONE_OPTIONAL_FORK_FLAGS \
147 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
148 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
150 /* Flags for thread creation which we can implement within QEMU itself */
151 #define CLONE_OPTIONAL_THREAD_FLAGS \
152 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
153 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
155 #define CLONE_INVALID_FORK_FLAGS \
156 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
158 #define CLONE_INVALID_THREAD_FLAGS \
159 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
160 CLONE_IGNORED_FLAGS))
162 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
163 * have almost all been allocated. We cannot support any of
164 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
165 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
166 * The checks against the invalid thread masks above will catch these.
167 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
171 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
172 * once. This exercises the codepaths for restart.
174 //#define DEBUG_ERESTARTSYS
176 //#include <linux/msdos_fs.h>
177 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
178 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
/* Helpers to define a static wrapper function NAME() that invokes the
 * raw host syscall __NR_NAME with 0..6 arguments via syscall(2). */
#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
235 #define __NR_sys_uname __NR_uname
236 #define __NR_sys_getcwd1 __NR_getcwd
237 #define __NR_sys_getdents __NR_getdents
238 #define __NR_sys_getdents64 __NR_getdents64
239 #define __NR_sys_getpriority __NR_getpriority
240 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
241 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
242 #define __NR_sys_syslog __NR_syslog
243 #define __NR_sys_futex __NR_futex
244 #define __NR_sys_inotify_init __NR_inotify_init
245 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
246 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
248 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
250 #define __NR__llseek __NR_lseek
253 /* Newer kernel ports have llseek() instead of _llseek() */
254 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
255 #define TARGET_NR__llseek TARGET_NR_llseek
259 _syscall0(int, gettid
)
261 /* This is a replacement for the host gettid() and must return a host
263 static int gettid(void) {
267 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
268 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
270 #if !defined(__NR_getdents) || \
271 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
272 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
274 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
275 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
276 loff_t
*, res
, uint
, wh
);
278 _syscall3(int, sys_rt_sigqueueinfo
, pid_t
, pid
, int, sig
, siginfo_t
*, uinfo
)
279 _syscall4(int, sys_rt_tgsigqueueinfo
, pid_t
, pid
, pid_t
, tid
, int, sig
,
281 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
282 #ifdef __NR_exit_group
283 _syscall1(int,exit_group
,int,error_code
)
285 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
286 _syscall1(int,set_tid_address
,int *,tidptr
)
288 #if defined(TARGET_NR_futex) && defined(__NR_futex)
289 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
290 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
292 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
293 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
294 unsigned long *, user_mask_ptr
);
295 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
296 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
297 unsigned long *, user_mask_ptr
);
298 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
300 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
301 struct __user_cap_data_struct
*, data
);
302 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
303 struct __user_cap_data_struct
*, data
);
304 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
305 _syscall2(int, ioprio_get
, int, which
, int, who
)
307 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
308 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
310 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
311 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
314 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
315 _syscall5(int, kcmp
, pid_t
, pid1
, pid_t
, pid2
, int, type
,
316 unsigned long, idx1
, unsigned long, idx2
)
319 static bitmask_transtbl fcntl_flags_tbl
[] = {
320 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
321 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
322 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
323 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
324 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
325 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
326 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
327 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
328 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
329 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
330 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
331 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
332 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
333 #if defined(O_DIRECT)
334 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
336 #if defined(O_NOATIME)
337 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
339 #if defined(O_CLOEXEC)
340 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
343 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
345 /* Don't terminate the list prematurely on 64-bit host+guest. */
346 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
347 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
354 QEMU_IFLA_BR_FORWARD_DELAY
,
355 QEMU_IFLA_BR_HELLO_TIME
,
356 QEMU_IFLA_BR_MAX_AGE
,
357 QEMU_IFLA_BR_AGEING_TIME
,
358 QEMU_IFLA_BR_STP_STATE
,
359 QEMU_IFLA_BR_PRIORITY
,
360 QEMU_IFLA_BR_VLAN_FILTERING
,
361 QEMU_IFLA_BR_VLAN_PROTOCOL
,
362 QEMU_IFLA_BR_GROUP_FWD_MASK
,
363 QEMU_IFLA_BR_ROOT_ID
,
364 QEMU_IFLA_BR_BRIDGE_ID
,
365 QEMU_IFLA_BR_ROOT_PORT
,
366 QEMU_IFLA_BR_ROOT_PATH_COST
,
367 QEMU_IFLA_BR_TOPOLOGY_CHANGE
,
368 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED
,
369 QEMU_IFLA_BR_HELLO_TIMER
,
370 QEMU_IFLA_BR_TCN_TIMER
,
371 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER
,
372 QEMU_IFLA_BR_GC_TIMER
,
373 QEMU_IFLA_BR_GROUP_ADDR
,
374 QEMU_IFLA_BR_FDB_FLUSH
,
375 QEMU_IFLA_BR_MCAST_ROUTER
,
376 QEMU_IFLA_BR_MCAST_SNOOPING
,
377 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR
,
378 QEMU_IFLA_BR_MCAST_QUERIER
,
379 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY
,
380 QEMU_IFLA_BR_MCAST_HASH_MAX
,
381 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT
,
382 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT
,
383 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL
,
384 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL
,
385 QEMU_IFLA_BR_MCAST_QUERIER_INTVL
,
386 QEMU_IFLA_BR_MCAST_QUERY_INTVL
,
387 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
,
388 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL
,
389 QEMU_IFLA_BR_NF_CALL_IPTABLES
,
390 QEMU_IFLA_BR_NF_CALL_IP6TABLES
,
391 QEMU_IFLA_BR_NF_CALL_ARPTABLES
,
392 QEMU_IFLA_BR_VLAN_DEFAULT_PVID
,
394 QEMU_IFLA_BR_VLAN_STATS_ENABLED
,
395 QEMU_IFLA_BR_MCAST_STATS_ENABLED
,
419 QEMU_IFLA_NET_NS_PID
,
422 QEMU_IFLA_VFINFO_LIST
,
430 QEMU_IFLA_PROMISCUITY
,
431 QEMU_IFLA_NUM_TX_QUEUES
,
432 QEMU_IFLA_NUM_RX_QUEUES
,
434 QEMU_IFLA_PHYS_PORT_ID
,
435 QEMU_IFLA_CARRIER_CHANGES
,
436 QEMU_IFLA_PHYS_SWITCH_ID
,
437 QEMU_IFLA_LINK_NETNSID
,
438 QEMU_IFLA_PHYS_PORT_NAME
,
439 QEMU_IFLA_PROTO_DOWN
,
440 QEMU_IFLA_GSO_MAX_SEGS
,
441 QEMU_IFLA_GSO_MAX_SIZE
,
448 QEMU_IFLA_BRPORT_UNSPEC
,
449 QEMU_IFLA_BRPORT_STATE
,
450 QEMU_IFLA_BRPORT_PRIORITY
,
451 QEMU_IFLA_BRPORT_COST
,
452 QEMU_IFLA_BRPORT_MODE
,
453 QEMU_IFLA_BRPORT_GUARD
,
454 QEMU_IFLA_BRPORT_PROTECT
,
455 QEMU_IFLA_BRPORT_FAST_LEAVE
,
456 QEMU_IFLA_BRPORT_LEARNING
,
457 QEMU_IFLA_BRPORT_UNICAST_FLOOD
,
458 QEMU_IFLA_BRPORT_PROXYARP
,
459 QEMU_IFLA_BRPORT_LEARNING_SYNC
,
460 QEMU_IFLA_BRPORT_PROXYARP_WIFI
,
461 QEMU_IFLA_BRPORT_ROOT_ID
,
462 QEMU_IFLA_BRPORT_BRIDGE_ID
,
463 QEMU_IFLA_BRPORT_DESIGNATED_PORT
,
464 QEMU_IFLA_BRPORT_DESIGNATED_COST
,
467 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
,
468 QEMU_IFLA_BRPORT_CONFIG_PENDING
,
469 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER
,
470 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER
,
471 QEMU_IFLA_BRPORT_HOLD_TIMER
,
472 QEMU_IFLA_BRPORT_FLUSH
,
473 QEMU_IFLA_BRPORT_MULTICAST_ROUTER
,
474 QEMU_IFLA_BRPORT_PAD
,
475 QEMU___IFLA_BRPORT_MAX
479 QEMU_IFLA_INFO_UNSPEC
,
482 QEMU_IFLA_INFO_XSTATS
,
483 QEMU_IFLA_INFO_SLAVE_KIND
,
484 QEMU_IFLA_INFO_SLAVE_DATA
,
485 QEMU___IFLA_INFO_MAX
,
489 QEMU_IFLA_INET_UNSPEC
,
491 QEMU___IFLA_INET_MAX
,
495 QEMU_IFLA_INET6_UNSPEC
,
496 QEMU_IFLA_INET6_FLAGS
,
497 QEMU_IFLA_INET6_CONF
,
498 QEMU_IFLA_INET6_STATS
,
499 QEMU_IFLA_INET6_MCAST
,
500 QEMU_IFLA_INET6_CACHEINFO
,
501 QEMU_IFLA_INET6_ICMP6STATS
,
502 QEMU_IFLA_INET6_TOKEN
,
503 QEMU_IFLA_INET6_ADDR_GEN_MODE
,
504 QEMU___IFLA_INET6_MAX
507 typedef abi_long (*TargetFdDataFunc
)(void *, size_t);
508 typedef abi_long (*TargetFdAddrFunc
)(void *, abi_ulong
, socklen_t
);
509 typedef struct TargetFdTrans
{
510 TargetFdDataFunc host_to_target_data
;
511 TargetFdDataFunc target_to_host_data
;
512 TargetFdAddrFunc target_to_host_addr
;
515 static TargetFdTrans
**target_fd_trans
;
517 static unsigned int target_fd_max
;
519 static TargetFdDataFunc
fd_trans_target_to_host_data(int fd
)
521 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
522 return target_fd_trans
[fd
]->target_to_host_data
;
527 static TargetFdDataFunc
fd_trans_host_to_target_data(int fd
)
529 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
530 return target_fd_trans
[fd
]->host_to_target_data
;
535 static TargetFdAddrFunc
fd_trans_target_to_host_addr(int fd
)
537 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
538 return target_fd_trans
[fd
]->target_to_host_addr
;
543 static void fd_trans_register(int fd
, TargetFdTrans
*trans
)
547 if (fd
>= target_fd_max
) {
548 oldmax
= target_fd_max
;
549 target_fd_max
= ((fd
>> 6) + 1) << 6; /* by slice of 64 entries */
550 target_fd_trans
= g_renew(TargetFdTrans
*,
551 target_fd_trans
, target_fd_max
);
552 memset((void *)(target_fd_trans
+ oldmax
), 0,
553 (target_fd_max
- oldmax
) * sizeof(TargetFdTrans
*));
555 target_fd_trans
[fd
] = trans
;
558 static void fd_trans_unregister(int fd
)
560 if (fd
>= 0 && fd
< target_fd_max
) {
561 target_fd_trans
[fd
] = NULL
;
565 static void fd_trans_dup(int oldfd
, int newfd
)
567 fd_trans_unregister(newfd
);
568 if (oldfd
< target_fd_max
&& target_fd_trans
[oldfd
]) {
569 fd_trans_register(newfd
, target_fd_trans
[oldfd
]);
/* getcwd(2)-style wrapper built on getcwd(3): fills 'buf' with the
 * current directory and returns the string length including the NUL
 * terminator, or -1 (with errno set by getcwd) on failure. */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return (-1);
    }
    return strlen(buf) + 1;
}
#ifdef TARGET_NR_utimensat
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Host kernel has no utimensat: report ENOSYS so the caller can fall
 * back or fail cleanly. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper so the syscall dispatcher has a uniform sys_* name. */
static int sys_inotify_init(void)
{
    return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
    return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};

_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maxiumum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

/* Claim the first unused slot in g_posix_timers (marking it with the
 * placeholder value 1) and return its index, or -1 if all 32 are busy. */
static inline int next_free_host_timer(void)
{
    int k;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
/* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
684 #define ERRNO_TABLE_SIZE 1200
686 /* target_to_host_errno_table[] is initialized from
687 * host_to_target_errno_table[] in syscall_init(). */
688 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
692 * This list is the union of errno values overridden in asm-<arch>/errno.h
693 * minus the errnos that are not actually generic to all archs.
695 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
696 [EAGAIN
] = TARGET_EAGAIN
,
697 [EIDRM
] = TARGET_EIDRM
,
698 [ECHRNG
] = TARGET_ECHRNG
,
699 [EL2NSYNC
] = TARGET_EL2NSYNC
,
700 [EL3HLT
] = TARGET_EL3HLT
,
701 [EL3RST
] = TARGET_EL3RST
,
702 [ELNRNG
] = TARGET_ELNRNG
,
703 [EUNATCH
] = TARGET_EUNATCH
,
704 [ENOCSI
] = TARGET_ENOCSI
,
705 [EL2HLT
] = TARGET_EL2HLT
,
706 [EDEADLK
] = TARGET_EDEADLK
,
707 [ENOLCK
] = TARGET_ENOLCK
,
708 [EBADE
] = TARGET_EBADE
,
709 [EBADR
] = TARGET_EBADR
,
710 [EXFULL
] = TARGET_EXFULL
,
711 [ENOANO
] = TARGET_ENOANO
,
712 [EBADRQC
] = TARGET_EBADRQC
,
713 [EBADSLT
] = TARGET_EBADSLT
,
714 [EBFONT
] = TARGET_EBFONT
,
715 [ENOSTR
] = TARGET_ENOSTR
,
716 [ENODATA
] = TARGET_ENODATA
,
717 [ETIME
] = TARGET_ETIME
,
718 [ENOSR
] = TARGET_ENOSR
,
719 [ENONET
] = TARGET_ENONET
,
720 [ENOPKG
] = TARGET_ENOPKG
,
721 [EREMOTE
] = TARGET_EREMOTE
,
722 [ENOLINK
] = TARGET_ENOLINK
,
723 [EADV
] = TARGET_EADV
,
724 [ESRMNT
] = TARGET_ESRMNT
,
725 [ECOMM
] = TARGET_ECOMM
,
726 [EPROTO
] = TARGET_EPROTO
,
727 [EDOTDOT
] = TARGET_EDOTDOT
,
728 [EMULTIHOP
] = TARGET_EMULTIHOP
,
729 [EBADMSG
] = TARGET_EBADMSG
,
730 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
731 [EOVERFLOW
] = TARGET_EOVERFLOW
,
732 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
733 [EBADFD
] = TARGET_EBADFD
,
734 [EREMCHG
] = TARGET_EREMCHG
,
735 [ELIBACC
] = TARGET_ELIBACC
,
736 [ELIBBAD
] = TARGET_ELIBBAD
,
737 [ELIBSCN
] = TARGET_ELIBSCN
,
738 [ELIBMAX
] = TARGET_ELIBMAX
,
739 [ELIBEXEC
] = TARGET_ELIBEXEC
,
740 [EILSEQ
] = TARGET_EILSEQ
,
741 [ENOSYS
] = TARGET_ENOSYS
,
742 [ELOOP
] = TARGET_ELOOP
,
743 [ERESTART
] = TARGET_ERESTART
,
744 [ESTRPIPE
] = TARGET_ESTRPIPE
,
745 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
746 [EUSERS
] = TARGET_EUSERS
,
747 [ENOTSOCK
] = TARGET_ENOTSOCK
,
748 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
749 [EMSGSIZE
] = TARGET_EMSGSIZE
,
750 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
751 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
752 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
753 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
754 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
755 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
756 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
757 [EADDRINUSE
] = TARGET_EADDRINUSE
,
758 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
759 [ENETDOWN
] = TARGET_ENETDOWN
,
760 [ENETUNREACH
] = TARGET_ENETUNREACH
,
761 [ENETRESET
] = TARGET_ENETRESET
,
762 [ECONNABORTED
] = TARGET_ECONNABORTED
,
763 [ECONNRESET
] = TARGET_ECONNRESET
,
764 [ENOBUFS
] = TARGET_ENOBUFS
,
765 [EISCONN
] = TARGET_EISCONN
,
766 [ENOTCONN
] = TARGET_ENOTCONN
,
767 [EUCLEAN
] = TARGET_EUCLEAN
,
768 [ENOTNAM
] = TARGET_ENOTNAM
,
769 [ENAVAIL
] = TARGET_ENAVAIL
,
770 [EISNAM
] = TARGET_EISNAM
,
771 [EREMOTEIO
] = TARGET_EREMOTEIO
,
772 [EDQUOT
] = TARGET_EDQUOT
,
773 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
774 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
775 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
776 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
777 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
778 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
779 [EALREADY
] = TARGET_EALREADY
,
780 [EINPROGRESS
] = TARGET_EINPROGRESS
,
781 [ESTALE
] = TARGET_ESTALE
,
782 [ECANCELED
] = TARGET_ECANCELED
,
783 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
784 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
786 [ENOKEY
] = TARGET_ENOKEY
,
789 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
792 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
795 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
798 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
800 #ifdef ENOTRECOVERABLE
801 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
804 [ENOMSG
] = TARGET_ENOMSG
,
807 [ERFKILL
] = TARGET_ERFKILL
,
810 [EHWPOISON
] = TARGET_EHWPOISON
,
814 static inline int host_to_target_errno(int err
)
816 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
817 host_to_target_errno_table
[err
]) {
818 return host_to_target_errno_table
[err
];
823 static inline int target_to_host_errno(int err
)
825 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
826 target_to_host_errno_table
[err
]) {
827 return target_to_host_errno_table
[err
];
832 static inline abi_long
get_errno(abi_long ret
)
835 return -host_to_target_errno(errno
);
840 static inline int is_error(abi_long ret
)
842 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
845 const char *target_strerror(int err
)
847 if (err
== TARGET_ERESTARTSYS
) {
848 return "To be restarted";
850 if (err
== TARGET_QEMU_ESIGRETURN
) {
851 return "Successful exit from sigreturn";
854 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
857 return strerror(target_to_host_errno(err
));
/* Helpers to define a static safe_NAME() wrapper that issues the host
 * syscall via safe_syscall() with 0..6 arguments. */
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
907 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
908 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
909 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
910 int, flags
, mode_t
, mode
)
911 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
912 struct rusage
*, rusage
)
913 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
914 int, options
, struct rusage
*, rusage
)
915 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
916 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
917 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
918 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
919 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
921 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
922 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
924 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
925 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
926 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
927 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
928 safe_syscall2(int, tkill
, int, tid
, int, sig
)
929 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
930 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
931 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
932 safe_syscall5(ssize_t
, preadv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
933 unsigned long, pos_l
, unsigned long, pos_h
)
934 safe_syscall5(ssize_t
, pwritev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
935 unsigned long, pos_l
, unsigned long, pos_h
)
936 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
938 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
939 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
940 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
941 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
942 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
943 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
944 safe_syscall2(int, flock
, int, fd
, int, operation
)
945 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
946 const struct timespec
*, uts
, size_t, sigsetsize
)
947 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
949 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
950 struct timespec
*, rem
)
951 #ifdef TARGET_NR_clock_nanosleep
952 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
953 const struct timespec
*, req
, struct timespec
*, rem
)
956 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
958 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
959 long, msgtype
, int, flags
)
960 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
961 unsigned, nsops
, const struct timespec
*, timeout
)
963 /* This host kernel architecture uses a single ipc syscall; fake up
964 * wrappers for the sub-operations to hide this implementation detail.
965 * Annoyingly we can't include linux/ipc.h to get the constant definitions
966 * for the call parameter because some structs in there conflict with the
967 * sys/ipc.h ones. So we just define them here, and rely on them being
968 * the same for all host architectures.
970 #define Q_SEMTIMEDOP 4
973 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
975 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
976 void *, ptr
, long, fifth
)
977 static int safe_msgsnd(int msgid
, const void *msgp
, size_t sz
, int flags
)
979 return safe_ipc(Q_IPCCALL(0, Q_MSGSND
), msgid
, sz
, flags
, (void *)msgp
, 0);
981 static int safe_msgrcv(int msgid
, void *msgp
, size_t sz
, long type
, int flags
)
983 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV
), msgid
, sz
, flags
, msgp
, type
);
985 static int safe_semtimedop(int semid
, struct sembuf
*tsops
, unsigned nsops
,
986 const struct timespec
*timeout
)
988 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP
), semid
, nsops
, 0, tsops
,
992 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
993 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
994 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
995 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
996 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
998 /* We do ioctl like this rather than via safe_syscall3 to preserve the
999 * "third argument might be integer or pointer or not present" behaviour of
1000 * the libc function.
1002 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
1003 /* Similarly for fcntl. Note that callers must always:
1004 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
1005 * use the flock64 struct rather than unsuffixed flock
1006 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
1009 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
1011 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
1014 static inline int host_to_target_sock_type(int host_type
)
1018 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
1020 target_type
= TARGET_SOCK_DGRAM
;
1023 target_type
= TARGET_SOCK_STREAM
;
1026 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
1030 #if defined(SOCK_CLOEXEC)
1031 if (host_type
& SOCK_CLOEXEC
) {
1032 target_type
|= TARGET_SOCK_CLOEXEC
;
1036 #if defined(SOCK_NONBLOCK)
1037 if (host_type
& SOCK_NONBLOCK
) {
1038 target_type
|= TARGET_SOCK_NONBLOCK
;
1045 static abi_ulong target_brk
;
1046 static abi_ulong target_original_brk
;
1047 static abi_ulong brk_page
;
1049 void target_set_brk(abi_ulong new_brk
)
1051 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
1052 brk_page
= HOST_PAGE_ALIGN(target_brk
);
1055 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
1056 #define DEBUGF_BRK(message, args...)
/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    /* brk(0) is the conventional "query current break". */
    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    /* Refuse to shrink below the original break. */
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done... */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages. */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contains garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    } else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value. */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
/* Unpack a guest fd_set (array of abi_ulong bit words) at target_fds_addr
 * into a host fd_set covering fds [0, n).
 * Returns 0 or -TARGET_EFAULT if the guest memory is unreadable.
 */
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    /* Number of abi_ulong words needed for n bits. */
    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}
1168 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
1169 abi_ulong target_fds_addr
,
1172 if (target_fds_addr
) {
1173 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
1174 return -TARGET_EFAULT
;
/* Pack a host fd_set covering fds [0, n) back into the guest's
 * abi_ulong bit-word representation at target_fds_addr.
 * Returns 0 or -TARGET_EFAULT if the guest memory is unwritable.
 */
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_ulong v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
/* Host clock tick rate: Alpha is the lone 1024 Hz host; everything else
 * uses the traditional 100 Hz.
 */
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

/* Rescale a clock_t tick count from HOST_HZ to the target's TARGET_HZ.
 * The 64-bit intermediate avoids overflow in the multiply.
 */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
/* Copy a host struct rusage out to guest memory at target_addr,
 * byte-swapping every field for the target ABI.
 * Returns 0 or -TARGET_EFAULT if the guest address is unwritable.
 */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
/* Convert a target rlimit value to the host rlim_t.  The target's
 * "infinity" sentinel maps to RLIM_INFINITY, and any value that does
 * not survive the round-trip to rlim_t is also treated as infinity.
 */
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    /* Overflow check: if the value cannot be represented, saturate. */
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}
/* Convert a host rlim_t to the target representation.  Host infinity,
 * and any value too large for the target's signed abi_long, become
 * TARGET_RLIM_INFINITY.
 */
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
/* Map a target RLIMIT_* resource code to the host's constant.
 * Unknown codes are passed through unchanged.
 */
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
/* Read a struct timeval from guest memory into *tv.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
/* Write *tv out to a struct timeval in guest memory.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
/* Read a struct timezone from guest memory into *tz.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
1374 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
/* Read a POSIX mq_attr from guest memory into *attr.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}
/* Write *attr out to a POSIX mq_attr in guest memory.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
1416 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /* NULL guest pointers become NULL fd_set pointers for the syscall. */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /* The guest passes a timeval but pselect6 wants a timespec. */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        /* Write back the (possibly updated) remaining time. */
        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
1474 #if defined(TARGET_WANT_OLD_SYS_SELECT)
/* Handle the old-style select where all five arguments are packed in a
 * single guest struct sel_arg_struct pointed to by arg1.
 */
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
/* Wrapper for pipe2(); only available when the host libc provides it. */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#if defined(CONFIG_PIPE2)
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}
/* Implement pipe/pipe2 for the guest.  Some targets return the second
 * fd in a CPU register instead of writing both fds to guest memory.
 */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    /* Default path: store both fds into the guest's int[2] array. */
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
/* Convert a guest ip_mreq/ip_mreqn to the host struct ip_mreqn.
 * The addresses stay in network byte order; only imr_ifindex (present
 * only when the guest passed a full ip_mreqn) needs byte-swapping.
 */
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
/* Convert a guest sockaddr at target_addr into a host struct sockaddr.
 * Handles the fd-translator hook, AF_UNIX sun_path termination quirks,
 * and byte-swapping of AF_NETLINK / AF_PACKET fields.
 */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    /* Some fd types (e.g. netlink) install their own address converter. */
    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
/* Copy a host struct sockaddr into guest memory at target_addr,
 * byte-swapping sa_family and the family-specific fields for
 * AF_NETLINK, AF_PACKET and AF_INET6 (sin6_scope_id).
 */
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    /* Only swap sa_family if the buffer is long enough to contain it;
     * the caller may legitimately pass a truncated address. */
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
        struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
            (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
/* Convert the ancillary data (control messages) of a guest msghdr into
 * host format, walking both cmsg chains in parallel.  SCM_RIGHTS fd
 * arrays and SCM_CREDENTIALS are converted field by field; anything
 * else is copied verbatim with a warning.
 */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length = guest cmsg_len minus the guest header. */
        int len = tswapal(target_cmsg->cmsg_len)
            - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            gemu_log("Host cmsg overflow\n");
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
/* Convert the ancillary data of a host msghdr back into guest format.
 * Truncation here is reported to the guest via MSG_CTRUNC (matching
 * Linux's put_cmsg() behaviour).  Handles SCM_RIGHTS, SO_TIMESTAMP,
 * SCM_CREDENTIALS, and the IP/IPv6 RECVERR / TTL / HOPLIMIT payloads;
 * other types are copied verbatim with a warning.
 */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        tgt_len = TARGET_CMSG_LEN(len);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        if (msg_controllen < tgt_len) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen;
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                struct errhdr_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                struct errhdr6_t {
                    struct sock_extended_err ee;
                    struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(tgt_len);
        tgt_space = TARGET_CMSG_SPACE(len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
/* Byte-swap every field of a netlink message header in place. */
static void tswap_nlmsghdr(struct nlmsghdr *hdr)
{
    hdr->nlmsg_len   = tswap32(hdr->nlmsg_len);
    hdr->nlmsg_type  = tswap16(hdr->nlmsg_type);
    hdr->nlmsg_flags = tswap16(hdr->nlmsg_flags);
    hdr->nlmsg_seq   = tswap32(hdr->nlmsg_seq);
    hdr->nlmsg_pid   = tswap32(hdr->nlmsg_pid);
}
/* Walk a buffer of host netlink messages, converting each payload with
 * the supplied callback and then byte-swapping the header in place.
 * Stops at NLMSG_DONE / NLMSG_ERROR or a malformed length.
 */
static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*host_to_target_nlmsg)
                                                       (struct nlmsghdr *))
{
    uint32_t nlmsg_len;
    abi_long ret;

    while (len > sizeof(struct nlmsghdr)) {

        /* Header is still in host order here; length is read before swap. */
        nlmsg_len = nlh->nlmsg_len;
        if (nlmsg_len < sizeof(struct nlmsghdr) ||
            nlmsg_len > len) {
            break;
        }

        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            tswap_nlmsghdr(nlh);
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            tswap_nlmsghdr(&e->msg);
            tswap_nlmsghdr(nlh);
            return 0;
        }
        default:
            ret = host_to_target_nlmsg(nlh);
            if (ret < 0) {
                tswap_nlmsghdr(nlh);
                return ret;
            }
            break;
        }
        tswap_nlmsghdr(nlh);
        len -= NLMSG_ALIGN(nlmsg_len);
        nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
    }
    return 0;
}
/* Walk a buffer of guest netlink messages, byte-swapping each header to
 * host order and then converting the payload with the supplied callback.
 * Note the length checks use tswap32() because the header is still in
 * guest order at that point.
 */
static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*target_to_host_nlmsg)
                                                       (struct nlmsghdr *))
{
    abi_long ret;

    while (len > sizeof(struct nlmsghdr)) {
        if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
            tswap32(nlh->nlmsg_len) > len) {
            break;
        }
        tswap_nlmsghdr(nlh);
        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            tswap_nlmsghdr(&e->msg);
            return 0;
        }
        default:
            ret = target_to_host_nlmsg(nlh);
            if (ret < 0) {
                return ret;
            }
            break;
        }
        len -= NLMSG_ALIGN(nlh->nlmsg_len);
        nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
    }
    return 0;
}
2043 #ifdef CONFIG_RTNETLINK
/* Walk a host netlink attribute list, converting each attribute's payload
 * with the callback (which receives the opaque context) and then
 * byte-swapping the attribute header in place.
 */
static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
                                               size_t len, void *context,
                                               abi_long (*host_to_target_nlattr)
                                                        (struct nlattr *,
                                                         void *context))
{
    unsigned short nla_len;
    abi_long ret;

    while (len > sizeof(struct nlattr)) {
        nla_len = nlattr->nla_len;
        if (nla_len < sizeof(struct nlattr) ||
            nla_len > len) {
            break;
        }
        ret = host_to_target_nlattr(nlattr, context);
        /* Swap the header after the payload callback, which needs it
         * in host order. */
        nlattr->nla_len = tswap16(nlattr->nla_len);
        nlattr->nla_type = tswap16(nlattr->nla_type);
        if (ret < 0) {
            return ret;
        }
        len -= NLA_ALIGN(nla_len);
        nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
    }
    return 0;
}
/* Walk a host rtnetlink attribute list (struct rtattr), converting each
 * payload with the callback and byte-swapping the attribute header.
 * Mirrors host_to_target_for_each_nlattr() but without a context arg.
 */
static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
                                               size_t len,
                                               abi_long (*host_to_target_rtattr)
                                                        (struct rtattr *))
{
    unsigned short rta_len;
    abi_long ret;

    while (len > sizeof(struct rtattr)) {
        rta_len = rtattr->rta_len;
        if (rta_len < sizeof(struct rtattr) ||
            rta_len > len) {
            break;
        }
        ret = host_to_target_rtattr(rtattr);
        rtattr->rta_len = tswap16(rtattr->rta_len);
        rtattr->rta_type = tswap16(rtattr->rta_type);
        if (ret < 0) {
            return ret;
        }
        len -= RTA_ALIGN(rta_len);
        rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
    }
    return 0;
}
/* Pointer to the payload of a netlink attribute (skips the header).
 * The inner parentheses ensure the arithmetic is done on a char *
 * (the original form performed `void * + int`, which is only valid
 * as a GNU extension, not standard C).
 */
#define NLA_DATA(nla) ((void *)((char *)(nla) + NLA_HDRLEN))
/* Byte-swap the payload of an IFLA_INFO_DATA bridge attribute in place,
 * dispatching on attribute type to the correct payload width.
 * Attributes with no data, raw binary or uint8_t payloads need no swap.
 */
static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
                                                  void *context)
{
    uint16_t *u16;
    uint32_t *u32;
    uint64_t *u64;

    switch (nlattr->nla_type) {
    /* no data */
    case QEMU_IFLA_BR_FDB_FLUSH:
        break;
    /* binary */
    case QEMU_IFLA_BR_GROUP_ADDR:
        break;
    /* uint8_t */
    case QEMU_IFLA_BR_VLAN_FILTERING:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
    case QEMU_IFLA_BR_MCAST_ROUTER:
    case QEMU_IFLA_BR_MCAST_SNOOPING:
    case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
    case QEMU_IFLA_BR_MCAST_QUERIER:
    case QEMU_IFLA_BR_NF_CALL_IPTABLES:
    case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
    case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
        break;
    /* uint16_t */
    case QEMU_IFLA_BR_PRIORITY:
    case QEMU_IFLA_BR_VLAN_PROTOCOL:
    case QEMU_IFLA_BR_GROUP_FWD_MASK:
    case QEMU_IFLA_BR_ROOT_PORT:
    case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
        u16 = NLA_DATA(nlattr);
        *u16 = tswap16(*u16);
        break;
    /* uint32_t */
    case QEMU_IFLA_BR_FORWARD_DELAY:
    case QEMU_IFLA_BR_HELLO_TIME:
    case QEMU_IFLA_BR_MAX_AGE:
    case QEMU_IFLA_BR_AGEING_TIME:
    case QEMU_IFLA_BR_STP_STATE:
    case QEMU_IFLA_BR_ROOT_PATH_COST:
    case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
    case QEMU_IFLA_BR_MCAST_HASH_MAX:
    case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
    case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint64_t */
    case QEMU_IFLA_BR_HELLO_TIMER:
    case QEMU_IFLA_BR_TCN_TIMER:
    case QEMU_IFLA_BR_GC_TIMER:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
    case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
    case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
    case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
        u64 = NLA_DATA(nlattr);
        *u64 = tswap64(*u64);
        break;
    /* ifla_bridge_id: uin8_t[] */
    case QEMU_IFLA_BR_ROOT_ID:
    case QEMU_IFLA_BR_BRIDGE_ID:
        break;
    default:
        gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}
/* Byte-swap the payload of an IFLA_INFO_SLAVE_DATA bridge-port attribute
 * in place, dispatching on attribute type to the correct payload width.
 */
static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
                                                        void *context)
{
    uint16_t *u16;
    uint32_t *u32;
    uint64_t *u64;

    switch (nlattr->nla_type) {
    /* uint8_t */
    case QEMU_IFLA_BRPORT_STATE:
    case QEMU_IFLA_BRPORT_MODE:
    case QEMU_IFLA_BRPORT_GUARD:
    case QEMU_IFLA_BRPORT_PROTECT:
    case QEMU_IFLA_BRPORT_FAST_LEAVE:
    case QEMU_IFLA_BRPORT_LEARNING:
    case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
    case QEMU_IFLA_BRPORT_PROXYARP:
    case QEMU_IFLA_BRPORT_LEARNING_SYNC:
    case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
    case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
    case QEMU_IFLA_BRPORT_CONFIG_PENDING:
    case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
        break;
    /* uint16_t */
    case QEMU_IFLA_BRPORT_PRIORITY:
    case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
    case QEMU_IFLA_BRPORT_DESIGNATED_COST:
    case QEMU_IFLA_BRPORT_ID:
    case QEMU_IFLA_BRPORT_NO:
        u16 = NLA_DATA(nlattr);
        *u16 = tswap16(*u16);
        break;
    /* uint32_t */
    case QEMU_IFLA_BRPORT_COST:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint64_t */
    case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
    case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
    case QEMU_IFLA_BRPORT_HOLD_TIMER:
        u64 = NLA_DATA(nlattr);
        *u64 = tswap64(*u64);
        break;
    /* ifla_bridge_id: uint8_t[] */
    case QEMU_IFLA_BRPORT_ROOT_ID:
    case QEMU_IFLA_BRPORT_BRIDGE_ID:
        break;
    default:
        gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}
/* Context passed through the IFLA_LINKINFO attribute walk: records the
 * link "kind" strings (and their lengths) seen in IFLA_INFO_KIND /
 * IFLA_INFO_SLAVE_KIND so the nested DATA attributes can be dispatched.
 */
struct linkinfo_context {
    int len;
    char *name;
    int slave_len;
    char *slave_name;
};
/* Convert one IFLA_LINKINFO sub-attribute.  KIND strings are recorded in
 * the context; DATA/SLAVE_DATA payloads are recursed into only for the
 * "bridge" kind (the only one currently supported).
 */
static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
                                                    void *context)
{
    struct linkinfo_context *li_context = context;

    switch (nlattr->nla_type) {
    /* string */
    case QEMU_IFLA_INFO_KIND:
        li_context->name = NLA_DATA(nlattr);
        li_context->len = nlattr->nla_len - NLA_HDRLEN;
        break;
    case QEMU_IFLA_INFO_SLAVE_KIND:
        li_context->slave_name = NLA_DATA(nlattr);
        li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
        break;
    /* stats */
    case QEMU_IFLA_INFO_XSTATS:
        /* FIXME: only used by CAN */
        break;
    /* nested */
    case QEMU_IFLA_INFO_DATA:
        if (strncmp(li_context->name, "bridge",
                    li_context->len) == 0) {
            return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
                                                  nlattr->nla_len,
                                                  NULL,
                                             host_to_target_data_bridge_nlattr);
        } else {
            gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
        }
        break;
    case QEMU_IFLA_INFO_SLAVE_DATA:
        if (strncmp(li_context->slave_name, "bridge",
                    li_context->slave_len) == 0) {
            return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
                                                  nlattr->nla_len,
                                                  NULL,
                                       host_to_target_slave_data_bridge_nlattr);
        } else {
            gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
                     li_context->slave_name);
        }
        break;
    default:
        gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
        break;
    }

    return 0;
}
/* Byte-swap the payload of an AF_INET IFLA attribute in place.
 * Only the uint32_t array in IFLA_INET_CONF is converted.
 */
static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
                                                void *context)
{
    uint32_t *u32;
    int i;

    switch (nlattr->nla_type) {
    /* uint32_t[] */
    case QEMU_IFLA_INET_CONF:
        u32 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
             i++) {
            u32[i] = tswap32(u32[i]);
        }
        break;
    default:
        gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
    }
    return 0;
}
/* Byte-swap the payload of an AF_INET6 IFLA attribute in place,
 * dispatching on attribute type to the correct payload layout.
 */
static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
                                                 void *context)
{
    uint32_t *u32;
    uint64_t *u64;
    struct ifla_cacheinfo *ci;
    int i;

    switch (nlattr->nla_type) {
    /* binaries */
    case QEMU_IFLA_INET6_TOKEN:
        break;
    /* uint8_t */
    case QEMU_IFLA_INET6_ADDR_GEN_MODE:
        break;
    /* uint32_t */
    case QEMU_IFLA_INET6_FLAGS:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint32_t[] */
    case QEMU_IFLA_INET6_CONF:
        u32 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
             i++) {
            u32[i] = tswap32(u32[i]);
        }
        break;
    /* ifla_cacheinfo */
    case QEMU_IFLA_INET6_CACHEINFO:
        ci = NLA_DATA(nlattr);
        ci->max_reasm_len = tswap32(ci->max_reasm_len);
        ci->tstamp = tswap32(ci->tstamp);
        ci->reachable_time = tswap32(ci->reachable_time);
        ci->retrans_time = tswap32(ci->retrans_time);
        break;
    /* uint64_t[] */
    case QEMU_IFLA_INET6_STATS:
    case QEMU_IFLA_INET6_ICMP6STATS:
        u64 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
             i++) {
            u64[i] = tswap64(u64[i]);
        }
        break;
    default:
        gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
    }
    return 0;
}
2357 static abi_long
host_to_target_data_spec_nlattr(struct nlattr
*nlattr
,
2360 switch (nlattr
->nla_type
) {
2362 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2364 host_to_target_data_inet_nlattr
);
2366 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2368 host_to_target_data_inet6_nlattr
);
2370 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr
->nla_type
);
2376 static abi_long
host_to_target_data_link_rtattr(struct rtattr
*rtattr
)
2379 struct rtnl_link_stats
*st
;
2380 struct rtnl_link_stats64
*st64
;
2381 struct rtnl_link_ifmap
*map
;
2382 struct linkinfo_context li_context
;
2384 switch (rtattr
->rta_type
) {
2386 case QEMU_IFLA_ADDRESS
:
2387 case QEMU_IFLA_BROADCAST
:
2389 case QEMU_IFLA_IFNAME
:
2390 case QEMU_IFLA_QDISC
:
2393 case QEMU_IFLA_OPERSTATE
:
2394 case QEMU_IFLA_LINKMODE
:
2395 case QEMU_IFLA_CARRIER
:
2396 case QEMU_IFLA_PROTO_DOWN
:
2400 case QEMU_IFLA_LINK
:
2401 case QEMU_IFLA_WEIGHT
:
2402 case QEMU_IFLA_TXQLEN
:
2403 case QEMU_IFLA_CARRIER_CHANGES
:
2404 case QEMU_IFLA_NUM_RX_QUEUES
:
2405 case QEMU_IFLA_NUM_TX_QUEUES
:
2406 case QEMU_IFLA_PROMISCUITY
:
2407 case QEMU_IFLA_EXT_MASK
:
2408 case QEMU_IFLA_LINK_NETNSID
:
2409 case QEMU_IFLA_GROUP
:
2410 case QEMU_IFLA_MASTER
:
2411 case QEMU_IFLA_NUM_VF
:
2412 case QEMU_IFLA_GSO_MAX_SEGS
:
2413 case QEMU_IFLA_GSO_MAX_SIZE
:
2414 u32
= RTA_DATA(rtattr
);
2415 *u32
= tswap32(*u32
);
2417 /* struct rtnl_link_stats */
2418 case QEMU_IFLA_STATS
:
2419 st
= RTA_DATA(rtattr
);
2420 st
->rx_packets
= tswap32(st
->rx_packets
);
2421 st
->tx_packets
= tswap32(st
->tx_packets
);
2422 st
->rx_bytes
= tswap32(st
->rx_bytes
);
2423 st
->tx_bytes
= tswap32(st
->tx_bytes
);
2424 st
->rx_errors
= tswap32(st
->rx_errors
);
2425 st
->tx_errors
= tswap32(st
->tx_errors
);
2426 st
->rx_dropped
= tswap32(st
->rx_dropped
);
2427 st
->tx_dropped
= tswap32(st
->tx_dropped
);
2428 st
->multicast
= tswap32(st
->multicast
);
2429 st
->collisions
= tswap32(st
->collisions
);
2431 /* detailed rx_errors: */
2432 st
->rx_length_errors
= tswap32(st
->rx_length_errors
);
2433 st
->rx_over_errors
= tswap32(st
->rx_over_errors
);
2434 st
->rx_crc_errors
= tswap32(st
->rx_crc_errors
);
2435 st
->rx_frame_errors
= tswap32(st
->rx_frame_errors
);
2436 st
->rx_fifo_errors
= tswap32(st
->rx_fifo_errors
);
2437 st
->rx_missed_errors
= tswap32(st
->rx_missed_errors
);
2439 /* detailed tx_errors */
2440 st
->tx_aborted_errors
= tswap32(st
->tx_aborted_errors
);
2441 st
->tx_carrier_errors
= tswap32(st
->tx_carrier_errors
);
2442 st
->tx_fifo_errors
= tswap32(st
->tx_fifo_errors
);
2443 st
->tx_heartbeat_errors
= tswap32(st
->tx_heartbeat_errors
);
2444 st
->tx_window_errors
= tswap32(st
->tx_window_errors
);
2447 st
->rx_compressed
= tswap32(st
->rx_compressed
);
2448 st
->tx_compressed
= tswap32(st
->tx_compressed
);
2450 /* struct rtnl_link_stats64 */
2451 case QEMU_IFLA_STATS64
:
2452 st64
= RTA_DATA(rtattr
);
2453 st64
->rx_packets
= tswap64(st64
->rx_packets
);
2454 st64
->tx_packets
= tswap64(st64
->tx_packets
);
2455 st64
->rx_bytes
= tswap64(st64
->rx_bytes
);
2456 st64
->tx_bytes
= tswap64(st64
->tx_bytes
);
2457 st64
->rx_errors
= tswap64(st64
->rx_errors
);
2458 st64
->tx_errors
= tswap64(st64
->tx_errors
);
2459 st64
->rx_dropped
= tswap64(st64
->rx_dropped
);
2460 st64
->tx_dropped
= tswap64(st64
->tx_dropped
);
2461 st64
->multicast
= tswap64(st64
->multicast
);
2462 st64
->collisions
= tswap64(st64
->collisions
);
2464 /* detailed rx_errors: */
2465 st64
->rx_length_errors
= tswap64(st64
->rx_length_errors
);
2466 st64
->rx_over_errors
= tswap64(st64
->rx_over_errors
);
2467 st64
->rx_crc_errors
= tswap64(st64
->rx_crc_errors
);
2468 st64
->rx_frame_errors
= tswap64(st64
->rx_frame_errors
);
2469 st64
->rx_fifo_errors
= tswap64(st64
->rx_fifo_errors
);
2470 st64
->rx_missed_errors
= tswap64(st64
->rx_missed_errors
);
2472 /* detailed tx_errors */
2473 st64
->tx_aborted_errors
= tswap64(st64
->tx_aborted_errors
);
2474 st64
->tx_carrier_errors
= tswap64(st64
->tx_carrier_errors
);
2475 st64
->tx_fifo_errors
= tswap64(st64
->tx_fifo_errors
);
2476 st64
->tx_heartbeat_errors
= tswap64(st64
->tx_heartbeat_errors
);
2477 st64
->tx_window_errors
= tswap64(st64
->tx_window_errors
);
2480 st64
->rx_compressed
= tswap64(st64
->rx_compressed
);
2481 st64
->tx_compressed
= tswap64(st64
->tx_compressed
);
2483 /* struct rtnl_link_ifmap */
2485 map
= RTA_DATA(rtattr
);
2486 map
->mem_start
= tswap64(map
->mem_start
);
2487 map
->mem_end
= tswap64(map
->mem_end
);
2488 map
->base_addr
= tswap64(map
->base_addr
);
2489 map
->irq
= tswap16(map
->irq
);
2492 case QEMU_IFLA_LINKINFO
:
2493 memset(&li_context
, 0, sizeof(li_context
));
2494 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2496 host_to_target_data_linkinfo_nlattr
);
2497 case QEMU_IFLA_AF_SPEC
:
2498 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2500 host_to_target_data_spec_nlattr
);
2502 gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr
->rta_type
);
2508 static abi_long
host_to_target_data_addr_rtattr(struct rtattr
*rtattr
)
2511 struct ifa_cacheinfo
*ci
;
2513 switch (rtattr
->rta_type
) {
2514 /* binary: depends on family type */
2524 u32
= RTA_DATA(rtattr
);
2525 *u32
= tswap32(*u32
);
2527 /* struct ifa_cacheinfo */
2529 ci
= RTA_DATA(rtattr
);
2530 ci
->ifa_prefered
= tswap32(ci
->ifa_prefered
);
2531 ci
->ifa_valid
= tswap32(ci
->ifa_valid
);
2532 ci
->cstamp
= tswap32(ci
->cstamp
);
2533 ci
->tstamp
= tswap32(ci
->tstamp
);
2536 gemu_log("Unknown host IFA type: %d\n", rtattr
->rta_type
);
2542 static abi_long
host_to_target_data_route_rtattr(struct rtattr
*rtattr
)
2545 switch (rtattr
->rta_type
) {
2546 /* binary: depends on family type */
2555 u32
= RTA_DATA(rtattr
);
2556 *u32
= tswap32(*u32
);
2559 gemu_log("Unknown host RTA type: %d\n", rtattr
->rta_type
);
2565 static abi_long
host_to_target_link_rtattr(struct rtattr
*rtattr
,
2566 uint32_t rtattr_len
)
2568 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2569 host_to_target_data_link_rtattr
);
2572 static abi_long
host_to_target_addr_rtattr(struct rtattr
*rtattr
,
2573 uint32_t rtattr_len
)
2575 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2576 host_to_target_data_addr_rtattr
);
2579 static abi_long
host_to_target_route_rtattr(struct rtattr
*rtattr
,
2580 uint32_t rtattr_len
)
2582 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2583 host_to_target_data_route_rtattr
);
2586 static abi_long
host_to_target_data_route(struct nlmsghdr
*nlh
)
2589 struct ifinfomsg
*ifi
;
2590 struct ifaddrmsg
*ifa
;
2593 nlmsg_len
= nlh
->nlmsg_len
;
2594 switch (nlh
->nlmsg_type
) {
2598 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2599 ifi
= NLMSG_DATA(nlh
);
2600 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2601 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2602 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2603 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2604 host_to_target_link_rtattr(IFLA_RTA(ifi
),
2605 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifi
)));
2611 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2612 ifa
= NLMSG_DATA(nlh
);
2613 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2614 host_to_target_addr_rtattr(IFA_RTA(ifa
),
2615 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifa
)));
2621 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2622 rtm
= NLMSG_DATA(nlh
);
2623 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2624 host_to_target_route_rtattr(RTM_RTA(rtm
),
2625 nlmsg_len
- NLMSG_LENGTH(sizeof(*rtm
)));
2629 return -TARGET_EINVAL
;
2634 static inline abi_long
host_to_target_nlmsg_route(struct nlmsghdr
*nlh
,
2637 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_route
);
2640 static abi_long
target_to_host_for_each_rtattr(struct rtattr
*rtattr
,
2642 abi_long (*target_to_host_rtattr
)
2647 while (len
>= sizeof(struct rtattr
)) {
2648 if (tswap16(rtattr
->rta_len
) < sizeof(struct rtattr
) ||
2649 tswap16(rtattr
->rta_len
) > len
) {
2652 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
2653 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
2654 ret
= target_to_host_rtattr(rtattr
);
2658 len
-= RTA_ALIGN(rtattr
->rta_len
);
2659 rtattr
= (struct rtattr
*)(((char *)rtattr
) +
2660 RTA_ALIGN(rtattr
->rta_len
));
2665 static abi_long
target_to_host_data_link_rtattr(struct rtattr
*rtattr
)
2667 switch (rtattr
->rta_type
) {
2669 gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr
->rta_type
);
2675 static abi_long
target_to_host_data_addr_rtattr(struct rtattr
*rtattr
)
2677 switch (rtattr
->rta_type
) {
2678 /* binary: depends on family type */
2683 gemu_log("Unknown target IFA type: %d\n", rtattr
->rta_type
);
2689 static abi_long
target_to_host_data_route_rtattr(struct rtattr
*rtattr
)
2692 switch (rtattr
->rta_type
) {
2693 /* binary: depends on family type */
2701 u32
= RTA_DATA(rtattr
);
2702 *u32
= tswap32(*u32
);
2705 gemu_log("Unknown target RTA type: %d\n", rtattr
->rta_type
);
2711 static void target_to_host_link_rtattr(struct rtattr
*rtattr
,
2712 uint32_t rtattr_len
)
2714 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2715 target_to_host_data_link_rtattr
);
2718 static void target_to_host_addr_rtattr(struct rtattr
*rtattr
,
2719 uint32_t rtattr_len
)
2721 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2722 target_to_host_data_addr_rtattr
);
2725 static void target_to_host_route_rtattr(struct rtattr
*rtattr
,
2726 uint32_t rtattr_len
)
2728 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2729 target_to_host_data_route_rtattr
);
2732 static abi_long
target_to_host_data_route(struct nlmsghdr
*nlh
)
2734 struct ifinfomsg
*ifi
;
2735 struct ifaddrmsg
*ifa
;
2738 switch (nlh
->nlmsg_type
) {
2743 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2744 ifi
= NLMSG_DATA(nlh
);
2745 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2746 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2747 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2748 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2749 target_to_host_link_rtattr(IFLA_RTA(ifi
), nlh
->nlmsg_len
-
2750 NLMSG_LENGTH(sizeof(*ifi
)));
2756 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2757 ifa
= NLMSG_DATA(nlh
);
2758 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2759 target_to_host_addr_rtattr(IFA_RTA(ifa
), nlh
->nlmsg_len
-
2760 NLMSG_LENGTH(sizeof(*ifa
)));
2767 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2768 rtm
= NLMSG_DATA(nlh
);
2769 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2770 target_to_host_route_rtattr(RTM_RTA(rtm
), nlh
->nlmsg_len
-
2771 NLMSG_LENGTH(sizeof(*rtm
)));
2775 return -TARGET_EOPNOTSUPP
;
2780 static abi_long
target_to_host_nlmsg_route(struct nlmsghdr
*nlh
, size_t len
)
2782 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_route
);
2784 #endif /* CONFIG_RTNETLINK */
2786 static abi_long
host_to_target_data_audit(struct nlmsghdr
*nlh
)
2788 switch (nlh
->nlmsg_type
) {
2790 gemu_log("Unknown host audit message type %d\n",
2792 return -TARGET_EINVAL
;
2797 static inline abi_long
host_to_target_nlmsg_audit(struct nlmsghdr
*nlh
,
2800 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_audit
);
2803 static abi_long
target_to_host_data_audit(struct nlmsghdr
*nlh
)
2805 switch (nlh
->nlmsg_type
) {
2807 case AUDIT_FIRST_USER_MSG
... AUDIT_LAST_USER_MSG
:
2808 case AUDIT_FIRST_USER_MSG2
... AUDIT_LAST_USER_MSG2
:
2811 gemu_log("Unknown target audit message type %d\n",
2813 return -TARGET_EINVAL
;
2819 static abi_long
target_to_host_nlmsg_audit(struct nlmsghdr
*nlh
, size_t len
)
2821 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_audit
);
2824 /* do_setsockopt() Must return target values and target errnos. */
2825 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
2826 abi_ulong optval_addr
, socklen_t optlen
)
2830 struct ip_mreqn
*ip_mreq
;
2831 struct ip_mreq_source
*ip_mreq_source
;
2835 /* TCP options all take an 'int' value. */
2836 if (optlen
< sizeof(uint32_t))
2837 return -TARGET_EINVAL
;
2839 if (get_user_u32(val
, optval_addr
))
2840 return -TARGET_EFAULT
;
2841 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2848 case IP_ROUTER_ALERT
:
2852 case IP_MTU_DISCOVER
:
2859 case IP_MULTICAST_TTL
:
2860 case IP_MULTICAST_LOOP
:
2862 if (optlen
>= sizeof(uint32_t)) {
2863 if (get_user_u32(val
, optval_addr
))
2864 return -TARGET_EFAULT
;
2865 } else if (optlen
>= 1) {
2866 if (get_user_u8(val
, optval_addr
))
2867 return -TARGET_EFAULT
;
2869 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2871 case IP_ADD_MEMBERSHIP
:
2872 case IP_DROP_MEMBERSHIP
:
2873 if (optlen
< sizeof (struct target_ip_mreq
) ||
2874 optlen
> sizeof (struct target_ip_mreqn
))
2875 return -TARGET_EINVAL
;
2877 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
2878 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
2879 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
2882 case IP_BLOCK_SOURCE
:
2883 case IP_UNBLOCK_SOURCE
:
2884 case IP_ADD_SOURCE_MEMBERSHIP
:
2885 case IP_DROP_SOURCE_MEMBERSHIP
:
2886 if (optlen
!= sizeof (struct target_ip_mreq_source
))
2887 return -TARGET_EINVAL
;
2889 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2890 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2891 unlock_user (ip_mreq_source
, optval_addr
, 0);
2900 case IPV6_MTU_DISCOVER
:
2903 case IPV6_RECVPKTINFO
:
2904 case IPV6_UNICAST_HOPS
:
2906 case IPV6_RECVHOPLIMIT
:
2907 case IPV6_2292HOPLIMIT
:
2910 if (optlen
< sizeof(uint32_t)) {
2911 return -TARGET_EINVAL
;
2913 if (get_user_u32(val
, optval_addr
)) {
2914 return -TARGET_EFAULT
;
2916 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2917 &val
, sizeof(val
)));
2921 struct in6_pktinfo pki
;
2923 if (optlen
< sizeof(pki
)) {
2924 return -TARGET_EINVAL
;
2927 if (copy_from_user(&pki
, optval_addr
, sizeof(pki
))) {
2928 return -TARGET_EFAULT
;
2931 pki
.ipi6_ifindex
= tswap32(pki
.ipi6_ifindex
);
2933 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2934 &pki
, sizeof(pki
)));
2945 struct icmp6_filter icmp6f
;
2947 if (optlen
> sizeof(icmp6f
)) {
2948 optlen
= sizeof(icmp6f
);
2951 if (copy_from_user(&icmp6f
, optval_addr
, optlen
)) {
2952 return -TARGET_EFAULT
;
2955 for (val
= 0; val
< 8; val
++) {
2956 icmp6f
.data
[val
] = tswap32(icmp6f
.data
[val
]);
2959 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2971 /* those take an u32 value */
2972 if (optlen
< sizeof(uint32_t)) {
2973 return -TARGET_EINVAL
;
2976 if (get_user_u32(val
, optval_addr
)) {
2977 return -TARGET_EFAULT
;
2979 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2980 &val
, sizeof(val
)));
2987 case TARGET_SOL_SOCKET
:
2989 case TARGET_SO_RCVTIMEO
:
2993 optname
= SO_RCVTIMEO
;
2996 if (optlen
!= sizeof(struct target_timeval
)) {
2997 return -TARGET_EINVAL
;
3000 if (copy_from_user_timeval(&tv
, optval_addr
)) {
3001 return -TARGET_EFAULT
;
3004 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
3008 case TARGET_SO_SNDTIMEO
:
3009 optname
= SO_SNDTIMEO
;
3011 case TARGET_SO_ATTACH_FILTER
:
3013 struct target_sock_fprog
*tfprog
;
3014 struct target_sock_filter
*tfilter
;
3015 struct sock_fprog fprog
;
3016 struct sock_filter
*filter
;
3019 if (optlen
!= sizeof(*tfprog
)) {
3020 return -TARGET_EINVAL
;
3022 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
3023 return -TARGET_EFAULT
;
3025 if (!lock_user_struct(VERIFY_READ
, tfilter
,
3026 tswapal(tfprog
->filter
), 0)) {
3027 unlock_user_struct(tfprog
, optval_addr
, 1);
3028 return -TARGET_EFAULT
;
3031 fprog
.len
= tswap16(tfprog
->len
);
3032 filter
= g_try_new(struct sock_filter
, fprog
.len
);
3033 if (filter
== NULL
) {
3034 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
3035 unlock_user_struct(tfprog
, optval_addr
, 1);
3036 return -TARGET_ENOMEM
;
3038 for (i
= 0; i
< fprog
.len
; i
++) {
3039 filter
[i
].code
= tswap16(tfilter
[i
].code
);
3040 filter
[i
].jt
= tfilter
[i
].jt
;
3041 filter
[i
].jf
= tfilter
[i
].jf
;
3042 filter
[i
].k
= tswap32(tfilter
[i
].k
);
3044 fprog
.filter
= filter
;
3046 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
3047 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
3050 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
3051 unlock_user_struct(tfprog
, optval_addr
, 1);
3054 case TARGET_SO_BINDTODEVICE
:
3056 char *dev_ifname
, *addr_ifname
;
3058 if (optlen
> IFNAMSIZ
- 1) {
3059 optlen
= IFNAMSIZ
- 1;
3061 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
3063 return -TARGET_EFAULT
;
3065 optname
= SO_BINDTODEVICE
;
3066 addr_ifname
= alloca(IFNAMSIZ
);
3067 memcpy(addr_ifname
, dev_ifname
, optlen
);
3068 addr_ifname
[optlen
] = 0;
3069 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
3070 addr_ifname
, optlen
));
3071 unlock_user (dev_ifname
, optval_addr
, 0);
3074 /* Options with 'int' argument. */
3075 case TARGET_SO_DEBUG
:
3078 case TARGET_SO_REUSEADDR
:
3079 optname
= SO_REUSEADDR
;
3081 case TARGET_SO_TYPE
:
3084 case TARGET_SO_ERROR
:
3087 case TARGET_SO_DONTROUTE
:
3088 optname
= SO_DONTROUTE
;
3090 case TARGET_SO_BROADCAST
:
3091 optname
= SO_BROADCAST
;
3093 case TARGET_SO_SNDBUF
:
3094 optname
= SO_SNDBUF
;
3096 case TARGET_SO_SNDBUFFORCE
:
3097 optname
= SO_SNDBUFFORCE
;
3099 case TARGET_SO_RCVBUF
:
3100 optname
= SO_RCVBUF
;
3102 case TARGET_SO_RCVBUFFORCE
:
3103 optname
= SO_RCVBUFFORCE
;
3105 case TARGET_SO_KEEPALIVE
:
3106 optname
= SO_KEEPALIVE
;
3108 case TARGET_SO_OOBINLINE
:
3109 optname
= SO_OOBINLINE
;
3111 case TARGET_SO_NO_CHECK
:
3112 optname
= SO_NO_CHECK
;
3114 case TARGET_SO_PRIORITY
:
3115 optname
= SO_PRIORITY
;
3118 case TARGET_SO_BSDCOMPAT
:
3119 optname
= SO_BSDCOMPAT
;
3122 case TARGET_SO_PASSCRED
:
3123 optname
= SO_PASSCRED
;
3125 case TARGET_SO_PASSSEC
:
3126 optname
= SO_PASSSEC
;
3128 case TARGET_SO_TIMESTAMP
:
3129 optname
= SO_TIMESTAMP
;
3131 case TARGET_SO_RCVLOWAT
:
3132 optname
= SO_RCVLOWAT
;
3138 if (optlen
< sizeof(uint32_t))
3139 return -TARGET_EINVAL
;
3141 if (get_user_u32(val
, optval_addr
))
3142 return -TARGET_EFAULT
;
3143 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
3147 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
3148 ret
= -TARGET_ENOPROTOOPT
;
3153 /* do_getsockopt() Must return target values and target errnos. */
3154 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
3155 abi_ulong optval_addr
, abi_ulong optlen
)
3162 case TARGET_SOL_SOCKET
:
3165 /* These don't just return a single integer */
3166 case TARGET_SO_LINGER
:
3167 case TARGET_SO_RCVTIMEO
:
3168 case TARGET_SO_SNDTIMEO
:
3169 case TARGET_SO_PEERNAME
:
3171 case TARGET_SO_PEERCRED
: {
3174 struct target_ucred
*tcr
;
3176 if (get_user_u32(len
, optlen
)) {
3177 return -TARGET_EFAULT
;
3180 return -TARGET_EINVAL
;
3184 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
3192 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
3193 return -TARGET_EFAULT
;
3195 __put_user(cr
.pid
, &tcr
->pid
);
3196 __put_user(cr
.uid
, &tcr
->uid
);
3197 __put_user(cr
.gid
, &tcr
->gid
);
3198 unlock_user_struct(tcr
, optval_addr
, 1);
3199 if (put_user_u32(len
, optlen
)) {
3200 return -TARGET_EFAULT
;
3204 /* Options with 'int' argument. */
3205 case TARGET_SO_DEBUG
:
3208 case TARGET_SO_REUSEADDR
:
3209 optname
= SO_REUSEADDR
;
3211 case TARGET_SO_TYPE
:
3214 case TARGET_SO_ERROR
:
3217 case TARGET_SO_DONTROUTE
:
3218 optname
= SO_DONTROUTE
;
3220 case TARGET_SO_BROADCAST
:
3221 optname
= SO_BROADCAST
;
3223 case TARGET_SO_SNDBUF
:
3224 optname
= SO_SNDBUF
;
3226 case TARGET_SO_RCVBUF
:
3227 optname
= SO_RCVBUF
;
3229 case TARGET_SO_KEEPALIVE
:
3230 optname
= SO_KEEPALIVE
;
3232 case TARGET_SO_OOBINLINE
:
3233 optname
= SO_OOBINLINE
;
3235 case TARGET_SO_NO_CHECK
:
3236 optname
= SO_NO_CHECK
;
3238 case TARGET_SO_PRIORITY
:
3239 optname
= SO_PRIORITY
;
3242 case TARGET_SO_BSDCOMPAT
:
3243 optname
= SO_BSDCOMPAT
;
3246 case TARGET_SO_PASSCRED
:
3247 optname
= SO_PASSCRED
;
3249 case TARGET_SO_TIMESTAMP
:
3250 optname
= SO_TIMESTAMP
;
3252 case TARGET_SO_RCVLOWAT
:
3253 optname
= SO_RCVLOWAT
;
3255 case TARGET_SO_ACCEPTCONN
:
3256 optname
= SO_ACCEPTCONN
;
3263 /* TCP options all take an 'int' value. */
3265 if (get_user_u32(len
, optlen
))
3266 return -TARGET_EFAULT
;
3268 return -TARGET_EINVAL
;
3270 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3273 if (optname
== SO_TYPE
) {
3274 val
= host_to_target_sock_type(val
);
3279 if (put_user_u32(val
, optval_addr
))
3280 return -TARGET_EFAULT
;
3282 if (put_user_u8(val
, optval_addr
))
3283 return -TARGET_EFAULT
;
3285 if (put_user_u32(len
, optlen
))
3286 return -TARGET_EFAULT
;
3293 case IP_ROUTER_ALERT
:
3297 case IP_MTU_DISCOVER
:
3303 case IP_MULTICAST_TTL
:
3304 case IP_MULTICAST_LOOP
:
3305 if (get_user_u32(len
, optlen
))
3306 return -TARGET_EFAULT
;
3308 return -TARGET_EINVAL
;
3310 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3313 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
3315 if (put_user_u32(len
, optlen
)
3316 || put_user_u8(val
, optval_addr
))
3317 return -TARGET_EFAULT
;
3319 if (len
> sizeof(int))
3321 if (put_user_u32(len
, optlen
)
3322 || put_user_u32(val
, optval_addr
))
3323 return -TARGET_EFAULT
;
3327 ret
= -TARGET_ENOPROTOOPT
;
3333 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
3335 ret
= -TARGET_EOPNOTSUPP
;
3341 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
3342 abi_ulong count
, int copy
)
3344 struct target_iovec
*target_vec
;
3346 abi_ulong total_len
, max_len
;
3349 bool bad_address
= false;
3355 if (count
> IOV_MAX
) {
3360 vec
= g_try_new0(struct iovec
, count
);
3366 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3367 count
* sizeof(struct target_iovec
), 1);
3368 if (target_vec
== NULL
) {
3373 /* ??? If host page size > target page size, this will result in a
3374 value larger than what we can actually support. */
3375 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
3378 for (i
= 0; i
< count
; i
++) {
3379 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3380 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3385 } else if (len
== 0) {
3386 /* Zero length pointer is ignored. */
3387 vec
[i
].iov_base
= 0;
3389 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
3390 /* If the first buffer pointer is bad, this is a fault. But
3391 * subsequent bad buffers will result in a partial write; this
3392 * is realized by filling the vector with null pointers and
3394 if (!vec
[i
].iov_base
) {
3405 if (len
> max_len
- total_len
) {
3406 len
= max_len
- total_len
;
3409 vec
[i
].iov_len
= len
;
3413 unlock_user(target_vec
, target_addr
, 0);
3418 if (tswapal(target_vec
[i
].iov_len
) > 0) {
3419 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
3422 unlock_user(target_vec
, target_addr
, 0);
3429 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
3430 abi_ulong count
, int copy
)
3432 struct target_iovec
*target_vec
;
3435 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3436 count
* sizeof(struct target_iovec
), 1);
3438 for (i
= 0; i
< count
; i
++) {
3439 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3440 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3444 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
3446 unlock_user(target_vec
, target_addr
, 0);
3452 static inline int target_to_host_sock_type(int *type
)
3455 int target_type
= *type
;
3457 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
3458 case TARGET_SOCK_DGRAM
:
3459 host_type
= SOCK_DGRAM
;
3461 case TARGET_SOCK_STREAM
:
3462 host_type
= SOCK_STREAM
;
3465 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
3468 if (target_type
& TARGET_SOCK_CLOEXEC
) {
3469 #if defined(SOCK_CLOEXEC)
3470 host_type
|= SOCK_CLOEXEC
;
3472 return -TARGET_EINVAL
;
3475 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3476 #if defined(SOCK_NONBLOCK)
3477 host_type
|= SOCK_NONBLOCK
;
3478 #elif !defined(O_NONBLOCK)
3479 return -TARGET_EINVAL
;
3486 /* Try to emulate socket type flags after socket creation. */
/* Try to emulate socket type flags after socket creation.
 * On hosts without SOCK_NONBLOCK, fall back to fcntl(O_NONBLOCK);
 * returns the fd on success, -TARGET_EINVAL if the fixup fails.
 */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
3501 static abi_long
packet_target_to_host_sockaddr(void *host_addr
,
3502 abi_ulong target_addr
,
3505 struct sockaddr
*addr
= host_addr
;
3506 struct target_sockaddr
*target_saddr
;
3508 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
3509 if (!target_saddr
) {
3510 return -TARGET_EFAULT
;
3513 memcpy(addr
, target_saddr
, len
);
3514 addr
->sa_family
= tswap16(target_saddr
->sa_family
);
3515 /* spkt_protocol is big-endian */
3517 unlock_user(target_saddr
, target_addr
, 0);
3521 static TargetFdTrans target_packet_trans
= {
3522 .target_to_host_addr
= packet_target_to_host_sockaddr
,
3525 #ifdef CONFIG_RTNETLINK
3526 static abi_long
netlink_route_target_to_host(void *buf
, size_t len
)
3530 ret
= target_to_host_nlmsg_route(buf
, len
);
3538 static abi_long
netlink_route_host_to_target(void *buf
, size_t len
)
3542 ret
= host_to_target_nlmsg_route(buf
, len
);
3550 static TargetFdTrans target_netlink_route_trans
= {
3551 .target_to_host_data
= netlink_route_target_to_host
,
3552 .host_to_target_data
= netlink_route_host_to_target
,
3554 #endif /* CONFIG_RTNETLINK */
3556 static abi_long
netlink_audit_target_to_host(void *buf
, size_t len
)
3560 ret
= target_to_host_nlmsg_audit(buf
, len
);
3568 static abi_long
netlink_audit_host_to_target(void *buf
, size_t len
)
3572 ret
= host_to_target_nlmsg_audit(buf
, len
);
3580 static TargetFdTrans target_netlink_audit_trans
= {
3581 .target_to_host_data
= netlink_audit_target_to_host
,
3582 .host_to_target_data
= netlink_audit_host_to_target
,
3585 /* do_socket() Must return target values and target errnos. */
3586 static abi_long
do_socket(int domain
, int type
, int protocol
)
3588 int target_type
= type
;
3591 ret
= target_to_host_sock_type(&type
);
3596 if (domain
== PF_NETLINK
&& !(
3597 #ifdef CONFIG_RTNETLINK
3598 protocol
== NETLINK_ROUTE
||
3600 protocol
== NETLINK_KOBJECT_UEVENT
||
3601 protocol
== NETLINK_AUDIT
)) {
3602 return -EPFNOSUPPORT
;
3605 if (domain
== AF_PACKET
||
3606 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
3607 protocol
= tswap16(protocol
);
3610 ret
= get_errno(socket(domain
, type
, protocol
));
3612 ret
= sock_flags_fixup(ret
, target_type
);
3613 if (type
== SOCK_PACKET
) {
3614 /* Manage an obsolete case :
3615 * if socket type is SOCK_PACKET, bind by name
3617 fd_trans_register(ret
, &target_packet_trans
);
3618 } else if (domain
== PF_NETLINK
) {
3620 #ifdef CONFIG_RTNETLINK
3622 fd_trans_register(ret
, &target_netlink_route_trans
);
3625 case NETLINK_KOBJECT_UEVENT
:
3626 /* nothing to do: messages are strings */
3629 fd_trans_register(ret
, &target_netlink_audit_trans
);
3632 g_assert_not_reached();
3639 /* do_bind() Must return target values and target errnos. */
3640 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
3646 if ((int)addrlen
< 0) {
3647 return -TARGET_EINVAL
;
3650 addr
= alloca(addrlen
+1);
3652 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3656 return get_errno(bind(sockfd
, addr
, addrlen
));
3659 /* do_connect() Must return target values and target errnos. */
3660 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
3666 if ((int)addrlen
< 0) {
3667 return -TARGET_EINVAL
;
3670 addr
= alloca(addrlen
+1);
3672 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3676 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
3679 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3680 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
3681 int flags
, int send
)
3687 abi_ulong target_vec
;
3689 if (msgp
->msg_name
) {
3690 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
3691 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
3692 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
3693 tswapal(msgp
->msg_name
),
3695 if (ret
== -TARGET_EFAULT
) {
3696 /* For connected sockets msg_name and msg_namelen must
3697 * be ignored, so returning EFAULT immediately is wrong.
3698 * Instead, pass a bad msg_name to the host kernel, and
3699 * let it decide whether to return EFAULT or not.
3701 msg
.msg_name
= (void *)-1;
3706 msg
.msg_name
= NULL
;
3707 msg
.msg_namelen
= 0;
3709 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3710 msg
.msg_control
= alloca(msg
.msg_controllen
);
3711 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3713 count
= tswapal(msgp
->msg_iovlen
);
3714 target_vec
= tswapal(msgp
->msg_iov
);
3716 if (count
> IOV_MAX
) {
3717 /* sendrcvmsg returns a different errno for this condition than
3718 * readv/writev, so we must catch it here before lock_iovec() does.
3720 ret
= -TARGET_EMSGSIZE
;
3724 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3725 target_vec
, count
, send
);
3727 ret
= -host_to_target_errno(errno
);
3730 msg
.msg_iovlen
= count
;
3734 if (fd_trans_target_to_host_data(fd
)) {
3737 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3738 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3739 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3740 msg
.msg_iov
->iov_len
);
3742 msg
.msg_iov
->iov_base
= host_msg
;
3743 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3747 ret
= target_to_host_cmsg(&msg
, msgp
);
3749 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3753 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3754 if (!is_error(ret
)) {
3756 if (fd_trans_host_to_target_data(fd
)) {
3757 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3760 ret
= host_to_target_cmsg(msgp
, &msg
);
3762 if (!is_error(ret
)) {
3763 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3764 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
3765 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3766 msg
.msg_name
, msg
.msg_namelen
);
3778 unlock_iovec(vec
, target_vec
, count
, !send
);
3783 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3784 int flags
, int send
)
3787 struct target_msghdr
*msgp
;
3789 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3793 return -TARGET_EFAULT
;
3795 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3796 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3800 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3801 * so it might not have this *mmsg-specific flag either.
3803 #ifndef MSG_WAITFORONE
3804 #define MSG_WAITFORONE 0x10000
3807 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3808 unsigned int vlen
, unsigned int flags
,
3811 struct target_mmsghdr
*mmsgp
;
3815 if (vlen
> UIO_MAXIOV
) {
3819 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3821 return -TARGET_EFAULT
;
3824 for (i
= 0; i
< vlen
; i
++) {
3825 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3826 if (is_error(ret
)) {
3829 mmsgp
[i
].msg_len
= tswap32(ret
);
3830 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3831 if (flags
& MSG_WAITFORONE
) {
3832 flags
|= MSG_DONTWAIT
;
3836 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3838 /* Return number of datagrams sent if we sent any at all;
3839 * otherwise return the error.
3847 /* do_accept4() Must return target values and target errnos. */
3848 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3849 abi_ulong target_addrlen_addr
, int flags
)
3856 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3858 if (target_addr
== 0) {
3859 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3862 /* linux returns EINVAL if addrlen pointer is invalid */
3863 if (get_user_u32(addrlen
, target_addrlen_addr
))
3864 return -TARGET_EINVAL
;
3866 if ((int)addrlen
< 0) {
3867 return -TARGET_EINVAL
;
3870 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3871 return -TARGET_EINVAL
;
3873 addr
= alloca(addrlen
);
3875 ret
= get_errno(safe_accept4(fd
, addr
, &addrlen
, host_flags
));
3876 if (!is_error(ret
)) {
3877 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3878 if (put_user_u32(addrlen
, target_addrlen_addr
))
3879 ret
= -TARGET_EFAULT
;
3884 /* do_getpeername() Must return target values and target errnos. */
3885 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3886 abi_ulong target_addrlen_addr
)
3892 if (get_user_u32(addrlen
, target_addrlen_addr
))
3893 return -TARGET_EFAULT
;
3895 if ((int)addrlen
< 0) {
3896 return -TARGET_EINVAL
;
3899 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3900 return -TARGET_EFAULT
;
3902 addr
= alloca(addrlen
);
3904 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
3905 if (!is_error(ret
)) {
3906 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3907 if (put_user_u32(addrlen
, target_addrlen_addr
))
3908 ret
= -TARGET_EFAULT
;
3913 /* do_getsockname() Must return target values and target errnos. */
3914 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3915 abi_ulong target_addrlen_addr
)
3921 if (get_user_u32(addrlen
, target_addrlen_addr
))
3922 return -TARGET_EFAULT
;
3924 if ((int)addrlen
< 0) {
3925 return -TARGET_EINVAL
;
3928 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3929 return -TARGET_EFAULT
;
3931 addr
= alloca(addrlen
);
3933 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
3934 if (!is_error(ret
)) {
3935 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3936 if (put_user_u32(addrlen
, target_addrlen_addr
))
3937 ret
= -TARGET_EFAULT
;
3942 /* do_socketpair() Must return target values and target errnos. */
3943 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3944 abi_ulong target_tab_addr
)
3949 target_to_host_sock_type(&type
);
3951 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3952 if (!is_error(ret
)) {
3953 if (put_user_s32(tab
[0], target_tab_addr
)
3954 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3955 ret
= -TARGET_EFAULT
;
3960 /* do_sendto() Must return target values and target errnos. */
3961 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3962 abi_ulong target_addr
, socklen_t addrlen
)
3966 void *copy_msg
= NULL
;
3969 if ((int)addrlen
< 0) {
3970 return -TARGET_EINVAL
;
3973 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3975 return -TARGET_EFAULT
;
3976 if (fd_trans_target_to_host_data(fd
)) {
3977 copy_msg
= host_msg
;
3978 host_msg
= g_malloc(len
);
3979 memcpy(host_msg
, copy_msg
, len
);
3980 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3986 addr
= alloca(addrlen
+1);
3987 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3991 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3993 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3998 host_msg
= copy_msg
;
4000 unlock_user(host_msg
, msg
, 0);
4004 /* do_recvfrom() Must return target values and target errnos. */
4005 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
4006 abi_ulong target_addr
,
4007 abi_ulong target_addrlen
)
4014 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
4016 return -TARGET_EFAULT
;
4018 if (get_user_u32(addrlen
, target_addrlen
)) {
4019 ret
= -TARGET_EFAULT
;
4022 if ((int)addrlen
< 0) {
4023 ret
= -TARGET_EINVAL
;
4026 addr
= alloca(addrlen
);
4027 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
4030 addr
= NULL
; /* To keep compiler quiet. */
4031 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
4033 if (!is_error(ret
)) {
4034 if (fd_trans_host_to_target_data(fd
)) {
4035 ret
= fd_trans_host_to_target_data(fd
)(host_msg
, ret
);
4038 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
4039 if (put_user_u32(addrlen
, target_addrlen
)) {
4040 ret
= -TARGET_EFAULT
;
4044 unlock_user(host_msg
, msg
, len
);
4047 unlock_user(host_msg
, msg
, 0);
4052 #ifdef TARGET_NR_socketcall
4053 /* do_socketcall() must return target values and target errnos. */
4054 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
4056 static const unsigned nargs
[] = { /* number of arguments per operation */
4057 [TARGET_SYS_SOCKET
] = 3, /* domain, type, protocol */
4058 [TARGET_SYS_BIND
] = 3, /* fd, addr, addrlen */
4059 [TARGET_SYS_CONNECT
] = 3, /* fd, addr, addrlen */
4060 [TARGET_SYS_LISTEN
] = 2, /* fd, backlog */
4061 [TARGET_SYS_ACCEPT
] = 3, /* fd, addr, addrlen */
4062 [TARGET_SYS_GETSOCKNAME
] = 3, /* fd, addr, addrlen */
4063 [TARGET_SYS_GETPEERNAME
] = 3, /* fd, addr, addrlen */
4064 [TARGET_SYS_SOCKETPAIR
] = 4, /* domain, type, protocol, tab */
4065 [TARGET_SYS_SEND
] = 4, /* fd, msg, len, flags */
4066 [TARGET_SYS_RECV
] = 4, /* fd, msg, len, flags */
4067 [TARGET_SYS_SENDTO
] = 6, /* fd, msg, len, flags, addr, addrlen */
4068 [TARGET_SYS_RECVFROM
] = 6, /* fd, msg, len, flags, addr, addrlen */
4069 [TARGET_SYS_SHUTDOWN
] = 2, /* fd, how */
4070 [TARGET_SYS_SETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
4071 [TARGET_SYS_GETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
4072 [TARGET_SYS_SENDMSG
] = 3, /* fd, msg, flags */
4073 [TARGET_SYS_RECVMSG
] = 3, /* fd, msg, flags */
4074 [TARGET_SYS_ACCEPT4
] = 4, /* fd, addr, addrlen, flags */
4075 [TARGET_SYS_RECVMMSG
] = 4, /* fd, msgvec, vlen, flags */
4076 [TARGET_SYS_SENDMMSG
] = 4, /* fd, msgvec, vlen, flags */
4078 abi_long a
[6]; /* max 6 args */
4081 /* check the range of the first argument num */
4082 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
4083 if (num
< 1 || num
> TARGET_SYS_SENDMMSG
) {
4084 return -TARGET_EINVAL
;
4086 /* ensure we have space for args */
4087 if (nargs
[num
] > ARRAY_SIZE(a
)) {
4088 return -TARGET_EINVAL
;
4090 /* collect the arguments in a[] according to nargs[] */
4091 for (i
= 0; i
< nargs
[num
]; ++i
) {
4092 if (get_user_ual(a
[i
], vptr
+ i
* sizeof(abi_long
)) != 0) {
4093 return -TARGET_EFAULT
;
4096 /* now when we have the args, invoke the appropriate underlying function */
4098 case TARGET_SYS_SOCKET
: /* domain, type, protocol */
4099 return do_socket(a
[0], a
[1], a
[2]);
4100 case TARGET_SYS_BIND
: /* sockfd, addr, addrlen */
4101 return do_bind(a
[0], a
[1], a
[2]);
4102 case TARGET_SYS_CONNECT
: /* sockfd, addr, addrlen */
4103 return do_connect(a
[0], a
[1], a
[2]);
4104 case TARGET_SYS_LISTEN
: /* sockfd, backlog */
4105 return get_errno(listen(a
[0], a
[1]));
4106 case TARGET_SYS_ACCEPT
: /* sockfd, addr, addrlen */
4107 return do_accept4(a
[0], a
[1], a
[2], 0);
4108 case TARGET_SYS_GETSOCKNAME
: /* sockfd, addr, addrlen */
4109 return do_getsockname(a
[0], a
[1], a
[2]);
4110 case TARGET_SYS_GETPEERNAME
: /* sockfd, addr, addrlen */
4111 return do_getpeername(a
[0], a
[1], a
[2]);
4112 case TARGET_SYS_SOCKETPAIR
: /* domain, type, protocol, tab */
4113 return do_socketpair(a
[0], a
[1], a
[2], a
[3]);
4114 case TARGET_SYS_SEND
: /* sockfd, msg, len, flags */
4115 return do_sendto(a
[0], a
[1], a
[2], a
[3], 0, 0);
4116 case TARGET_SYS_RECV
: /* sockfd, msg, len, flags */
4117 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], 0, 0);
4118 case TARGET_SYS_SENDTO
: /* sockfd, msg, len, flags, addr, addrlen */
4119 return do_sendto(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
4120 case TARGET_SYS_RECVFROM
: /* sockfd, msg, len, flags, addr, addrlen */
4121 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
4122 case TARGET_SYS_SHUTDOWN
: /* sockfd, how */
4123 return get_errno(shutdown(a
[0], a
[1]));
4124 case TARGET_SYS_SETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
4125 return do_setsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
4126 case TARGET_SYS_GETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
4127 return do_getsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
4128 case TARGET_SYS_SENDMSG
: /* sockfd, msg, flags */
4129 return do_sendrecvmsg(a
[0], a
[1], a
[2], 1);
4130 case TARGET_SYS_RECVMSG
: /* sockfd, msg, flags */
4131 return do_sendrecvmsg(a
[0], a
[1], a
[2], 0);
4132 case TARGET_SYS_ACCEPT4
: /* sockfd, addr, addrlen, flags */
4133 return do_accept4(a
[0], a
[1], a
[2], a
[3]);
4134 case TARGET_SYS_RECVMMSG
: /* sockfd, msgvec, vlen, flags */
4135 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 0);
4136 case TARGET_SYS_SENDMMSG
: /* sockfd, msgvec, vlen, flags */
4137 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 1);
4139 gemu_log("Unsupported socketcall: %d\n", num
);
4140 return -TARGET_EINVAL
;
4145 #define N_SHM_REGIONS 32
4147 static struct shm_region
{
4151 } shm_regions
[N_SHM_REGIONS
];
4153 #ifndef TARGET_SEMID64_DS
4154 /* asm-generic version of this struct */
4155 struct target_semid64_ds
4157 struct target_ipc_perm sem_perm
;
4158 abi_ulong sem_otime
;
4159 #if TARGET_ABI_BITS == 32
4160 abi_ulong __unused1
;
4162 abi_ulong sem_ctime
;
4163 #if TARGET_ABI_BITS == 32
4164 abi_ulong __unused2
;
4166 abi_ulong sem_nsems
;
4167 abi_ulong __unused3
;
4168 abi_ulong __unused4
;
4172 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
4173 abi_ulong target_addr
)
4175 struct target_ipc_perm
*target_ip
;
4176 struct target_semid64_ds
*target_sd
;
4178 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4179 return -TARGET_EFAULT
;
4180 target_ip
= &(target_sd
->sem_perm
);
4181 host_ip
->__key
= tswap32(target_ip
->__key
);
4182 host_ip
->uid
= tswap32(target_ip
->uid
);
4183 host_ip
->gid
= tswap32(target_ip
->gid
);
4184 host_ip
->cuid
= tswap32(target_ip
->cuid
);
4185 host_ip
->cgid
= tswap32(target_ip
->cgid
);
4186 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4187 host_ip
->mode
= tswap32(target_ip
->mode
);
4189 host_ip
->mode
= tswap16(target_ip
->mode
);
4191 #if defined(TARGET_PPC)
4192 host_ip
->__seq
= tswap32(target_ip
->__seq
);
4194 host_ip
->__seq
= tswap16(target_ip
->__seq
);
4196 unlock_user_struct(target_sd
, target_addr
, 0);
4200 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
4201 struct ipc_perm
*host_ip
)
4203 struct target_ipc_perm
*target_ip
;
4204 struct target_semid64_ds
*target_sd
;
4206 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4207 return -TARGET_EFAULT
;
4208 target_ip
= &(target_sd
->sem_perm
);
4209 target_ip
->__key
= tswap32(host_ip
->__key
);
4210 target_ip
->uid
= tswap32(host_ip
->uid
);
4211 target_ip
->gid
= tswap32(host_ip
->gid
);
4212 target_ip
->cuid
= tswap32(host_ip
->cuid
);
4213 target_ip
->cgid
= tswap32(host_ip
->cgid
);
4214 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4215 target_ip
->mode
= tswap32(host_ip
->mode
);
4217 target_ip
->mode
= tswap16(host_ip
->mode
);
4219 #if defined(TARGET_PPC)
4220 target_ip
->__seq
= tswap32(host_ip
->__seq
);
4222 target_ip
->__seq
= tswap16(host_ip
->__seq
);
4224 unlock_user_struct(target_sd
, target_addr
, 1);
4228 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
4229 abi_ulong target_addr
)
4231 struct target_semid64_ds
*target_sd
;
4233 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4234 return -TARGET_EFAULT
;
4235 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
4236 return -TARGET_EFAULT
;
4237 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
4238 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
4239 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
4240 unlock_user_struct(target_sd
, target_addr
, 0);
4244 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
4245 struct semid_ds
*host_sd
)
4247 struct target_semid64_ds
*target_sd
;
4249 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4250 return -TARGET_EFAULT
;
4251 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
4252 return -TARGET_EFAULT
;
4253 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
4254 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
4255 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
4256 unlock_user_struct(target_sd
, target_addr
, 1);
4260 struct target_seminfo
{
4273 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
4274 struct seminfo
*host_seminfo
)
4276 struct target_seminfo
*target_seminfo
;
4277 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
4278 return -TARGET_EFAULT
;
4279 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
4280 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
4281 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
4282 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
4283 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
4284 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
4285 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
4286 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
4287 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
4288 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
4289 unlock_user_struct(target_seminfo
, target_addr
, 1);
4295 struct semid_ds
*buf
;
4296 unsigned short *array
;
4297 struct seminfo
*__buf
;
4300 union target_semun
{
4307 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
4308 abi_ulong target_addr
)
4311 unsigned short *array
;
4313 struct semid_ds semid_ds
;
4316 semun
.buf
= &semid_ds
;
4318 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4320 return get_errno(ret
);
4322 nsems
= semid_ds
.sem_nsems
;
4324 *host_array
= g_try_new(unsigned short, nsems
);
4326 return -TARGET_ENOMEM
;
4328 array
= lock_user(VERIFY_READ
, target_addr
,
4329 nsems
*sizeof(unsigned short), 1);
4331 g_free(*host_array
);
4332 return -TARGET_EFAULT
;
4335 for(i
=0; i
<nsems
; i
++) {
4336 __get_user((*host_array
)[i
], &array
[i
]);
4338 unlock_user(array
, target_addr
, 0);
4343 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
4344 unsigned short **host_array
)
4347 unsigned short *array
;
4349 struct semid_ds semid_ds
;
4352 semun
.buf
= &semid_ds
;
4354 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4356 return get_errno(ret
);
4358 nsems
= semid_ds
.sem_nsems
;
4360 array
= lock_user(VERIFY_WRITE
, target_addr
,
4361 nsems
*sizeof(unsigned short), 0);
4363 return -TARGET_EFAULT
;
4365 for(i
=0; i
<nsems
; i
++) {
4366 __put_user((*host_array
)[i
], &array
[i
]);
4368 g_free(*host_array
);
4369 unlock_user(array
, target_addr
, 1);
4374 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
4375 abi_ulong target_arg
)
4377 union target_semun target_su
= { .buf
= target_arg
};
4379 struct semid_ds dsarg
;
4380 unsigned short *array
= NULL
;
4381 struct seminfo seminfo
;
4382 abi_long ret
= -TARGET_EINVAL
;
4389 /* In 64 bit cross-endian situations, we will erroneously pick up
4390 * the wrong half of the union for the "val" element. To rectify
4391 * this, the entire 8-byte structure is byteswapped, followed by
4392 * a swap of the 4 byte val field. In other cases, the data is
4393 * already in proper host byte order. */
4394 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
4395 target_su
.buf
= tswapal(target_su
.buf
);
4396 arg
.val
= tswap32(target_su
.val
);
4398 arg
.val
= target_su
.val
;
4400 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4404 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
4408 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4409 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
4416 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
4420 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4421 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
4427 arg
.__buf
= &seminfo
;
4428 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4429 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
4437 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
4444 struct target_sembuf
{
4445 unsigned short sem_num
;
4450 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
4451 abi_ulong target_addr
,
4454 struct target_sembuf
*target_sembuf
;
4457 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
4458 nsops
*sizeof(struct target_sembuf
), 1);
4460 return -TARGET_EFAULT
;
4462 for(i
=0; i
<nsops
; i
++) {
4463 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
4464 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
4465 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
4468 unlock_user(target_sembuf
, target_addr
, 0);
4473 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
4475 struct sembuf sops
[nsops
];
4477 if (target_to_host_sembuf(sops
, ptr
, nsops
))
4478 return -TARGET_EFAULT
;
4480 return get_errno(safe_semtimedop(semid
, sops
, nsops
, NULL
));
4483 struct target_msqid_ds
4485 struct target_ipc_perm msg_perm
;
4486 abi_ulong msg_stime
;
4487 #if TARGET_ABI_BITS == 32
4488 abi_ulong __unused1
;
4490 abi_ulong msg_rtime
;
4491 #if TARGET_ABI_BITS == 32
4492 abi_ulong __unused2
;
4494 abi_ulong msg_ctime
;
4495 #if TARGET_ABI_BITS == 32
4496 abi_ulong __unused3
;
4498 abi_ulong __msg_cbytes
;
4500 abi_ulong msg_qbytes
;
4501 abi_ulong msg_lspid
;
4502 abi_ulong msg_lrpid
;
4503 abi_ulong __unused4
;
4504 abi_ulong __unused5
;
4507 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
4508 abi_ulong target_addr
)
4510 struct target_msqid_ds
*target_md
;
4512 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
4513 return -TARGET_EFAULT
;
4514 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
4515 return -TARGET_EFAULT
;
4516 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
4517 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
4518 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
4519 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
4520 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
4521 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
4522 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
4523 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
4524 unlock_user_struct(target_md
, target_addr
, 0);
4528 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
4529 struct msqid_ds
*host_md
)
4531 struct target_msqid_ds
*target_md
;
4533 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
4534 return -TARGET_EFAULT
;
4535 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
4536 return -TARGET_EFAULT
;
4537 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
4538 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
4539 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
4540 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
4541 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
4542 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
4543 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
4544 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
4545 unlock_user_struct(target_md
, target_addr
, 1);
4549 struct target_msginfo
{
4557 unsigned short int msgseg
;
4560 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
4561 struct msginfo
*host_msginfo
)
4563 struct target_msginfo
*target_msginfo
;
4564 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
4565 return -TARGET_EFAULT
;
4566 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
4567 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
4568 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
4569 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
4570 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
4571 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
4572 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
4573 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
4574 unlock_user_struct(target_msginfo
, target_addr
, 1);
4578 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
4580 struct msqid_ds dsarg
;
4581 struct msginfo msginfo
;
4582 abi_long ret
= -TARGET_EINVAL
;
4590 if (target_to_host_msqid_ds(&dsarg
,ptr
))
4591 return -TARGET_EFAULT
;
4592 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
4593 if (host_to_target_msqid_ds(ptr
,&dsarg
))
4594 return -TARGET_EFAULT
;
4597 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
4601 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
4602 if (host_to_target_msginfo(ptr
, &msginfo
))
4603 return -TARGET_EFAULT
;
4610 struct target_msgbuf
{
4615 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
4616 ssize_t msgsz
, int msgflg
)
4618 struct target_msgbuf
*target_mb
;
4619 struct msgbuf
*host_mb
;
4623 return -TARGET_EINVAL
;
4626 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
4627 return -TARGET_EFAULT
;
4628 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4630 unlock_user_struct(target_mb
, msgp
, 0);
4631 return -TARGET_ENOMEM
;
4633 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
4634 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
4635 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
4637 unlock_user_struct(target_mb
, msgp
, 0);
4642 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
4643 ssize_t msgsz
, abi_long msgtyp
,
4646 struct target_msgbuf
*target_mb
;
4648 struct msgbuf
*host_mb
;
4652 return -TARGET_EINVAL
;
4655 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
4656 return -TARGET_EFAULT
;
4658 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4660 ret
= -TARGET_ENOMEM
;
4663 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
4666 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
4667 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
4668 if (!target_mtext
) {
4669 ret
= -TARGET_EFAULT
;
4672 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
4673 unlock_user(target_mtext
, target_mtext_addr
, ret
);
4676 target_mb
->mtype
= tswapal(host_mb
->mtype
);
4680 unlock_user_struct(target_mb
, msgp
, 1);
4685 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
4686 abi_ulong target_addr
)
4688 struct target_shmid_ds
*target_sd
;
4690 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4691 return -TARGET_EFAULT
;
4692 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
4693 return -TARGET_EFAULT
;
4694 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4695 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4696 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4697 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4698 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4699 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4700 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4701 unlock_user_struct(target_sd
, target_addr
, 0);
4705 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
4706 struct shmid_ds
*host_sd
)
4708 struct target_shmid_ds
*target_sd
;
4710 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4711 return -TARGET_EFAULT
;
4712 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
4713 return -TARGET_EFAULT
;
4714 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4715 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4716 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4717 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4718 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4719 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4720 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4721 unlock_user_struct(target_sd
, target_addr
, 1);
4725 struct target_shminfo
{
4733 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
4734 struct shminfo
*host_shminfo
)
4736 struct target_shminfo
*target_shminfo
;
4737 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
4738 return -TARGET_EFAULT
;
4739 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
4740 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
4741 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
4742 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
4743 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
4744 unlock_user_struct(target_shminfo
, target_addr
, 1);
4748 struct target_shm_info
{
4753 abi_ulong swap_attempts
;
4754 abi_ulong swap_successes
;
4757 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
4758 struct shm_info
*host_shm_info
)
4760 struct target_shm_info
*target_shm_info
;
4761 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
4762 return -TARGET_EFAULT
;
4763 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
4764 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
4765 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
4766 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
4767 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
4768 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
4769 unlock_user_struct(target_shm_info
, target_addr
, 1);
4773 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
4775 struct shmid_ds dsarg
;
4776 struct shminfo shminfo
;
4777 struct shm_info shm_info
;
4778 abi_long ret
= -TARGET_EINVAL
;
4786 if (target_to_host_shmid_ds(&dsarg
, buf
))
4787 return -TARGET_EFAULT
;
4788 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
4789 if (host_to_target_shmid_ds(buf
, &dsarg
))
4790 return -TARGET_EFAULT
;
4793 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
4794 if (host_to_target_shminfo(buf
, &shminfo
))
4795 return -TARGET_EFAULT
;
4798 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
4799 if (host_to_target_shm_info(buf
, &shm_info
))
4800 return -TARGET_EFAULT
;
4805 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
4812 #ifndef TARGET_FORCE_SHMLBA
4813 /* For most architectures, SHMLBA is the same as the page size;
4814 * some architectures have larger values, in which case they should
4815 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4816 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4817 * and defining its own value for SHMLBA.
4819 * The kernel also permits SHMLBA to be set by the architecture to a
4820 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4821 * this means that addresses are rounded to the large size if
4822 * SHM_RND is set but addresses not aligned to that size are not rejected
4823 * as long as they are at least page-aligned. Since the only architecture
4824 * which uses this is ia64 this code doesn't provide for that oddity.
4826 static inline abi_ulong
target_shmlba(CPUArchState
*cpu_env
)
4828 return TARGET_PAGE_SIZE
;
4832 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
4833 int shmid
, abi_ulong shmaddr
, int shmflg
)
4837 struct shmid_ds shm_info
;
4841 /* find out the length of the shared memory segment */
4842 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4843 if (is_error(ret
)) {
4844 /* can't get length, bail out */
4848 shmlba
= target_shmlba(cpu_env
);
4850 if (shmaddr
& (shmlba
- 1)) {
4851 if (shmflg
& SHM_RND
) {
4852 shmaddr
&= ~(shmlba
- 1);
4854 return -TARGET_EINVAL
;
4861 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
4863 abi_ulong mmap_start
;
4865 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
4867 if (mmap_start
== -1) {
4869 host_raddr
= (void *)-1;
4871 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
4874 if (host_raddr
== (void *)-1) {
4876 return get_errno((long)host_raddr
);
4878 raddr
=h2g((unsigned long)host_raddr
);
4880 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
4881 PAGE_VALID
| PAGE_READ
|
4882 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
4884 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4885 if (!shm_regions
[i
].in_use
) {
4886 shm_regions
[i
].in_use
= true;
4887 shm_regions
[i
].start
= raddr
;
4888 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4898 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4902 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4903 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4904 shm_regions
[i
].in_use
= false;
4905 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
4910 return get_errno(shmdt(g2h(shmaddr
)));
4913 #ifdef TARGET_NR_ipc
4914 /* ??? This only works with linear mappings. */
4915 /* do_ipc() must return target values and target errnos. */
4916 static abi_long
do_ipc(CPUArchState
*cpu_env
,
4917 unsigned int call
, abi_long first
,
4918 abi_long second
, abi_long third
,
4919 abi_long ptr
, abi_long fifth
)
4924 version
= call
>> 16;
4929 ret
= do_semop(first
, ptr
, second
);
4933 ret
= get_errno(semget(first
, second
, third
));
4936 case IPCOP_semctl
: {
4937 /* The semun argument to semctl is passed by value, so dereference the
4940 get_user_ual(atptr
, ptr
);
4941 ret
= do_semctl(first
, second
, third
, atptr
);
4946 ret
= get_errno(msgget(first
, second
));
4950 ret
= do_msgsnd(first
, ptr
, second
, third
);
4954 ret
= do_msgctl(first
, second
, ptr
);
4961 struct target_ipc_kludge
{
4966 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
4967 ret
= -TARGET_EFAULT
;
4971 ret
= do_msgrcv(first
, tswapal(tmp
->msgp
), second
, tswapal(tmp
->msgtyp
), third
);
4973 unlock_user_struct(tmp
, ptr
, 0);
4977 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
4986 raddr
= do_shmat(cpu_env
, first
, ptr
, second
);
4987 if (is_error(raddr
))
4988 return get_errno(raddr
);
4989 if (put_user_ual(raddr
, third
))
4990 return -TARGET_EFAULT
;
4994 ret
= -TARGET_EINVAL
;
4999 ret
= do_shmdt(ptr
);
5003 /* IPC_* flag values are the same on all linux platforms */
5004 ret
= get_errno(shmget(first
, second
, third
));
5007 /* IPC_* and SHM_* command values are the same on all linux platforms */
5009 ret
= do_shmctl(first
, second
, ptr
);
5012 gemu_log("Unsupported ipc call: %d (version %d)\n", call
, version
);
5013 ret
= -TARGET_ENOSYS
;
5020 /* kernel structure types definitions */
5022 #define STRUCT(name, ...) STRUCT_ ## name,
5023 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
5025 #include "syscall_types.h"
5029 #undef STRUCT_SPECIAL
5031 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
5032 #define STRUCT_SPECIAL(name)
5033 #include "syscall_types.h"
5035 #undef STRUCT_SPECIAL
5037 typedef struct IOCTLEntry IOCTLEntry
;
5039 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5040 int fd
, int cmd
, abi_long arg
);
5044 unsigned int host_cmd
;
5047 do_ioctl_fn
*do_ioctl
;
5048 const argtype arg_type
[5];
5051 #define IOC_R 0x0001
5052 #define IOC_W 0x0002
5053 #define IOC_RW (IOC_R | IOC_W)
5055 #define MAX_STRUCT_SIZE 4096
5057 #ifdef CONFIG_FIEMAP
5058 /* So fiemap access checks don't overflow on 32 bit systems.
5059 * This is very slightly smaller than the limit imposed by
5060 * the underlying kernel.
5062 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
5063 / sizeof(struct fiemap_extent))
5065 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5066 int fd
, int cmd
, abi_long arg
)
5068 /* The parameter for this ioctl is a struct fiemap followed
5069 * by an array of struct fiemap_extent whose size is set
5070 * in fiemap->fm_extent_count. The array is filled in by the
5073 int target_size_in
, target_size_out
;
5075 const argtype
*arg_type
= ie
->arg_type
;
5076 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
5079 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
5083 assert(arg_type
[0] == TYPE_PTR
);
5084 assert(ie
->access
== IOC_RW
);
5086 target_size_in
= thunk_type_size(arg_type
, 0);
5087 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
5089 return -TARGET_EFAULT
;
5091 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5092 unlock_user(argptr
, arg
, 0);
5093 fm
= (struct fiemap
*)buf_temp
;
5094 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
5095 return -TARGET_EINVAL
;
5098 outbufsz
= sizeof (*fm
) +
5099 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
5101 if (outbufsz
> MAX_STRUCT_SIZE
) {
5102 /* We can't fit all the extents into the fixed size buffer.
5103 * Allocate one that is large enough and use it instead.
5105 fm
= g_try_malloc(outbufsz
);
5107 return -TARGET_ENOMEM
;
5109 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
5112 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, fm
));
5113 if (!is_error(ret
)) {
5114 target_size_out
= target_size_in
;
5115 /* An extent_count of 0 means we were only counting the extents
5116 * so there are no structs to copy
5118 if (fm
->fm_extent_count
!= 0) {
5119 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
5121 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
5123 ret
= -TARGET_EFAULT
;
5125 /* Convert the struct fiemap */
5126 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
5127 if (fm
->fm_extent_count
!= 0) {
5128 p
= argptr
+ target_size_in
;
5129 /* ...and then all the struct fiemap_extents */
5130 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
5131 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
5136 unlock_user(argptr
, arg
, target_size_out
);
5146 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5147 int fd
, int cmd
, abi_long arg
)
5149 const argtype
*arg_type
= ie
->arg_type
;
5153 struct ifconf
*host_ifconf
;
5155 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
5156 int target_ifreq_size
;
5161 abi_long target_ifc_buf
;
5165 assert(arg_type
[0] == TYPE_PTR
);
5166 assert(ie
->access
== IOC_RW
);
5169 target_size
= thunk_type_size(arg_type
, 0);
5171 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5173 return -TARGET_EFAULT
;
5174 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5175 unlock_user(argptr
, arg
, 0);
5177 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
5178 target_ifc_len
= host_ifconf
->ifc_len
;
5179 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
5181 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
5182 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
5183 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
5185 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
5186 if (outbufsz
> MAX_STRUCT_SIZE
) {
5187 /* We can't fit all the extents into the fixed size buffer.
5188 * Allocate one that is large enough and use it instead.
5190 host_ifconf
= malloc(outbufsz
);
5192 return -TARGET_ENOMEM
;
5194 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
5197 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
5199 host_ifconf
->ifc_len
= host_ifc_len
;
5200 host_ifconf
->ifc_buf
= host_ifc_buf
;
5202 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
5203 if (!is_error(ret
)) {
5204 /* convert host ifc_len to target ifc_len */
5206 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
5207 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
5208 host_ifconf
->ifc_len
= target_ifc_len
;
5210 /* restore target ifc_buf */
5212 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
5214 /* copy struct ifconf to target user */
5216 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5218 return -TARGET_EFAULT
;
5219 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
5220 unlock_user(argptr
, arg
, target_size
);
5222 /* copy ifreq[] to target user */
5224 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
5225 for (i
= 0; i
< nb_ifreq
; i
++) {
5226 thunk_convert(argptr
+ i
* target_ifreq_size
,
5227 host_ifc_buf
+ i
* sizeof(struct ifreq
),
5228 ifreq_arg_type
, THUNK_TARGET
);
5230 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
5240 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5241 int cmd
, abi_long arg
)
5244 struct dm_ioctl
*host_dm
;
5245 abi_long guest_data
;
5246 uint32_t guest_data_size
;
5248 const argtype
*arg_type
= ie
->arg_type
;
5250 void *big_buf
= NULL
;
5254 target_size
= thunk_type_size(arg_type
, 0);
5255 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5257 ret
= -TARGET_EFAULT
;
5260 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5261 unlock_user(argptr
, arg
, 0);
5263 /* buf_temp is too small, so fetch things into a bigger buffer */
5264 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
5265 memcpy(big_buf
, buf_temp
, target_size
);
5269 guest_data
= arg
+ host_dm
->data_start
;
5270 if ((guest_data
- arg
) < 0) {
5271 ret
= -TARGET_EINVAL
;
5274 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5275 host_data
= (char*)host_dm
+ host_dm
->data_start
;
5277 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
5279 ret
= -TARGET_EFAULT
;
5283 switch (ie
->host_cmd
) {
5285 case DM_LIST_DEVICES
:
5288 case DM_DEV_SUSPEND
:
5291 case DM_TABLE_STATUS
:
5292 case DM_TABLE_CLEAR
:
5294 case DM_LIST_VERSIONS
:
5298 case DM_DEV_SET_GEOMETRY
:
5299 /* data contains only strings */
5300 memcpy(host_data
, argptr
, guest_data_size
);
5303 memcpy(host_data
, argptr
, guest_data_size
);
5304 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
5308 void *gspec
= argptr
;
5309 void *cur_data
= host_data
;
5310 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5311 int spec_size
= thunk_type_size(arg_type
, 0);
5314 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5315 struct dm_target_spec
*spec
= cur_data
;
5319 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
5320 slen
= strlen((char*)gspec
+ spec_size
) + 1;
5322 spec
->next
= sizeof(*spec
) + slen
;
5323 strcpy((char*)&spec
[1], gspec
+ spec_size
);
5325 cur_data
+= spec
->next
;
5330 ret
= -TARGET_EINVAL
;
5331 unlock_user(argptr
, guest_data
, 0);
5334 unlock_user(argptr
, guest_data
, 0);
5336 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5337 if (!is_error(ret
)) {
5338 guest_data
= arg
+ host_dm
->data_start
;
5339 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5340 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
5341 switch (ie
->host_cmd
) {
5346 case DM_DEV_SUSPEND
:
5349 case DM_TABLE_CLEAR
:
5351 case DM_DEV_SET_GEOMETRY
:
5352 /* no return data */
5354 case DM_LIST_DEVICES
:
5356 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
5357 uint32_t remaining_data
= guest_data_size
;
5358 void *cur_data
= argptr
;
5359 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
5360 int nl_size
= 12; /* can't use thunk_size due to alignment */
5363 uint32_t next
= nl
->next
;
5365 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
5367 if (remaining_data
< nl
->next
) {
5368 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5371 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
5372 strcpy(cur_data
+ nl_size
, nl
->name
);
5373 cur_data
+= nl
->next
;
5374 remaining_data
-= nl
->next
;
5378 nl
= (void*)nl
+ next
;
5383 case DM_TABLE_STATUS
:
5385 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
5386 void *cur_data
= argptr
;
5387 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5388 int spec_size
= thunk_type_size(arg_type
, 0);
5391 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5392 uint32_t next
= spec
->next
;
5393 int slen
= strlen((char*)&spec
[1]) + 1;
5394 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
5395 if (guest_data_size
< spec
->next
) {
5396 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5399 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
5400 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
5401 cur_data
= argptr
+ spec
->next
;
5402 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
5408 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
5409 int count
= *(uint32_t*)hdata
;
5410 uint64_t *hdev
= hdata
+ 8;
5411 uint64_t *gdev
= argptr
+ 8;
5414 *(uint32_t*)argptr
= tswap32(count
);
5415 for (i
= 0; i
< count
; i
++) {
5416 *gdev
= tswap64(*hdev
);
5422 case DM_LIST_VERSIONS
:
5424 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
5425 uint32_t remaining_data
= guest_data_size
;
5426 void *cur_data
= argptr
;
5427 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
5428 int vers_size
= thunk_type_size(arg_type
, 0);
5431 uint32_t next
= vers
->next
;
5433 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
5435 if (remaining_data
< vers
->next
) {
5436 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5439 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
5440 strcpy(cur_data
+ vers_size
, vers
->name
);
5441 cur_data
+= vers
->next
;
5442 remaining_data
-= vers
->next
;
5446 vers
= (void*)vers
+ next
;
5451 unlock_user(argptr
, guest_data
, 0);
5452 ret
= -TARGET_EINVAL
;
5455 unlock_user(argptr
, guest_data
, guest_data_size
);
5457 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5459 ret
= -TARGET_EFAULT
;
5462 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5463 unlock_user(argptr
, arg
, target_size
);
5470 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5471 int cmd
, abi_long arg
)
5475 const argtype
*arg_type
= ie
->arg_type
;
5476 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
5479 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
5480 struct blkpg_partition host_part
;
5482 /* Read and convert blkpg */
5484 target_size
= thunk_type_size(arg_type
, 0);
5485 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5487 ret
= -TARGET_EFAULT
;
5490 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5491 unlock_user(argptr
, arg
, 0);
5493 switch (host_blkpg
->op
) {
5494 case BLKPG_ADD_PARTITION
:
5495 case BLKPG_DEL_PARTITION
:
5496 /* payload is struct blkpg_partition */
5499 /* Unknown opcode */
5500 ret
= -TARGET_EINVAL
;
5504 /* Read and convert blkpg->data */
5505 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
5506 target_size
= thunk_type_size(part_arg_type
, 0);
5507 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5509 ret
= -TARGET_EFAULT
;
5512 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
5513 unlock_user(argptr
, arg
, 0);
5515 /* Swizzle the data pointer to our local copy and call! */
5516 host_blkpg
->data
= &host_part
;
5517 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
5523 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5524 int fd
, int cmd
, abi_long arg
)
5526 const argtype
*arg_type
= ie
->arg_type
;
5527 const StructEntry
*se
;
5528 const argtype
*field_types
;
5529 const int *dst_offsets
, *src_offsets
;
5532 abi_ulong
*target_rt_dev_ptr
;
5533 unsigned long *host_rt_dev_ptr
;
5537 assert(ie
->access
== IOC_W
);
5538 assert(*arg_type
== TYPE_PTR
);
5540 assert(*arg_type
== TYPE_STRUCT
);
5541 target_size
= thunk_type_size(arg_type
, 0);
5542 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5544 return -TARGET_EFAULT
;
5547 assert(*arg_type
== (int)STRUCT_rtentry
);
5548 se
= struct_entries
+ *arg_type
++;
5549 assert(se
->convert
[0] == NULL
);
5550 /* convert struct here to be able to catch rt_dev string */
5551 field_types
= se
->field_types
;
5552 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
5553 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
5554 for (i
= 0; i
< se
->nb_fields
; i
++) {
5555 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
5556 assert(*field_types
== TYPE_PTRVOID
);
5557 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
5558 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
5559 if (*target_rt_dev_ptr
!= 0) {
5560 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
5561 tswapal(*target_rt_dev_ptr
));
5562 if (!*host_rt_dev_ptr
) {
5563 unlock_user(argptr
, arg
, 0);
5564 return -TARGET_EFAULT
;
5567 *host_rt_dev_ptr
= 0;
5572 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
5573 argptr
+ src_offsets
[i
],
5574 field_types
, THUNK_HOST
);
5576 unlock_user(argptr
, arg
, 0);
5577 assert(host_rt_dev_ptr
);
5579 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5580 if (*host_rt_dev_ptr
!= 0) {
5581 unlock_user((void *)*host_rt_dev_ptr
,
5582 *target_rt_dev_ptr
, 0);
5587 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5588 int fd
, int cmd
, abi_long arg
)
5590 int sig
= target_to_host_signal(arg
);
5591 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
5594 static IOCTLEntry ioctl_entries
[] = {
5595 #define IOCTL(cmd, access, ...) \
5596 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5597 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5598 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5599 #define IOCTL_IGNORE(cmd) \
5600 { TARGET_ ## cmd, 0, #cmd },
5605 /* ??? Implement proper locking for ioctls. */
5606 /* do_ioctl() Must return target values and target errnos. */
5607 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
5609 const IOCTLEntry
*ie
;
5610 const argtype
*arg_type
;
5612 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
5618 if (ie
->target_cmd
== 0) {
5619 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
5620 return -TARGET_ENOSYS
;
5622 if (ie
->target_cmd
== cmd
)
5626 arg_type
= ie
->arg_type
;
5628 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
5631 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
5632 } else if (!ie
->host_cmd
) {
5633 /* Some architectures define BSD ioctls in their headers
5634 that are not implemented in Linux. */
5635 return -TARGET_ENOSYS
;
5638 switch(arg_type
[0]) {
5641 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
5645 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
5649 target_size
= thunk_type_size(arg_type
, 0);
5650 switch(ie
->access
) {
5652 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5653 if (!is_error(ret
)) {
5654 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5656 return -TARGET_EFAULT
;
5657 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5658 unlock_user(argptr
, arg
, target_size
);
5662 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5664 return -TARGET_EFAULT
;
5665 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5666 unlock_user(argptr
, arg
, 0);
5667 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5671 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5673 return -TARGET_EFAULT
;
5674 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5675 unlock_user(argptr
, arg
, 0);
5676 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5677 if (!is_error(ret
)) {
5678 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5680 return -TARGET_EFAULT
;
5681 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5682 unlock_user(argptr
, arg
, target_size
);
5688 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5689 (long)cmd
, arg_type
[0]);
5690 ret
= -TARGET_ENOSYS
;
5696 static const bitmask_transtbl iflag_tbl
[] = {
5697 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
5698 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
5699 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
5700 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
5701 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
5702 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
5703 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
5704 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
5705 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
5706 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
5707 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
5708 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
5709 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
5710 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
5714 static const bitmask_transtbl oflag_tbl
[] = {
5715 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
5716 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
5717 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
5718 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
5719 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
5720 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
5721 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
5722 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
5723 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
5724 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
5725 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
5726 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
5727 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
5728 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
5729 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
5730 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
5731 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
5732 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
5733 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
5734 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
5735 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
5736 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
5737 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
5738 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
5742 static const bitmask_transtbl cflag_tbl
[] = {
5743 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
5744 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
5745 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
5746 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
5747 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
5748 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
5749 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
5750 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
5751 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
5752 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
5753 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
5754 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
5755 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
5756 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
5757 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
5758 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
5759 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
5760 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
5761 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
5762 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
5763 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
5764 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
5765 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
5766 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
5767 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
5768 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
5769 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
5770 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
5771 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
5772 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
5773 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
5777 static const bitmask_transtbl lflag_tbl
[] = {
5778 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
5779 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
5780 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
5781 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
5782 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
5783 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
5784 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
5785 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
5786 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
5787 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
5788 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
5789 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
5790 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
5791 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
5792 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5796 static void target_to_host_termios (void *dst
, const void *src
)
5798 struct host_termios
*host
= dst
;
5799 const struct target_termios
*target
= src
;
5802 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
5804 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
5806 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
5808 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
5809 host
->c_line
= target
->c_line
;
5811 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
5812 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
5813 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
5814 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
5815 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
5816 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
5817 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
5818 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
5819 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
5820 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
5821 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
5822 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
5823 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
5824 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
5825 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
5826 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
5827 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
5828 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
5831 static void host_to_target_termios (void *dst
, const void *src
)
5833 struct target_termios
*target
= dst
;
5834 const struct host_termios
*host
= src
;
5837 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
5839 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
5841 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
5843 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
5844 target
->c_line
= host
->c_line
;
5846 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
5847 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
5848 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
5849 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
5850 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
5851 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
5852 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
5853 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
5854 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
5855 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
5856 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
5857 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
5858 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
5859 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
5860 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
5861 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
5862 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
5863 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
5866 static const StructEntry struct_termios_def
= {
5867 .convert
= { host_to_target_termios
, target_to_host_termios
},
5868 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
5869 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
5872 static bitmask_transtbl mmap_flags_tbl
[] = {
5873 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
5874 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
5875 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
5876 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
5877 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
5878 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
5879 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
5880 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
5881 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
, MAP_NORESERVE
,
5886 #if defined(TARGET_I386)
5888 /* NOTE: there is really one LDT for all the threads */
5889 static uint8_t *ldt_table
;
5891 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
5898 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
5899 if (size
> bytecount
)
5901 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
5903 return -TARGET_EFAULT
;
5904 /* ??? Should this by byteswapped? */
5905 memcpy(p
, ldt_table
, size
);
5906 unlock_user(p
, ptr
, size
);
5910 /* XXX: add locking support */
5911 static abi_long
write_ldt(CPUX86State
*env
,
5912 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
5914 struct target_modify_ldt_ldt_s ldt_info
;
5915 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5916 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5917 int seg_not_present
, useable
, lm
;
5918 uint32_t *lp
, entry_1
, entry_2
;
5920 if (bytecount
!= sizeof(ldt_info
))
5921 return -TARGET_EINVAL
;
5922 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
5923 return -TARGET_EFAULT
;
5924 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5925 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5926 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5927 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5928 unlock_user_struct(target_ldt_info
, ptr
, 0);
5930 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
5931 return -TARGET_EINVAL
;
5932 seg_32bit
= ldt_info
.flags
& 1;
5933 contents
= (ldt_info
.flags
>> 1) & 3;
5934 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5935 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5936 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5937 useable
= (ldt_info
.flags
>> 6) & 1;
5941 lm
= (ldt_info
.flags
>> 7) & 1;
5943 if (contents
== 3) {
5945 return -TARGET_EINVAL
;
5946 if (seg_not_present
== 0)
5947 return -TARGET_EINVAL
;
5949 /* allocate the LDT */
5951 env
->ldt
.base
= target_mmap(0,
5952 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
5953 PROT_READ
|PROT_WRITE
,
5954 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
5955 if (env
->ldt
.base
== -1)
5956 return -TARGET_ENOMEM
;
5957 memset(g2h(env
->ldt
.base
), 0,
5958 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
5959 env
->ldt
.limit
= 0xffff;
5960 ldt_table
= g2h(env
->ldt
.base
);
5963 /* NOTE: same code as Linux kernel */
5964 /* Allow LDTs to be cleared by the user. */
5965 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5968 read_exec_only
== 1 &&
5970 limit_in_pages
== 0 &&
5971 seg_not_present
== 1 &&
5979 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5980 (ldt_info
.limit
& 0x0ffff);
5981 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5982 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5983 (ldt_info
.limit
& 0xf0000) |
5984 ((read_exec_only
^ 1) << 9) |
5986 ((seg_not_present
^ 1) << 15) |
5988 (limit_in_pages
<< 23) |
5992 entry_2
|= (useable
<< 20);
5994 /* Install the new entry ... */
5996 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
5997 lp
[0] = tswap32(entry_1
);
5998 lp
[1] = tswap32(entry_2
);
6002 /* specific and weird i386 syscalls */
6003 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
6004 unsigned long bytecount
)
6010 ret
= read_ldt(ptr
, bytecount
);
6013 ret
= write_ldt(env
, ptr
, bytecount
, 1);
6016 ret
= write_ldt(env
, ptr
, bytecount
, 0);
6019 ret
= -TARGET_ENOSYS
;
6025 #if defined(TARGET_I386) && defined(TARGET_ABI32)
6026 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6028 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
6029 struct target_modify_ldt_ldt_s ldt_info
;
6030 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6031 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
6032 int seg_not_present
, useable
, lm
;
6033 uint32_t *lp
, entry_1
, entry_2
;
6036 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6037 if (!target_ldt_info
)
6038 return -TARGET_EFAULT
;
6039 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
6040 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
6041 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
6042 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
6043 if (ldt_info
.entry_number
== -1) {
6044 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
6045 if (gdt_table
[i
] == 0) {
6046 ldt_info
.entry_number
= i
;
6047 target_ldt_info
->entry_number
= tswap32(i
);
6052 unlock_user_struct(target_ldt_info
, ptr
, 1);
6054 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
6055 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
6056 return -TARGET_EINVAL
;
6057 seg_32bit
= ldt_info
.flags
& 1;
6058 contents
= (ldt_info
.flags
>> 1) & 3;
6059 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
6060 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
6061 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
6062 useable
= (ldt_info
.flags
>> 6) & 1;
6066 lm
= (ldt_info
.flags
>> 7) & 1;
6069 if (contents
== 3) {
6070 if (seg_not_present
== 0)
6071 return -TARGET_EINVAL
;
6074 /* NOTE: same code as Linux kernel */
6075 /* Allow LDTs to be cleared by the user. */
6076 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
6077 if ((contents
== 0 &&
6078 read_exec_only
== 1 &&
6080 limit_in_pages
== 0 &&
6081 seg_not_present
== 1 &&
6089 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
6090 (ldt_info
.limit
& 0x0ffff);
6091 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
6092 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
6093 (ldt_info
.limit
& 0xf0000) |
6094 ((read_exec_only
^ 1) << 9) |
6096 ((seg_not_present
^ 1) << 15) |
6098 (limit_in_pages
<< 23) |
6103 /* Install the new entry ... */
6105 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
6106 lp
[0] = tswap32(entry_1
);
6107 lp
[1] = tswap32(entry_2
);
6111 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6113 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6114 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
6115 uint32_t base_addr
, limit
, flags
;
6116 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
6117 int seg_not_present
, useable
, lm
;
6118 uint32_t *lp
, entry_1
, entry_2
;
6120 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6121 if (!target_ldt_info
)
6122 return -TARGET_EFAULT
;
6123 idx
= tswap32(target_ldt_info
->entry_number
);
6124 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
6125 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
6126 unlock_user_struct(target_ldt_info
, ptr
, 1);
6127 return -TARGET_EINVAL
;
6129 lp
= (uint32_t *)(gdt_table
+ idx
);
6130 entry_1
= tswap32(lp
[0]);
6131 entry_2
= tswap32(lp
[1]);
6133 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
6134 contents
= (entry_2
>> 10) & 3;
6135 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
6136 seg_32bit
= (entry_2
>> 22) & 1;
6137 limit_in_pages
= (entry_2
>> 23) & 1;
6138 useable
= (entry_2
>> 20) & 1;
6142 lm
= (entry_2
>> 21) & 1;
6144 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
6145 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
6146 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
6147 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
6148 base_addr
= (entry_1
>> 16) |
6149 (entry_2
& 0xff000000) |
6150 ((entry_2
& 0xff) << 16);
6151 target_ldt_info
->base_addr
= tswapal(base_addr
);
6152 target_ldt_info
->limit
= tswap32(limit
);
6153 target_ldt_info
->flags
= tswap32(flags
);
6154 unlock_user_struct(target_ldt_info
, ptr
, 1);
6157 #endif /* TARGET_I386 && TARGET_ABI32 */
6159 #ifndef TARGET_ABI32
6160 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
6167 case TARGET_ARCH_SET_GS
:
6168 case TARGET_ARCH_SET_FS
:
6169 if (code
== TARGET_ARCH_SET_GS
)
6173 cpu_x86_load_seg(env
, idx
, 0);
6174 env
->segs
[idx
].base
= addr
;
6176 case TARGET_ARCH_GET_GS
:
6177 case TARGET_ARCH_GET_FS
:
6178 if (code
== TARGET_ARCH_GET_GS
)
6182 val
= env
->segs
[idx
].base
;
6183 if (put_user(val
, addr
, abi_ulong
))
6184 ret
= -TARGET_EFAULT
;
6187 ret
= -TARGET_EINVAL
;
6194 #endif /* defined(TARGET_I386) */
6196 #define NEW_STACK_SIZE 0x40000
6199 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
6202 pthread_mutex_t mutex
;
6203 pthread_cond_t cond
;
6206 abi_ulong child_tidptr
;
6207 abi_ulong parent_tidptr
;
6211 static void *clone_func(void *arg
)
6213 new_thread_info
*info
= arg
;
6218 rcu_register_thread();
6220 cpu
= ENV_GET_CPU(env
);
6222 ts
= (TaskState
*)cpu
->opaque
;
6223 info
->tid
= gettid();
6225 if (info
->child_tidptr
)
6226 put_user_u32(info
->tid
, info
->child_tidptr
);
6227 if (info
->parent_tidptr
)
6228 put_user_u32(info
->tid
, info
->parent_tidptr
);
6229 /* Enable signals. */
6230 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
6231 /* Signal to the parent that we're ready. */
6232 pthread_mutex_lock(&info
->mutex
);
6233 pthread_cond_broadcast(&info
->cond
);
6234 pthread_mutex_unlock(&info
->mutex
);
6235 /* Wait until the parent has finshed initializing the tls state. */
6236 pthread_mutex_lock(&clone_lock
);
6237 pthread_mutex_unlock(&clone_lock
);
6243 /* do_fork() Must return host values and target errnos (unlike most
6244 do_*() functions). */
6245 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
6246 abi_ulong parent_tidptr
, target_ulong newtls
,
6247 abi_ulong child_tidptr
)
6249 CPUState
*cpu
= ENV_GET_CPU(env
);
6253 CPUArchState
*new_env
;
6256 flags
&= ~CLONE_IGNORED_FLAGS
;
6258 /* Emulate vfork() with fork() */
6259 if (flags
& CLONE_VFORK
)
6260 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
6262 if (flags
& CLONE_VM
) {
6263 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
6264 new_thread_info info
;
6265 pthread_attr_t attr
;
6267 if (((flags
& CLONE_THREAD_FLAGS
) != CLONE_THREAD_FLAGS
) ||
6268 (flags
& CLONE_INVALID_THREAD_FLAGS
)) {
6269 return -TARGET_EINVAL
;
6272 ts
= g_new0(TaskState
, 1);
6273 init_task_state(ts
);
6274 /* we create a new CPU instance. */
6275 new_env
= cpu_copy(env
);
6276 /* Init regs that differ from the parent. */
6277 cpu_clone_regs(new_env
, newsp
);
6278 new_cpu
= ENV_GET_CPU(new_env
);
6279 new_cpu
->opaque
= ts
;
6280 ts
->bprm
= parent_ts
->bprm
;
6281 ts
->info
= parent_ts
->info
;
6282 ts
->signal_mask
= parent_ts
->signal_mask
;
6284 if (flags
& CLONE_CHILD_CLEARTID
) {
6285 ts
->child_tidptr
= child_tidptr
;
6288 if (flags
& CLONE_SETTLS
) {
6289 cpu_set_tls (new_env
, newtls
);
6292 /* Grab a mutex so that thread setup appears atomic. */
6293 pthread_mutex_lock(&clone_lock
);
6295 memset(&info
, 0, sizeof(info
));
6296 pthread_mutex_init(&info
.mutex
, NULL
);
6297 pthread_mutex_lock(&info
.mutex
);
6298 pthread_cond_init(&info
.cond
, NULL
);
6300 if (flags
& CLONE_CHILD_SETTID
) {
6301 info
.child_tidptr
= child_tidptr
;
6303 if (flags
& CLONE_PARENT_SETTID
) {
6304 info
.parent_tidptr
= parent_tidptr
;
6307 ret
= pthread_attr_init(&attr
);
6308 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
6309 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
6310 /* It is not safe to deliver signals until the child has finished
6311 initializing, so temporarily block all signals. */
6312 sigfillset(&sigmask
);
6313 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
6315 /* If this is our first additional thread, we need to ensure we
6316 * generate code for parallel execution and flush old translations.
6318 if (!parallel_cpus
) {
6319 parallel_cpus
= true;
6323 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
6324 /* TODO: Free new CPU state if thread creation failed. */
6326 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
6327 pthread_attr_destroy(&attr
);
6329 /* Wait for the child to initialize. */
6330 pthread_cond_wait(&info
.cond
, &info
.mutex
);
6335 pthread_mutex_unlock(&info
.mutex
);
6336 pthread_cond_destroy(&info
.cond
);
6337 pthread_mutex_destroy(&info
.mutex
);
6338 pthread_mutex_unlock(&clone_lock
);
6340 /* if no CLONE_VM, we consider it is a fork */
6341 if (flags
& CLONE_INVALID_FORK_FLAGS
) {
6342 return -TARGET_EINVAL
;
6345 /* We can't support custom termination signals */
6346 if ((flags
& CSIGNAL
) != TARGET_SIGCHLD
) {
6347 return -TARGET_EINVAL
;
6350 if (block_signals()) {
6351 return -TARGET_ERESTARTSYS
;
6357 /* Child Process. */
6359 cpu_clone_regs(env
, newsp
);
6361 /* There is a race condition here. The parent process could
6362 theoretically read the TID in the child process before the child
6363 tid is set. This would require using either ptrace
6364 (not implemented) or having *_tidptr to point at a shared memory
6365 mapping. We can't repeat the spinlock hack used above because
6366 the child process gets its own copy of the lock. */
6367 if (flags
& CLONE_CHILD_SETTID
)
6368 put_user_u32(gettid(), child_tidptr
);
6369 if (flags
& CLONE_PARENT_SETTID
)
6370 put_user_u32(gettid(), parent_tidptr
);
6371 ts
= (TaskState
*)cpu
->opaque
;
6372 if (flags
& CLONE_SETTLS
)
6373 cpu_set_tls (env
, newtls
);
6374 if (flags
& CLONE_CHILD_CLEARTID
)
6375 ts
->child_tidptr
= child_tidptr
;
6383 /* warning : doesn't handle linux specific flags... */
6384 static int target_to_host_fcntl_cmd(int cmd
)
6387 case TARGET_F_DUPFD
:
6388 case TARGET_F_GETFD
:
6389 case TARGET_F_SETFD
:
6390 case TARGET_F_GETFL
:
6391 case TARGET_F_SETFL
:
6393 case TARGET_F_GETLK
:
6395 case TARGET_F_SETLK
:
6397 case TARGET_F_SETLKW
:
6399 case TARGET_F_GETOWN
:
6401 case TARGET_F_SETOWN
:
6403 case TARGET_F_GETSIG
:
6405 case TARGET_F_SETSIG
:
6407 #if TARGET_ABI_BITS == 32
6408 case TARGET_F_GETLK64
:
6410 case TARGET_F_SETLK64
:
6412 case TARGET_F_SETLKW64
:
6415 case TARGET_F_SETLEASE
:
6417 case TARGET_F_GETLEASE
:
6419 #ifdef F_DUPFD_CLOEXEC
6420 case TARGET_F_DUPFD_CLOEXEC
:
6421 return F_DUPFD_CLOEXEC
;
6423 case TARGET_F_NOTIFY
:
6426 case TARGET_F_GETOWN_EX
:
6430 case TARGET_F_SETOWN_EX
:
6434 case TARGET_F_SETPIPE_SZ
:
6435 return F_SETPIPE_SZ
;
6436 case TARGET_F_GETPIPE_SZ
:
6437 return F_GETPIPE_SZ
;
6440 return -TARGET_EINVAL
;
6442 return -TARGET_EINVAL
;
6445 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
6446 static const bitmask_transtbl flock_tbl
[] = {
6447 TRANSTBL_CONVERT(F_RDLCK
),
6448 TRANSTBL_CONVERT(F_WRLCK
),
6449 TRANSTBL_CONVERT(F_UNLCK
),
6450 TRANSTBL_CONVERT(F_EXLCK
),
6451 TRANSTBL_CONVERT(F_SHLCK
),
6455 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
6456 abi_ulong target_flock_addr
)
6458 struct target_flock
*target_fl
;
6461 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6462 return -TARGET_EFAULT
;
6465 __get_user(l_type
, &target_fl
->l_type
);
6466 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6467 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6468 __get_user(fl
->l_start
, &target_fl
->l_start
);
6469 __get_user(fl
->l_len
, &target_fl
->l_len
);
6470 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6471 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6475 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
6476 const struct flock64
*fl
)
6478 struct target_flock
*target_fl
;
6481 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6482 return -TARGET_EFAULT
;
6485 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6486 __put_user(l_type
, &target_fl
->l_type
);
6487 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6488 __put_user(fl
->l_start
, &target_fl
->l_start
);
6489 __put_user(fl
->l_len
, &target_fl
->l_len
);
6490 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6491 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6495 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
6496 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
6498 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6499 static inline abi_long
copy_from_user_eabi_flock64(struct flock64
*fl
,
6500 abi_ulong target_flock_addr
)
6502 struct target_eabi_flock64
*target_fl
;
6505 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6506 return -TARGET_EFAULT
;
6509 __get_user(l_type
, &target_fl
->l_type
);
6510 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6511 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6512 __get_user(fl
->l_start
, &target_fl
->l_start
);
6513 __get_user(fl
->l_len
, &target_fl
->l_len
);
6514 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6515 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6519 static inline abi_long
copy_to_user_eabi_flock64(abi_ulong target_flock_addr
,
6520 const struct flock64
*fl
)
6522 struct target_eabi_flock64
*target_fl
;
6525 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6526 return -TARGET_EFAULT
;
6529 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6530 __put_user(l_type
, &target_fl
->l_type
);
6531 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6532 __put_user(fl
->l_start
, &target_fl
->l_start
);
6533 __put_user(fl
->l_len
, &target_fl
->l_len
);
6534 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6535 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6540 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
6541 abi_ulong target_flock_addr
)
6543 struct target_flock64
*target_fl
;
6546 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6547 return -TARGET_EFAULT
;
6550 __get_user(l_type
, &target_fl
->l_type
);
6551 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6552 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6553 __get_user(fl
->l_start
, &target_fl
->l_start
);
6554 __get_user(fl
->l_len
, &target_fl
->l_len
);
6555 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6556 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6560 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
6561 const struct flock64
*fl
)
6563 struct target_flock64
*target_fl
;
6566 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6567 return -TARGET_EFAULT
;
6570 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6571 __put_user(l_type
, &target_fl
->l_type
);
6572 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6573 __put_user(fl
->l_start
, &target_fl
->l_start
);
6574 __put_user(fl
->l_len
, &target_fl
->l_len
);
6575 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6576 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6580 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
6582 struct flock64 fl64
;
6584 struct f_owner_ex fox
;
6585 struct target_f_owner_ex
*target_fox
;
6588 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
6590 if (host_cmd
== -TARGET_EINVAL
)
6594 case TARGET_F_GETLK
:
6595 ret
= copy_from_user_flock(&fl64
, arg
);
6599 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6601 ret
= copy_to_user_flock(arg
, &fl64
);
6605 case TARGET_F_SETLK
:
6606 case TARGET_F_SETLKW
:
6607 ret
= copy_from_user_flock(&fl64
, arg
);
6611 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6614 case TARGET_F_GETLK64
:
6615 ret
= copy_from_user_flock64(&fl64
, arg
);
6619 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6621 ret
= copy_to_user_flock64(arg
, &fl64
);
6624 case TARGET_F_SETLK64
:
6625 case TARGET_F_SETLKW64
:
6626 ret
= copy_from_user_flock64(&fl64
, arg
);
6630 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6633 case TARGET_F_GETFL
:
6634 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6636 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
6640 case TARGET_F_SETFL
:
6641 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
6642 target_to_host_bitmask(arg
,
6647 case TARGET_F_GETOWN_EX
:
6648 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6650 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
6651 return -TARGET_EFAULT
;
6652 target_fox
->type
= tswap32(fox
.type
);
6653 target_fox
->pid
= tswap32(fox
.pid
);
6654 unlock_user_struct(target_fox
, arg
, 1);
6660 case TARGET_F_SETOWN_EX
:
6661 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
6662 return -TARGET_EFAULT
;
6663 fox
.type
= tswap32(target_fox
->type
);
6664 fox
.pid
= tswap32(target_fox
->pid
);
6665 unlock_user_struct(target_fox
, arg
, 0);
6666 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6670 case TARGET_F_SETOWN
:
6671 case TARGET_F_GETOWN
:
6672 case TARGET_F_SETSIG
:
6673 case TARGET_F_GETSIG
:
6674 case TARGET_F_SETLEASE
:
6675 case TARGET_F_GETLEASE
:
6676 case TARGET_F_SETPIPE_SZ
:
6677 case TARGET_F_GETPIPE_SZ
:
6678 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6682 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
6690 static inline int high2lowuid(int uid
)
6698 static inline int high2lowgid(int gid
)
6706 static inline int low2highuid(int uid
)
6708 if ((int16_t)uid
== -1)
6714 static inline int low2highgid(int gid
)
6716 if ((int16_t)gid
== -1)
6721 static inline int tswapid(int id
)
6726 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6728 #else /* !USE_UID16 */
6729 static inline int high2lowuid(int uid
)
6733 static inline int high2lowgid(int gid
)
6737 static inline int low2highuid(int uid
)
6741 static inline int low2highgid(int gid
)
6745 static inline int tswapid(int id
)
6750 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6752 #endif /* USE_UID16 */
6754 /* We must do direct syscalls for setting UID/GID, because we want to
6755 * implement the Linux system call semantics of "change only for this thread",
6756 * not the libc/POSIX semantics of "change for all threads in process".
6757 * (See http://ewontfix.com/17/ for more details.)
6758 * We use the 32-bit version of the syscalls if present; if it is not
6759 * then either the host architecture supports 32-bit UIDs natively with
6760 * the standard syscall, or the 16-bit UID is the best we can do.
6762 #ifdef __NR_setuid32
6763 #define __NR_sys_setuid __NR_setuid32
6765 #define __NR_sys_setuid __NR_setuid
6767 #ifdef __NR_setgid32
6768 #define __NR_sys_setgid __NR_setgid32
6770 #define __NR_sys_setgid __NR_setgid
6772 #ifdef __NR_setresuid32
6773 #define __NR_sys_setresuid __NR_setresuid32
6775 #define __NR_sys_setresuid __NR_setresuid
6777 #ifdef __NR_setresgid32
6778 #define __NR_sys_setresgid __NR_setresgid32
6780 #define __NR_sys_setresgid __NR_setresgid
6783 _syscall1(int, sys_setuid
, uid_t
, uid
)
6784 _syscall1(int, sys_setgid
, gid_t
, gid
)
6785 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
6786 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
6788 void syscall_init(void)
6791 const argtype
*arg_type
;
6795 thunk_init(STRUCT_MAX
);
6797 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6798 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6799 #include "syscall_types.h"
6801 #undef STRUCT_SPECIAL
6803 /* Build target_to_host_errno_table[] table from
6804 * host_to_target_errno_table[]. */
6805 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
6806 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
6809 /* we patch the ioctl size if necessary. We rely on the fact that
6810 no ioctl has all the bits at '1' in the size field */
6812 while (ie
->target_cmd
!= 0) {
6813 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
6814 TARGET_IOC_SIZEMASK
) {
6815 arg_type
= ie
->arg_type
;
6816 if (arg_type
[0] != TYPE_PTR
) {
6817 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
6822 size
= thunk_type_size(arg_type
, 0);
6823 ie
->target_cmd
= (ie
->target_cmd
&
6824 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
6825 (size
<< TARGET_IOC_SIZESHIFT
);
6828 /* automatic consistency check if same arch */
6829 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6830 (defined(__x86_64__) && defined(TARGET_X86_64))
6831 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
6832 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6833 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
6840 #if TARGET_ABI_BITS == 32
6841 static inline uint64_t target_offset64(uint32_t word0
, uint32_t word1
)
6843 #ifdef TARGET_WORDS_BIGENDIAN
6844 return ((uint64_t)word0
<< 32) | word1
;
6846 return ((uint64_t)word1
<< 32) | word0
;
6849 #else /* TARGET_ABI_BITS == 32 */
6850 static inline uint64_t target_offset64(uint64_t word0
, uint64_t word1
)
6854 #endif /* TARGET_ABI_BITS != 32 */
6856 #ifdef TARGET_NR_truncate64
6857 static inline abi_long
target_truncate64(void *cpu_env
, const char *arg1
,
6862 if (regpairs_aligned(cpu_env
)) {
6866 return get_errno(truncate64(arg1
, target_offset64(arg2
, arg3
)));
6870 #ifdef TARGET_NR_ftruncate64
6871 static inline abi_long
target_ftruncate64(void *cpu_env
, abi_long arg1
,
6876 if (regpairs_aligned(cpu_env
)) {
6880 return get_errno(ftruncate64(arg1
, target_offset64(arg2
, arg3
)));
6884 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
6885 abi_ulong target_addr
)
6887 struct target_timespec
*target_ts
;
6889 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
6890 return -TARGET_EFAULT
;
6891 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6892 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6893 unlock_user_struct(target_ts
, target_addr
, 0);
6897 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
6898 struct timespec
*host_ts
)
6900 struct target_timespec
*target_ts
;
6902 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
6903 return -TARGET_EFAULT
;
6904 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6905 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6906 unlock_user_struct(target_ts
, target_addr
, 1);
6910 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
6911 abi_ulong target_addr
)
6913 struct target_itimerspec
*target_itspec
;
6915 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
6916 return -TARGET_EFAULT
;
6919 host_itspec
->it_interval
.tv_sec
=
6920 tswapal(target_itspec
->it_interval
.tv_sec
);
6921 host_itspec
->it_interval
.tv_nsec
=
6922 tswapal(target_itspec
->it_interval
.tv_nsec
);
6923 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
6924 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
6926 unlock_user_struct(target_itspec
, target_addr
, 1);
6930 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
6931 struct itimerspec
*host_its
)
6933 struct target_itimerspec
*target_itspec
;
6935 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
6936 return -TARGET_EFAULT
;
6939 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
6940 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
6942 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
6943 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
6945 unlock_user_struct(target_itspec
, target_addr
, 0);
6949 static inline abi_long
target_to_host_timex(struct timex
*host_tx
,
6950 abi_long target_addr
)
6952 struct target_timex
*target_tx
;
6954 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
6955 return -TARGET_EFAULT
;
6958 __get_user(host_tx
->modes
, &target_tx
->modes
);
6959 __get_user(host_tx
->offset
, &target_tx
->offset
);
6960 __get_user(host_tx
->freq
, &target_tx
->freq
);
6961 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6962 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
6963 __get_user(host_tx
->status
, &target_tx
->status
);
6964 __get_user(host_tx
->constant
, &target_tx
->constant
);
6965 __get_user(host_tx
->precision
, &target_tx
->precision
);
6966 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6967 __get_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6968 __get_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6969 __get_user(host_tx
->tick
, &target_tx
->tick
);
6970 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6971 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
6972 __get_user(host_tx
->shift
, &target_tx
->shift
);
6973 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
6974 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
6975 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
6976 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6977 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6978 __get_user(host_tx
->tai
, &target_tx
->tai
);
6980 unlock_user_struct(target_tx
, target_addr
, 0);
6984 static inline abi_long
host_to_target_timex(abi_long target_addr
,
6985 struct timex
*host_tx
)
6987 struct target_timex
*target_tx
;
6989 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
6990 return -TARGET_EFAULT
;
6993 __put_user(host_tx
->modes
, &target_tx
->modes
);
6994 __put_user(host_tx
->offset
, &target_tx
->offset
);
6995 __put_user(host_tx
->freq
, &target_tx
->freq
);
6996 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6997 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
6998 __put_user(host_tx
->status
, &target_tx
->status
);
6999 __put_user(host_tx
->constant
, &target_tx
->constant
);
7000 __put_user(host_tx
->precision
, &target_tx
->precision
);
7001 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7002 __put_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
7003 __put_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
7004 __put_user(host_tx
->tick
, &target_tx
->tick
);
7005 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7006 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
7007 __put_user(host_tx
->shift
, &target_tx
->shift
);
7008 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
7009 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7010 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7011 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7012 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7013 __put_user(host_tx
->tai
, &target_tx
->tai
);
7015 unlock_user_struct(target_tx
, target_addr
, 1);
7020 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
7021 abi_ulong target_addr
)
7023 struct target_sigevent
*target_sevp
;
7025 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
7026 return -TARGET_EFAULT
;
7029 /* This union is awkward on 64 bit systems because it has a 32 bit
7030 * integer and a pointer in it; we follow the conversion approach
7031 * used for handling sigval types in signal.c so the guest should get
7032 * the correct value back even if we did a 64 bit byteswap and it's
7033 * using the 32 bit integer.
7035 host_sevp
->sigev_value
.sival_ptr
=
7036 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
7037 host_sevp
->sigev_signo
=
7038 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
7039 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
7040 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
7042 unlock_user_struct(target_sevp
, target_addr
, 1);
7046 #if defined(TARGET_NR_mlockall)
7047 static inline int target_to_host_mlockall_arg(int arg
)
7051 if (arg
& TARGET_MLOCKALL_MCL_CURRENT
) {
7052 result
|= MCL_CURRENT
;
7054 if (arg
& TARGET_MLOCKALL_MCL_FUTURE
) {
7055 result
|= MCL_FUTURE
;
7061 static inline abi_long
host_to_target_stat64(void *cpu_env
,
7062 abi_ulong target_addr
,
7063 struct stat
*host_st
)
7065 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7066 if (((CPUARMState
*)cpu_env
)->eabi
) {
7067 struct target_eabi_stat64
*target_st
;
7069 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
7070 return -TARGET_EFAULT
;
7071 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
7072 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
7073 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
7074 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7075 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
7077 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
7078 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
7079 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
7080 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
7081 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
7082 __put_user(host_st
->st_size
, &target_st
->st_size
);
7083 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
7084 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
7085 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
7086 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
7087 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
7088 unlock_user_struct(target_st
, target_addr
, 1);
7092 #if defined(TARGET_HAS_STRUCT_STAT64)
7093 struct target_stat64
*target_st
;
7095 struct target_stat
*target_st
;
7098 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
7099 return -TARGET_EFAULT
;
7100 memset(target_st
, 0, sizeof(*target_st
));
7101 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
7102 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
7103 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7104 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
7106 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
7107 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
7108 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
7109 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
7110 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
7111 /* XXX: better use of kernel struct */
7112 __put_user(host_st
->st_size
, &target_st
->st_size
);
7113 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
7114 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
7115 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
7116 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
7117 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
7118 unlock_user_struct(target_st
, target_addr
, 1);
7124 /* ??? Using host futex calls even when target atomic operations
7125 are not really atomic probably breaks things. However implementing
7126 futexes locally would make futexes shared between multiple processes
7127 tricky. However they're probably useless because guest atomic
7128 operations won't work either. */
7129 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
7130 target_ulong uaddr2
, int val3
)
7132 struct timespec ts
, *pts
;
7135 /* ??? We assume FUTEX_* constants are the same on both host
7137 #ifdef FUTEX_CMD_MASK
7138 base_op
= op
& FUTEX_CMD_MASK
;
7144 case FUTEX_WAIT_BITSET
:
7147 target_to_host_timespec(pts
, timeout
);
7151 return get_errno(safe_futex(g2h(uaddr
), op
, tswap32(val
),
7154 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
7156 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
7158 case FUTEX_CMP_REQUEUE
:
7160 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7161 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7162 But the prototype takes a `struct timespec *'; insert casts
7163 to satisfy the compiler. We do not need to tswap TIMEOUT
7164 since it's not compared to guest memory. */
7165 pts
= (struct timespec
*)(uintptr_t) timeout
;
7166 return get_errno(safe_futex(g2h(uaddr
), op
, val
, pts
,
7168 (base_op
== FUTEX_CMP_REQUEUE
7172 return -TARGET_ENOSYS
;
7175 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7176 static abi_long
do_name_to_handle_at(abi_long dirfd
, abi_long pathname
,
7177 abi_long handle
, abi_long mount_id
,
7180 struct file_handle
*target_fh
;
7181 struct file_handle
*fh
;
7185 unsigned int size
, total_size
;
7187 if (get_user_s32(size
, handle
)) {
7188 return -TARGET_EFAULT
;
7191 name
= lock_user_string(pathname
);
7193 return -TARGET_EFAULT
;
7196 total_size
= sizeof(struct file_handle
) + size
;
7197 target_fh
= lock_user(VERIFY_WRITE
, handle
, total_size
, 0);
7199 unlock_user(name
, pathname
, 0);
7200 return -TARGET_EFAULT
;
7203 fh
= g_malloc0(total_size
);
7204 fh
->handle_bytes
= size
;
7206 ret
= get_errno(name_to_handle_at(dirfd
, path(name
), fh
, &mid
, flags
));
7207 unlock_user(name
, pathname
, 0);
7209 /* man name_to_handle_at(2):
7210 * Other than the use of the handle_bytes field, the caller should treat
7211 * the file_handle structure as an opaque data type
7214 memcpy(target_fh
, fh
, total_size
);
7215 target_fh
->handle_bytes
= tswap32(fh
->handle_bytes
);
7216 target_fh
->handle_type
= tswap32(fh
->handle_type
);
7218 unlock_user(target_fh
, handle
, total_size
);
7220 if (put_user_s32(mid
, mount_id
)) {
7221 return -TARGET_EFAULT
;
7229 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7230 static abi_long
do_open_by_handle_at(abi_long mount_fd
, abi_long handle
,
7233 struct file_handle
*target_fh
;
7234 struct file_handle
*fh
;
7235 unsigned int size
, total_size
;
7238 if (get_user_s32(size
, handle
)) {
7239 return -TARGET_EFAULT
;
7242 total_size
= sizeof(struct file_handle
) + size
;
7243 target_fh
= lock_user(VERIFY_READ
, handle
, total_size
, 1);
7245 return -TARGET_EFAULT
;
7248 fh
= g_memdup(target_fh
, total_size
);
7249 fh
->handle_bytes
= size
;
7250 fh
->handle_type
= tswap32(target_fh
->handle_type
);
7252 ret
= get_errno(open_by_handle_at(mount_fd
, fh
,
7253 target_to_host_bitmask(flags
, fcntl_flags_tbl
)));
7257 unlock_user(target_fh
, handle
, total_size
);
7263 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
/* signalfd siginfo conversion */

/*
 * Convert a host struct signalfd_siginfo into the target's layout,
 * byte-swapping every fixed-width field.
 *
 * The only caller (host_to_target_data_signalfd) passes the same buffer
 * for both arguments, so the conversion is done in place.  All host
 * values are therefore read from 'info' *before* the corresponding
 * 'tinfo' field is overwritten; the original code read ssi_signo,
 * ssi_code and ssi_errno through 'tinfo', which would read the
 * uninitialized output buffer if the two pointers ever differed.
 */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    if (info->ssi_signo == SIGBUS &&
        (info->ssi_code == BUS_MCEERR_AR ||
         info->ssi_code == BUS_MCEERR_AO)) {
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    tinfo->ssi_errno = tswap32(info->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
7305 static abi_long
host_to_target_data_signalfd(void *buf
, size_t len
)
7309 for (i
= 0; i
< len
; i
+= sizeof(struct signalfd_siginfo
)) {
7310 host_to_target_signalfd_siginfo(buf
+ i
, buf
+ i
);
7316 static TargetFdTrans target_signalfd_trans
= {
7317 .host_to_target_data
= host_to_target_data_signalfd
,
7320 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
7323 target_sigset_t
*target_mask
;
7327 if (flags
& ~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
)) {
7328 return -TARGET_EINVAL
;
7330 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
7331 return -TARGET_EFAULT
;
7334 target_to_host_sigset(&host_mask
, target_mask
);
7336 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
7338 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
7340 fd_trans_register(ret
, &target_signalfd_trans
);
7343 unlock_user_struct(target_mask
, mask
, 0);
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same. */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* low 7 bits carry the terminating signal */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }

    if (WIFSTOPPED(status)) {
        /* stop signal lives in bits 8..15 */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }

    return status;
}
7363 static int open_self_cmdline(void *cpu_env
, int fd
)
7365 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7366 struct linux_binprm
*bprm
= ((TaskState
*)cpu
->opaque
)->bprm
;
7369 for (i
= 0; i
< bprm
->argc
; i
++) {
7370 size_t len
= strlen(bprm
->argv
[i
]) + 1;
7372 if (write(fd
, bprm
->argv
[i
], len
) != len
) {
7380 static int open_self_maps(void *cpu_env
, int fd
)
7382 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7383 TaskState
*ts
= cpu
->opaque
;
7389 fp
= fopen("/proc/self/maps", "r");
7394 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7395 int fields
, dev_maj
, dev_min
, inode
;
7396 uint64_t min
, max
, offset
;
7397 char flag_r
, flag_w
, flag_x
, flag_p
;
7398 char path
[512] = "";
7399 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
7400 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
7401 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
7403 if ((fields
< 10) || (fields
> 11)) {
7406 if (h2g_valid(min
)) {
7407 int flags
= page_get_flags(h2g(min
));
7408 max
= h2g_valid(max
- 1) ? max
: (uintptr_t)g2h(GUEST_ADDR_MAX
);
7409 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
7412 if (h2g(min
) == ts
->info
->stack_limit
) {
7413 pstrcpy(path
, sizeof(path
), " [stack]");
7415 dprintf(fd
, TARGET_ABI_FMT_lx
"-" TARGET_ABI_FMT_lx
7416 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
7417 h2g(min
), h2g(max
- 1) + 1, flag_r
, flag_w
,
7418 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
7419 path
[0] ? " " : "", path
);
7429 static int open_self_stat(void *cpu_env
, int fd
)
7431 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7432 TaskState
*ts
= cpu
->opaque
;
7433 abi_ulong start_stack
= ts
->info
->start_stack
;
7436 for (i
= 0; i
< 44; i
++) {
7444 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
7445 } else if (i
== 1) {
7447 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
7448 } else if (i
== 27) {
7451 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
7453 /* for the rest, there is MasterCard */
7454 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
7458 if (write(fd
, buf
, len
) != len
) {
7466 static int open_self_auxv(void *cpu_env
, int fd
)
7468 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7469 TaskState
*ts
= cpu
->opaque
;
7470 abi_ulong auxv
= ts
->info
->saved_auxv
;
7471 abi_ulong len
= ts
->info
->auxv_len
;
7475 * Auxiliary vector is stored in target process stack.
7476 * read in whole auxv vector and copy it to file
7478 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
7482 r
= write(fd
, ptr
, len
);
7489 lseek(fd
, 0, SEEK_SET
);
7490 unlock_user(ptr
, auxv
, len
);
/*
 * Return 1 when 'filename' names entry 'entry' of this process's /proc
 * directory: either "/proc/self/<entry>" or "/proc/<own-pid>/<entry>".
 * Any other path, including other pids, yields 0.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    static const char proc_prefix[] = "/proc/";
    static const char self_prefix[] = "self/";

    if (strncmp(filename, proc_prefix, strlen(proc_prefix)) != 0) {
        return 0;
    }
    filename += strlen(proc_prefix);

    if (strncmp(filename, self_prefix, strlen(self_prefix)) == 0) {
        filename += strlen(self_prefix);
    } else if (*filename >= '1' && *filename <= '9') {
        char myself[80];

        /* numeric form: only our own pid counts as "myself" */
        snprintf(myself, sizeof(myself), "%d/", getpid());
        if (strncmp(filename, myself, strlen(myself)) != 0) {
            return 0;
        }
        filename += strlen(myself);
    } else {
        return 0;
    }

    return strcmp(filename, entry) == 0;
}
7520 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-match comparator used by the fake-open table for absolute
 * /proc paths (as opposed to is_proc_myself's pid-aware matching). */
static int is_proc(const char *filename, const char *entry)
{
    if (strcmp(filename, entry) != 0) {
        return 0;
    }
    return 1;
}
7526 static int open_net_route(void *cpu_env
, int fd
)
7533 fp
= fopen("/proc/net/route", "r");
7540 read
= getline(&line
, &len
, fp
);
7541 dprintf(fd
, "%s", line
);
7545 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7547 uint32_t dest
, gw
, mask
;
7548 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
7549 sscanf(line
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7550 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
7551 &mask
, &mtu
, &window
, &irtt
);
7552 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7553 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
7554 metric
, tswap32(mask
), mtu
, window
, irtt
);
7564 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
7567 const char *filename
;
7568 int (*fill
)(void *cpu_env
, int fd
);
7569 int (*cmp
)(const char *s1
, const char *s2
);
7571 const struct fake_open
*fake_open
;
7572 static const struct fake_open fakes
[] = {
7573 { "maps", open_self_maps
, is_proc_myself
},
7574 { "stat", open_self_stat
, is_proc_myself
},
7575 { "auxv", open_self_auxv
, is_proc_myself
},
7576 { "cmdline", open_self_cmdline
, is_proc_myself
},
7577 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7578 { "/proc/net/route", open_net_route
, is_proc
},
7580 { NULL
, NULL
, NULL
}
7583 if (is_proc_myself(pathname
, "exe")) {
7584 int execfd
= qemu_getauxval(AT_EXECFD
);
7585 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
7588 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
7589 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
7594 if (fake_open
->filename
) {
7596 char filename
[PATH_MAX
];
7599 /* create temporary file to map stat to */
7600 tmpdir
= getenv("TMPDIR");
7603 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
7604 fd
= mkstemp(filename
);
7610 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
7616 lseek(fd
, 0, SEEK_SET
);
7621 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
7624 #define TIMER_MAGIC 0x0caf0000
7625 #define TIMER_MAGIC_MASK 0xffff0000
7627 /* Convert QEMU provided timer ID back to internal 16bit index format */
7628 static target_timer_t
get_timer_id(abi_long arg
)
7630 target_timer_t timerid
= arg
;
7632 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
7633 return -TARGET_EINVAL
;
7638 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
7639 return -TARGET_EINVAL
;
7645 static abi_long
swap_data_eventfd(void *buf
, size_t len
)
7647 uint64_t *counter
= buf
;
7650 if (len
< sizeof(uint64_t)) {
7654 for (i
= 0; i
< len
; i
+= sizeof(uint64_t)) {
7655 *counter
= tswap64(*counter
);
7662 static TargetFdTrans target_eventfd_trans
= {
7663 .host_to_target_data
= swap_data_eventfd
,
7664 .target_to_host_data
= swap_data_eventfd
,
7667 #if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
7668 (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
7669 defined(__NR_inotify_init1))
7670 static abi_long
host_to_target_data_inotify(void *buf
, size_t len
)
7672 struct inotify_event
*ev
;
7676 for (i
= 0; i
< len
; i
+= sizeof(struct inotify_event
) + name_len
) {
7677 ev
= (struct inotify_event
*)((char *)buf
+ i
);
7680 ev
->wd
= tswap32(ev
->wd
);
7681 ev
->mask
= tswap32(ev
->mask
);
7682 ev
->cookie
= tswap32(ev
->cookie
);
7683 ev
->len
= tswap32(name_len
);
7689 static TargetFdTrans target_inotify_trans
= {
7690 .host_to_target_data
= host_to_target_data_inotify
,
7694 /* do_syscall() should always have a single exit point at the end so
7695 that actions, such as logging of syscall results, can be performed.
7696 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7697 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
7698 abi_long arg2
, abi_long arg3
, abi_long arg4
,
7699 abi_long arg5
, abi_long arg6
, abi_long arg7
,
7702 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
7708 #if defined(DEBUG_ERESTARTSYS)
7709 /* Debug-only code for exercising the syscall-restart code paths
7710 * in the per-architecture cpu main loops: restart every syscall
7711 * the guest makes once before letting it through.
7718 return -TARGET_ERESTARTSYS
;
7724 gemu_log("syscall %d", num
);
7726 trace_guest_user_syscall(cpu
, num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
7728 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7731 case TARGET_NR_exit
:
7732 /* In old applications this may be used to implement _exit(2).
7733 However in threaded applictions it is used for thread termination,
7734 and _exit_group is used for application termination.
7735 Do thread termination if we have more then one thread. */
7737 if (block_signals()) {
7738 ret
= -TARGET_ERESTARTSYS
;
7744 if (CPU_NEXT(first_cpu
)) {
7747 /* Remove the CPU from the list. */
7748 QTAILQ_REMOVE(&cpus
, cpu
, node
);
7753 if (ts
->child_tidptr
) {
7754 put_user_u32(0, ts
->child_tidptr
);
7755 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
7759 object_unref(OBJECT(cpu
));
7761 rcu_unregister_thread();
7769 gdb_exit(cpu_env
, arg1
);
7771 ret
= 0; /* avoid warning */
7773 case TARGET_NR_read
:
7777 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7779 ret
= get_errno(safe_read(arg1
, p
, arg3
));
7781 fd_trans_host_to_target_data(arg1
)) {
7782 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
7784 unlock_user(p
, arg2
, ret
);
7787 case TARGET_NR_write
:
7788 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
7790 if (fd_trans_target_to_host_data(arg1
)) {
7791 void *copy
= g_malloc(arg3
);
7792 memcpy(copy
, p
, arg3
);
7793 ret
= fd_trans_target_to_host_data(arg1
)(copy
, arg3
);
7795 ret
= get_errno(safe_write(arg1
, copy
, ret
));
7799 ret
= get_errno(safe_write(arg1
, p
, arg3
));
7801 unlock_user(p
, arg2
, 0);
7803 #ifdef TARGET_NR_open
7804 case TARGET_NR_open
:
7805 if (!(p
= lock_user_string(arg1
)))
7807 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
7808 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
7810 fd_trans_unregister(ret
);
7811 unlock_user(p
, arg1
, 0);
7814 case TARGET_NR_openat
:
7815 if (!(p
= lock_user_string(arg2
)))
7817 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
7818 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
7820 fd_trans_unregister(ret
);
7821 unlock_user(p
, arg2
, 0);
7823 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7824 case TARGET_NR_name_to_handle_at
:
7825 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
7828 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7829 case TARGET_NR_open_by_handle_at
:
7830 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
7831 fd_trans_unregister(ret
);
7834 case TARGET_NR_close
:
7835 fd_trans_unregister(arg1
);
7836 ret
= get_errno(close(arg1
));
7841 #ifdef TARGET_NR_fork
7842 case TARGET_NR_fork
:
7843 ret
= get_errno(do_fork(cpu_env
, TARGET_SIGCHLD
, 0, 0, 0, 0));
7846 #ifdef TARGET_NR_waitpid
7847 case TARGET_NR_waitpid
:
7850 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
7851 if (!is_error(ret
) && arg2
&& ret
7852 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
7857 #ifdef TARGET_NR_waitid
7858 case TARGET_NR_waitid
:
7862 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
7863 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
7864 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
7866 host_to_target_siginfo(p
, &info
);
7867 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
7872 #ifdef TARGET_NR_creat /* not on alpha */
7873 case TARGET_NR_creat
:
7874 if (!(p
= lock_user_string(arg1
)))
7876 ret
= get_errno(creat(p
, arg2
));
7877 fd_trans_unregister(ret
);
7878 unlock_user(p
, arg1
, 0);
7881 #ifdef TARGET_NR_link
7882 case TARGET_NR_link
:
7885 p
= lock_user_string(arg1
);
7886 p2
= lock_user_string(arg2
);
7888 ret
= -TARGET_EFAULT
;
7890 ret
= get_errno(link(p
, p2
));
7891 unlock_user(p2
, arg2
, 0);
7892 unlock_user(p
, arg1
, 0);
7896 #if defined(TARGET_NR_linkat)
7897 case TARGET_NR_linkat
:
7902 p
= lock_user_string(arg2
);
7903 p2
= lock_user_string(arg4
);
7905 ret
= -TARGET_EFAULT
;
7907 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
7908 unlock_user(p
, arg2
, 0);
7909 unlock_user(p2
, arg4
, 0);
7913 #ifdef TARGET_NR_unlink
7914 case TARGET_NR_unlink
:
7915 if (!(p
= lock_user_string(arg1
)))
7917 ret
= get_errno(unlink(p
));
7918 unlock_user(p
, arg1
, 0);
7921 #if defined(TARGET_NR_unlinkat)
7922 case TARGET_NR_unlinkat
:
7923 if (!(p
= lock_user_string(arg2
)))
7925 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
7926 unlock_user(p
, arg2
, 0);
7929 case TARGET_NR_execve
:
7931 char **argp
, **envp
;
7934 abi_ulong guest_argp
;
7935 abi_ulong guest_envp
;
7942 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
7943 if (get_user_ual(addr
, gp
))
7951 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
7952 if (get_user_ual(addr
, gp
))
7959 argp
= g_new0(char *, argc
+ 1);
7960 envp
= g_new0(char *, envc
+ 1);
7962 for (gp
= guest_argp
, q
= argp
; gp
;
7963 gp
+= sizeof(abi_ulong
), q
++) {
7964 if (get_user_ual(addr
, gp
))
7968 if (!(*q
= lock_user_string(addr
)))
7970 total_size
+= strlen(*q
) + 1;
7974 for (gp
= guest_envp
, q
= envp
; gp
;
7975 gp
+= sizeof(abi_ulong
), q
++) {
7976 if (get_user_ual(addr
, gp
))
7980 if (!(*q
= lock_user_string(addr
)))
7982 total_size
+= strlen(*q
) + 1;
7986 if (!(p
= lock_user_string(arg1
)))
7988 /* Although execve() is not an interruptible syscall it is
7989 * a special case where we must use the safe_syscall wrapper:
7990 * if we allow a signal to happen before we make the host
7991 * syscall then we will 'lose' it, because at the point of
7992 * execve the process leaves QEMU's control. So we use the
7993 * safe syscall wrapper to ensure that we either take the
7994 * signal as a guest signal, or else it does not happen
7995 * before the execve completes and makes it the other
7996 * program's problem.
7998 ret
= get_errno(safe_execve(p
, argp
, envp
));
7999 unlock_user(p
, arg1
, 0);
8004 ret
= -TARGET_EFAULT
;
8007 for (gp
= guest_argp
, q
= argp
; *q
;
8008 gp
+= sizeof(abi_ulong
), q
++) {
8009 if (get_user_ual(addr
, gp
)
8012 unlock_user(*q
, addr
, 0);
8014 for (gp
= guest_envp
, q
= envp
; *q
;
8015 gp
+= sizeof(abi_ulong
), q
++) {
8016 if (get_user_ual(addr
, gp
)
8019 unlock_user(*q
, addr
, 0);
8026 case TARGET_NR_chdir
:
8027 if (!(p
= lock_user_string(arg1
)))
8029 ret
= get_errno(chdir(p
));
8030 unlock_user(p
, arg1
, 0);
8032 #ifdef TARGET_NR_time
8033 case TARGET_NR_time
:
8036 ret
= get_errno(time(&host_time
));
8039 && put_user_sal(host_time
, arg1
))
8044 #ifdef TARGET_NR_mknod
8045 case TARGET_NR_mknod
:
8046 if (!(p
= lock_user_string(arg1
)))
8048 ret
= get_errno(mknod(p
, arg2
, arg3
));
8049 unlock_user(p
, arg1
, 0);
8052 #if defined(TARGET_NR_mknodat)
8053 case TARGET_NR_mknodat
:
8054 if (!(p
= lock_user_string(arg2
)))
8056 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
8057 unlock_user(p
, arg2
, 0);
8060 #ifdef TARGET_NR_chmod
8061 case TARGET_NR_chmod
:
8062 if (!(p
= lock_user_string(arg1
)))
8064 ret
= get_errno(chmod(p
, arg2
));
8065 unlock_user(p
, arg1
, 0);
8068 #ifdef TARGET_NR_break
8069 case TARGET_NR_break
:
8072 #ifdef TARGET_NR_oldstat
8073 case TARGET_NR_oldstat
:
8076 case TARGET_NR_lseek
:
8077 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
8079 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8080 /* Alpha specific */
8081 case TARGET_NR_getxpid
:
8082 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
8083 ret
= get_errno(getpid());
8086 #ifdef TARGET_NR_getpid
8087 case TARGET_NR_getpid
:
8088 ret
= get_errno(getpid());
8091 case TARGET_NR_mount
:
8093 /* need to look at the data field */
8097 p
= lock_user_string(arg1
);
8105 p2
= lock_user_string(arg2
);
8108 unlock_user(p
, arg1
, 0);
8114 p3
= lock_user_string(arg3
);
8117 unlock_user(p
, arg1
, 0);
8119 unlock_user(p2
, arg2
, 0);
8126 /* FIXME - arg5 should be locked, but it isn't clear how to
8127 * do that since it's not guaranteed to be a NULL-terminated
8131 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
8133 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
8135 ret
= get_errno(ret
);
8138 unlock_user(p
, arg1
, 0);
8140 unlock_user(p2
, arg2
, 0);
8142 unlock_user(p3
, arg3
, 0);
8146 #ifdef TARGET_NR_umount
8147 case TARGET_NR_umount
:
8148 if (!(p
= lock_user_string(arg1
)))
8150 ret
= get_errno(umount(p
));
8151 unlock_user(p
, arg1
, 0);
8154 #ifdef TARGET_NR_stime /* not on alpha */
8155 case TARGET_NR_stime
:
8158 if (get_user_sal(host_time
, arg1
))
8160 ret
= get_errno(stime(&host_time
));
8164 case TARGET_NR_ptrace
:
8166 #ifdef TARGET_NR_alarm /* not on alpha */
8167 case TARGET_NR_alarm
:
8171 #ifdef TARGET_NR_oldfstat
8172 case TARGET_NR_oldfstat
:
8175 #ifdef TARGET_NR_pause /* not on alpha */
8176 case TARGET_NR_pause
:
8177 if (!block_signals()) {
8178 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
8180 ret
= -TARGET_EINTR
;
8183 #ifdef TARGET_NR_utime
8184 case TARGET_NR_utime
:
8186 struct utimbuf tbuf
, *host_tbuf
;
8187 struct target_utimbuf
*target_tbuf
;
8189 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
8191 tbuf
.actime
= tswapal(target_tbuf
->actime
);
8192 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
8193 unlock_user_struct(target_tbuf
, arg2
, 0);
8198 if (!(p
= lock_user_string(arg1
)))
8200 ret
= get_errno(utime(p
, host_tbuf
));
8201 unlock_user(p
, arg1
, 0);
8205 #ifdef TARGET_NR_utimes
8206 case TARGET_NR_utimes
:
8208 struct timeval
*tvp
, tv
[2];
8210 if (copy_from_user_timeval(&tv
[0], arg2
)
8211 || copy_from_user_timeval(&tv
[1],
8212 arg2
+ sizeof(struct target_timeval
)))
8218 if (!(p
= lock_user_string(arg1
)))
8220 ret
= get_errno(utimes(p
, tvp
));
8221 unlock_user(p
, arg1
, 0);
8225 #if defined(TARGET_NR_futimesat)
8226 case TARGET_NR_futimesat
:
8228 struct timeval
*tvp
, tv
[2];
8230 if (copy_from_user_timeval(&tv
[0], arg3
)
8231 || copy_from_user_timeval(&tv
[1],
8232 arg3
+ sizeof(struct target_timeval
)))
8238 if (!(p
= lock_user_string(arg2
)))
8240 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
8241 unlock_user(p
, arg2
, 0);
8245 #ifdef TARGET_NR_stty
8246 case TARGET_NR_stty
:
8249 #ifdef TARGET_NR_gtty
8250 case TARGET_NR_gtty
:
8253 #ifdef TARGET_NR_access
8254 case TARGET_NR_access
:
8255 if (!(p
= lock_user_string(arg1
)))
8257 ret
= get_errno(access(path(p
), arg2
));
8258 unlock_user(p
, arg1
, 0);
8261 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8262 case TARGET_NR_faccessat
:
8263 if (!(p
= lock_user_string(arg2
)))
8265 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
8266 unlock_user(p
, arg2
, 0);
8269 #ifdef TARGET_NR_nice /* not on alpha */
8270 case TARGET_NR_nice
:
8271 ret
= get_errno(nice(arg1
));
8274 #ifdef TARGET_NR_ftime
8275 case TARGET_NR_ftime
:
8278 case TARGET_NR_sync
:
8282 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8283 case TARGET_NR_syncfs
:
8284 ret
= get_errno(syncfs(arg1
));
8287 case TARGET_NR_kill
:
8288 ret
= get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
8290 #ifdef TARGET_NR_rename
8291 case TARGET_NR_rename
:
8294 p
= lock_user_string(arg1
);
8295 p2
= lock_user_string(arg2
);
8297 ret
= -TARGET_EFAULT
;
8299 ret
= get_errno(rename(p
, p2
));
8300 unlock_user(p2
, arg2
, 0);
8301 unlock_user(p
, arg1
, 0);
8305 #if defined(TARGET_NR_renameat)
8306 case TARGET_NR_renameat
:
8309 p
= lock_user_string(arg2
);
8310 p2
= lock_user_string(arg4
);
8312 ret
= -TARGET_EFAULT
;
8314 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
8315 unlock_user(p2
, arg4
, 0);
8316 unlock_user(p
, arg2
, 0);
8320 #ifdef TARGET_NR_mkdir
8321 case TARGET_NR_mkdir
:
8322 if (!(p
= lock_user_string(arg1
)))
8324 ret
= get_errno(mkdir(p
, arg2
));
8325 unlock_user(p
, arg1
, 0);
8328 #if defined(TARGET_NR_mkdirat)
8329 case TARGET_NR_mkdirat
:
8330 if (!(p
= lock_user_string(arg2
)))
8332 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
8333 unlock_user(p
, arg2
, 0);
8336 #ifdef TARGET_NR_rmdir
8337 case TARGET_NR_rmdir
:
8338 if (!(p
= lock_user_string(arg1
)))
8340 ret
= get_errno(rmdir(p
));
8341 unlock_user(p
, arg1
, 0);
8345 ret
= get_errno(dup(arg1
));
8347 fd_trans_dup(arg1
, ret
);
8350 #ifdef TARGET_NR_pipe
8351 case TARGET_NR_pipe
:
8352 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
8355 #ifdef TARGET_NR_pipe2
8356 case TARGET_NR_pipe2
:
8357 ret
= do_pipe(cpu_env
, arg1
,
8358 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
8361 case TARGET_NR_times
:
8363 struct target_tms
*tmsp
;
8365 ret
= get_errno(times(&tms
));
8367 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
8370 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
8371 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
8372 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
8373 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
8376 ret
= host_to_target_clock_t(ret
);
8379 #ifdef TARGET_NR_prof
8380 case TARGET_NR_prof
:
8383 #ifdef TARGET_NR_signal
8384 case TARGET_NR_signal
:
8387 case TARGET_NR_acct
:
8389 ret
= get_errno(acct(NULL
));
8391 if (!(p
= lock_user_string(arg1
)))
8393 ret
= get_errno(acct(path(p
)));
8394 unlock_user(p
, arg1
, 0);
8397 #ifdef TARGET_NR_umount2
8398 case TARGET_NR_umount2
:
8399 if (!(p
= lock_user_string(arg1
)))
8401 ret
= get_errno(umount2(p
, arg2
));
8402 unlock_user(p
, arg1
, 0);
8405 #ifdef TARGET_NR_lock
8406 case TARGET_NR_lock
:
8409 case TARGET_NR_ioctl
:
8410 ret
= do_ioctl(arg1
, arg2
, arg3
);
8412 case TARGET_NR_fcntl
:
8413 ret
= do_fcntl(arg1
, arg2
, arg3
);
8415 #ifdef TARGET_NR_mpx
8419 case TARGET_NR_setpgid
:
8420 ret
= get_errno(setpgid(arg1
, arg2
));
8422 #ifdef TARGET_NR_ulimit
8423 case TARGET_NR_ulimit
:
8426 #ifdef TARGET_NR_oldolduname
8427 case TARGET_NR_oldolduname
:
8430 case TARGET_NR_umask
:
8431 ret
= get_errno(umask(arg1
));
8433 case TARGET_NR_chroot
:
8434 if (!(p
= lock_user_string(arg1
)))
8436 ret
= get_errno(chroot(p
));
8437 unlock_user(p
, arg1
, 0);
8439 #ifdef TARGET_NR_ustat
8440 case TARGET_NR_ustat
:
8443 #ifdef TARGET_NR_dup2
8444 case TARGET_NR_dup2
:
8445 ret
= get_errno(dup2(arg1
, arg2
));
8447 fd_trans_dup(arg1
, arg2
);
8451 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8452 case TARGET_NR_dup3
:
8453 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
8455 fd_trans_dup(arg1
, arg2
);
8459 #ifdef TARGET_NR_getppid /* not on alpha */
8460 case TARGET_NR_getppid
:
8461 ret
= get_errno(getppid());
8464 #ifdef TARGET_NR_getpgrp
8465 case TARGET_NR_getpgrp
:
8466 ret
= get_errno(getpgrp());
8469 case TARGET_NR_setsid
:
8470 ret
= get_errno(setsid());
8472 #ifdef TARGET_NR_sigaction
8473 case TARGET_NR_sigaction
:
8475 #if defined(TARGET_ALPHA)
8476 struct target_sigaction act
, oact
, *pact
= 0;
8477 struct target_old_sigaction
*old_act
;
8479 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8481 act
._sa_handler
= old_act
->_sa_handler
;
8482 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8483 act
.sa_flags
= old_act
->sa_flags
;
8484 act
.sa_restorer
= 0;
8485 unlock_user_struct(old_act
, arg2
, 0);
8488 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8489 if (!is_error(ret
) && arg3
) {
8490 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8492 old_act
->_sa_handler
= oact
._sa_handler
;
8493 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8494 old_act
->sa_flags
= oact
.sa_flags
;
8495 unlock_user_struct(old_act
, arg3
, 1);
8497 #elif defined(TARGET_MIPS)
8498 struct target_sigaction act
, oact
, *pact
, *old_act
;
8501 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8503 act
._sa_handler
= old_act
->_sa_handler
;
8504 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
8505 act
.sa_flags
= old_act
->sa_flags
;
8506 unlock_user_struct(old_act
, arg2
, 0);
8512 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8514 if (!is_error(ret
) && arg3
) {
8515 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8517 old_act
->_sa_handler
= oact
._sa_handler
;
8518 old_act
->sa_flags
= oact
.sa_flags
;
8519 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
8520 old_act
->sa_mask
.sig
[1] = 0;
8521 old_act
->sa_mask
.sig
[2] = 0;
8522 old_act
->sa_mask
.sig
[3] = 0;
8523 unlock_user_struct(old_act
, arg3
, 1);
8526 struct target_old_sigaction
*old_act
;
8527 struct target_sigaction act
, oact
, *pact
;
8529 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8531 act
._sa_handler
= old_act
->_sa_handler
;
8532 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8533 act
.sa_flags
= old_act
->sa_flags
;
8534 act
.sa_restorer
= old_act
->sa_restorer
;
8535 unlock_user_struct(old_act
, arg2
, 0);
8540 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8541 if (!is_error(ret
) && arg3
) {
8542 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8544 old_act
->_sa_handler
= oact
._sa_handler
;
8545 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8546 old_act
->sa_flags
= oact
.sa_flags
;
8547 old_act
->sa_restorer
= oact
.sa_restorer
;
8548 unlock_user_struct(old_act
, arg3
, 1);
8554 case TARGET_NR_rt_sigaction
:
8556 #if defined(TARGET_ALPHA)
8557 struct target_sigaction act
, oact
, *pact
= 0;
8558 struct target_rt_sigaction
*rt_act
;
8560 if (arg4
!= sizeof(target_sigset_t
)) {
8561 ret
= -TARGET_EINVAL
;
8565 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
8567 act
._sa_handler
= rt_act
->_sa_handler
;
8568 act
.sa_mask
= rt_act
->sa_mask
;
8569 act
.sa_flags
= rt_act
->sa_flags
;
8570 act
.sa_restorer
= arg5
;
8571 unlock_user_struct(rt_act
, arg2
, 0);
8574 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8575 if (!is_error(ret
) && arg3
) {
8576 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
8578 rt_act
->_sa_handler
= oact
._sa_handler
;
8579 rt_act
->sa_mask
= oact
.sa_mask
;
8580 rt_act
->sa_flags
= oact
.sa_flags
;
8581 unlock_user_struct(rt_act
, arg3
, 1);
8584 struct target_sigaction
*act
;
8585 struct target_sigaction
*oact
;
8587 if (arg4
!= sizeof(target_sigset_t
)) {
8588 ret
= -TARGET_EINVAL
;
8592 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
8597 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
8598 ret
= -TARGET_EFAULT
;
8599 goto rt_sigaction_fail
;
8603 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
8606 unlock_user_struct(act
, arg2
, 0);
8608 unlock_user_struct(oact
, arg3
, 1);
8612 #ifdef TARGET_NR_sgetmask /* not on alpha */
8613 case TARGET_NR_sgetmask
:
8616 abi_ulong target_set
;
8617 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8619 host_to_target_old_sigset(&target_set
, &cur_set
);
8625 #ifdef TARGET_NR_ssetmask /* not on alpha */
8626 case TARGET_NR_ssetmask
:
8629 abi_ulong target_set
= arg1
;
8630 target_to_host_old_sigset(&set
, &target_set
);
8631 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
8633 host_to_target_old_sigset(&target_set
, &oset
);
8639 #ifdef TARGET_NR_sigprocmask
8640 case TARGET_NR_sigprocmask
:
8642 #if defined(TARGET_ALPHA)
8643 sigset_t set
, oldset
;
8648 case TARGET_SIG_BLOCK
:
8651 case TARGET_SIG_UNBLOCK
:
8654 case TARGET_SIG_SETMASK
:
8658 ret
= -TARGET_EINVAL
;
8662 target_to_host_old_sigset(&set
, &mask
);
8664 ret
= do_sigprocmask(how
, &set
, &oldset
);
8665 if (!is_error(ret
)) {
8666 host_to_target_old_sigset(&mask
, &oldset
);
8668 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
8671 sigset_t set
, oldset
, *set_ptr
;
8676 case TARGET_SIG_BLOCK
:
8679 case TARGET_SIG_UNBLOCK
:
8682 case TARGET_SIG_SETMASK
:
8686 ret
= -TARGET_EINVAL
;
8689 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8691 target_to_host_old_sigset(&set
, p
);
8692 unlock_user(p
, arg2
, 0);
8698 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8699 if (!is_error(ret
) && arg3
) {
8700 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8702 host_to_target_old_sigset(p
, &oldset
);
8703 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8709 case TARGET_NR_rt_sigprocmask
:
8712 sigset_t set
, oldset
, *set_ptr
;
8714 if (arg4
!= sizeof(target_sigset_t
)) {
8715 ret
= -TARGET_EINVAL
;
8721 case TARGET_SIG_BLOCK
:
8724 case TARGET_SIG_UNBLOCK
:
8727 case TARGET_SIG_SETMASK
:
8731 ret
= -TARGET_EINVAL
;
8734 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8736 target_to_host_sigset(&set
, p
);
8737 unlock_user(p
, arg2
, 0);
8743 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8744 if (!is_error(ret
) && arg3
) {
8745 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8747 host_to_target_sigset(p
, &oldset
);
8748 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8752 #ifdef TARGET_NR_sigpending
8753 case TARGET_NR_sigpending
:
8756 ret
= get_errno(sigpending(&set
));
8757 if (!is_error(ret
)) {
8758 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8760 host_to_target_old_sigset(p
, &set
);
8761 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8766 case TARGET_NR_rt_sigpending
:
8770 /* Yes, this check is >, not != like most. We follow the kernel's
8771 * logic and it does it like this because it implements
8772 * NR_sigpending through the same code path, and in that case
8773 * the old_sigset_t is smaller in size.
8775 if (arg2
> sizeof(target_sigset_t
)) {
8776 ret
= -TARGET_EINVAL
;
8780 ret
= get_errno(sigpending(&set
));
8781 if (!is_error(ret
)) {
8782 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8784 host_to_target_sigset(p
, &set
);
8785 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8789 #ifdef TARGET_NR_sigsuspend
8790 case TARGET_NR_sigsuspend
:
8792 TaskState
*ts
= cpu
->opaque
;
8793 #if defined(TARGET_ALPHA)
8794 abi_ulong mask
= arg1
;
8795 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
8797 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8799 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
8800 unlock_user(p
, arg1
, 0);
8802 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8804 if (ret
!= -TARGET_ERESTARTSYS
) {
8805 ts
->in_sigsuspend
= 1;
8810 case TARGET_NR_rt_sigsuspend
:
8812 TaskState
*ts
= cpu
->opaque
;
8814 if (arg2
!= sizeof(target_sigset_t
)) {
8815 ret
= -TARGET_EINVAL
;
8818 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8820 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
8821 unlock_user(p
, arg1
, 0);
8822 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8824 if (ret
!= -TARGET_ERESTARTSYS
) {
8825 ts
->in_sigsuspend
= 1;
8829 case TARGET_NR_rt_sigtimedwait
:
8832 struct timespec uts
, *puts
;
8835 if (arg4
!= sizeof(target_sigset_t
)) {
8836 ret
= -TARGET_EINVAL
;
8840 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8842 target_to_host_sigset(&set
, p
);
8843 unlock_user(p
, arg1
, 0);
8846 target_to_host_timespec(puts
, arg3
);
8850 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
8852 if (!is_error(ret
)) {
8854 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
8859 host_to_target_siginfo(p
, &uinfo
);
8860 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
8862 ret
= host_to_target_signal(ret
);
8866 case TARGET_NR_rt_sigqueueinfo
:
8870 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
8874 target_to_host_siginfo(&uinfo
, p
);
8875 unlock_user(p
, arg3
, 0);
8876 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
8879 case TARGET_NR_rt_tgsigqueueinfo
:
8883 p
= lock_user(VERIFY_READ
, arg4
, sizeof(target_siginfo_t
), 1);
8887 target_to_host_siginfo(&uinfo
, p
);
8888 unlock_user(p
, arg4
, 0);
8889 ret
= get_errno(sys_rt_tgsigqueueinfo(arg1
, arg2
, arg3
, &uinfo
));
8892 #ifdef TARGET_NR_sigreturn
8893 case TARGET_NR_sigreturn
:
8894 if (block_signals()) {
8895 ret
= -TARGET_ERESTARTSYS
;
8897 ret
= do_sigreturn(cpu_env
);
8901 case TARGET_NR_rt_sigreturn
:
8902 if (block_signals()) {
8903 ret
= -TARGET_ERESTARTSYS
;
8905 ret
= do_rt_sigreturn(cpu_env
);
8908 case TARGET_NR_sethostname
:
8909 if (!(p
= lock_user_string(arg1
)))
8911 ret
= get_errno(sethostname(p
, arg2
));
8912 unlock_user(p
, arg1
, 0);
8914 case TARGET_NR_setrlimit
:
8916 int resource
= target_to_host_resource(arg1
);
8917 struct target_rlimit
*target_rlim
;
8919 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
8921 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
8922 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
8923 unlock_user_struct(target_rlim
, arg2
, 0);
8924 ret
= get_errno(setrlimit(resource
, &rlim
));
8927 case TARGET_NR_getrlimit
:
8929 int resource
= target_to_host_resource(arg1
);
8930 struct target_rlimit
*target_rlim
;
8933 ret
= get_errno(getrlimit(resource
, &rlim
));
8934 if (!is_error(ret
)) {
8935 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
8937 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
8938 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
8939 unlock_user_struct(target_rlim
, arg2
, 1);
8943 case TARGET_NR_getrusage
:
8945 struct rusage rusage
;
8946 ret
= get_errno(getrusage(arg1
, &rusage
));
8947 if (!is_error(ret
)) {
8948 ret
= host_to_target_rusage(arg2
, &rusage
);
8952 case TARGET_NR_gettimeofday
:
8955 ret
= get_errno(gettimeofday(&tv
, NULL
));
8956 if (!is_error(ret
)) {
8957 if (copy_to_user_timeval(arg1
, &tv
))
8962 case TARGET_NR_settimeofday
:
8964 struct timeval tv
, *ptv
= NULL
;
8965 struct timezone tz
, *ptz
= NULL
;
8968 if (copy_from_user_timeval(&tv
, arg1
)) {
8975 if (copy_from_user_timezone(&tz
, arg2
)) {
8981 ret
= get_errno(settimeofday(ptv
, ptz
));
8984 #if defined(TARGET_NR_select)
8985 case TARGET_NR_select
:
8986 #if defined(TARGET_WANT_NI_OLD_SELECT)
8987 /* some architectures used to have old_select here
8988 * but now ENOSYS it.
8990 ret
= -TARGET_ENOSYS
;
8991 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8992 ret
= do_old_select(arg1
);
8994 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
8998 #ifdef TARGET_NR_pselect6
8999 case TARGET_NR_pselect6
:
9001 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
9002 fd_set rfds
, wfds
, efds
;
9003 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
9004 struct timespec ts
, *ts_ptr
;
9007 * The 6th arg is actually two args smashed together,
9008 * so we cannot use the C library.
9016 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
9017 target_sigset_t
*target_sigset
;
9025 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
9029 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
9033 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
9039 * This takes a timespec, and not a timeval, so we cannot
9040 * use the do_select() helper ...
9043 if (target_to_host_timespec(&ts
, ts_addr
)) {
9051 /* Extract the two packed args for the sigset */
9054 sig
.size
= SIGSET_T_SIZE
;
9056 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
9060 arg_sigset
= tswapal(arg7
[0]);
9061 arg_sigsize
= tswapal(arg7
[1]);
9062 unlock_user(arg7
, arg6
, 0);
9066 if (arg_sigsize
!= sizeof(*target_sigset
)) {
9067 /* Like the kernel, we enforce correct size sigsets */
9068 ret
= -TARGET_EINVAL
;
9071 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
9072 sizeof(*target_sigset
), 1);
9073 if (!target_sigset
) {
9076 target_to_host_sigset(&set
, target_sigset
);
9077 unlock_user(target_sigset
, arg_sigset
, 0);
9085 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
9088 if (!is_error(ret
)) {
9089 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
9091 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
9093 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
9096 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
9102 #ifdef TARGET_NR_symlink
9103 case TARGET_NR_symlink
:
9106 p
= lock_user_string(arg1
);
9107 p2
= lock_user_string(arg2
);
9109 ret
= -TARGET_EFAULT
;
9111 ret
= get_errno(symlink(p
, p2
));
9112 unlock_user(p2
, arg2
, 0);
9113 unlock_user(p
, arg1
, 0);
9117 #if defined(TARGET_NR_symlinkat)
9118 case TARGET_NR_symlinkat
:
9121 p
= lock_user_string(arg1
);
9122 p2
= lock_user_string(arg3
);
9124 ret
= -TARGET_EFAULT
;
9126 ret
= get_errno(symlinkat(p
, arg2
, p2
));
9127 unlock_user(p2
, arg3
, 0);
9128 unlock_user(p
, arg1
, 0);
9132 #ifdef TARGET_NR_oldlstat
9133 case TARGET_NR_oldlstat
:
9136 #ifdef TARGET_NR_readlink
9137 case TARGET_NR_readlink
:
9140 p
= lock_user_string(arg1
);
9141 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9143 ret
= -TARGET_EFAULT
;
9145 /* Short circuit this for the magic exe check. */
9146 ret
= -TARGET_EINVAL
;
9147 } else if (is_proc_myself((const char *)p
, "exe")) {
9148 char real
[PATH_MAX
], *temp
;
9149 temp
= realpath(exec_path
, real
);
9150 /* Return value is # of bytes that we wrote to the buffer. */
9152 ret
= get_errno(-1);
9154 /* Don't worry about sign mismatch as earlier mapping
9155 * logic would have thrown a bad address error. */
9156 ret
= MIN(strlen(real
), arg3
);
9157 /* We cannot NUL terminate the string. */
9158 memcpy(p2
, real
, ret
);
9161 ret
= get_errno(readlink(path(p
), p2
, arg3
));
9163 unlock_user(p2
, arg2
, ret
);
9164 unlock_user(p
, arg1
, 0);
9168 #if defined(TARGET_NR_readlinkat)
9169 case TARGET_NR_readlinkat
:
9172 p
= lock_user_string(arg2
);
9173 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
9175 ret
= -TARGET_EFAULT
;
9176 } else if (is_proc_myself((const char *)p
, "exe")) {
9177 char real
[PATH_MAX
], *temp
;
9178 temp
= realpath(exec_path
, real
);
9179 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
9180 snprintf((char *)p2
, arg4
, "%s", real
);
9182 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
9184 unlock_user(p2
, arg3
, ret
);
9185 unlock_user(p
, arg2
, 0);
9189 #ifdef TARGET_NR_uselib
9190 case TARGET_NR_uselib
:
9193 #ifdef TARGET_NR_swapon
9194 case TARGET_NR_swapon
:
9195 if (!(p
= lock_user_string(arg1
)))
9197 ret
= get_errno(swapon(p
, arg2
));
9198 unlock_user(p
, arg1
, 0);
9201 case TARGET_NR_reboot
:
9202 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
9203 /* arg4 must be ignored in all other cases */
9204 p
= lock_user_string(arg4
);
9208 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
9209 unlock_user(p
, arg4
, 0);
9211 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
9214 #ifdef TARGET_NR_readdir
9215 case TARGET_NR_readdir
:
9218 #ifdef TARGET_NR_mmap
9219 case TARGET_NR_mmap
:
9220 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9221 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9222 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9223 || defined(TARGET_S390X)
9226 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
9227 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
9235 unlock_user(v
, arg1
, 0);
9236 ret
= get_errno(target_mmap(v1
, v2
, v3
,
9237 target_to_host_bitmask(v4
, mmap_flags_tbl
),
9241 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9242 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9248 #ifdef TARGET_NR_mmap2
9249 case TARGET_NR_mmap2
:
9251 #define MMAP_SHIFT 12
9253 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9254 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9256 arg6
<< MMAP_SHIFT
));
9259 case TARGET_NR_munmap
:
9260 ret
= get_errno(target_munmap(arg1
, arg2
));
9262 case TARGET_NR_mprotect
:
9264 TaskState
*ts
= cpu
->opaque
;
9265 /* Special hack to detect libc making the stack executable. */
9266 if ((arg3
& PROT_GROWSDOWN
)
9267 && arg1
>= ts
->info
->stack_limit
9268 && arg1
<= ts
->info
->start_stack
) {
9269 arg3
&= ~PROT_GROWSDOWN
;
9270 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
9271 arg1
= ts
->info
->stack_limit
;
9274 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
9276 #ifdef TARGET_NR_mremap
9277 case TARGET_NR_mremap
:
9278 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
9281 /* ??? msync/mlock/munlock are broken for softmmu. */
9282 #ifdef TARGET_NR_msync
9283 case TARGET_NR_msync
:
9284 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
9287 #ifdef TARGET_NR_mlock
9288 case TARGET_NR_mlock
:
9289 ret
= get_errno(mlock(g2h(arg1
), arg2
));
9292 #ifdef TARGET_NR_munlock
9293 case TARGET_NR_munlock
:
9294 ret
= get_errno(munlock(g2h(arg1
), arg2
));
9297 #ifdef TARGET_NR_mlockall
9298 case TARGET_NR_mlockall
:
9299 ret
= get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
9302 #ifdef TARGET_NR_munlockall
9303 case TARGET_NR_munlockall
:
9304 ret
= get_errno(munlockall());
9307 case TARGET_NR_truncate
:
9308 if (!(p
= lock_user_string(arg1
)))
9310 ret
= get_errno(truncate(p
, arg2
));
9311 unlock_user(p
, arg1
, 0);
9313 case TARGET_NR_ftruncate
:
9314 ret
= get_errno(ftruncate(arg1
, arg2
));
9316 case TARGET_NR_fchmod
:
9317 ret
= get_errno(fchmod(arg1
, arg2
));
9319 #if defined(TARGET_NR_fchmodat)
9320 case TARGET_NR_fchmodat
:
9321 if (!(p
= lock_user_string(arg2
)))
9323 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
9324 unlock_user(p
, arg2
, 0);
9327 case TARGET_NR_getpriority
:
9328 /* Note that negative values are valid for getpriority, so we must
9329 differentiate based on errno settings. */
9331 ret
= getpriority(arg1
, arg2
);
9332 if (ret
== -1 && errno
!= 0) {
9333 ret
= -host_to_target_errno(errno
);
9337 /* Return value is the unbiased priority. Signal no error. */
9338 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
9340 /* Return value is a biased priority to avoid negative numbers. */
9344 case TARGET_NR_setpriority
:
9345 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
9347 #ifdef TARGET_NR_profil
9348 case TARGET_NR_profil
:
9351 case TARGET_NR_statfs
:
9352 if (!(p
= lock_user_string(arg1
)))
9354 ret
= get_errno(statfs(path(p
), &stfs
));
9355 unlock_user(p
, arg1
, 0);
9357 if (!is_error(ret
)) {
9358 struct target_statfs
*target_stfs
;
9360 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
9362 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9363 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9364 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9365 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9366 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9367 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9368 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9369 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9370 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9371 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9372 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9373 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9374 unlock_user_struct(target_stfs
, arg2
, 1);
9377 case TARGET_NR_fstatfs
:
9378 ret
= get_errno(fstatfs(arg1
, &stfs
));
9379 goto convert_statfs
;
9380 #ifdef TARGET_NR_statfs64
9381 case TARGET_NR_statfs64
:
9382 if (!(p
= lock_user_string(arg1
)))
9384 ret
= get_errno(statfs(path(p
), &stfs
));
9385 unlock_user(p
, arg1
, 0);
9387 if (!is_error(ret
)) {
9388 struct target_statfs64
*target_stfs
;
9390 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
9392 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9393 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9394 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9395 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9396 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9397 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9398 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9399 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9400 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9401 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9402 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9403 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9404 unlock_user_struct(target_stfs
, arg3
, 1);
9407 case TARGET_NR_fstatfs64
:
9408 ret
= get_errno(fstatfs(arg1
, &stfs
));
9409 goto convert_statfs64
;
9411 #ifdef TARGET_NR_ioperm
9412 case TARGET_NR_ioperm
:
9415 #ifdef TARGET_NR_socketcall
9416 case TARGET_NR_socketcall
:
9417 ret
= do_socketcall(arg1
, arg2
);
9420 #ifdef TARGET_NR_accept
9421 case TARGET_NR_accept
:
9422 ret
= do_accept4(arg1
, arg2
, arg3
, 0);
9425 #ifdef TARGET_NR_accept4
9426 case TARGET_NR_accept4
:
9427 ret
= do_accept4(arg1
, arg2
, arg3
, arg4
);
9430 #ifdef TARGET_NR_bind
9431 case TARGET_NR_bind
:
9432 ret
= do_bind(arg1
, arg2
, arg3
);
9435 #ifdef TARGET_NR_connect
9436 case TARGET_NR_connect
:
9437 ret
= do_connect(arg1
, arg2
, arg3
);
9440 #ifdef TARGET_NR_getpeername
9441 case TARGET_NR_getpeername
:
9442 ret
= do_getpeername(arg1
, arg2
, arg3
);
9445 #ifdef TARGET_NR_getsockname
9446 case TARGET_NR_getsockname
:
9447 ret
= do_getsockname(arg1
, arg2
, arg3
);
9450 #ifdef TARGET_NR_getsockopt
9451 case TARGET_NR_getsockopt
:
9452 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
9455 #ifdef TARGET_NR_listen
9456 case TARGET_NR_listen
:
9457 ret
= get_errno(listen(arg1
, arg2
));
9460 #ifdef TARGET_NR_recv
9461 case TARGET_NR_recv
:
9462 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
9465 #ifdef TARGET_NR_recvfrom
9466 case TARGET_NR_recvfrom
:
9467 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9470 #ifdef TARGET_NR_recvmsg
9471 case TARGET_NR_recvmsg
:
9472 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
9475 #ifdef TARGET_NR_send
9476 case TARGET_NR_send
:
9477 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
9480 #ifdef TARGET_NR_sendmsg
9481 case TARGET_NR_sendmsg
:
9482 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
9485 #ifdef TARGET_NR_sendmmsg
9486 case TARGET_NR_sendmmsg
:
9487 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
9489 case TARGET_NR_recvmmsg
:
9490 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
9493 #ifdef TARGET_NR_sendto
9494 case TARGET_NR_sendto
:
9495 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9498 #ifdef TARGET_NR_shutdown
9499 case TARGET_NR_shutdown
:
9500 ret
= get_errno(shutdown(arg1
, arg2
));
9503 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9504 case TARGET_NR_getrandom
:
9505 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
9509 ret
= get_errno(getrandom(p
, arg2
, arg3
));
9510 unlock_user(p
, arg1
, ret
);
9513 #ifdef TARGET_NR_socket
9514 case TARGET_NR_socket
:
9515 ret
= do_socket(arg1
, arg2
, arg3
);
9518 #ifdef TARGET_NR_socketpair
9519 case TARGET_NR_socketpair
:
9520 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
9523 #ifdef TARGET_NR_setsockopt
9524 case TARGET_NR_setsockopt
:
9525 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
9528 #if defined(TARGET_NR_syslog)
9529 case TARGET_NR_syslog
:
9534 case TARGET_SYSLOG_ACTION_CLOSE
: /* Close log */
9535 case TARGET_SYSLOG_ACTION_OPEN
: /* Open log */
9536 case TARGET_SYSLOG_ACTION_CLEAR
: /* Clear ring buffer */
9537 case TARGET_SYSLOG_ACTION_CONSOLE_OFF
: /* Disable logging */
9538 case TARGET_SYSLOG_ACTION_CONSOLE_ON
: /* Enable logging */
9539 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL
: /* Set messages level */
9540 case TARGET_SYSLOG_ACTION_SIZE_UNREAD
: /* Number of chars */
9541 case TARGET_SYSLOG_ACTION_SIZE_BUFFER
: /* Size of the buffer */
9543 ret
= get_errno(sys_syslog((int)arg1
, NULL
, (int)arg3
));
9546 case TARGET_SYSLOG_ACTION_READ
: /* Read from log */
9547 case TARGET_SYSLOG_ACTION_READ_CLEAR
: /* Read/clear msgs */
9548 case TARGET_SYSLOG_ACTION_READ_ALL
: /* Read last messages */
9550 ret
= -TARGET_EINVAL
;
9558 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9560 ret
= -TARGET_EFAULT
;
9563 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
9564 unlock_user(p
, arg2
, arg3
);
9574 case TARGET_NR_setitimer
:
9576 struct itimerval value
, ovalue
, *pvalue
;
9580 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
9581 || copy_from_user_timeval(&pvalue
->it_value
,
9582 arg2
+ sizeof(struct target_timeval
)))
9587 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
9588 if (!is_error(ret
) && arg3
) {
9589 if (copy_to_user_timeval(arg3
,
9590 &ovalue
.it_interval
)
9591 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
9597 case TARGET_NR_getitimer
:
9599 struct itimerval value
;
9601 ret
= get_errno(getitimer(arg1
, &value
));
9602 if (!is_error(ret
) && arg2
) {
9603 if (copy_to_user_timeval(arg2
,
9605 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
9611 #ifdef TARGET_NR_stat
9612 case TARGET_NR_stat
:
9613 if (!(p
= lock_user_string(arg1
)))
9615 ret
= get_errno(stat(path(p
), &st
));
9616 unlock_user(p
, arg1
, 0);
9619 #ifdef TARGET_NR_lstat
9620 case TARGET_NR_lstat
:
9621 if (!(p
= lock_user_string(arg1
)))
9623 ret
= get_errno(lstat(path(p
), &st
));
9624 unlock_user(p
, arg1
, 0);
9627 case TARGET_NR_fstat
:
9629 ret
= get_errno(fstat(arg1
, &st
));
9630 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9633 if (!is_error(ret
)) {
9634 struct target_stat
*target_st
;
9636 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
9638 memset(target_st
, 0, sizeof(*target_st
));
9639 __put_user(st
.st_dev
, &target_st
->st_dev
);
9640 __put_user(st
.st_ino
, &target_st
->st_ino
);
9641 __put_user(st
.st_mode
, &target_st
->st_mode
);
9642 __put_user(st
.st_uid
, &target_st
->st_uid
);
9643 __put_user(st
.st_gid
, &target_st
->st_gid
);
9644 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
9645 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
9646 __put_user(st
.st_size
, &target_st
->st_size
);
9647 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
9648 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
9649 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
9650 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
9651 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
9652 unlock_user_struct(target_st
, arg2
, 1);
9656 #ifdef TARGET_NR_olduname
9657 case TARGET_NR_olduname
:
9660 #ifdef TARGET_NR_iopl
9661 case TARGET_NR_iopl
:
9664 case TARGET_NR_vhangup
:
9665 ret
= get_errno(vhangup());
9667 #ifdef TARGET_NR_idle
9668 case TARGET_NR_idle
:
9671 #ifdef TARGET_NR_syscall
9672 case TARGET_NR_syscall
:
9673 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
9674 arg6
, arg7
, arg8
, 0);
9677 case TARGET_NR_wait4
:
9680 abi_long status_ptr
= arg2
;
9681 struct rusage rusage
, *rusage_ptr
;
9682 abi_ulong target_rusage
= arg4
;
9683 abi_long rusage_err
;
9685 rusage_ptr
= &rusage
;
9688 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
9689 if (!is_error(ret
)) {
9690 if (status_ptr
&& ret
) {
9691 status
= host_to_target_waitstatus(status
);
9692 if (put_user_s32(status
, status_ptr
))
9695 if (target_rusage
) {
9696 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
9704 #ifdef TARGET_NR_swapoff
9705 case TARGET_NR_swapoff
:
9706 if (!(p
= lock_user_string(arg1
)))
9708 ret
= get_errno(swapoff(p
));
9709 unlock_user(p
, arg1
, 0);
9712 case TARGET_NR_sysinfo
:
9714 struct target_sysinfo
*target_value
;
9715 struct sysinfo value
;
9716 ret
= get_errno(sysinfo(&value
));
9717 if (!is_error(ret
) && arg1
)
9719 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
9721 __put_user(value
.uptime
, &target_value
->uptime
);
9722 __put_user(value
.loads
[0], &target_value
->loads
[0]);
9723 __put_user(value
.loads
[1], &target_value
->loads
[1]);
9724 __put_user(value
.loads
[2], &target_value
->loads
[2]);
9725 __put_user(value
.totalram
, &target_value
->totalram
);
9726 __put_user(value
.freeram
, &target_value
->freeram
);
9727 __put_user(value
.sharedram
, &target_value
->sharedram
);
9728 __put_user(value
.bufferram
, &target_value
->bufferram
);
9729 __put_user(value
.totalswap
, &target_value
->totalswap
);
9730 __put_user(value
.freeswap
, &target_value
->freeswap
);
9731 __put_user(value
.procs
, &target_value
->procs
);
9732 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
9733 __put_user(value
.freehigh
, &target_value
->freehigh
);
9734 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
9735 unlock_user_struct(target_value
, arg1
, 1);
9739 #ifdef TARGET_NR_ipc
9741 ret
= do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9744 #ifdef TARGET_NR_semget
9745 case TARGET_NR_semget
:
9746 ret
= get_errno(semget(arg1
, arg2
, arg3
));
9749 #ifdef TARGET_NR_semop
9750 case TARGET_NR_semop
:
9751 ret
= do_semop(arg1
, arg2
, arg3
);
9754 #ifdef TARGET_NR_semctl
9755 case TARGET_NR_semctl
:
9756 ret
= do_semctl(arg1
, arg2
, arg3
, arg4
);
9759 #ifdef TARGET_NR_msgctl
9760 case TARGET_NR_msgctl
:
9761 ret
= do_msgctl(arg1
, arg2
, arg3
);
9764 #ifdef TARGET_NR_msgget
9765 case TARGET_NR_msgget
:
9766 ret
= get_errno(msgget(arg1
, arg2
));
9769 #ifdef TARGET_NR_msgrcv
9770 case TARGET_NR_msgrcv
:
9771 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
9774 #ifdef TARGET_NR_msgsnd
9775 case TARGET_NR_msgsnd
:
9776 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
9779 #ifdef TARGET_NR_shmget
9780 case TARGET_NR_shmget
:
9781 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
9784 #ifdef TARGET_NR_shmctl
9785 case TARGET_NR_shmctl
:
9786 ret
= do_shmctl(arg1
, arg2
, arg3
);
9789 #ifdef TARGET_NR_shmat
9790 case TARGET_NR_shmat
:
9791 ret
= do_shmat(cpu_env
, arg1
, arg2
, arg3
);
9794 #ifdef TARGET_NR_shmdt
9795 case TARGET_NR_shmdt
:
9796 ret
= do_shmdt(arg1
);
9799 case TARGET_NR_fsync
:
9800 ret
= get_errno(fsync(arg1
));
9802 case TARGET_NR_clone
:
9803 /* Linux manages to have three different orderings for its
9804 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9805 * match the kernel's CONFIG_CLONE_* settings.
9806 * Microblaze is further special in that it uses a sixth
9807 * implicit argument to clone for the TLS pointer.
9809 #if defined(TARGET_MICROBLAZE)
9810 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
9811 #elif defined(TARGET_CLONE_BACKWARDS)
9812 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
9813 #elif defined(TARGET_CLONE_BACKWARDS2)
9814 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
9816 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
9819 #ifdef __NR_exit_group
9820 /* new thread calls */
9821 case TARGET_NR_exit_group
:
9825 gdb_exit(cpu_env
, arg1
);
9826 ret
= get_errno(exit_group(arg1
));
9829 case TARGET_NR_setdomainname
:
9830 if (!(p
= lock_user_string(arg1
)))
9832 ret
= get_errno(setdomainname(p
, arg2
));
9833 unlock_user(p
, arg1
, 0);
9835 case TARGET_NR_uname
:
9836 /* no need to transcode because we use the linux syscall */
9838 struct new_utsname
* buf
;
9840 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
9842 ret
= get_errno(sys_uname(buf
));
9843 if (!is_error(ret
)) {
9844 /* Overwrite the native machine name with whatever is being
9846 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
9847 /* Allow the user to override the reported release. */
9848 if (qemu_uname_release
&& *qemu_uname_release
) {
9849 g_strlcpy(buf
->release
, qemu_uname_release
,
9850 sizeof(buf
->release
));
9853 unlock_user_struct(buf
, arg1
, 1);
9857 case TARGET_NR_modify_ldt
:
9858 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
9860 #if !defined(TARGET_X86_64)
9861 case TARGET_NR_vm86old
:
9863 case TARGET_NR_vm86
:
9864 ret
= do_vm86(cpu_env
, arg1
, arg2
);
9868 case TARGET_NR_adjtimex
:
9870 struct timex host_buf
;
9872 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
9875 ret
= get_errno(adjtimex(&host_buf
));
9876 if (!is_error(ret
)) {
9877 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
9883 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9884 case TARGET_NR_clock_adjtime
:
9886 struct timex htx
, *phtx
= &htx
;
9888 if (target_to_host_timex(phtx
, arg2
) != 0) {
9891 ret
= get_errno(clock_adjtime(arg1
, phtx
));
9892 if (!is_error(ret
) && phtx
) {
9893 if (host_to_target_timex(arg2
, phtx
) != 0) {
9900 #ifdef TARGET_NR_create_module
9901 case TARGET_NR_create_module
:
9903 case TARGET_NR_init_module
:
9904 case TARGET_NR_delete_module
:
9905 #ifdef TARGET_NR_get_kernel_syms
9906 case TARGET_NR_get_kernel_syms
:
9909 case TARGET_NR_quotactl
:
9911 case TARGET_NR_getpgid
:
9912 ret
= get_errno(getpgid(arg1
));
9914 case TARGET_NR_fchdir
:
9915 ret
= get_errno(fchdir(arg1
));
9917 #ifdef TARGET_NR_bdflush /* not on x86_64 */
9918 case TARGET_NR_bdflush
:
9921 #ifdef TARGET_NR_sysfs
9922 case TARGET_NR_sysfs
:
9925 case TARGET_NR_personality
:
9926 ret
= get_errno(personality(arg1
));
9928 #ifdef TARGET_NR_afs_syscall
9929 case TARGET_NR_afs_syscall
:
9932 #ifdef TARGET_NR__llseek /* Not on alpha */
9933 case TARGET_NR__llseek
:
9936 #if !defined(__NR_llseek)
9937 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
9939 ret
= get_errno(res
);
9944 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
9946 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
9952 #ifdef TARGET_NR_getdents
9953 case TARGET_NR_getdents
:
9954 #ifdef __NR_getdents
9955 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9957 struct target_dirent
*target_dirp
;
9958 struct linux_dirent
*dirp
;
9959 abi_long count
= arg3
;
9961 dirp
= g_try_malloc(count
);
9963 ret
= -TARGET_ENOMEM
;
9967 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9968 if (!is_error(ret
)) {
9969 struct linux_dirent
*de
;
9970 struct target_dirent
*tde
;
9972 int reclen
, treclen
;
9973 int count1
, tnamelen
;
9977 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9981 reclen
= de
->d_reclen
;
9982 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
9983 assert(tnamelen
>= 0);
9984 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
9985 assert(count1
+ treclen
<= count
);
9986 tde
->d_reclen
= tswap16(treclen
);
9987 tde
->d_ino
= tswapal(de
->d_ino
);
9988 tde
->d_off
= tswapal(de
->d_off
);
9989 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
9990 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9992 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9996 unlock_user(target_dirp
, arg2
, ret
);
10002 struct linux_dirent
*dirp
;
10003 abi_long count
= arg3
;
10005 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10007 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
10008 if (!is_error(ret
)) {
10009 struct linux_dirent
*de
;
10014 reclen
= de
->d_reclen
;
10017 de
->d_reclen
= tswap16(reclen
);
10018 tswapls(&de
->d_ino
);
10019 tswapls(&de
->d_off
);
10020 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
10024 unlock_user(dirp
, arg2
, ret
);
10028 /* Implement getdents in terms of getdents64 */
10030 struct linux_dirent64
*dirp
;
10031 abi_long count
= arg3
;
10033 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
10037 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
10038 if (!is_error(ret
)) {
10039 /* Convert the dirent64 structs to target dirent. We do this
10040 * in-place, since we can guarantee that a target_dirent is no
10041 * larger than a dirent64; however this means we have to be
10042 * careful to read everything before writing in the new format.
10044 struct linux_dirent64
*de
;
10045 struct target_dirent
*tde
;
10050 tde
= (struct target_dirent
*)dirp
;
10052 int namelen
, treclen
;
10053 int reclen
= de
->d_reclen
;
10054 uint64_t ino
= de
->d_ino
;
10055 int64_t off
= de
->d_off
;
10056 uint8_t type
= de
->d_type
;
10058 namelen
= strlen(de
->d_name
);
10059 treclen
= offsetof(struct target_dirent
, d_name
)
10061 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
10063 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
10064 tde
->d_ino
= tswapal(ino
);
10065 tde
->d_off
= tswapal(off
);
10066 tde
->d_reclen
= tswap16(treclen
);
10067 /* The target_dirent type is in what was formerly a padding
10068 * byte at the end of the structure:
10070 *(((char *)tde
) + treclen
- 1) = type
;
10072 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
10073 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
10079 unlock_user(dirp
, arg2
, ret
);
10083 #endif /* TARGET_NR_getdents */
10084 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10085 case TARGET_NR_getdents64
:
10087 struct linux_dirent64
*dirp
;
10088 abi_long count
= arg3
;
10089 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10091 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
10092 if (!is_error(ret
)) {
10093 struct linux_dirent64
*de
;
10098 reclen
= de
->d_reclen
;
10101 de
->d_reclen
= tswap16(reclen
);
10102 tswap64s((uint64_t *)&de
->d_ino
);
10103 tswap64s((uint64_t *)&de
->d_off
);
10104 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
10108 unlock_user(dirp
, arg2
, ret
);
10111 #endif /* TARGET_NR_getdents64 */
10112 #if defined(TARGET_NR__newselect)
10113 case TARGET_NR__newselect
:
10114 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
10117 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10118 # ifdef TARGET_NR_poll
10119 case TARGET_NR_poll
:
10121 # ifdef TARGET_NR_ppoll
10122 case TARGET_NR_ppoll
:
10125 struct target_pollfd
*target_pfd
;
10126 unsigned int nfds
= arg2
;
10127 struct pollfd
*pfd
;
10133 if (nfds
> (INT_MAX
/ sizeof(struct target_pollfd
))) {
10134 ret
= -TARGET_EINVAL
;
10138 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
10139 sizeof(struct target_pollfd
) * nfds
, 1);
10144 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
10145 for (i
= 0; i
< nfds
; i
++) {
10146 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
10147 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
10152 # ifdef TARGET_NR_ppoll
10153 case TARGET_NR_ppoll
:
10155 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
10156 target_sigset_t
*target_set
;
10157 sigset_t _set
, *set
= &_set
;
10160 if (target_to_host_timespec(timeout_ts
, arg3
)) {
10161 unlock_user(target_pfd
, arg1
, 0);
10169 if (arg5
!= sizeof(target_sigset_t
)) {
10170 unlock_user(target_pfd
, arg1
, 0);
10171 ret
= -TARGET_EINVAL
;
10175 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
10177 unlock_user(target_pfd
, arg1
, 0);
10180 target_to_host_sigset(set
, target_set
);
10185 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
10186 set
, SIGSET_T_SIZE
));
10188 if (!is_error(ret
) && arg3
) {
10189 host_to_target_timespec(arg3
, timeout_ts
);
10192 unlock_user(target_set
, arg4
, 0);
10197 # ifdef TARGET_NR_poll
10198 case TARGET_NR_poll
:
10200 struct timespec ts
, *pts
;
10203 /* Convert ms to secs, ns */
10204 ts
.tv_sec
= arg3
/ 1000;
10205 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
10208 /* -ve poll() timeout means "infinite" */
10211 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
10216 g_assert_not_reached();
10219 if (!is_error(ret
)) {
10220 for(i
= 0; i
< nfds
; i
++) {
10221 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
10224 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
10228 case TARGET_NR_flock
:
10229 /* NOTE: the flock constant seems to be the same for every
10231 ret
= get_errno(safe_flock(arg1
, arg2
));
10233 case TARGET_NR_readv
:
10235 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10237 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
10238 unlock_iovec(vec
, arg2
, arg3
, 1);
10240 ret
= -host_to_target_errno(errno
);
10244 case TARGET_NR_writev
:
10246 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10248 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
10249 unlock_iovec(vec
, arg2
, arg3
, 0);
10251 ret
= -host_to_target_errno(errno
);
10255 #if defined(TARGET_NR_preadv)
10256 case TARGET_NR_preadv
:
10258 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10260 ret
= get_errno(safe_preadv(arg1
, vec
, arg3
, arg4
, arg5
));
10261 unlock_iovec(vec
, arg2
, arg3
, 1);
10263 ret
= -host_to_target_errno(errno
);
10268 #if defined(TARGET_NR_pwritev)
10269 case TARGET_NR_pwritev
:
10271 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10273 ret
= get_errno(safe_pwritev(arg1
, vec
, arg3
, arg4
, arg5
));
10274 unlock_iovec(vec
, arg2
, arg3
, 0);
10276 ret
= -host_to_target_errno(errno
);
10281 case TARGET_NR_getsid
:
10282 ret
= get_errno(getsid(arg1
));
10284 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10285 case TARGET_NR_fdatasync
:
10286 ret
= get_errno(fdatasync(arg1
));
10289 #ifdef TARGET_NR__sysctl
10290 case TARGET_NR__sysctl
:
10291 /* We don't implement this, but ENOTDIR is always a safe
10293 ret
= -TARGET_ENOTDIR
;
10296 case TARGET_NR_sched_getaffinity
:
10298 unsigned int mask_size
;
10299 unsigned long *mask
;
10302 * sched_getaffinity needs multiples of ulong, so need to take
10303 * care of mismatches between target ulong and host ulong sizes.
10305 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10306 ret
= -TARGET_EINVAL
;
10309 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10311 mask
= alloca(mask_size
);
10312 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
10314 if (!is_error(ret
)) {
10316 /* More data returned than the caller's buffer will fit.
10317 * This only happens if sizeof(abi_long) < sizeof(long)
10318 * and the caller passed us a buffer holding an odd number
10319 * of abi_longs. If the host kernel is actually using the
10320 * extra 4 bytes then fail EINVAL; otherwise we can just
10321 * ignore them and only copy the interesting part.
10323 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
10324 if (numcpus
> arg2
* 8) {
10325 ret
= -TARGET_EINVAL
;
10331 if (copy_to_user(arg3
, mask
, ret
)) {
10337 case TARGET_NR_sched_setaffinity
:
10339 unsigned int mask_size
;
10340 unsigned long *mask
;
10343 * sched_setaffinity needs multiples of ulong, so need to take
10344 * care of mismatches between target ulong and host ulong sizes.
10346 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10347 ret
= -TARGET_EINVAL
;
10350 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10352 mask
= alloca(mask_size
);
10353 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
10356 memcpy(mask
, p
, arg2
);
10357 unlock_user_struct(p
, arg2
, 0);
10359 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
10362 case TARGET_NR_sched_setparam
:
10364 struct sched_param
*target_schp
;
10365 struct sched_param schp
;
10368 return -TARGET_EINVAL
;
10370 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
10372 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10373 unlock_user_struct(target_schp
, arg2
, 0);
10374 ret
= get_errno(sched_setparam(arg1
, &schp
));
10377 case TARGET_NR_sched_getparam
:
10379 struct sched_param
*target_schp
;
10380 struct sched_param schp
;
10383 return -TARGET_EINVAL
;
10385 ret
= get_errno(sched_getparam(arg1
, &schp
));
10386 if (!is_error(ret
)) {
10387 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
10389 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
10390 unlock_user_struct(target_schp
, arg2
, 1);
10394 case TARGET_NR_sched_setscheduler
:
10396 struct sched_param
*target_schp
;
10397 struct sched_param schp
;
10399 return -TARGET_EINVAL
;
10401 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
10403 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10404 unlock_user_struct(target_schp
, arg3
, 0);
10405 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
10408 case TARGET_NR_sched_getscheduler
:
10409 ret
= get_errno(sched_getscheduler(arg1
));
10411 case TARGET_NR_sched_yield
:
10412 ret
= get_errno(sched_yield());
10414 case TARGET_NR_sched_get_priority_max
:
10415 ret
= get_errno(sched_get_priority_max(arg1
));
10417 case TARGET_NR_sched_get_priority_min
:
10418 ret
= get_errno(sched_get_priority_min(arg1
));
10420 case TARGET_NR_sched_rr_get_interval
:
10422 struct timespec ts
;
10423 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10424 if (!is_error(ret
)) {
10425 ret
= host_to_target_timespec(arg2
, &ts
);
10429 case TARGET_NR_nanosleep
:
10431 struct timespec req
, rem
;
10432 target_to_host_timespec(&req
, arg1
);
10433 ret
= get_errno(safe_nanosleep(&req
, &rem
));
10434 if (is_error(ret
) && arg2
) {
10435 host_to_target_timespec(arg2
, &rem
);
10439 #ifdef TARGET_NR_query_module
10440 case TARGET_NR_query_module
:
10441 goto unimplemented
;
10443 #ifdef TARGET_NR_nfsservctl
10444 case TARGET_NR_nfsservctl
:
10445 goto unimplemented
;
10447 case TARGET_NR_prctl
:
10449 case PR_GET_PDEATHSIG
:
10452 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
10453 if (!is_error(ret
) && arg2
10454 && put_user_ual(deathsig
, arg2
)) {
10462 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
10466 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10467 arg3
, arg4
, arg5
));
10468 unlock_user(name
, arg2
, 16);
10473 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
10477 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10478 arg3
, arg4
, arg5
));
10479 unlock_user(name
, arg2
, 0);
10484 /* Most prctl options have no pointer arguments */
10485 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
10489 #ifdef TARGET_NR_arch_prctl
10490 case TARGET_NR_arch_prctl
:
10491 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10492 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
10495 goto unimplemented
;
10498 #ifdef TARGET_NR_pread64
10499 case TARGET_NR_pread64
:
10500 if (regpairs_aligned(cpu_env
)) {
10504 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
10506 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10507 unlock_user(p
, arg2
, ret
);
10509 case TARGET_NR_pwrite64
:
10510 if (regpairs_aligned(cpu_env
)) {
10514 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
10516 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10517 unlock_user(p
, arg2
, 0);
10520 case TARGET_NR_getcwd
:
10521 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
10523 ret
= get_errno(sys_getcwd1(p
, arg2
));
10524 unlock_user(p
, arg1
, ret
);
10526 case TARGET_NR_capget
:
10527 case TARGET_NR_capset
:
10529 struct target_user_cap_header
*target_header
;
10530 struct target_user_cap_data
*target_data
= NULL
;
10531 struct __user_cap_header_struct header
;
10532 struct __user_cap_data_struct data
[2];
10533 struct __user_cap_data_struct
*dataptr
= NULL
;
10534 int i
, target_datalen
;
10535 int data_items
= 1;
10537 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
10540 header
.version
= tswap32(target_header
->version
);
10541 header
.pid
= tswap32(target_header
->pid
);
10543 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
10544 /* Version 2 and up takes pointer to two user_data structs */
10548 target_datalen
= sizeof(*target_data
) * data_items
;
10551 if (num
== TARGET_NR_capget
) {
10552 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
10554 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
10556 if (!target_data
) {
10557 unlock_user_struct(target_header
, arg1
, 0);
10561 if (num
== TARGET_NR_capset
) {
10562 for (i
= 0; i
< data_items
; i
++) {
10563 data
[i
].effective
= tswap32(target_data
[i
].effective
);
10564 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
10565 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
10572 if (num
== TARGET_NR_capget
) {
10573 ret
= get_errno(capget(&header
, dataptr
));
10575 ret
= get_errno(capset(&header
, dataptr
));
10578 /* The kernel always updates version for both capget and capset */
10579 target_header
->version
= tswap32(header
.version
);
10580 unlock_user_struct(target_header
, arg1
, 1);
10583 if (num
== TARGET_NR_capget
) {
10584 for (i
= 0; i
< data_items
; i
++) {
10585 target_data
[i
].effective
= tswap32(data
[i
].effective
);
10586 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
10587 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
10589 unlock_user(target_data
, arg2
, target_datalen
);
10591 unlock_user(target_data
, arg2
, 0);
10596 case TARGET_NR_sigaltstack
:
10597 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
10600 #ifdef CONFIG_SENDFILE
10601 case TARGET_NR_sendfile
:
10603 off_t
*offp
= NULL
;
10606 ret
= get_user_sal(off
, arg3
);
10607 if (is_error(ret
)) {
10612 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10613 if (!is_error(ret
) && arg3
) {
10614 abi_long ret2
= put_user_sal(off
, arg3
);
10615 if (is_error(ret2
)) {
10621 #ifdef TARGET_NR_sendfile64
10622 case TARGET_NR_sendfile64
:
10624 off_t
*offp
= NULL
;
10627 ret
= get_user_s64(off
, arg3
);
10628 if (is_error(ret
)) {
10633 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10634 if (!is_error(ret
) && arg3
) {
10635 abi_long ret2
= put_user_s64(off
, arg3
);
10636 if (is_error(ret2
)) {
10644 case TARGET_NR_sendfile
:
10645 #ifdef TARGET_NR_sendfile64
10646 case TARGET_NR_sendfile64
:
10648 goto unimplemented
;
10651 #ifdef TARGET_NR_getpmsg
10652 case TARGET_NR_getpmsg
:
10653 goto unimplemented
;
10655 #ifdef TARGET_NR_putpmsg
10656 case TARGET_NR_putpmsg
:
10657 goto unimplemented
;
10659 #ifdef TARGET_NR_vfork
10660 case TARGET_NR_vfork
:
10661 ret
= get_errno(do_fork(cpu_env
,
10662 CLONE_VFORK
| CLONE_VM
| TARGET_SIGCHLD
,
10666 #ifdef TARGET_NR_ugetrlimit
10667 case TARGET_NR_ugetrlimit
:
10669 struct rlimit rlim
;
10670 int resource
= target_to_host_resource(arg1
);
10671 ret
= get_errno(getrlimit(resource
, &rlim
));
10672 if (!is_error(ret
)) {
10673 struct target_rlimit
*target_rlim
;
10674 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
10676 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
10677 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
10678 unlock_user_struct(target_rlim
, arg2
, 1);
10683 #ifdef TARGET_NR_truncate64
10684 case TARGET_NR_truncate64
:
10685 if (!(p
= lock_user_string(arg1
)))
10687 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
10688 unlock_user(p
, arg1
, 0);
10691 #ifdef TARGET_NR_ftruncate64
10692 case TARGET_NR_ftruncate64
:
10693 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
10696 #ifdef TARGET_NR_stat64
10697 case TARGET_NR_stat64
:
10698 if (!(p
= lock_user_string(arg1
)))
10700 ret
= get_errno(stat(path(p
), &st
));
10701 unlock_user(p
, arg1
, 0);
10702 if (!is_error(ret
))
10703 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10706 #ifdef TARGET_NR_lstat64
10707 case TARGET_NR_lstat64
:
10708 if (!(p
= lock_user_string(arg1
)))
10710 ret
= get_errno(lstat(path(p
), &st
));
10711 unlock_user(p
, arg1
, 0);
10712 if (!is_error(ret
))
10713 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10716 #ifdef TARGET_NR_fstat64
10717 case TARGET_NR_fstat64
:
10718 ret
= get_errno(fstat(arg1
, &st
));
10719 if (!is_error(ret
))
10720 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10723 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10724 #ifdef TARGET_NR_fstatat64
10725 case TARGET_NR_fstatat64
:
10727 #ifdef TARGET_NR_newfstatat
10728 case TARGET_NR_newfstatat
:
10730 if (!(p
= lock_user_string(arg2
)))
10732 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
10733 if (!is_error(ret
))
10734 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
10737 #ifdef TARGET_NR_lchown
10738 case TARGET_NR_lchown
:
10739 if (!(p
= lock_user_string(arg1
)))
10741 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10742 unlock_user(p
, arg1
, 0);
10745 #ifdef TARGET_NR_getuid
10746 case TARGET_NR_getuid
:
10747 ret
= get_errno(high2lowuid(getuid()));
10750 #ifdef TARGET_NR_getgid
10751 case TARGET_NR_getgid
:
10752 ret
= get_errno(high2lowgid(getgid()));
10755 #ifdef TARGET_NR_geteuid
10756 case TARGET_NR_geteuid
:
10757 ret
= get_errno(high2lowuid(geteuid()));
10760 #ifdef TARGET_NR_getegid
10761 case TARGET_NR_getegid
:
10762 ret
= get_errno(high2lowgid(getegid()));
10765 case TARGET_NR_setreuid
:
10766 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
10768 case TARGET_NR_setregid
:
10769 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
10771 case TARGET_NR_getgroups
:
10773 int gidsetsize
= arg1
;
10774 target_id
*target_grouplist
;
10778 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10779 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10780 if (gidsetsize
== 0)
10782 if (!is_error(ret
)) {
10783 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
10784 if (!target_grouplist
)
10786 for(i
= 0;i
< ret
; i
++)
10787 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
10788 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
10792 case TARGET_NR_setgroups
:
10794 int gidsetsize
= arg1
;
10795 target_id
*target_grouplist
;
10796 gid_t
*grouplist
= NULL
;
10799 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10800 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
10801 if (!target_grouplist
) {
10802 ret
= -TARGET_EFAULT
;
10805 for (i
= 0; i
< gidsetsize
; i
++) {
10806 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
10808 unlock_user(target_grouplist
, arg2
, 0);
10810 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
10813 case TARGET_NR_fchown
:
10814 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
10816 #if defined(TARGET_NR_fchownat)
10817 case TARGET_NR_fchownat
:
10818 if (!(p
= lock_user_string(arg2
)))
10820 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
10821 low2highgid(arg4
), arg5
));
10822 unlock_user(p
, arg2
, 0);
10825 #ifdef TARGET_NR_setresuid
10826 case TARGET_NR_setresuid
:
10827 ret
= get_errno(sys_setresuid(low2highuid(arg1
),
10829 low2highuid(arg3
)));
10832 #ifdef TARGET_NR_getresuid
10833 case TARGET_NR_getresuid
:
10835 uid_t ruid
, euid
, suid
;
10836 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10837 if (!is_error(ret
)) {
10838 if (put_user_id(high2lowuid(ruid
), arg1
)
10839 || put_user_id(high2lowuid(euid
), arg2
)
10840 || put_user_id(high2lowuid(suid
), arg3
))
10846 #ifdef TARGET_NR_getresgid
10847 case TARGET_NR_setresgid
:
10848 ret
= get_errno(sys_setresgid(low2highgid(arg1
),
10850 low2highgid(arg3
)));
10853 #ifdef TARGET_NR_getresgid
10854 case TARGET_NR_getresgid
:
10856 gid_t rgid
, egid
, sgid
;
10857 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10858 if (!is_error(ret
)) {
10859 if (put_user_id(high2lowgid(rgid
), arg1
)
10860 || put_user_id(high2lowgid(egid
), arg2
)
10861 || put_user_id(high2lowgid(sgid
), arg3
))
10867 #ifdef TARGET_NR_chown
10868 case TARGET_NR_chown
:
10869 if (!(p
= lock_user_string(arg1
)))
10871 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10872 unlock_user(p
, arg1
, 0);
10875 case TARGET_NR_setuid
:
10876 ret
= get_errno(sys_setuid(low2highuid(arg1
)));
10878 case TARGET_NR_setgid
:
10879 ret
= get_errno(sys_setgid(low2highgid(arg1
)));
10881 case TARGET_NR_setfsuid
:
10882 ret
= get_errno(setfsuid(arg1
));
10884 case TARGET_NR_setfsgid
:
10885 ret
= get_errno(setfsgid(arg1
));
10888 #ifdef TARGET_NR_lchown32
10889 case TARGET_NR_lchown32
:
10890 if (!(p
= lock_user_string(arg1
)))
10892 ret
= get_errno(lchown(p
, arg2
, arg3
));
10893 unlock_user(p
, arg1
, 0);
10896 #ifdef TARGET_NR_getuid32
10897 case TARGET_NR_getuid32
:
10898 ret
= get_errno(getuid());
10902 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10903 /* Alpha specific */
10904 case TARGET_NR_getxuid
:
10908 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
10910 ret
= get_errno(getuid());
10913 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10914 /* Alpha specific */
10915 case TARGET_NR_getxgid
:
10919 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
10921 ret
= get_errno(getgid());
10924 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10925 /* Alpha specific */
10926 case TARGET_NR_osf_getsysinfo
:
10927 ret
= -TARGET_EOPNOTSUPP
;
10929 case TARGET_GSI_IEEE_FP_CONTROL
:
10931 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
10933 /* Copied from linux ieee_fpcr_to_swcr. */
10934 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
10935 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
10936 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
10937 | SWCR_TRAP_ENABLE_DZE
10938 | SWCR_TRAP_ENABLE_OVF
);
10939 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
10940 | SWCR_TRAP_ENABLE_INE
);
10941 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
10942 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
10944 if (put_user_u64 (swcr
, arg2
))
10950 /* case GSI_IEEE_STATE_AT_SIGNAL:
10951 -- Not implemented in linux kernel.
10953 -- Retrieves current unaligned access state; not much used.
10954 case GSI_PROC_TYPE:
10955 -- Retrieves implver information; surely not used.
10956 case GSI_GET_HWRPB:
10957 -- Grabs a copy of the HWRPB; surely not used.
10962 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10963 /* Alpha specific */
10964 case TARGET_NR_osf_setsysinfo
:
10965 ret
= -TARGET_EOPNOTSUPP
;
10967 case TARGET_SSI_IEEE_FP_CONTROL
:
10969 uint64_t swcr
, fpcr
, orig_fpcr
;
10971 if (get_user_u64 (swcr
, arg2
)) {
10974 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10975 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
10977 /* Copied from linux ieee_swcr_to_fpcr. */
10978 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
10979 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
10980 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
10981 | SWCR_TRAP_ENABLE_DZE
10982 | SWCR_TRAP_ENABLE_OVF
)) << 48;
10983 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
10984 | SWCR_TRAP_ENABLE_INE
)) << 57;
10985 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
10986 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
10988 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10993 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
10995 uint64_t exc
, fpcr
, orig_fpcr
;
10998 if (get_user_u64(exc
, arg2
)) {
11002 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11004 /* We only add to the exception status here. */
11005 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
11007 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11010 /* Old exceptions are not signaled. */
11011 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
11013 /* If any exceptions set by this call,
11014 and are unmasked, send a signal. */
11016 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
11017 si_code
= TARGET_FPE_FLTRES
;
11019 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
11020 si_code
= TARGET_FPE_FLTUND
;
11022 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
11023 si_code
= TARGET_FPE_FLTOVF
;
11025 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
11026 si_code
= TARGET_FPE_FLTDIV
;
11028 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
11029 si_code
= TARGET_FPE_FLTINV
;
11031 if (si_code
!= 0) {
11032 target_siginfo_t info
;
11033 info
.si_signo
= SIGFPE
;
11035 info
.si_code
= si_code
;
11036 info
._sifields
._sigfault
._addr
11037 = ((CPUArchState
*)cpu_env
)->pc
;
11038 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
11039 QEMU_SI_FAULT
, &info
);
11044 /* case SSI_NVPAIRS:
11045 -- Used with SSIN_UACPROC to enable unaligned accesses.
11046 case SSI_IEEE_STATE_AT_SIGNAL:
11047 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11048 -- Not implemented in linux kernel
11053 #ifdef TARGET_NR_osf_sigprocmask
11054 /* Alpha specific. */
11055 case TARGET_NR_osf_sigprocmask
:
11059 sigset_t set
, oldset
;
11062 case TARGET_SIG_BLOCK
:
11065 case TARGET_SIG_UNBLOCK
:
11068 case TARGET_SIG_SETMASK
:
11072 ret
= -TARGET_EINVAL
;
11076 target_to_host_old_sigset(&set
, &mask
);
11077 ret
= do_sigprocmask(how
, &set
, &oldset
);
11079 host_to_target_old_sigset(&mask
, &oldset
);
11086 #ifdef TARGET_NR_getgid32
11087 case TARGET_NR_getgid32
:
11088 ret
= get_errno(getgid());
11091 #ifdef TARGET_NR_geteuid32
11092 case TARGET_NR_geteuid32
:
11093 ret
= get_errno(geteuid());
11096 #ifdef TARGET_NR_getegid32
11097 case TARGET_NR_getegid32
:
11098 ret
= get_errno(getegid());
11101 #ifdef TARGET_NR_setreuid32
11102 case TARGET_NR_setreuid32
:
11103 ret
= get_errno(setreuid(arg1
, arg2
));
11106 #ifdef TARGET_NR_setregid32
11107 case TARGET_NR_setregid32
:
11108 ret
= get_errno(setregid(arg1
, arg2
));
11111 #ifdef TARGET_NR_getgroups32
11112 case TARGET_NR_getgroups32
:
11114 int gidsetsize
= arg1
;
11115 uint32_t *target_grouplist
;
11119 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11120 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11121 if (gidsetsize
== 0)
11123 if (!is_error(ret
)) {
11124 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
11125 if (!target_grouplist
) {
11126 ret
= -TARGET_EFAULT
;
11129 for(i
= 0;i
< ret
; i
++)
11130 target_grouplist
[i
] = tswap32(grouplist
[i
]);
11131 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
11136 #ifdef TARGET_NR_setgroups32
11137 case TARGET_NR_setgroups32
:
11139 int gidsetsize
= arg1
;
11140 uint32_t *target_grouplist
;
11144 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11145 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
11146 if (!target_grouplist
) {
11147 ret
= -TARGET_EFAULT
;
11150 for(i
= 0;i
< gidsetsize
; i
++)
11151 grouplist
[i
] = tswap32(target_grouplist
[i
]);
11152 unlock_user(target_grouplist
, arg2
, 0);
11153 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
11157 #ifdef TARGET_NR_fchown32
11158 case TARGET_NR_fchown32
:
11159 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
11162 #ifdef TARGET_NR_setresuid32
11163 case TARGET_NR_setresuid32
:
11164 ret
= get_errno(sys_setresuid(arg1
, arg2
, arg3
));
11167 #ifdef TARGET_NR_getresuid32
11168 case TARGET_NR_getresuid32
:
11170 uid_t ruid
, euid
, suid
;
11171 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11172 if (!is_error(ret
)) {
11173 if (put_user_u32(ruid
, arg1
)
11174 || put_user_u32(euid
, arg2
)
11175 || put_user_u32(suid
, arg3
))
11181 #ifdef TARGET_NR_setresgid32
11182 case TARGET_NR_setresgid32
:
11183 ret
= get_errno(sys_setresgid(arg1
, arg2
, arg3
));
11186 #ifdef TARGET_NR_getresgid32
11187 case TARGET_NR_getresgid32
:
11189 gid_t rgid
, egid
, sgid
;
11190 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11191 if (!is_error(ret
)) {
11192 if (put_user_u32(rgid
, arg1
)
11193 || put_user_u32(egid
, arg2
)
11194 || put_user_u32(sgid
, arg3
))
11200 #ifdef TARGET_NR_chown32
11201 case TARGET_NR_chown32
:
11202 if (!(p
= lock_user_string(arg1
)))
11204 ret
= get_errno(chown(p
, arg2
, arg3
));
11205 unlock_user(p
, arg1
, 0);
11208 #ifdef TARGET_NR_setuid32
11209 case TARGET_NR_setuid32
:
11210 ret
= get_errno(sys_setuid(arg1
));
11213 #ifdef TARGET_NR_setgid32
11214 case TARGET_NR_setgid32
:
11215 ret
= get_errno(sys_setgid(arg1
));
11218 #ifdef TARGET_NR_setfsuid32
11219 case TARGET_NR_setfsuid32
:
11220 ret
= get_errno(setfsuid(arg1
));
11223 #ifdef TARGET_NR_setfsgid32
11224 case TARGET_NR_setfsgid32
:
11225 ret
= get_errno(setfsgid(arg1
));
11229 case TARGET_NR_pivot_root
:
11230 goto unimplemented
;
11231 #ifdef TARGET_NR_mincore
11232 case TARGET_NR_mincore
:
11235 ret
= -TARGET_ENOMEM
;
11236 a
= lock_user(VERIFY_READ
, arg1
, arg2
, 0);
11240 ret
= -TARGET_EFAULT
;
11241 p
= lock_user_string(arg3
);
11245 ret
= get_errno(mincore(a
, arg2
, p
));
11246 unlock_user(p
, arg3
, ret
);
11248 unlock_user(a
, arg1
, 0);
11252 #ifdef TARGET_NR_arm_fadvise64_64
11253 case TARGET_NR_arm_fadvise64_64
:
11254 /* arm_fadvise64_64 looks like fadvise64_64 but
11255 * with different argument order: fd, advice, offset, len
11256 * rather than the usual fd, offset, len, advice.
11257 * Note that offset and len are both 64-bit so appear as
11258 * pairs of 32-bit registers.
11260 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
11261 target_offset64(arg5
, arg6
), arg2
);
11262 ret
= -host_to_target_errno(ret
);
11266 #if TARGET_ABI_BITS == 32
11268 #ifdef TARGET_NR_fadvise64_64
11269 case TARGET_NR_fadvise64_64
:
11270 #if defined(TARGET_PPC)
11271 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11279 /* 6 args: fd, offset (high, low), len (high, low), advice */
11280 if (regpairs_aligned(cpu_env
)) {
11281 /* offset is in (3,4), len in (5,6) and advice in 7 */
11289 ret
= -host_to_target_errno(posix_fadvise(arg1
,
11290 target_offset64(arg2
, arg3
),
11291 target_offset64(arg4
, arg5
),
11296 #ifdef TARGET_NR_fadvise64
11297 case TARGET_NR_fadvise64
:
11298 /* 5 args: fd, offset (high, low), len, advice */
11299 if (regpairs_aligned(cpu_env
)) {
11300 /* offset is in (3,4), len in 5 and advice in 6 */
11306 ret
= -host_to_target_errno(posix_fadvise(arg1
,
11307 target_offset64(arg2
, arg3
),
11312 #else /* not a 32-bit ABI */
11313 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11314 #ifdef TARGET_NR_fadvise64_64
11315 case TARGET_NR_fadvise64_64
:
11317 #ifdef TARGET_NR_fadvise64
11318 case TARGET_NR_fadvise64
:
11320 #ifdef TARGET_S390X
11322 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
11323 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
11324 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
11325 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
11329 ret
= -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
11332 #endif /* end of 64-bit ABI fadvise handling */
11334 #ifdef TARGET_NR_madvise
11335 case TARGET_NR_madvise
:
11336 /* A straight passthrough may not be safe because qemu sometimes
11337 turns private file-backed mappings into anonymous mappings.
11338 This will break MADV_DONTNEED.
11339 This is a hint, so ignoring and returning success is ok. */
11340 ret
= get_errno(0);
11343 #if TARGET_ABI_BITS == 32
11344 case TARGET_NR_fcntl64
:
11348 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
11349 to_flock64_fn
*copyto
= copy_to_user_flock64
;
11352 if (((CPUARMState
*)cpu_env
)->eabi
) {
11353 copyfrom
= copy_from_user_eabi_flock64
;
11354 copyto
= copy_to_user_eabi_flock64
;
11358 cmd
= target_to_host_fcntl_cmd(arg2
);
11359 if (cmd
== -TARGET_EINVAL
) {
11365 case TARGET_F_GETLK64
:
11366 ret
= copyfrom(&fl
, arg3
);
11370 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
11372 ret
= copyto(arg3
, &fl
);
11376 case TARGET_F_SETLK64
:
11377 case TARGET_F_SETLKW64
:
11378 ret
= copyfrom(&fl
, arg3
);
11382 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11385 ret
= do_fcntl(arg1
, arg2
, arg3
);
11391 #ifdef TARGET_NR_cacheflush
11392 case TARGET_NR_cacheflush
:
11393 /* self-modifying code is handled automatically, so nothing needed */
11397 #ifdef TARGET_NR_security
11398 case TARGET_NR_security
:
11399 goto unimplemented
;
11401 #ifdef TARGET_NR_getpagesize
11402 case TARGET_NR_getpagesize
:
11403 ret
= TARGET_PAGE_SIZE
;
11406 case TARGET_NR_gettid
:
11407 ret
= get_errno(gettid());
11409 #ifdef TARGET_NR_readahead
11410 case TARGET_NR_readahead
:
11411 #if TARGET_ABI_BITS == 32
11412 if (regpairs_aligned(cpu_env
)) {
11417 ret
= get_errno(readahead(arg1
, target_offset64(arg2
, arg3
) , arg4
));
11419 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
11424 #ifdef TARGET_NR_setxattr
11425 case TARGET_NR_listxattr
:
11426 case TARGET_NR_llistxattr
:
11430 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11432 ret
= -TARGET_EFAULT
;
11436 p
= lock_user_string(arg1
);
11438 if (num
== TARGET_NR_listxattr
) {
11439 ret
= get_errno(listxattr(p
, b
, arg3
));
11441 ret
= get_errno(llistxattr(p
, b
, arg3
));
11444 ret
= -TARGET_EFAULT
;
11446 unlock_user(p
, arg1
, 0);
11447 unlock_user(b
, arg2
, arg3
);
11450 case TARGET_NR_flistxattr
:
11454 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11456 ret
= -TARGET_EFAULT
;
11460 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
11461 unlock_user(b
, arg2
, arg3
);
11464 case TARGET_NR_setxattr
:
11465 case TARGET_NR_lsetxattr
:
11467 void *p
, *n
, *v
= 0;
11469 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11471 ret
= -TARGET_EFAULT
;
11475 p
= lock_user_string(arg1
);
11476 n
= lock_user_string(arg2
);
11478 if (num
== TARGET_NR_setxattr
) {
11479 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
11481 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
11484 ret
= -TARGET_EFAULT
;
11486 unlock_user(p
, arg1
, 0);
11487 unlock_user(n
, arg2
, 0);
11488 unlock_user(v
, arg3
, 0);
11491 case TARGET_NR_fsetxattr
:
11495 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11497 ret
= -TARGET_EFAULT
;
11501 n
= lock_user_string(arg2
);
11503 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
11505 ret
= -TARGET_EFAULT
;
11507 unlock_user(n
, arg2
, 0);
11508 unlock_user(v
, arg3
, 0);
11511 case TARGET_NR_getxattr
:
11512 case TARGET_NR_lgetxattr
:
11514 void *p
, *n
, *v
= 0;
11516 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11518 ret
= -TARGET_EFAULT
;
11522 p
= lock_user_string(arg1
);
11523 n
= lock_user_string(arg2
);
11525 if (num
== TARGET_NR_getxattr
) {
11526 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
11528 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
11531 ret
= -TARGET_EFAULT
;
11533 unlock_user(p
, arg1
, 0);
11534 unlock_user(n
, arg2
, 0);
11535 unlock_user(v
, arg3
, arg4
);
11538 case TARGET_NR_fgetxattr
:
11542 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11544 ret
= -TARGET_EFAULT
;
11548 n
= lock_user_string(arg2
);
11550 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
11552 ret
= -TARGET_EFAULT
;
11554 unlock_user(n
, arg2
, 0);
11555 unlock_user(v
, arg3
, arg4
);
11558 case TARGET_NR_removexattr
:
11559 case TARGET_NR_lremovexattr
:
11562 p
= lock_user_string(arg1
);
11563 n
= lock_user_string(arg2
);
11565 if (num
== TARGET_NR_removexattr
) {
11566 ret
= get_errno(removexattr(p
, n
));
11568 ret
= get_errno(lremovexattr(p
, n
));
11571 ret
= -TARGET_EFAULT
;
11573 unlock_user(p
, arg1
, 0);
11574 unlock_user(n
, arg2
, 0);
11577 case TARGET_NR_fremovexattr
:
11580 n
= lock_user_string(arg2
);
11582 ret
= get_errno(fremovexattr(arg1
, n
));
11584 ret
= -TARGET_EFAULT
;
11586 unlock_user(n
, arg2
, 0);
11590 #endif /* CONFIG_ATTR */
11591 #ifdef TARGET_NR_set_thread_area
11592 case TARGET_NR_set_thread_area
:
11593 #if defined(TARGET_MIPS)
11594 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
11597 #elif defined(TARGET_CRIS)
11599 ret
= -TARGET_EINVAL
;
11601 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
11605 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11606 ret
= do_set_thread_area(cpu_env
, arg1
);
11608 #elif defined(TARGET_M68K)
11610 TaskState
*ts
= cpu
->opaque
;
11611 ts
->tp_value
= arg1
;
11616 goto unimplemented_nowarn
;
11619 #ifdef TARGET_NR_get_thread_area
11620 case TARGET_NR_get_thread_area
:
11621 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11622 ret
= do_get_thread_area(cpu_env
, arg1
);
11624 #elif defined(TARGET_M68K)
11626 TaskState
*ts
= cpu
->opaque
;
11627 ret
= ts
->tp_value
;
11631 goto unimplemented_nowarn
;
11634 #ifdef TARGET_NR_getdomainname
11635 case TARGET_NR_getdomainname
:
11636 goto unimplemented_nowarn
;
11639 #ifdef TARGET_NR_clock_gettime
11640 case TARGET_NR_clock_gettime
:
11642 struct timespec ts
;
11643 ret
= get_errno(clock_gettime(arg1
, &ts
));
11644 if (!is_error(ret
)) {
11645 host_to_target_timespec(arg2
, &ts
);
11650 #ifdef TARGET_NR_clock_getres
11651 case TARGET_NR_clock_getres
:
11653 struct timespec ts
;
11654 ret
= get_errno(clock_getres(arg1
, &ts
));
11655 if (!is_error(ret
)) {
11656 host_to_target_timespec(arg2
, &ts
);
11661 #ifdef TARGET_NR_clock_nanosleep
11662 case TARGET_NR_clock_nanosleep
:
11664 struct timespec ts
;
11665 target_to_host_timespec(&ts
, arg3
);
11666 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
11667 &ts
, arg4
? &ts
: NULL
));
11669 host_to_target_timespec(arg4
, &ts
);
11671 #if defined(TARGET_PPC)
11672 /* clock_nanosleep is odd in that it returns positive errno values.
11673 * On PPC, CR0 bit 3 should be set in such a situation. */
11674 if (ret
&& ret
!= -TARGET_ERESTARTSYS
) {
11675 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
11682 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11683 case TARGET_NR_set_tid_address
:
11684 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
11688 case TARGET_NR_tkill
:
11689 ret
= get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
11692 case TARGET_NR_tgkill
:
11693 ret
= get_errno(safe_tgkill((int)arg1
, (int)arg2
,
11694 target_to_host_signal(arg3
)));
11697 #ifdef TARGET_NR_set_robust_list
11698 case TARGET_NR_set_robust_list
:
11699 case TARGET_NR_get_robust_list
:
11700 /* The ABI for supporting robust futexes has userspace pass
11701 * the kernel a pointer to a linked list which is updated by
11702 * userspace after the syscall; the list is walked by the kernel
11703 * when the thread exits. Since the linked list in QEMU guest
11704 * memory isn't a valid linked list for the host and we have
11705 * no way to reliably intercept the thread-death event, we can't
11706 * support these. Silently return ENOSYS so that guest userspace
11707 * falls back to a non-robust futex implementation (which should
11708 * be OK except in the corner case of the guest crashing while
11709 * holding a mutex that is shared with another process via
11712 goto unimplemented_nowarn
;
11715 #if defined(TARGET_NR_utimensat)
11716 case TARGET_NR_utimensat
:
11718 struct timespec
*tsp
, ts
[2];
11722 target_to_host_timespec(ts
, arg3
);
11723 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
11727 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
11729 if (!(p
= lock_user_string(arg2
))) {
11730 ret
= -TARGET_EFAULT
;
11733 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
11734 unlock_user(p
, arg2
, 0);
11739 case TARGET_NR_futex
:
11740 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11742 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11743 case TARGET_NR_inotify_init
:
11744 ret
= get_errno(sys_inotify_init());
11746 fd_trans_register(ret
, &target_inotify_trans
);
11750 #ifdef CONFIG_INOTIFY1
11751 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11752 case TARGET_NR_inotify_init1
:
11753 ret
= get_errno(sys_inotify_init1(target_to_host_bitmask(arg1
,
11754 fcntl_flags_tbl
)));
11756 fd_trans_register(ret
, &target_inotify_trans
);
11761 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11762 case TARGET_NR_inotify_add_watch
:
11763 p
= lock_user_string(arg2
);
11764 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
11765 unlock_user(p
, arg2
, 0);
11768 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11769 case TARGET_NR_inotify_rm_watch
:
11770 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
11774 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11775 case TARGET_NR_mq_open
:
11777 struct mq_attr posix_mq_attr
;
11778 struct mq_attr
*pposix_mq_attr
;
11781 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
11782 pposix_mq_attr
= NULL
;
11784 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
11787 pposix_mq_attr
= &posix_mq_attr
;
11789 p
= lock_user_string(arg1
- 1);
11793 ret
= get_errno(mq_open(p
, host_flags
, arg3
, pposix_mq_attr
));
11794 unlock_user (p
, arg1
, 0);
11798 case TARGET_NR_mq_unlink
:
11799 p
= lock_user_string(arg1
- 1);
11801 ret
= -TARGET_EFAULT
;
11804 ret
= get_errno(mq_unlink(p
));
11805 unlock_user (p
, arg1
, 0);
11808 case TARGET_NR_mq_timedsend
:
11810 struct timespec ts
;
11812 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11814 target_to_host_timespec(&ts
, arg5
);
11815 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
11816 host_to_target_timespec(arg5
, &ts
);
11818 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
11820 unlock_user (p
, arg2
, arg3
);
11824 case TARGET_NR_mq_timedreceive
:
11826 struct timespec ts
;
11829 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11831 target_to_host_timespec(&ts
, arg5
);
11832 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11834 host_to_target_timespec(arg5
, &ts
);
11836 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11839 unlock_user (p
, arg2
, arg3
);
11841 put_user_u32(prio
, arg4
);
11845 /* Not implemented for now... */
11846 /* case TARGET_NR_mq_notify: */
11849 case TARGET_NR_mq_getsetattr
:
11851 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
11854 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
11855 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
11858 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
11859 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
11866 #ifdef CONFIG_SPLICE
11867 #ifdef TARGET_NR_tee
11868 case TARGET_NR_tee
:
11870 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
11874 #ifdef TARGET_NR_splice
11875 case TARGET_NR_splice
:
11877 loff_t loff_in
, loff_out
;
11878 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
11880 if (get_user_u64(loff_in
, arg2
)) {
11883 ploff_in
= &loff_in
;
11886 if (get_user_u64(loff_out
, arg4
)) {
11889 ploff_out
= &loff_out
;
11891 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
11893 if (put_user_u64(loff_in
, arg2
)) {
11898 if (put_user_u64(loff_out
, arg4
)) {
11905 #ifdef TARGET_NR_vmsplice
11906 case TARGET_NR_vmsplice
:
11908 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
11910 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
11911 unlock_iovec(vec
, arg2
, arg3
, 0);
11913 ret
= -host_to_target_errno(errno
);
11918 #endif /* CONFIG_SPLICE */
11919 #ifdef CONFIG_EVENTFD
11920 #if defined(TARGET_NR_eventfd)
11921 case TARGET_NR_eventfd
:
11922 ret
= get_errno(eventfd(arg1
, 0));
11924 fd_trans_register(ret
, &target_eventfd_trans
);
11928 #if defined(TARGET_NR_eventfd2)
11929 case TARGET_NR_eventfd2
:
11931 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
11932 if (arg2
& TARGET_O_NONBLOCK
) {
11933 host_flags
|= O_NONBLOCK
;
11935 if (arg2
& TARGET_O_CLOEXEC
) {
11936 host_flags
|= O_CLOEXEC
;
11938 ret
= get_errno(eventfd(arg1
, host_flags
));
11940 fd_trans_register(ret
, &target_eventfd_trans
);
11945 #endif /* CONFIG_EVENTFD */
11946 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11947 case TARGET_NR_fallocate
:
11948 #if TARGET_ABI_BITS == 32
11949 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
11950 target_offset64(arg5
, arg6
)));
11952 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
11956 #if defined(CONFIG_SYNC_FILE_RANGE)
11957 #if defined(TARGET_NR_sync_file_range)
11958 case TARGET_NR_sync_file_range
:
11959 #if TARGET_ABI_BITS == 32
11960 #if defined(TARGET_MIPS)
11961 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11962 target_offset64(arg5
, arg6
), arg7
));
11964 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
11965 target_offset64(arg4
, arg5
), arg6
));
11966 #endif /* !TARGET_MIPS */
11968 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
11972 #if defined(TARGET_NR_sync_file_range2)
11973 case TARGET_NR_sync_file_range2
:
11974 /* This is like sync_file_range but the arguments are reordered */
11975 #if TARGET_ABI_BITS == 32
11976 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11977 target_offset64(arg5
, arg6
), arg2
));
11979 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
11984 #if defined(TARGET_NR_signalfd4)
11985 case TARGET_NR_signalfd4
:
11986 ret
= do_signalfd4(arg1
, arg2
, arg4
);
11989 #if defined(TARGET_NR_signalfd)
11990 case TARGET_NR_signalfd
:
11991 ret
= do_signalfd4(arg1
, arg2
, 0);
11994 #if defined(CONFIG_EPOLL)
11995 #if defined(TARGET_NR_epoll_create)
11996 case TARGET_NR_epoll_create
:
11997 ret
= get_errno(epoll_create(arg1
));
12000 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12001 case TARGET_NR_epoll_create1
:
12002 ret
= get_errno(epoll_create1(arg1
));
12005 #if defined(TARGET_NR_epoll_ctl)
12006 case TARGET_NR_epoll_ctl
:
12008 struct epoll_event ep
;
12009 struct epoll_event
*epp
= 0;
12011 struct target_epoll_event
*target_ep
;
12012 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
12015 ep
.events
= tswap32(target_ep
->events
);
12016 /* The epoll_data_t union is just opaque data to the kernel,
12017 * so we transfer all 64 bits across and need not worry what
12018 * actual data type it is.
12020 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
12021 unlock_user_struct(target_ep
, arg4
, 0);
12024 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
12029 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12030 #if defined(TARGET_NR_epoll_wait)
12031 case TARGET_NR_epoll_wait
:
12033 #if defined(TARGET_NR_epoll_pwait)
12034 case TARGET_NR_epoll_pwait
:
12037 struct target_epoll_event
*target_ep
;
12038 struct epoll_event
*ep
;
12040 int maxevents
= arg3
;
12041 int timeout
= arg4
;
12043 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
12044 ret
= -TARGET_EINVAL
;
12048 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
12049 maxevents
* sizeof(struct target_epoll_event
), 1);
12054 ep
= g_try_new(struct epoll_event
, maxevents
);
12056 unlock_user(target_ep
, arg2
, 0);
12057 ret
= -TARGET_ENOMEM
;
12062 #if defined(TARGET_NR_epoll_pwait)
12063 case TARGET_NR_epoll_pwait
:
12065 target_sigset_t
*target_set
;
12066 sigset_t _set
, *set
= &_set
;
12069 if (arg6
!= sizeof(target_sigset_t
)) {
12070 ret
= -TARGET_EINVAL
;
12074 target_set
= lock_user(VERIFY_READ
, arg5
,
12075 sizeof(target_sigset_t
), 1);
12077 ret
= -TARGET_EFAULT
;
12080 target_to_host_sigset(set
, target_set
);
12081 unlock_user(target_set
, arg5
, 0);
12086 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12087 set
, SIGSET_T_SIZE
));
12091 #if defined(TARGET_NR_epoll_wait)
12092 case TARGET_NR_epoll_wait
:
12093 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12098 ret
= -TARGET_ENOSYS
;
12100 if (!is_error(ret
)) {
12102 for (i
= 0; i
< ret
; i
++) {
12103 target_ep
[i
].events
= tswap32(ep
[i
].events
);
12104 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
12106 unlock_user(target_ep
, arg2
,
12107 ret
* sizeof(struct target_epoll_event
));
12109 unlock_user(target_ep
, arg2
, 0);
12116 #ifdef TARGET_NR_prlimit64
12117 case TARGET_NR_prlimit64
:
12119 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12120 struct target_rlimit64
*target_rnew
, *target_rold
;
12121 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
12122 int resource
= target_to_host_resource(arg2
);
12124 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
12127 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
12128 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
12129 unlock_user_struct(target_rnew
, arg3
, 0);
12133 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
12134 if (!is_error(ret
) && arg4
) {
12135 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
12138 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
12139 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
12140 unlock_user_struct(target_rold
, arg4
, 1);
12145 #ifdef TARGET_NR_gethostname
12146 case TARGET_NR_gethostname
:
12148 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
12150 ret
= get_errno(gethostname(name
, arg2
));
12151 unlock_user(name
, arg1
, arg2
);
12153 ret
= -TARGET_EFAULT
;
12158 #ifdef TARGET_NR_atomic_cmpxchg_32
12159 case TARGET_NR_atomic_cmpxchg_32
:
12161 /* should use start_exclusive from main.c */
12162 abi_ulong mem_value
;
12163 if (get_user_u32(mem_value
, arg6
)) {
12164 target_siginfo_t info
;
12165 info
.si_signo
= SIGSEGV
;
12167 info
.si_code
= TARGET_SEGV_MAPERR
;
12168 info
._sifields
._sigfault
._addr
= arg6
;
12169 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
12170 QEMU_SI_FAULT
, &info
);
12174 if (mem_value
== arg2
)
12175 put_user_u32(arg1
, arg6
);
12180 #ifdef TARGET_NR_atomic_barrier
12181 case TARGET_NR_atomic_barrier
:
12183 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
12189 #ifdef TARGET_NR_timer_create
12190 case TARGET_NR_timer_create
:
12192 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12194 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
12197 int timer_index
= next_free_host_timer();
12199 if (timer_index
< 0) {
12200 ret
= -TARGET_EAGAIN
;
12202 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
12205 phost_sevp
= &host_sevp
;
12206 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
12212 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
12216 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
12225 #ifdef TARGET_NR_timer_settime
12226 case TARGET_NR_timer_settime
:
12228 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12229 * struct itimerspec * old_value */
12230 target_timer_t timerid
= get_timer_id(arg1
);
12234 } else if (arg3
== 0) {
12235 ret
= -TARGET_EINVAL
;
12237 timer_t htimer
= g_posix_timers
[timerid
];
12238 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12240 if (target_to_host_itimerspec(&hspec_new
, arg3
)) {
12244 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12245 if (arg4
&& host_to_target_itimerspec(arg4
, &hspec_old
)) {
12253 #ifdef TARGET_NR_timer_gettime
12254 case TARGET_NR_timer_gettime
:
12256 /* args: timer_t timerid, struct itimerspec *curr_value */
12257 target_timer_t timerid
= get_timer_id(arg1
);
12261 } else if (!arg2
) {
12262 ret
= -TARGET_EFAULT
;
12264 timer_t htimer
= g_posix_timers
[timerid
];
12265 struct itimerspec hspec
;
12266 ret
= get_errno(timer_gettime(htimer
, &hspec
));
12268 if (host_to_target_itimerspec(arg2
, &hspec
)) {
12269 ret
= -TARGET_EFAULT
;
12276 #ifdef TARGET_NR_timer_getoverrun
12277 case TARGET_NR_timer_getoverrun
:
12279 /* args: timer_t timerid */
12280 target_timer_t timerid
= get_timer_id(arg1
);
12285 timer_t htimer
= g_posix_timers
[timerid
];
12286 ret
= get_errno(timer_getoverrun(htimer
));
12288 fd_trans_unregister(ret
);
12293 #ifdef TARGET_NR_timer_delete
12294 case TARGET_NR_timer_delete
:
12296 /* args: timer_t timerid */
12297 target_timer_t timerid
= get_timer_id(arg1
);
12302 timer_t htimer
= g_posix_timers
[timerid
];
12303 ret
= get_errno(timer_delete(htimer
));
12304 g_posix_timers
[timerid
] = 0;
12310 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12311 case TARGET_NR_timerfd_create
:
12312 ret
= get_errno(timerfd_create(arg1
,
12313 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
12317 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12318 case TARGET_NR_timerfd_gettime
:
12320 struct itimerspec its_curr
;
12322 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
12324 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
12331 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12332 case TARGET_NR_timerfd_settime
:
12334 struct itimerspec its_new
, its_old
, *p_new
;
12337 if (target_to_host_itimerspec(&its_new
, arg3
)) {
12345 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
12347 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
12354 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12355 case TARGET_NR_ioprio_get
:
12356 ret
= get_errno(ioprio_get(arg1
, arg2
));
12360 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12361 case TARGET_NR_ioprio_set
:
12362 ret
= get_errno(ioprio_set(arg1
, arg2
, arg3
));
12366 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12367 case TARGET_NR_setns
:
12368 ret
= get_errno(setns(arg1
, arg2
));
12371 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12372 case TARGET_NR_unshare
:
12373 ret
= get_errno(unshare(arg1
));
12376 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12377 case TARGET_NR_kcmp
:
12378 ret
= get_errno(kcmp(arg1
, arg2
, arg3
, arg4
, arg5
));
12384 gemu_log("qemu: Unsupported syscall: %d\n", num
);
12385 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
12386 unimplemented_nowarn
:
12388 ret
= -TARGET_ENOSYS
;
12393 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
12396 print_syscall_ret(num
, ret
);
12397 trace_guest_user_syscall_ret(cpu
, num
, ret
);
12400 ret
= -TARGET_EFAULT
;