4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
36 #include <linux/capability.h>
38 #include <sys/timex.h>
40 int __clone2(int (*fn
)(void *), void *child_stack_base
,
41 size_t stack_size
, int flags
, void *arg
, ...);
43 #include <sys/socket.h>
47 #include <sys/times.h>
50 #include <sys/statfs.h>
53 #include <sys/sysinfo.h>
54 #include <sys/signalfd.h>
55 //#include <sys/user.h>
56 #include <netinet/ip.h>
57 #include <netinet/tcp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/errqueue.h>
62 #include "qemu-common.h"
64 #include <sys/timerfd.h>
70 #include <sys/eventfd.h>
73 #include <sys/epoll.h>
76 #include "qemu/xattr.h"
78 #ifdef CONFIG_SENDFILE
79 #include <sys/sendfile.h>
82 #define termios host_termios
83 #define winsize host_winsize
84 #define termio host_termio
85 #define sgttyb host_sgttyb /* same as target */
86 #define tchars host_tchars /* same as target */
87 #define ltchars host_ltchars /* same as target */
89 #include <linux/termios.h>
90 #include <linux/unistd.h>
91 #include <linux/cdrom.h>
92 #include <linux/hdreg.h>
93 #include <linux/soundcard.h>
95 #include <linux/mtio.h>
97 #if defined(CONFIG_FIEMAP)
98 #include <linux/fiemap.h>
100 #include <linux/fb.h>
101 #include <linux/vt.h>
102 #include <linux/dm-ioctl.h>
103 #include <linux/reboot.h>
104 #include <linux/route.h>
105 #include <linux/filter.h>
106 #include <linux/blkpg.h>
107 #include <netpacket/packet.h>
108 #include <linux/netlink.h>
109 #ifdef CONFIG_RTNETLINK
110 #include <linux/rtnetlink.h>
111 #include <linux/if_bridge.h>
113 #include <linux/audit.h>
114 #include "linux_loop.h"
120 #define CLONE_IO 0x80000000 /* Clone io context */
123 /* We can't directly call the host clone syscall, because this will
124 * badly confuse libc (breaking mutexes, for example). So we must
125 * divide clone flags into:
126 * * flag combinations that look like pthread_create()
127 * * flag combinations that look like fork()
128 * * flags we can implement within QEMU itself
129 * * flags we can't support and will return an error for
131 /* For thread creation, all these flags must be present; for
132 * fork, none must be present.
134 #define CLONE_THREAD_FLAGS \
135 (CLONE_VM | CLONE_FS | CLONE_FILES | \
136 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
138 /* These flags are ignored:
139 * CLONE_DETACHED is now ignored by the kernel;
140 * CLONE_IO is just an optimisation hint to the I/O scheduler
142 #define CLONE_IGNORED_FLAGS \
143 (CLONE_DETACHED | CLONE_IO)
145 /* Flags for fork which we can implement within QEMU itself */
146 #define CLONE_OPTIONAL_FORK_FLAGS \
147 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
148 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
150 /* Flags for thread creation which we can implement within QEMU itself */
151 #define CLONE_OPTIONAL_THREAD_FLAGS \
152 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
153 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
155 #define CLONE_INVALID_FORK_FLAGS \
156 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
158 #define CLONE_INVALID_THREAD_FLAGS \
159 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
160 CLONE_IGNORED_FLAGS))
162 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
163 * have almost all been allocated. We cannot support any of
164 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
165 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
166 * The checks against the invalid thread masks above will catch these.
167 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
171 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
172 * once. This exercises the codepaths for restart.
174 //#define DEBUG_ERESTARTSYS
176 //#include <linux/msdos_fs.h>
177 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
178 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
188 #define _syscall0(type,name) \
189 static type name (void) \
191 return syscall(__NR_##name); \
194 #define _syscall1(type,name,type1,arg1) \
195 static type name (type1 arg1) \
197 return syscall(__NR_##name, arg1); \
200 #define _syscall2(type,name,type1,arg1,type2,arg2) \
201 static type name (type1 arg1,type2 arg2) \
203 return syscall(__NR_##name, arg1, arg2); \
206 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
207 static type name (type1 arg1,type2 arg2,type3 arg3) \
209 return syscall(__NR_##name, arg1, arg2, arg3); \
212 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
213 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
215 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
218 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
220 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
222 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
226 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
227 type5,arg5,type6,arg6) \
228 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
231 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
235 #define __NR_sys_uname __NR_uname
236 #define __NR_sys_getcwd1 __NR_getcwd
237 #define __NR_sys_getdents __NR_getdents
238 #define __NR_sys_getdents64 __NR_getdents64
239 #define __NR_sys_getpriority __NR_getpriority
240 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
241 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
242 #define __NR_sys_syslog __NR_syslog
243 #define __NR_sys_futex __NR_futex
244 #define __NR_sys_inotify_init __NR_inotify_init
245 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
246 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
248 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
250 #define __NR__llseek __NR_lseek
253 /* Newer kernel ports have llseek() instead of _llseek() */
254 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
255 #define TARGET_NR__llseek TARGET_NR_llseek
259 _syscall0(int, gettid
)
261 /* This is a replacement for the host gettid() and must return a host
263 static int gettid(void) {
267 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
268 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
270 #if !defined(__NR_getdents) || \
271 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
272 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
274 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
275 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
276 loff_t
*, res
, uint
, wh
);
278 _syscall3(int, sys_rt_sigqueueinfo
, pid_t
, pid
, int, sig
, siginfo_t
*, uinfo
)
279 _syscall4(int, sys_rt_tgsigqueueinfo
, pid_t
, pid
, pid_t
, tid
, int, sig
,
281 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
282 #ifdef __NR_exit_group
283 _syscall1(int,exit_group
,int,error_code
)
285 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
286 _syscall1(int,set_tid_address
,int *,tidptr
)
288 #if defined(TARGET_NR_futex) && defined(__NR_futex)
289 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
290 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
292 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
293 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
294 unsigned long *, user_mask_ptr
);
295 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
296 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
297 unsigned long *, user_mask_ptr
);
298 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
300 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
301 struct __user_cap_data_struct
*, data
);
302 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
303 struct __user_cap_data_struct
*, data
);
304 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
305 _syscall2(int, ioprio_get
, int, which
, int, who
)
307 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
308 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
310 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
311 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
314 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
315 _syscall5(int, kcmp
, pid_t
, pid1
, pid_t
, pid2
, int, type
,
316 unsigned long, idx1
, unsigned long, idx2
)
319 static bitmask_transtbl fcntl_flags_tbl
[] = {
320 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
321 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
322 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
323 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
324 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
325 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
326 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
327 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
328 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
329 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
330 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
331 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
332 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
333 #if defined(O_DIRECT)
334 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
336 #if defined(O_NOATIME)
337 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
339 #if defined(O_CLOEXEC)
340 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
343 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
345 /* Don't terminate the list prematurely on 64-bit host+guest. */
346 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
347 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
354 QEMU_IFLA_BR_FORWARD_DELAY
,
355 QEMU_IFLA_BR_HELLO_TIME
,
356 QEMU_IFLA_BR_MAX_AGE
,
357 QEMU_IFLA_BR_AGEING_TIME
,
358 QEMU_IFLA_BR_STP_STATE
,
359 QEMU_IFLA_BR_PRIORITY
,
360 QEMU_IFLA_BR_VLAN_FILTERING
,
361 QEMU_IFLA_BR_VLAN_PROTOCOL
,
362 QEMU_IFLA_BR_GROUP_FWD_MASK
,
363 QEMU_IFLA_BR_ROOT_ID
,
364 QEMU_IFLA_BR_BRIDGE_ID
,
365 QEMU_IFLA_BR_ROOT_PORT
,
366 QEMU_IFLA_BR_ROOT_PATH_COST
,
367 QEMU_IFLA_BR_TOPOLOGY_CHANGE
,
368 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED
,
369 QEMU_IFLA_BR_HELLO_TIMER
,
370 QEMU_IFLA_BR_TCN_TIMER
,
371 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER
,
372 QEMU_IFLA_BR_GC_TIMER
,
373 QEMU_IFLA_BR_GROUP_ADDR
,
374 QEMU_IFLA_BR_FDB_FLUSH
,
375 QEMU_IFLA_BR_MCAST_ROUTER
,
376 QEMU_IFLA_BR_MCAST_SNOOPING
,
377 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR
,
378 QEMU_IFLA_BR_MCAST_QUERIER
,
379 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY
,
380 QEMU_IFLA_BR_MCAST_HASH_MAX
,
381 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT
,
382 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT
,
383 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL
,
384 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL
,
385 QEMU_IFLA_BR_MCAST_QUERIER_INTVL
,
386 QEMU_IFLA_BR_MCAST_QUERY_INTVL
,
387 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
,
388 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL
,
389 QEMU_IFLA_BR_NF_CALL_IPTABLES
,
390 QEMU_IFLA_BR_NF_CALL_IP6TABLES
,
391 QEMU_IFLA_BR_NF_CALL_ARPTABLES
,
392 QEMU_IFLA_BR_VLAN_DEFAULT_PVID
,
394 QEMU_IFLA_BR_VLAN_STATS_ENABLED
,
395 QEMU_IFLA_BR_MCAST_STATS_ENABLED
,
419 QEMU_IFLA_NET_NS_PID
,
422 QEMU_IFLA_VFINFO_LIST
,
430 QEMU_IFLA_PROMISCUITY
,
431 QEMU_IFLA_NUM_TX_QUEUES
,
432 QEMU_IFLA_NUM_RX_QUEUES
,
434 QEMU_IFLA_PHYS_PORT_ID
,
435 QEMU_IFLA_CARRIER_CHANGES
,
436 QEMU_IFLA_PHYS_SWITCH_ID
,
437 QEMU_IFLA_LINK_NETNSID
,
438 QEMU_IFLA_PHYS_PORT_NAME
,
439 QEMU_IFLA_PROTO_DOWN
,
440 QEMU_IFLA_GSO_MAX_SEGS
,
441 QEMU_IFLA_GSO_MAX_SIZE
,
448 QEMU_IFLA_BRPORT_UNSPEC
,
449 QEMU_IFLA_BRPORT_STATE
,
450 QEMU_IFLA_BRPORT_PRIORITY
,
451 QEMU_IFLA_BRPORT_COST
,
452 QEMU_IFLA_BRPORT_MODE
,
453 QEMU_IFLA_BRPORT_GUARD
,
454 QEMU_IFLA_BRPORT_PROTECT
,
455 QEMU_IFLA_BRPORT_FAST_LEAVE
,
456 QEMU_IFLA_BRPORT_LEARNING
,
457 QEMU_IFLA_BRPORT_UNICAST_FLOOD
,
458 QEMU_IFLA_BRPORT_PROXYARP
,
459 QEMU_IFLA_BRPORT_LEARNING_SYNC
,
460 QEMU_IFLA_BRPORT_PROXYARP_WIFI
,
461 QEMU_IFLA_BRPORT_ROOT_ID
,
462 QEMU_IFLA_BRPORT_BRIDGE_ID
,
463 QEMU_IFLA_BRPORT_DESIGNATED_PORT
,
464 QEMU_IFLA_BRPORT_DESIGNATED_COST
,
467 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
,
468 QEMU_IFLA_BRPORT_CONFIG_PENDING
,
469 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER
,
470 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER
,
471 QEMU_IFLA_BRPORT_HOLD_TIMER
,
472 QEMU_IFLA_BRPORT_FLUSH
,
473 QEMU_IFLA_BRPORT_MULTICAST_ROUTER
,
474 QEMU_IFLA_BRPORT_PAD
,
475 QEMU___IFLA_BRPORT_MAX
479 QEMU_IFLA_INFO_UNSPEC
,
482 QEMU_IFLA_INFO_XSTATS
,
483 QEMU_IFLA_INFO_SLAVE_KIND
,
484 QEMU_IFLA_INFO_SLAVE_DATA
,
485 QEMU___IFLA_INFO_MAX
,
489 QEMU_IFLA_INET_UNSPEC
,
491 QEMU___IFLA_INET_MAX
,
495 QEMU_IFLA_INET6_UNSPEC
,
496 QEMU_IFLA_INET6_FLAGS
,
497 QEMU_IFLA_INET6_CONF
,
498 QEMU_IFLA_INET6_STATS
,
499 QEMU_IFLA_INET6_MCAST
,
500 QEMU_IFLA_INET6_CACHEINFO
,
501 QEMU_IFLA_INET6_ICMP6STATS
,
502 QEMU_IFLA_INET6_TOKEN
,
503 QEMU_IFLA_INET6_ADDR_GEN_MODE
,
504 QEMU___IFLA_INET6_MAX
507 typedef abi_long (*TargetFdDataFunc
)(void *, size_t);
508 typedef abi_long (*TargetFdAddrFunc
)(void *, abi_ulong
, socklen_t
);
509 typedef struct TargetFdTrans
{
510 TargetFdDataFunc host_to_target_data
;
511 TargetFdDataFunc target_to_host_data
;
512 TargetFdAddrFunc target_to_host_addr
;
515 static TargetFdTrans
**target_fd_trans
;
517 static unsigned int target_fd_max
;
519 static TargetFdDataFunc
fd_trans_target_to_host_data(int fd
)
521 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
522 return target_fd_trans
[fd
]->target_to_host_data
;
527 static TargetFdDataFunc
fd_trans_host_to_target_data(int fd
)
529 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
530 return target_fd_trans
[fd
]->host_to_target_data
;
535 static TargetFdAddrFunc
fd_trans_target_to_host_addr(int fd
)
537 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
538 return target_fd_trans
[fd
]->target_to_host_addr
;
543 static void fd_trans_register(int fd
, TargetFdTrans
*trans
)
547 if (fd
>= target_fd_max
) {
548 oldmax
= target_fd_max
;
549 target_fd_max
= ((fd
>> 6) + 1) << 6; /* by slice of 64 entries */
550 target_fd_trans
= g_renew(TargetFdTrans
*,
551 target_fd_trans
, target_fd_max
);
552 memset((void *)(target_fd_trans
+ oldmax
), 0,
553 (target_fd_max
- oldmax
) * sizeof(TargetFdTrans
*));
555 target_fd_trans
[fd
] = trans
;
558 static void fd_trans_unregister(int fd
)
560 if (fd
>= 0 && fd
< target_fd_max
) {
561 target_fd_trans
[fd
] = NULL
;
565 static void fd_trans_dup(int oldfd
, int newfd
)
567 fd_trans_unregister(newfd
);
568 if (oldfd
< target_fd_max
&& target_fd_trans
[oldfd
]) {
569 fd_trans_register(newfd
, target_fd_trans
[oldfd
]);
573 static int sys_getcwd1(char *buf
, size_t size
)
575 if (getcwd(buf
, size
) == NULL
) {
576 /* getcwd() sets errno */
579 return strlen(buf
)+1;
582 #ifdef TARGET_NR_utimensat
583 #if defined(__NR_utimensat)
584 #define __NR_sys_utimensat __NR_utimensat
585 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
586 const struct timespec
*,tsp
,int,flags
)
588 static int sys_utimensat(int dirfd
, const char *pathname
,
589 const struct timespec times
[2], int flags
)
595 #endif /* TARGET_NR_utimensat */
597 #ifdef CONFIG_INOTIFY
598 #include <sys/inotify.h>
600 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
601 static int sys_inotify_init(void)
603 return (inotify_init());
606 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
607 static int sys_inotify_add_watch(int fd
,const char *pathname
, int32_t mask
)
609 return (inotify_add_watch(fd
, pathname
, mask
));
612 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
613 static int sys_inotify_rm_watch(int fd
, int32_t wd
)
615 return (inotify_rm_watch(fd
, wd
));
618 #ifdef CONFIG_INOTIFY1
619 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
620 static int sys_inotify_init1(int flags
)
622 return (inotify_init1(flags
));
627 /* Userspace can usually survive runtime without inotify */
628 #undef TARGET_NR_inotify_init
629 #undef TARGET_NR_inotify_init1
630 #undef TARGET_NR_inotify_add_watch
631 #undef TARGET_NR_inotify_rm_watch
632 #endif /* CONFIG_INOTIFY */
634 #if defined(TARGET_NR_prlimit64)
635 #ifndef __NR_prlimit64
636 # define __NR_prlimit64 -1
638 #define __NR_sys_prlimit64 __NR_prlimit64
639 /* The glibc rlimit structure may not be that used by the underlying syscall */
640 struct host_rlimit64
{
644 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
645 const struct host_rlimit64
*, new_limit
,
646 struct host_rlimit64
*, old_limit
)
650 #if defined(TARGET_NR_timer_create)
651 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
652 static timer_t g_posix_timers
[32] = { 0, } ;
654 static inline int next_free_host_timer(void)
657 /* FIXME: Does finding the next free slot require a lock? */
658 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
659 if (g_posix_timers
[k
] == 0) {
660 g_posix_timers
[k
] = (timer_t
) 1;
668 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
670 static inline int regpairs_aligned(void *cpu_env
) {
671 return ((((CPUARMState
*)cpu_env
)->eabi
) == 1) ;
673 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
674 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
675 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
676 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
677 * of registers which translates to the same as ARM/MIPS, because we start with
679 static inline int regpairs_aligned(void *cpu_env
) { return 1; }
681 static inline int regpairs_aligned(void *cpu_env
) { return 0; }
684 #define ERRNO_TABLE_SIZE 1200
686 /* target_to_host_errno_table[] is initialized from
687 * host_to_target_errno_table[] in syscall_init(). */
688 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
692 * This list is the union of errno values overridden in asm-<arch>/errno.h
693 * minus the errnos that are not actually generic to all archs.
695 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
696 [EAGAIN
] = TARGET_EAGAIN
,
697 [EIDRM
] = TARGET_EIDRM
,
698 [ECHRNG
] = TARGET_ECHRNG
,
699 [EL2NSYNC
] = TARGET_EL2NSYNC
,
700 [EL3HLT
] = TARGET_EL3HLT
,
701 [EL3RST
] = TARGET_EL3RST
,
702 [ELNRNG
] = TARGET_ELNRNG
,
703 [EUNATCH
] = TARGET_EUNATCH
,
704 [ENOCSI
] = TARGET_ENOCSI
,
705 [EL2HLT
] = TARGET_EL2HLT
,
706 [EDEADLK
] = TARGET_EDEADLK
,
707 [ENOLCK
] = TARGET_ENOLCK
,
708 [EBADE
] = TARGET_EBADE
,
709 [EBADR
] = TARGET_EBADR
,
710 [EXFULL
] = TARGET_EXFULL
,
711 [ENOANO
] = TARGET_ENOANO
,
712 [EBADRQC
] = TARGET_EBADRQC
,
713 [EBADSLT
] = TARGET_EBADSLT
,
714 [EBFONT
] = TARGET_EBFONT
,
715 [ENOSTR
] = TARGET_ENOSTR
,
716 [ENODATA
] = TARGET_ENODATA
,
717 [ETIME
] = TARGET_ETIME
,
718 [ENOSR
] = TARGET_ENOSR
,
719 [ENONET
] = TARGET_ENONET
,
720 [ENOPKG
] = TARGET_ENOPKG
,
721 [EREMOTE
] = TARGET_EREMOTE
,
722 [ENOLINK
] = TARGET_ENOLINK
,
723 [EADV
] = TARGET_EADV
,
724 [ESRMNT
] = TARGET_ESRMNT
,
725 [ECOMM
] = TARGET_ECOMM
,
726 [EPROTO
] = TARGET_EPROTO
,
727 [EDOTDOT
] = TARGET_EDOTDOT
,
728 [EMULTIHOP
] = TARGET_EMULTIHOP
,
729 [EBADMSG
] = TARGET_EBADMSG
,
730 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
731 [EOVERFLOW
] = TARGET_EOVERFLOW
,
732 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
733 [EBADFD
] = TARGET_EBADFD
,
734 [EREMCHG
] = TARGET_EREMCHG
,
735 [ELIBACC
] = TARGET_ELIBACC
,
736 [ELIBBAD
] = TARGET_ELIBBAD
,
737 [ELIBSCN
] = TARGET_ELIBSCN
,
738 [ELIBMAX
] = TARGET_ELIBMAX
,
739 [ELIBEXEC
] = TARGET_ELIBEXEC
,
740 [EILSEQ
] = TARGET_EILSEQ
,
741 [ENOSYS
] = TARGET_ENOSYS
,
742 [ELOOP
] = TARGET_ELOOP
,
743 [ERESTART
] = TARGET_ERESTART
,
744 [ESTRPIPE
] = TARGET_ESTRPIPE
,
745 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
746 [EUSERS
] = TARGET_EUSERS
,
747 [ENOTSOCK
] = TARGET_ENOTSOCK
,
748 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
749 [EMSGSIZE
] = TARGET_EMSGSIZE
,
750 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
751 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
752 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
753 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
754 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
755 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
756 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
757 [EADDRINUSE
] = TARGET_EADDRINUSE
,
758 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
759 [ENETDOWN
] = TARGET_ENETDOWN
,
760 [ENETUNREACH
] = TARGET_ENETUNREACH
,
761 [ENETRESET
] = TARGET_ENETRESET
,
762 [ECONNABORTED
] = TARGET_ECONNABORTED
,
763 [ECONNRESET
] = TARGET_ECONNRESET
,
764 [ENOBUFS
] = TARGET_ENOBUFS
,
765 [EISCONN
] = TARGET_EISCONN
,
766 [ENOTCONN
] = TARGET_ENOTCONN
,
767 [EUCLEAN
] = TARGET_EUCLEAN
,
768 [ENOTNAM
] = TARGET_ENOTNAM
,
769 [ENAVAIL
] = TARGET_ENAVAIL
,
770 [EISNAM
] = TARGET_EISNAM
,
771 [EREMOTEIO
] = TARGET_EREMOTEIO
,
772 [EDQUOT
] = TARGET_EDQUOT
,
773 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
774 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
775 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
776 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
777 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
778 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
779 [EALREADY
] = TARGET_EALREADY
,
780 [EINPROGRESS
] = TARGET_EINPROGRESS
,
781 [ESTALE
] = TARGET_ESTALE
,
782 [ECANCELED
] = TARGET_ECANCELED
,
783 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
784 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
786 [ENOKEY
] = TARGET_ENOKEY
,
789 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
792 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
795 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
798 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
800 #ifdef ENOTRECOVERABLE
801 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
804 [ENOMSG
] = TARGET_ENOMSG
,
807 [ERFKILL
] = TARGET_ERFKILL
,
810 [EHWPOISON
] = TARGET_EHWPOISON
,
814 static inline int host_to_target_errno(int err
)
816 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
817 host_to_target_errno_table
[err
]) {
818 return host_to_target_errno_table
[err
];
823 static inline int target_to_host_errno(int err
)
825 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
826 target_to_host_errno_table
[err
]) {
827 return target_to_host_errno_table
[err
];
832 static inline abi_long
get_errno(abi_long ret
)
835 return -host_to_target_errno(errno
);
840 static inline int is_error(abi_long ret
)
842 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
845 const char *target_strerror(int err
)
847 if (err
== TARGET_ERESTARTSYS
) {
848 return "To be restarted";
850 if (err
== TARGET_QEMU_ESIGRETURN
) {
851 return "Successful exit from sigreturn";
854 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
857 return strerror(target_to_host_errno(err
));
860 #define safe_syscall0(type, name) \
861 static type safe_##name(void) \
863 return safe_syscall(__NR_##name); \
866 #define safe_syscall1(type, name, type1, arg1) \
867 static type safe_##name(type1 arg1) \
869 return safe_syscall(__NR_##name, arg1); \
872 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
873 static type safe_##name(type1 arg1, type2 arg2) \
875 return safe_syscall(__NR_##name, arg1, arg2); \
878 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
879 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
881 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
884 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
886 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
888 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
891 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
892 type4, arg4, type5, arg5) \
893 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
896 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
899 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
900 type4, arg4, type5, arg5, type6, arg6) \
901 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
902 type5 arg5, type6 arg6) \
904 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
907 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
908 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
909 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
910 int, flags
, mode_t
, mode
)
911 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
912 struct rusage
*, rusage
)
913 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
914 int, options
, struct rusage
*, rusage
)
915 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
916 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
917 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
918 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
919 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
921 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
922 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
924 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
925 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
926 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
927 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
928 safe_syscall2(int, tkill
, int, tid
, int, sig
)
929 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
930 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
931 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
932 safe_syscall5(ssize_t
, preadv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
933 unsigned long, pos_l
, unsigned long, pos_h
)
934 safe_syscall5(ssize_t
, pwritev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
935 unsigned long, pos_l
, unsigned long, pos_h
)
936 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
938 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
939 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
940 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
941 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
942 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
943 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
944 safe_syscall2(int, flock
, int, fd
, int, operation
)
945 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
946 const struct timespec
*, uts
, size_t, sigsetsize
)
947 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
949 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
950 struct timespec
*, rem
)
951 #ifdef TARGET_NR_clock_nanosleep
952 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
953 const struct timespec
*, req
, struct timespec
*, rem
)
956 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
958 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
959 long, msgtype
, int, flags
)
960 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
961 unsigned, nsops
, const struct timespec
*, timeout
)
963 /* This host kernel architecture uses a single ipc syscall; fake up
964 * wrappers for the sub-operations to hide this implementation detail.
965 * Annoyingly we can't include linux/ipc.h to get the constant definitions
966 * for the call parameter because some structs in there conflict with the
967 * sys/ipc.h ones. So we just define them here, and rely on them being
968 * the same for all host architectures.
970 #define Q_SEMTIMEDOP 4
973 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
975 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
976 void *, ptr
, long, fifth
)
977 static int safe_msgsnd(int msgid
, const void *msgp
, size_t sz
, int flags
)
979 return safe_ipc(Q_IPCCALL(0, Q_MSGSND
), msgid
, sz
, flags
, (void *)msgp
, 0);
981 static int safe_msgrcv(int msgid
, void *msgp
, size_t sz
, long type
, int flags
)
983 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV
), msgid
, sz
, flags
, msgp
, type
);
985 static int safe_semtimedop(int semid
, struct sembuf
*tsops
, unsigned nsops
,
986 const struct timespec
*timeout
)
988 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP
), semid
, nsops
, 0, tsops
,
992 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
993 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
994 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
995 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
996 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
998 /* We do ioctl like this rather than via safe_syscall3 to preserve the
999 * "third argument might be integer or pointer or not present" behaviour of
1000 * the libc function.
1002 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
1003 /* Similarly for fcntl. Note that callers must always:
1004 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
1005 * use the flock64 struct rather than unsuffixed flock
1006 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
1009 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
1011 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
1014 static inline int host_to_target_sock_type(int host_type
)
1018 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
1020 target_type
= TARGET_SOCK_DGRAM
;
1023 target_type
= TARGET_SOCK_STREAM
;
1026 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
1030 #if defined(SOCK_CLOEXEC)
1031 if (host_type
& SOCK_CLOEXEC
) {
1032 target_type
|= TARGET_SOCK_CLOEXEC
;
1036 #if defined(SOCK_NONBLOCK)
1037 if (host_type
& SOCK_NONBLOCK
) {
1038 target_type
|= TARGET_SOCK_NONBLOCK
;
1045 static abi_ulong target_brk
;
1046 static abi_ulong target_original_brk
;
1047 static abi_ulong brk_page
;
1049 void target_set_brk(abi_ulong new_brk
)
1051 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
1052 brk_page
= HOST_PAGE_ALIGN(target_brk
);
1055 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
1056 #define DEBUGF_BRK(message, args...)
1058 /* do_brk() must return target values and target errnos. */
1059 abi_long
do_brk(abi_ulong new_brk
)
1061 abi_long mapped_addr
;
1062 abi_ulong new_alloc_size
;
1064 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
1067 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
1070 if (new_brk
< target_original_brk
) {
1071 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
1076 /* If the new brk is less than the highest page reserved to the
1077 * target heap allocation, set it and we're almost done... */
1078 if (new_brk
<= brk_page
) {
1079 /* Heap contents are initialized to zero, as for anonymous
1081 if (new_brk
> target_brk
) {
1082 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
1084 target_brk
= new_brk
;
1085 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
1089 /* We need to allocate more memory after the brk... Note that
1090 * we don't use MAP_FIXED because that will map over the top of
1091 * any existing mapping (like the one with the host libc or qemu
1092 * itself); instead we treat "mapped but at wrong address" as
1093 * a failure and unmap again.
1095 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
1096 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
1097 PROT_READ
|PROT_WRITE
,
1098 MAP_ANON
|MAP_PRIVATE
, 0, 0));
1100 if (mapped_addr
== brk_page
) {
1101 /* Heap contents are initialized to zero, as for anonymous
1102 * mapped pages. Technically the new pages are already
1103 * initialized to zero since they *are* anonymous mapped
1104 * pages, however we have to take care with the contents that
1105 * come from the remaining part of the previous page: it may
1106 * contains garbage data due to a previous heap usage (grown
1107 * then shrunken). */
1108 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
1110 target_brk
= new_brk
;
1111 brk_page
= HOST_PAGE_ALIGN(target_brk
);
1112 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
1115 } else if (mapped_addr
!= -1) {
1116 /* Mapped but at wrong address, meaning there wasn't actually
1117 * enough space for this brk.
1119 target_munmap(mapped_addr
, new_alloc_size
);
1121 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
1124 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
1127 #if defined(TARGET_ALPHA)
1128 /* We (partially) emulate OSF/1 on Alpha, which requires we
1129 return a proper errno, not an unchanged brk value. */
1130 return -TARGET_ENOMEM
;
1132 /* For everything else, return the previous break. */
1136 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
1137 abi_ulong target_fds_addr
,
1141 abi_ulong b
, *target_fds
;
1143 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1144 if (!(target_fds
= lock_user(VERIFY_READ
,
1146 sizeof(abi_ulong
) * nw
,
1148 return -TARGET_EFAULT
;
1152 for (i
= 0; i
< nw
; i
++) {
1153 /* grab the abi_ulong */
1154 __get_user(b
, &target_fds
[i
]);
1155 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1156 /* check the bit inside the abi_ulong */
1163 unlock_user(target_fds
, target_fds_addr
, 0);
1168 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
1169 abi_ulong target_fds_addr
,
1172 if (target_fds_addr
) {
1173 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
1174 return -TARGET_EFAULT
;
1182 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
1188 abi_ulong
*target_fds
;
1190 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1191 if (!(target_fds
= lock_user(VERIFY_WRITE
,
1193 sizeof(abi_ulong
) * nw
,
1195 return -TARGET_EFAULT
;
1198 for (i
= 0; i
< nw
; i
++) {
1200 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1201 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
1204 __put_user(v
, &target_fds
[i
]);
1207 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
1212 #if defined(__alpha__)
1213 #define HOST_HZ 1024
1218 static inline abi_long
host_to_target_clock_t(long ticks
)
1220 #if HOST_HZ == TARGET_HZ
1223 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1227 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1228 const struct rusage
*rusage
)
1230 struct target_rusage
*target_rusage
;
1232 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1233 return -TARGET_EFAULT
;
1234 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1235 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1236 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1237 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1238 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1239 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1240 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1241 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1242 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1243 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1244 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1245 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1246 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1247 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1248 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1249 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1250 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1251 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1252 unlock_user_struct(target_rusage
, target_addr
, 1);
1257 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1259 abi_ulong target_rlim_swap
;
1262 target_rlim_swap
= tswapal(target_rlim
);
1263 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1264 return RLIM_INFINITY
;
1266 result
= target_rlim_swap
;
1267 if (target_rlim_swap
!= (rlim_t
)result
)
1268 return RLIM_INFINITY
;
1273 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1275 abi_ulong target_rlim_swap
;
1278 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1279 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1281 target_rlim_swap
= rlim
;
1282 result
= tswapal(target_rlim_swap
);
1287 static inline int target_to_host_resource(int code
)
1290 case TARGET_RLIMIT_AS
:
1292 case TARGET_RLIMIT_CORE
:
1294 case TARGET_RLIMIT_CPU
:
1296 case TARGET_RLIMIT_DATA
:
1298 case TARGET_RLIMIT_FSIZE
:
1299 return RLIMIT_FSIZE
;
1300 case TARGET_RLIMIT_LOCKS
:
1301 return RLIMIT_LOCKS
;
1302 case TARGET_RLIMIT_MEMLOCK
:
1303 return RLIMIT_MEMLOCK
;
1304 case TARGET_RLIMIT_MSGQUEUE
:
1305 return RLIMIT_MSGQUEUE
;
1306 case TARGET_RLIMIT_NICE
:
1308 case TARGET_RLIMIT_NOFILE
:
1309 return RLIMIT_NOFILE
;
1310 case TARGET_RLIMIT_NPROC
:
1311 return RLIMIT_NPROC
;
1312 case TARGET_RLIMIT_RSS
:
1314 case TARGET_RLIMIT_RTPRIO
:
1315 return RLIMIT_RTPRIO
;
1316 case TARGET_RLIMIT_SIGPENDING
:
1317 return RLIMIT_SIGPENDING
;
1318 case TARGET_RLIMIT_STACK
:
1319 return RLIMIT_STACK
;
1325 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1326 abi_ulong target_tv_addr
)
1328 struct target_timeval
*target_tv
;
1330 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
1331 return -TARGET_EFAULT
;
1333 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1334 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1336 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1341 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1342 const struct timeval
*tv
)
1344 struct target_timeval
*target_tv
;
1346 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
1347 return -TARGET_EFAULT
;
1349 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1350 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1352 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1357 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1358 abi_ulong target_tz_addr
)
1360 struct target_timezone
*target_tz
;
1362 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1363 return -TARGET_EFAULT
;
1366 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1367 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1369 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1374 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1377 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1378 abi_ulong target_mq_attr_addr
)
1380 struct target_mq_attr
*target_mq_attr
;
1382 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1383 target_mq_attr_addr
, 1))
1384 return -TARGET_EFAULT
;
1386 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1387 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1388 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1389 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1391 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1396 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1397 const struct mq_attr
*attr
)
1399 struct target_mq_attr
*target_mq_attr
;
1401 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1402 target_mq_attr_addr
, 0))
1403 return -TARGET_EFAULT
;
1405 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1406 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1407 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1408 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1410 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1416 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1417 /* do_select() must return target values and target errnos. */
1418 static abi_long
do_select(int n
,
1419 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1420 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1422 fd_set rfds
, wfds
, efds
;
1423 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1425 struct timespec ts
, *ts_ptr
;
1428 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1432 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1436 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1441 if (target_tv_addr
) {
1442 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1443 return -TARGET_EFAULT
;
1444 ts
.tv_sec
= tv
.tv_sec
;
1445 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1451 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1454 if (!is_error(ret
)) {
1455 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1456 return -TARGET_EFAULT
;
1457 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1458 return -TARGET_EFAULT
;
1459 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1460 return -TARGET_EFAULT
;
1462 if (target_tv_addr
) {
1463 tv
.tv_sec
= ts
.tv_sec
;
1464 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1465 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1466 return -TARGET_EFAULT
;
1474 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1475 static abi_long
do_old_select(abi_ulong arg1
)
1477 struct target_sel_arg_struct
*sel
;
1478 abi_ulong inp
, outp
, exp
, tvp
;
1481 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1)) {
1482 return -TARGET_EFAULT
;
1485 nsel
= tswapal(sel
->n
);
1486 inp
= tswapal(sel
->inp
);
1487 outp
= tswapal(sel
->outp
);
1488 exp
= tswapal(sel
->exp
);
1489 tvp
= tswapal(sel
->tvp
);
1491 unlock_user_struct(sel
, arg1
, 0);
1493 return do_select(nsel
, inp
, outp
, exp
, tvp
);
1498 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1501 return pipe2(host_pipe
, flags
);
1507 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1508 int flags
, int is_pipe2
)
1512 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1515 return get_errno(ret
);
1517 /* Several targets have special calling conventions for the original
1518 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1520 #if defined(TARGET_ALPHA)
1521 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1522 return host_pipe
[0];
1523 #elif defined(TARGET_MIPS)
1524 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1525 return host_pipe
[0];
1526 #elif defined(TARGET_SH4)
1527 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1528 return host_pipe
[0];
1529 #elif defined(TARGET_SPARC)
1530 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1531 return host_pipe
[0];
1535 if (put_user_s32(host_pipe
[0], pipedes
)
1536 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1537 return -TARGET_EFAULT
;
1538 return get_errno(ret
);
1541 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1542 abi_ulong target_addr
,
1545 struct target_ip_mreqn
*target_smreqn
;
1547 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1549 return -TARGET_EFAULT
;
1550 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1551 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1552 if (len
== sizeof(struct target_ip_mreqn
))
1553 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1554 unlock_user(target_smreqn
, target_addr
, 0);
1559 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1560 abi_ulong target_addr
,
1563 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1564 sa_family_t sa_family
;
1565 struct target_sockaddr
*target_saddr
;
1567 if (fd_trans_target_to_host_addr(fd
)) {
1568 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1571 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1573 return -TARGET_EFAULT
;
1575 sa_family
= tswap16(target_saddr
->sa_family
);
1577 /* Oops. The caller might send a incomplete sun_path; sun_path
1578 * must be terminated by \0 (see the manual page), but
1579 * unfortunately it is quite common to specify sockaddr_un
1580 * length as "strlen(x->sun_path)" while it should be
1581 * "strlen(...) + 1". We'll fix that here if needed.
1582 * Linux kernel has a similar feature.
1585 if (sa_family
== AF_UNIX
) {
1586 if (len
< unix_maxlen
&& len
> 0) {
1587 char *cp
= (char*)target_saddr
;
1589 if ( cp
[len
-1] && !cp
[len
] )
1592 if (len
> unix_maxlen
)
1596 memcpy(addr
, target_saddr
, len
);
1597 addr
->sa_family
= sa_family
;
1598 if (sa_family
== AF_NETLINK
) {
1599 struct sockaddr_nl
*nladdr
;
1601 nladdr
= (struct sockaddr_nl
*)addr
;
1602 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1603 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1604 } else if (sa_family
== AF_PACKET
) {
1605 struct target_sockaddr_ll
*lladdr
;
1607 lladdr
= (struct target_sockaddr_ll
*)addr
;
1608 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1609 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1611 unlock_user(target_saddr
, target_addr
, 0);
1616 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1617 struct sockaddr
*addr
,
1620 struct target_sockaddr
*target_saddr
;
1626 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1628 return -TARGET_EFAULT
;
1629 memcpy(target_saddr
, addr
, len
);
1630 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1631 sizeof(target_saddr
->sa_family
)) {
1632 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1634 if (addr
->sa_family
== AF_NETLINK
&& len
>= sizeof(struct sockaddr_nl
)) {
1635 struct sockaddr_nl
*target_nl
= (struct sockaddr_nl
*)target_saddr
;
1636 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1637 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1638 } else if (addr
->sa_family
== AF_PACKET
) {
1639 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1640 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1641 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1642 } else if (addr
->sa_family
== AF_INET6
&&
1643 len
>= sizeof(struct target_sockaddr_in6
)) {
1644 struct target_sockaddr_in6
*target_in6
=
1645 (struct target_sockaddr_in6
*)target_saddr
;
1646 target_in6
->sin6_scope_id
= tswap16(target_in6
->sin6_scope_id
);
1648 unlock_user(target_saddr
, target_addr
, len
);
1653 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1654 struct target_msghdr
*target_msgh
)
1656 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1657 abi_long msg_controllen
;
1658 abi_ulong target_cmsg_addr
;
1659 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1660 socklen_t space
= 0;
1662 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1663 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1665 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1666 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1667 target_cmsg_start
= target_cmsg
;
1669 return -TARGET_EFAULT
;
1671 while (cmsg
&& target_cmsg
) {
1672 void *data
= CMSG_DATA(cmsg
);
1673 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1675 int len
= tswapal(target_cmsg
->cmsg_len
)
1676 - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr
));
1678 space
+= CMSG_SPACE(len
);
1679 if (space
> msgh
->msg_controllen
) {
1680 space
-= CMSG_SPACE(len
);
1681 /* This is a QEMU bug, since we allocated the payload
1682 * area ourselves (unlike overflow in host-to-target
1683 * conversion, which is just the guest giving us a buffer
1684 * that's too small). It can't happen for the payload types
1685 * we currently support; if it becomes an issue in future
1686 * we would need to improve our allocation strategy to
1687 * something more intelligent than "twice the size of the
1688 * target buffer we're reading from".
1690 gemu_log("Host cmsg overflow\n");
1694 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1695 cmsg
->cmsg_level
= SOL_SOCKET
;
1697 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1699 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1700 cmsg
->cmsg_len
= CMSG_LEN(len
);
1702 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1703 int *fd
= (int *)data
;
1704 int *target_fd
= (int *)target_data
;
1705 int i
, numfds
= len
/ sizeof(int);
1707 for (i
= 0; i
< numfds
; i
++) {
1708 __get_user(fd
[i
], target_fd
+ i
);
1710 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1711 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1712 struct ucred
*cred
= (struct ucred
*)data
;
1713 struct target_ucred
*target_cred
=
1714 (struct target_ucred
*)target_data
;
1716 __get_user(cred
->pid
, &target_cred
->pid
);
1717 __get_user(cred
->uid
, &target_cred
->uid
);
1718 __get_user(cred
->gid
, &target_cred
->gid
);
1720 gemu_log("Unsupported ancillary data: %d/%d\n",
1721 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1722 memcpy(data
, target_data
, len
);
1725 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1726 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1729 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1731 msgh
->msg_controllen
= space
;
1735 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1736 struct msghdr
*msgh
)
1738 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1739 abi_long msg_controllen
;
1740 abi_ulong target_cmsg_addr
;
1741 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1742 socklen_t space
= 0;
1744 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1745 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1747 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1748 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1749 target_cmsg_start
= target_cmsg
;
1751 return -TARGET_EFAULT
;
1753 while (cmsg
&& target_cmsg
) {
1754 void *data
= CMSG_DATA(cmsg
);
1755 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1757 int len
= cmsg
->cmsg_len
- CMSG_ALIGN(sizeof (struct cmsghdr
));
1758 int tgt_len
, tgt_space
;
1760 /* We never copy a half-header but may copy half-data;
1761 * this is Linux's behaviour in put_cmsg(). Note that
1762 * truncation here is a guest problem (which we report
1763 * to the guest via the CTRUNC bit), unlike truncation
1764 * in target_to_host_cmsg, which is a QEMU bug.
1766 if (msg_controllen
< sizeof(struct cmsghdr
)) {
1767 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1771 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1772 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1774 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1776 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1778 tgt_len
= TARGET_CMSG_LEN(len
);
1780 /* Payload types which need a different size of payload on
1781 * the target must adjust tgt_len here.
1783 switch (cmsg
->cmsg_level
) {
1785 switch (cmsg
->cmsg_type
) {
1787 tgt_len
= sizeof(struct target_timeval
);
1796 if (msg_controllen
< tgt_len
) {
1797 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1798 tgt_len
= msg_controllen
;
1801 /* We must now copy-and-convert len bytes of payload
1802 * into tgt_len bytes of destination space. Bear in mind
1803 * that in both source and destination we may be dealing
1804 * with a truncated value!
1806 switch (cmsg
->cmsg_level
) {
1808 switch (cmsg
->cmsg_type
) {
1811 int *fd
= (int *)data
;
1812 int *target_fd
= (int *)target_data
;
1813 int i
, numfds
= tgt_len
/ sizeof(int);
1815 for (i
= 0; i
< numfds
; i
++) {
1816 __put_user(fd
[i
], target_fd
+ i
);
1822 struct timeval
*tv
= (struct timeval
*)data
;
1823 struct target_timeval
*target_tv
=
1824 (struct target_timeval
*)target_data
;
1826 if (len
!= sizeof(struct timeval
) ||
1827 tgt_len
!= sizeof(struct target_timeval
)) {
1831 /* copy struct timeval to target */
1832 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1833 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1836 case SCM_CREDENTIALS
:
1838 struct ucred
*cred
= (struct ucred
*)data
;
1839 struct target_ucred
*target_cred
=
1840 (struct target_ucred
*)target_data
;
1842 __put_user(cred
->pid
, &target_cred
->pid
);
1843 __put_user(cred
->uid
, &target_cred
->uid
);
1844 __put_user(cred
->gid
, &target_cred
->gid
);
1853 switch (cmsg
->cmsg_type
) {
1856 uint32_t *v
= (uint32_t *)data
;
1857 uint32_t *t_int
= (uint32_t *)target_data
;
1859 __put_user(*v
, t_int
);
1865 struct sock_extended_err ee
;
1866 struct sockaddr_in offender
;
1868 struct errhdr_t
*errh
= (struct errhdr_t
*)data
;
1869 struct errhdr_t
*target_errh
=
1870 (struct errhdr_t
*)target_data
;
1872 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1873 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1874 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1875 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1876 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1877 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1878 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1879 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1880 (void *) &errh
->offender
, sizeof(errh
->offender
));
1889 switch (cmsg
->cmsg_type
) {
1892 uint32_t *v
= (uint32_t *)data
;
1893 uint32_t *t_int
= (uint32_t *)target_data
;
1895 __put_user(*v
, t_int
);
1901 struct sock_extended_err ee
;
1902 struct sockaddr_in6 offender
;
1904 struct errhdr6_t
*errh
= (struct errhdr6_t
*)data
;
1905 struct errhdr6_t
*target_errh
=
1906 (struct errhdr6_t
*)target_data
;
1908 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1909 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1910 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1911 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1912 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1913 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1914 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1915 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1916 (void *) &errh
->offender
, sizeof(errh
->offender
));
1926 gemu_log("Unsupported ancillary data: %d/%d\n",
1927 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1928 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1929 if (tgt_len
> len
) {
1930 memset(target_data
+ len
, 0, tgt_len
- len
);
1934 target_cmsg
->cmsg_len
= tswapal(tgt_len
);
1935 tgt_space
= TARGET_CMSG_SPACE(len
);
1936 if (msg_controllen
< tgt_space
) {
1937 tgt_space
= msg_controllen
;
1939 msg_controllen
-= tgt_space
;
1941 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1942 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1945 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
1947 target_msgh
->msg_controllen
= tswapal(space
);
1951 static void tswap_nlmsghdr(struct nlmsghdr
*nlh
)
1953 nlh
->nlmsg_len
= tswap32(nlh
->nlmsg_len
);
1954 nlh
->nlmsg_type
= tswap16(nlh
->nlmsg_type
);
1955 nlh
->nlmsg_flags
= tswap16(nlh
->nlmsg_flags
);
1956 nlh
->nlmsg_seq
= tswap32(nlh
->nlmsg_seq
);
1957 nlh
->nlmsg_pid
= tswap32(nlh
->nlmsg_pid
);
1960 static abi_long
host_to_target_for_each_nlmsg(struct nlmsghdr
*nlh
,
1962 abi_long (*host_to_target_nlmsg
)
1963 (struct nlmsghdr
*))
1968 while (len
> sizeof(struct nlmsghdr
)) {
1970 nlmsg_len
= nlh
->nlmsg_len
;
1971 if (nlmsg_len
< sizeof(struct nlmsghdr
) ||
1976 switch (nlh
->nlmsg_type
) {
1978 tswap_nlmsghdr(nlh
);
1984 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
1985 e
->error
= tswap32(e
->error
);
1986 tswap_nlmsghdr(&e
->msg
);
1987 tswap_nlmsghdr(nlh
);
1991 ret
= host_to_target_nlmsg(nlh
);
1993 tswap_nlmsghdr(nlh
);
1998 tswap_nlmsghdr(nlh
);
1999 len
-= NLMSG_ALIGN(nlmsg_len
);
2000 nlh
= (struct nlmsghdr
*)(((char*)nlh
) + NLMSG_ALIGN(nlmsg_len
));
2005 static abi_long
target_to_host_for_each_nlmsg(struct nlmsghdr
*nlh
,
2007 abi_long (*target_to_host_nlmsg
)
2008 (struct nlmsghdr
*))
2012 while (len
> sizeof(struct nlmsghdr
)) {
2013 if (tswap32(nlh
->nlmsg_len
) < sizeof(struct nlmsghdr
) ||
2014 tswap32(nlh
->nlmsg_len
) > len
) {
2017 tswap_nlmsghdr(nlh
);
2018 switch (nlh
->nlmsg_type
) {
2025 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
2026 e
->error
= tswap32(e
->error
);
2027 tswap_nlmsghdr(&e
->msg
);
2031 ret
= target_to_host_nlmsg(nlh
);
2036 len
-= NLMSG_ALIGN(nlh
->nlmsg_len
);
2037 nlh
= (struct nlmsghdr
*)(((char *)nlh
) + NLMSG_ALIGN(nlh
->nlmsg_len
));
2042 #ifdef CONFIG_RTNETLINK
2043 static abi_long
host_to_target_for_each_nlattr(struct nlattr
*nlattr
,
2044 size_t len
, void *context
,
2045 abi_long (*host_to_target_nlattr
)
2049 unsigned short nla_len
;
2052 while (len
> sizeof(struct nlattr
)) {
2053 nla_len
= nlattr
->nla_len
;
2054 if (nla_len
< sizeof(struct nlattr
) ||
2058 ret
= host_to_target_nlattr(nlattr
, context
);
2059 nlattr
->nla_len
= tswap16(nlattr
->nla_len
);
2060 nlattr
->nla_type
= tswap16(nlattr
->nla_type
);
2064 len
-= NLA_ALIGN(nla_len
);
2065 nlattr
= (struct nlattr
*)(((char *)nlattr
) + NLA_ALIGN(nla_len
));
2070 static abi_long
host_to_target_for_each_rtattr(struct rtattr
*rtattr
,
2072 abi_long (*host_to_target_rtattr
)
2075 unsigned short rta_len
;
2078 while (len
> sizeof(struct rtattr
)) {
2079 rta_len
= rtattr
->rta_len
;
2080 if (rta_len
< sizeof(struct rtattr
) ||
2084 ret
= host_to_target_rtattr(rtattr
);
2085 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
2086 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
2090 len
-= RTA_ALIGN(rta_len
);
2091 rtattr
= (struct rtattr
*)(((char *)rtattr
) + RTA_ALIGN(rta_len
));
/* Pointer to the payload of a netlink attribute.  Do the byte offset
 * arithmetic on char * (standard C) rather than on void * (a GNU
 * extension); the resulting address is identical.
 */
#define NLA_DATA(nla) ((void *)((char *)(nla) + NLA_HDRLEN))
2098 static abi_long
host_to_target_data_bridge_nlattr(struct nlattr
*nlattr
,
2105 switch (nlattr
->nla_type
) {
2107 case QEMU_IFLA_BR_FDB_FLUSH
:
2110 case QEMU_IFLA_BR_GROUP_ADDR
:
2113 case QEMU_IFLA_BR_VLAN_FILTERING
:
2114 case QEMU_IFLA_BR_TOPOLOGY_CHANGE
:
2115 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED
:
2116 case QEMU_IFLA_BR_MCAST_ROUTER
:
2117 case QEMU_IFLA_BR_MCAST_SNOOPING
:
2118 case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR
:
2119 case QEMU_IFLA_BR_MCAST_QUERIER
:
2120 case QEMU_IFLA_BR_NF_CALL_IPTABLES
:
2121 case QEMU_IFLA_BR_NF_CALL_IP6TABLES
:
2122 case QEMU_IFLA_BR_NF_CALL_ARPTABLES
:
2125 case QEMU_IFLA_BR_PRIORITY
:
2126 case QEMU_IFLA_BR_VLAN_PROTOCOL
:
2127 case QEMU_IFLA_BR_GROUP_FWD_MASK
:
2128 case QEMU_IFLA_BR_ROOT_PORT
:
2129 case QEMU_IFLA_BR_VLAN_DEFAULT_PVID
:
2130 u16
= NLA_DATA(nlattr
);
2131 *u16
= tswap16(*u16
);
2134 case QEMU_IFLA_BR_FORWARD_DELAY
:
2135 case QEMU_IFLA_BR_HELLO_TIME
:
2136 case QEMU_IFLA_BR_MAX_AGE
:
2137 case QEMU_IFLA_BR_AGEING_TIME
:
2138 case QEMU_IFLA_BR_STP_STATE
:
2139 case QEMU_IFLA_BR_ROOT_PATH_COST
:
2140 case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY
:
2141 case QEMU_IFLA_BR_MCAST_HASH_MAX
:
2142 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT
:
2143 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT
:
2144 u32
= NLA_DATA(nlattr
);
2145 *u32
= tswap32(*u32
);
2148 case QEMU_IFLA_BR_HELLO_TIMER
:
2149 case QEMU_IFLA_BR_TCN_TIMER
:
2150 case QEMU_IFLA_BR_GC_TIMER
:
2151 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER
:
2152 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL
:
2153 case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL
:
2154 case QEMU_IFLA_BR_MCAST_QUERIER_INTVL
:
2155 case QEMU_IFLA_BR_MCAST_QUERY_INTVL
:
2156 case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
:
2157 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL
:
2158 u64
= NLA_DATA(nlattr
);
2159 *u64
= tswap64(*u64
);
2161 /* ifla_bridge_id: uin8_t[] */
2162 case QEMU_IFLA_BR_ROOT_ID
:
2163 case QEMU_IFLA_BR_BRIDGE_ID
:
2166 gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr
->nla_type
);
2172 static abi_long
host_to_target_slave_data_bridge_nlattr(struct nlattr
*nlattr
,
2179 switch (nlattr
->nla_type
) {
2181 case QEMU_IFLA_BRPORT_STATE
:
2182 case QEMU_IFLA_BRPORT_MODE
:
2183 case QEMU_IFLA_BRPORT_GUARD
:
2184 case QEMU_IFLA_BRPORT_PROTECT
:
2185 case QEMU_IFLA_BRPORT_FAST_LEAVE
:
2186 case QEMU_IFLA_BRPORT_LEARNING
:
2187 case QEMU_IFLA_BRPORT_UNICAST_FLOOD
:
2188 case QEMU_IFLA_BRPORT_PROXYARP
:
2189 case QEMU_IFLA_BRPORT_LEARNING_SYNC
:
2190 case QEMU_IFLA_BRPORT_PROXYARP_WIFI
:
2191 case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
:
2192 case QEMU_IFLA_BRPORT_CONFIG_PENDING
:
2193 case QEMU_IFLA_BRPORT_MULTICAST_ROUTER
:
2196 case QEMU_IFLA_BRPORT_PRIORITY
:
2197 case QEMU_IFLA_BRPORT_DESIGNATED_PORT
:
2198 case QEMU_IFLA_BRPORT_DESIGNATED_COST
:
2199 case QEMU_IFLA_BRPORT_ID
:
2200 case QEMU_IFLA_BRPORT_NO
:
2201 u16
= NLA_DATA(nlattr
);
2202 *u16
= tswap16(*u16
);
2205 case QEMU_IFLA_BRPORT_COST
:
2206 u32
= NLA_DATA(nlattr
);
2207 *u32
= tswap32(*u32
);
2210 case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER
:
2211 case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER
:
2212 case QEMU_IFLA_BRPORT_HOLD_TIMER
:
2213 u64
= NLA_DATA(nlattr
);
2214 *u64
= tswap64(*u64
);
2216 /* ifla_bridge_id: uint8_t[] */
2217 case QEMU_IFLA_BRPORT_ROOT_ID
:
2218 case QEMU_IFLA_BRPORT_BRIDGE_ID
:
2221 gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr
->nla_type
);
2227 struct linkinfo_context
{
2234 static abi_long
host_to_target_data_linkinfo_nlattr(struct nlattr
*nlattr
,
2237 struct linkinfo_context
*li_context
= context
;
2239 switch (nlattr
->nla_type
) {
2241 case QEMU_IFLA_INFO_KIND
:
2242 li_context
->name
= NLA_DATA(nlattr
);
2243 li_context
->len
= nlattr
->nla_len
- NLA_HDRLEN
;
2245 case QEMU_IFLA_INFO_SLAVE_KIND
:
2246 li_context
->slave_name
= NLA_DATA(nlattr
);
2247 li_context
->slave_len
= nlattr
->nla_len
- NLA_HDRLEN
;
2250 case QEMU_IFLA_INFO_XSTATS
:
2251 /* FIXME: only used by CAN */
2254 case QEMU_IFLA_INFO_DATA
:
2255 if (strncmp(li_context
->name
, "bridge",
2256 li_context
->len
) == 0) {
2257 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
2260 host_to_target_data_bridge_nlattr
);
2262 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context
->name
);
2265 case QEMU_IFLA_INFO_SLAVE_DATA
:
2266 if (strncmp(li_context
->slave_name
, "bridge",
2267 li_context
->slave_len
) == 0) {
2268 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
2271 host_to_target_slave_data_bridge_nlattr
);
2273 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2274 li_context
->slave_name
);
2278 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr
->nla_type
);
2285 static abi_long
host_to_target_data_inet_nlattr(struct nlattr
*nlattr
,
2291 switch (nlattr
->nla_type
) {
2292 case QEMU_IFLA_INET_CONF
:
2293 u32
= NLA_DATA(nlattr
);
2294 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u32
);
2296 u32
[i
] = tswap32(u32
[i
]);
2300 gemu_log("Unknown host AF_INET type: %d\n", nlattr
->nla_type
);
2305 static abi_long
host_to_target_data_inet6_nlattr(struct nlattr
*nlattr
,
2310 struct ifla_cacheinfo
*ci
;
2313 switch (nlattr
->nla_type
) {
2315 case QEMU_IFLA_INET6_TOKEN
:
2318 case QEMU_IFLA_INET6_ADDR_GEN_MODE
:
2321 case QEMU_IFLA_INET6_FLAGS
:
2322 u32
= NLA_DATA(nlattr
);
2323 *u32
= tswap32(*u32
);
2326 case QEMU_IFLA_INET6_CONF
:
2327 u32
= NLA_DATA(nlattr
);
2328 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u32
);
2330 u32
[i
] = tswap32(u32
[i
]);
2333 /* ifla_cacheinfo */
2334 case QEMU_IFLA_INET6_CACHEINFO
:
2335 ci
= NLA_DATA(nlattr
);
2336 ci
->max_reasm_len
= tswap32(ci
->max_reasm_len
);
2337 ci
->tstamp
= tswap32(ci
->tstamp
);
2338 ci
->reachable_time
= tswap32(ci
->reachable_time
);
2339 ci
->retrans_time
= tswap32(ci
->retrans_time
);
2342 case QEMU_IFLA_INET6_STATS
:
2343 case QEMU_IFLA_INET6_ICMP6STATS
:
2344 u64
= NLA_DATA(nlattr
);
2345 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u64
);
2347 u64
[i
] = tswap64(u64
[i
]);
2351 gemu_log("Unknown host AF_INET6 type: %d\n", nlattr
->nla_type
);
2356 static abi_long
host_to_target_data_spec_nlattr(struct nlattr
*nlattr
,
2359 switch (nlattr
->nla_type
) {
2361 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2363 host_to_target_data_inet_nlattr
);
2365 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2367 host_to_target_data_inet6_nlattr
);
2369 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr
->nla_type
);
2375 static abi_long
host_to_target_data_link_rtattr(struct rtattr
*rtattr
)
2378 struct rtnl_link_stats
*st
;
2379 struct rtnl_link_stats64
*st64
;
2380 struct rtnl_link_ifmap
*map
;
2381 struct linkinfo_context li_context
;
2383 switch (rtattr
->rta_type
) {
2385 case QEMU_IFLA_ADDRESS
:
2386 case QEMU_IFLA_BROADCAST
:
2388 case QEMU_IFLA_IFNAME
:
2389 case QEMU_IFLA_QDISC
:
2392 case QEMU_IFLA_OPERSTATE
:
2393 case QEMU_IFLA_LINKMODE
:
2394 case QEMU_IFLA_CARRIER
:
2395 case QEMU_IFLA_PROTO_DOWN
:
2399 case QEMU_IFLA_LINK
:
2400 case QEMU_IFLA_WEIGHT
:
2401 case QEMU_IFLA_TXQLEN
:
2402 case QEMU_IFLA_CARRIER_CHANGES
:
2403 case QEMU_IFLA_NUM_RX_QUEUES
:
2404 case QEMU_IFLA_NUM_TX_QUEUES
:
2405 case QEMU_IFLA_PROMISCUITY
:
2406 case QEMU_IFLA_EXT_MASK
:
2407 case QEMU_IFLA_LINK_NETNSID
:
2408 case QEMU_IFLA_GROUP
:
2409 case QEMU_IFLA_MASTER
:
2410 case QEMU_IFLA_NUM_VF
:
2411 case QEMU_IFLA_GSO_MAX_SEGS
:
2412 case QEMU_IFLA_GSO_MAX_SIZE
:
2413 u32
= RTA_DATA(rtattr
);
2414 *u32
= tswap32(*u32
);
2416 /* struct rtnl_link_stats */
2417 case QEMU_IFLA_STATS
:
2418 st
= RTA_DATA(rtattr
);
2419 st
->rx_packets
= tswap32(st
->rx_packets
);
2420 st
->tx_packets
= tswap32(st
->tx_packets
);
2421 st
->rx_bytes
= tswap32(st
->rx_bytes
);
2422 st
->tx_bytes
= tswap32(st
->tx_bytes
);
2423 st
->rx_errors
= tswap32(st
->rx_errors
);
2424 st
->tx_errors
= tswap32(st
->tx_errors
);
2425 st
->rx_dropped
= tswap32(st
->rx_dropped
);
2426 st
->tx_dropped
= tswap32(st
->tx_dropped
);
2427 st
->multicast
= tswap32(st
->multicast
);
2428 st
->collisions
= tswap32(st
->collisions
);
2430 /* detailed rx_errors: */
2431 st
->rx_length_errors
= tswap32(st
->rx_length_errors
);
2432 st
->rx_over_errors
= tswap32(st
->rx_over_errors
);
2433 st
->rx_crc_errors
= tswap32(st
->rx_crc_errors
);
2434 st
->rx_frame_errors
= tswap32(st
->rx_frame_errors
);
2435 st
->rx_fifo_errors
= tswap32(st
->rx_fifo_errors
);
2436 st
->rx_missed_errors
= tswap32(st
->rx_missed_errors
);
2438 /* detailed tx_errors */
2439 st
->tx_aborted_errors
= tswap32(st
->tx_aborted_errors
);
2440 st
->tx_carrier_errors
= tswap32(st
->tx_carrier_errors
);
2441 st
->tx_fifo_errors
= tswap32(st
->tx_fifo_errors
);
2442 st
->tx_heartbeat_errors
= tswap32(st
->tx_heartbeat_errors
);
2443 st
->tx_window_errors
= tswap32(st
->tx_window_errors
);
2446 st
->rx_compressed
= tswap32(st
->rx_compressed
);
2447 st
->tx_compressed
= tswap32(st
->tx_compressed
);
2449 /* struct rtnl_link_stats64 */
2450 case QEMU_IFLA_STATS64
:
2451 st64
= RTA_DATA(rtattr
);
2452 st64
->rx_packets
= tswap64(st64
->rx_packets
);
2453 st64
->tx_packets
= tswap64(st64
->tx_packets
);
2454 st64
->rx_bytes
= tswap64(st64
->rx_bytes
);
2455 st64
->tx_bytes
= tswap64(st64
->tx_bytes
);
2456 st64
->rx_errors
= tswap64(st64
->rx_errors
);
2457 st64
->tx_errors
= tswap64(st64
->tx_errors
);
2458 st64
->rx_dropped
= tswap64(st64
->rx_dropped
);
2459 st64
->tx_dropped
= tswap64(st64
->tx_dropped
);
2460 st64
->multicast
= tswap64(st64
->multicast
);
2461 st64
->collisions
= tswap64(st64
->collisions
);
2463 /* detailed rx_errors: */
2464 st64
->rx_length_errors
= tswap64(st64
->rx_length_errors
);
2465 st64
->rx_over_errors
= tswap64(st64
->rx_over_errors
);
2466 st64
->rx_crc_errors
= tswap64(st64
->rx_crc_errors
);
2467 st64
->rx_frame_errors
= tswap64(st64
->rx_frame_errors
);
2468 st64
->rx_fifo_errors
= tswap64(st64
->rx_fifo_errors
);
2469 st64
->rx_missed_errors
= tswap64(st64
->rx_missed_errors
);
2471 /* detailed tx_errors */
2472 st64
->tx_aborted_errors
= tswap64(st64
->tx_aborted_errors
);
2473 st64
->tx_carrier_errors
= tswap64(st64
->tx_carrier_errors
);
2474 st64
->tx_fifo_errors
= tswap64(st64
->tx_fifo_errors
);
2475 st64
->tx_heartbeat_errors
= tswap64(st64
->tx_heartbeat_errors
);
2476 st64
->tx_window_errors
= tswap64(st64
->tx_window_errors
);
2479 st64
->rx_compressed
= tswap64(st64
->rx_compressed
);
2480 st64
->tx_compressed
= tswap64(st64
->tx_compressed
);
2482 /* struct rtnl_link_ifmap */
2484 map
= RTA_DATA(rtattr
);
2485 map
->mem_start
= tswap64(map
->mem_start
);
2486 map
->mem_end
= tswap64(map
->mem_end
);
2487 map
->base_addr
= tswap64(map
->base_addr
);
2488 map
->irq
= tswap16(map
->irq
);
2491 case QEMU_IFLA_LINKINFO
:
2492 memset(&li_context
, 0, sizeof(li_context
));
2493 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2495 host_to_target_data_linkinfo_nlattr
);
2496 case QEMU_IFLA_AF_SPEC
:
2497 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2499 host_to_target_data_spec_nlattr
);
2501 gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr
->rta_type
);
2507 static abi_long
host_to_target_data_addr_rtattr(struct rtattr
*rtattr
)
2510 struct ifa_cacheinfo
*ci
;
2512 switch (rtattr
->rta_type
) {
2513 /* binary: depends on family type */
2523 u32
= RTA_DATA(rtattr
);
2524 *u32
= tswap32(*u32
);
2526 /* struct ifa_cacheinfo */
2528 ci
= RTA_DATA(rtattr
);
2529 ci
->ifa_prefered
= tswap32(ci
->ifa_prefered
);
2530 ci
->ifa_valid
= tswap32(ci
->ifa_valid
);
2531 ci
->cstamp
= tswap32(ci
->cstamp
);
2532 ci
->tstamp
= tswap32(ci
->tstamp
);
2535 gemu_log("Unknown host IFA type: %d\n", rtattr
->rta_type
);
2541 static abi_long
host_to_target_data_route_rtattr(struct rtattr
*rtattr
)
2544 switch (rtattr
->rta_type
) {
2545 /* binary: depends on family type */
2554 u32
= RTA_DATA(rtattr
);
2555 *u32
= tswap32(*u32
);
2558 gemu_log("Unknown host RTA type: %d\n", rtattr
->rta_type
);
2564 static abi_long
host_to_target_link_rtattr(struct rtattr
*rtattr
,
2565 uint32_t rtattr_len
)
2567 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2568 host_to_target_data_link_rtattr
);
2571 static abi_long
host_to_target_addr_rtattr(struct rtattr
*rtattr
,
2572 uint32_t rtattr_len
)
2574 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2575 host_to_target_data_addr_rtattr
);
2578 static abi_long
host_to_target_route_rtattr(struct rtattr
*rtattr
,
2579 uint32_t rtattr_len
)
2581 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2582 host_to_target_data_route_rtattr
);
2585 static abi_long
host_to_target_data_route(struct nlmsghdr
*nlh
)
2588 struct ifinfomsg
*ifi
;
2589 struct ifaddrmsg
*ifa
;
2592 nlmsg_len
= nlh
->nlmsg_len
;
2593 switch (nlh
->nlmsg_type
) {
2597 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2598 ifi
= NLMSG_DATA(nlh
);
2599 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2600 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2601 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2602 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2603 host_to_target_link_rtattr(IFLA_RTA(ifi
),
2604 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifi
)));
2610 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2611 ifa
= NLMSG_DATA(nlh
);
2612 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2613 host_to_target_addr_rtattr(IFA_RTA(ifa
),
2614 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifa
)));
2620 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2621 rtm
= NLMSG_DATA(nlh
);
2622 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2623 host_to_target_route_rtattr(RTM_RTA(rtm
),
2624 nlmsg_len
- NLMSG_LENGTH(sizeof(*rtm
)));
2628 return -TARGET_EINVAL
;
2633 static inline abi_long
host_to_target_nlmsg_route(struct nlmsghdr
*nlh
,
2636 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_route
);
2639 static abi_long
target_to_host_for_each_rtattr(struct rtattr
*rtattr
,
2641 abi_long (*target_to_host_rtattr
)
2646 while (len
>= sizeof(struct rtattr
)) {
2647 if (tswap16(rtattr
->rta_len
) < sizeof(struct rtattr
) ||
2648 tswap16(rtattr
->rta_len
) > len
) {
2651 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
2652 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
2653 ret
= target_to_host_rtattr(rtattr
);
2657 len
-= RTA_ALIGN(rtattr
->rta_len
);
2658 rtattr
= (struct rtattr
*)(((char *)rtattr
) +
2659 RTA_ALIGN(rtattr
->rta_len
));
2664 static abi_long
target_to_host_data_link_rtattr(struct rtattr
*rtattr
)
2666 switch (rtattr
->rta_type
) {
2668 gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr
->rta_type
);
2674 static abi_long
target_to_host_data_addr_rtattr(struct rtattr
*rtattr
)
2676 switch (rtattr
->rta_type
) {
2677 /* binary: depends on family type */
2682 gemu_log("Unknown target IFA type: %d\n", rtattr
->rta_type
);
2688 static abi_long
target_to_host_data_route_rtattr(struct rtattr
*rtattr
)
2691 switch (rtattr
->rta_type
) {
2692 /* binary: depends on family type */
2700 u32
= RTA_DATA(rtattr
);
2701 *u32
= tswap32(*u32
);
2704 gemu_log("Unknown target RTA type: %d\n", rtattr
->rta_type
);
2710 static void target_to_host_link_rtattr(struct rtattr
*rtattr
,
2711 uint32_t rtattr_len
)
2713 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2714 target_to_host_data_link_rtattr
);
2717 static void target_to_host_addr_rtattr(struct rtattr
*rtattr
,
2718 uint32_t rtattr_len
)
2720 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2721 target_to_host_data_addr_rtattr
);
2724 static void target_to_host_route_rtattr(struct rtattr
*rtattr
,
2725 uint32_t rtattr_len
)
2727 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2728 target_to_host_data_route_rtattr
);
2731 static abi_long
target_to_host_data_route(struct nlmsghdr
*nlh
)
2733 struct ifinfomsg
*ifi
;
2734 struct ifaddrmsg
*ifa
;
2737 switch (nlh
->nlmsg_type
) {
2742 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2743 ifi
= NLMSG_DATA(nlh
);
2744 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2745 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2746 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2747 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2748 target_to_host_link_rtattr(IFLA_RTA(ifi
), nlh
->nlmsg_len
-
2749 NLMSG_LENGTH(sizeof(*ifi
)));
2755 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2756 ifa
= NLMSG_DATA(nlh
);
2757 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2758 target_to_host_addr_rtattr(IFA_RTA(ifa
), nlh
->nlmsg_len
-
2759 NLMSG_LENGTH(sizeof(*ifa
)));
2766 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2767 rtm
= NLMSG_DATA(nlh
);
2768 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2769 target_to_host_route_rtattr(RTM_RTA(rtm
), nlh
->nlmsg_len
-
2770 NLMSG_LENGTH(sizeof(*rtm
)));
2774 return -TARGET_EOPNOTSUPP
;
2779 static abi_long
target_to_host_nlmsg_route(struct nlmsghdr
*nlh
, size_t len
)
2781 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_route
);
2783 #endif /* CONFIG_RTNETLINK */
2785 static abi_long
host_to_target_data_audit(struct nlmsghdr
*nlh
)
2787 switch (nlh
->nlmsg_type
) {
2789 gemu_log("Unknown host audit message type %d\n",
2791 return -TARGET_EINVAL
;
2796 static inline abi_long
host_to_target_nlmsg_audit(struct nlmsghdr
*nlh
,
2799 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_audit
);
2802 static abi_long
target_to_host_data_audit(struct nlmsghdr
*nlh
)
2804 switch (nlh
->nlmsg_type
) {
2806 case AUDIT_FIRST_USER_MSG
... AUDIT_LAST_USER_MSG
:
2807 case AUDIT_FIRST_USER_MSG2
... AUDIT_LAST_USER_MSG2
:
2810 gemu_log("Unknown target audit message type %d\n",
2812 return -TARGET_EINVAL
;
2818 static abi_long
target_to_host_nlmsg_audit(struct nlmsghdr
*nlh
, size_t len
)
2820 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_audit
);
2823 /* do_setsockopt() Must return target values and target errnos. */
2824 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
2825 abi_ulong optval_addr
, socklen_t optlen
)
2829 struct ip_mreqn
*ip_mreq
;
2830 struct ip_mreq_source
*ip_mreq_source
;
2834 /* TCP options all take an 'int' value. */
2835 if (optlen
< sizeof(uint32_t))
2836 return -TARGET_EINVAL
;
2838 if (get_user_u32(val
, optval_addr
))
2839 return -TARGET_EFAULT
;
2840 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2847 case IP_ROUTER_ALERT
:
2851 case IP_MTU_DISCOVER
:
2858 case IP_MULTICAST_TTL
:
2859 case IP_MULTICAST_LOOP
:
2861 if (optlen
>= sizeof(uint32_t)) {
2862 if (get_user_u32(val
, optval_addr
))
2863 return -TARGET_EFAULT
;
2864 } else if (optlen
>= 1) {
2865 if (get_user_u8(val
, optval_addr
))
2866 return -TARGET_EFAULT
;
2868 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2870 case IP_ADD_MEMBERSHIP
:
2871 case IP_DROP_MEMBERSHIP
:
2872 if (optlen
< sizeof (struct target_ip_mreq
) ||
2873 optlen
> sizeof (struct target_ip_mreqn
))
2874 return -TARGET_EINVAL
;
2876 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
2877 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
2878 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
2881 case IP_BLOCK_SOURCE
:
2882 case IP_UNBLOCK_SOURCE
:
2883 case IP_ADD_SOURCE_MEMBERSHIP
:
2884 case IP_DROP_SOURCE_MEMBERSHIP
:
2885 if (optlen
!= sizeof (struct target_ip_mreq_source
))
2886 return -TARGET_EINVAL
;
2888 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2889 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2890 unlock_user (ip_mreq_source
, optval_addr
, 0);
2899 case IPV6_MTU_DISCOVER
:
2902 case IPV6_RECVPKTINFO
:
2903 case IPV6_UNICAST_HOPS
:
2905 case IPV6_RECVHOPLIMIT
:
2906 case IPV6_2292HOPLIMIT
:
2909 if (optlen
< sizeof(uint32_t)) {
2910 return -TARGET_EINVAL
;
2912 if (get_user_u32(val
, optval_addr
)) {
2913 return -TARGET_EFAULT
;
2915 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2916 &val
, sizeof(val
)));
2920 struct in6_pktinfo pki
;
2922 if (optlen
< sizeof(pki
)) {
2923 return -TARGET_EINVAL
;
2926 if (copy_from_user(&pki
, optval_addr
, sizeof(pki
))) {
2927 return -TARGET_EFAULT
;
2930 pki
.ipi6_ifindex
= tswap32(pki
.ipi6_ifindex
);
2932 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2933 &pki
, sizeof(pki
)));
2944 struct icmp6_filter icmp6f
;
2946 if (optlen
> sizeof(icmp6f
)) {
2947 optlen
= sizeof(icmp6f
);
2950 if (copy_from_user(&icmp6f
, optval_addr
, optlen
)) {
2951 return -TARGET_EFAULT
;
2954 for (val
= 0; val
< 8; val
++) {
2955 icmp6f
.data
[val
] = tswap32(icmp6f
.data
[val
]);
2958 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2970 /* those take an u32 value */
2971 if (optlen
< sizeof(uint32_t)) {
2972 return -TARGET_EINVAL
;
2975 if (get_user_u32(val
, optval_addr
)) {
2976 return -TARGET_EFAULT
;
2978 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2979 &val
, sizeof(val
)));
2986 case TARGET_SOL_SOCKET
:
2988 case TARGET_SO_RCVTIMEO
:
2992 optname
= SO_RCVTIMEO
;
2995 if (optlen
!= sizeof(struct target_timeval
)) {
2996 return -TARGET_EINVAL
;
2999 if (copy_from_user_timeval(&tv
, optval_addr
)) {
3000 return -TARGET_EFAULT
;
3003 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
3007 case TARGET_SO_SNDTIMEO
:
3008 optname
= SO_SNDTIMEO
;
3010 case TARGET_SO_ATTACH_FILTER
:
3012 struct target_sock_fprog
*tfprog
;
3013 struct target_sock_filter
*tfilter
;
3014 struct sock_fprog fprog
;
3015 struct sock_filter
*filter
;
3018 if (optlen
!= sizeof(*tfprog
)) {
3019 return -TARGET_EINVAL
;
3021 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
3022 return -TARGET_EFAULT
;
3024 if (!lock_user_struct(VERIFY_READ
, tfilter
,
3025 tswapal(tfprog
->filter
), 0)) {
3026 unlock_user_struct(tfprog
, optval_addr
, 1);
3027 return -TARGET_EFAULT
;
3030 fprog
.len
= tswap16(tfprog
->len
);
3031 filter
= g_try_new(struct sock_filter
, fprog
.len
);
3032 if (filter
== NULL
) {
3033 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
3034 unlock_user_struct(tfprog
, optval_addr
, 1);
3035 return -TARGET_ENOMEM
;
3037 for (i
= 0; i
< fprog
.len
; i
++) {
3038 filter
[i
].code
= tswap16(tfilter
[i
].code
);
3039 filter
[i
].jt
= tfilter
[i
].jt
;
3040 filter
[i
].jf
= tfilter
[i
].jf
;
3041 filter
[i
].k
= tswap32(tfilter
[i
].k
);
3043 fprog
.filter
= filter
;
3045 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
3046 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
3049 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
3050 unlock_user_struct(tfprog
, optval_addr
, 1);
3053 case TARGET_SO_BINDTODEVICE
:
3055 char *dev_ifname
, *addr_ifname
;
3057 if (optlen
> IFNAMSIZ
- 1) {
3058 optlen
= IFNAMSIZ
- 1;
3060 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
3062 return -TARGET_EFAULT
;
3064 optname
= SO_BINDTODEVICE
;
3065 addr_ifname
= alloca(IFNAMSIZ
);
3066 memcpy(addr_ifname
, dev_ifname
, optlen
);
3067 addr_ifname
[optlen
] = 0;
3068 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
3069 addr_ifname
, optlen
));
3070 unlock_user (dev_ifname
, optval_addr
, 0);
3073 /* Options with 'int' argument. */
3074 case TARGET_SO_DEBUG
:
3077 case TARGET_SO_REUSEADDR
:
3078 optname
= SO_REUSEADDR
;
3080 case TARGET_SO_TYPE
:
3083 case TARGET_SO_ERROR
:
3086 case TARGET_SO_DONTROUTE
:
3087 optname
= SO_DONTROUTE
;
3089 case TARGET_SO_BROADCAST
:
3090 optname
= SO_BROADCAST
;
3092 case TARGET_SO_SNDBUF
:
3093 optname
= SO_SNDBUF
;
3095 case TARGET_SO_SNDBUFFORCE
:
3096 optname
= SO_SNDBUFFORCE
;
3098 case TARGET_SO_RCVBUF
:
3099 optname
= SO_RCVBUF
;
3101 case TARGET_SO_RCVBUFFORCE
:
3102 optname
= SO_RCVBUFFORCE
;
3104 case TARGET_SO_KEEPALIVE
:
3105 optname
= SO_KEEPALIVE
;
3107 case TARGET_SO_OOBINLINE
:
3108 optname
= SO_OOBINLINE
;
3110 case TARGET_SO_NO_CHECK
:
3111 optname
= SO_NO_CHECK
;
3113 case TARGET_SO_PRIORITY
:
3114 optname
= SO_PRIORITY
;
3117 case TARGET_SO_BSDCOMPAT
:
3118 optname
= SO_BSDCOMPAT
;
3121 case TARGET_SO_PASSCRED
:
3122 optname
= SO_PASSCRED
;
3124 case TARGET_SO_PASSSEC
:
3125 optname
= SO_PASSSEC
;
3127 case TARGET_SO_TIMESTAMP
:
3128 optname
= SO_TIMESTAMP
;
3130 case TARGET_SO_RCVLOWAT
:
3131 optname
= SO_RCVLOWAT
;
3137 if (optlen
< sizeof(uint32_t))
3138 return -TARGET_EINVAL
;
3140 if (get_user_u32(val
, optval_addr
))
3141 return -TARGET_EFAULT
;
3142 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
3146 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
3147 ret
= -TARGET_ENOPROTOOPT
;
3152 /* do_getsockopt() Must return target values and target errnos. */
3153 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
3154 abi_ulong optval_addr
, abi_ulong optlen
)
3161 case TARGET_SOL_SOCKET
:
3164 /* These don't just return a single integer */
3165 case TARGET_SO_LINGER
:
3166 case TARGET_SO_RCVTIMEO
:
3167 case TARGET_SO_SNDTIMEO
:
3168 case TARGET_SO_PEERNAME
:
3170 case TARGET_SO_PEERCRED
: {
3173 struct target_ucred
*tcr
;
3175 if (get_user_u32(len
, optlen
)) {
3176 return -TARGET_EFAULT
;
3179 return -TARGET_EINVAL
;
3183 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
3191 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
3192 return -TARGET_EFAULT
;
3194 __put_user(cr
.pid
, &tcr
->pid
);
3195 __put_user(cr
.uid
, &tcr
->uid
);
3196 __put_user(cr
.gid
, &tcr
->gid
);
3197 unlock_user_struct(tcr
, optval_addr
, 1);
3198 if (put_user_u32(len
, optlen
)) {
3199 return -TARGET_EFAULT
;
3203 /* Options with 'int' argument. */
3204 case TARGET_SO_DEBUG
:
3207 case TARGET_SO_REUSEADDR
:
3208 optname
= SO_REUSEADDR
;
3210 case TARGET_SO_TYPE
:
3213 case TARGET_SO_ERROR
:
3216 case TARGET_SO_DONTROUTE
:
3217 optname
= SO_DONTROUTE
;
3219 case TARGET_SO_BROADCAST
:
3220 optname
= SO_BROADCAST
;
3222 case TARGET_SO_SNDBUF
:
3223 optname
= SO_SNDBUF
;
3225 case TARGET_SO_RCVBUF
:
3226 optname
= SO_RCVBUF
;
3228 case TARGET_SO_KEEPALIVE
:
3229 optname
= SO_KEEPALIVE
;
3231 case TARGET_SO_OOBINLINE
:
3232 optname
= SO_OOBINLINE
;
3234 case TARGET_SO_NO_CHECK
:
3235 optname
= SO_NO_CHECK
;
3237 case TARGET_SO_PRIORITY
:
3238 optname
= SO_PRIORITY
;
3241 case TARGET_SO_BSDCOMPAT
:
3242 optname
= SO_BSDCOMPAT
;
3245 case TARGET_SO_PASSCRED
:
3246 optname
= SO_PASSCRED
;
3248 case TARGET_SO_TIMESTAMP
:
3249 optname
= SO_TIMESTAMP
;
3251 case TARGET_SO_RCVLOWAT
:
3252 optname
= SO_RCVLOWAT
;
3254 case TARGET_SO_ACCEPTCONN
:
3255 optname
= SO_ACCEPTCONN
;
3262 /* TCP options all take an 'int' value. */
3264 if (get_user_u32(len
, optlen
))
3265 return -TARGET_EFAULT
;
3267 return -TARGET_EINVAL
;
3269 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3272 if (optname
== SO_TYPE
) {
3273 val
= host_to_target_sock_type(val
);
3278 if (put_user_u32(val
, optval_addr
))
3279 return -TARGET_EFAULT
;
3281 if (put_user_u8(val
, optval_addr
))
3282 return -TARGET_EFAULT
;
3284 if (put_user_u32(len
, optlen
))
3285 return -TARGET_EFAULT
;
3292 case IP_ROUTER_ALERT
:
3296 case IP_MTU_DISCOVER
:
3302 case IP_MULTICAST_TTL
:
3303 case IP_MULTICAST_LOOP
:
3304 if (get_user_u32(len
, optlen
))
3305 return -TARGET_EFAULT
;
3307 return -TARGET_EINVAL
;
3309 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3312 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
3314 if (put_user_u32(len
, optlen
)
3315 || put_user_u8(val
, optval_addr
))
3316 return -TARGET_EFAULT
;
3318 if (len
> sizeof(int))
3320 if (put_user_u32(len
, optlen
)
3321 || put_user_u32(val
, optval_addr
))
3322 return -TARGET_EFAULT
;
3326 ret
= -TARGET_ENOPROTOOPT
;
3332 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
3334 ret
= -TARGET_EOPNOTSUPP
;
3340 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
3341 abi_ulong count
, int copy
)
3343 struct target_iovec
*target_vec
;
3345 abi_ulong total_len
, max_len
;
3348 bool bad_address
= false;
3354 if (count
> IOV_MAX
) {
3359 vec
= g_try_new0(struct iovec
, count
);
3365 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3366 count
* sizeof(struct target_iovec
), 1);
3367 if (target_vec
== NULL
) {
3372 /* ??? If host page size > target page size, this will result in a
3373 value larger than what we can actually support. */
3374 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
3377 for (i
= 0; i
< count
; i
++) {
3378 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3379 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3384 } else if (len
== 0) {
3385 /* Zero length pointer is ignored. */
3386 vec
[i
].iov_base
= 0;
3388 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
3389 /* If the first buffer pointer is bad, this is a fault. But
3390 * subsequent bad buffers will result in a partial write; this
3391 * is realized by filling the vector with null pointers and
3393 if (!vec
[i
].iov_base
) {
3404 if (len
> max_len
- total_len
) {
3405 len
= max_len
- total_len
;
3408 vec
[i
].iov_len
= len
;
3412 unlock_user(target_vec
, target_addr
, 0);
3417 if (tswapal(target_vec
[i
].iov_len
) > 0) {
3418 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
3421 unlock_user(target_vec
, target_addr
, 0);
3428 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
3429 abi_ulong count
, int copy
)
3431 struct target_iovec
*target_vec
;
3434 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3435 count
* sizeof(struct target_iovec
), 1);
3437 for (i
= 0; i
< count
; i
++) {
3438 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3439 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3443 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
3445 unlock_user(target_vec
, target_addr
, 0);
3451 static inline int target_to_host_sock_type(int *type
)
3454 int target_type
= *type
;
3456 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
3457 case TARGET_SOCK_DGRAM
:
3458 host_type
= SOCK_DGRAM
;
3460 case TARGET_SOCK_STREAM
:
3461 host_type
= SOCK_STREAM
;
3464 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
3467 if (target_type
& TARGET_SOCK_CLOEXEC
) {
3468 #if defined(SOCK_CLOEXEC)
3469 host_type
|= SOCK_CLOEXEC
;
3471 return -TARGET_EINVAL
;
3474 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3475 #if defined(SOCK_NONBLOCK)
3476 host_type
|= SOCK_NONBLOCK
;
3477 #elif !defined(O_NONBLOCK)
3478 return -TARGET_EINVAL
;
3485 /* Try to emulate socket type flags after socket creation. */
3486 static int sock_flags_fixup(int fd
, int target_type
)
3488 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3489 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3490 int flags
= fcntl(fd
, F_GETFL
);
3491 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
3493 return -TARGET_EINVAL
;
3500 static abi_long
packet_target_to_host_sockaddr(void *host_addr
,
3501 abi_ulong target_addr
,
3504 struct sockaddr
*addr
= host_addr
;
3505 struct target_sockaddr
*target_saddr
;
3507 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
3508 if (!target_saddr
) {
3509 return -TARGET_EFAULT
;
3512 memcpy(addr
, target_saddr
, len
);
3513 addr
->sa_family
= tswap16(target_saddr
->sa_family
);
3514 /* spkt_protocol is big-endian */
3516 unlock_user(target_saddr
, target_addr
, 0);
/* fd translator for SOCK_PACKET sockets: only the sockaddr needs fixing. */
static TargetFdTrans target_packet_trans = {
    .target_to_host_addr = packet_target_to_host_sockaddr,
};
3524 #ifdef CONFIG_RTNETLINK
3525 static abi_long
netlink_route_target_to_host(void *buf
, size_t len
)
3529 ret
= target_to_host_nlmsg_route(buf
, len
);
3537 static abi_long
netlink_route_host_to_target(void *buf
, size_t len
)
3541 ret
= host_to_target_nlmsg_route(buf
, len
);
/* fd translator for NETLINK_ROUTE sockets: swap payloads both directions. */
static TargetFdTrans target_netlink_route_trans = {
    .target_to_host_data = netlink_route_target_to_host,
    .host_to_target_data = netlink_route_host_to_target,
};
3553 #endif /* CONFIG_RTNETLINK */
3555 static abi_long
netlink_audit_target_to_host(void *buf
, size_t len
)
3559 ret
= target_to_host_nlmsg_audit(buf
, len
);
3567 static abi_long
netlink_audit_host_to_target(void *buf
, size_t len
)
3571 ret
= host_to_target_nlmsg_audit(buf
, len
);
/* fd translator for NETLINK_AUDIT sockets: swap payloads both directions. */
static TargetFdTrans target_netlink_audit_trans = {
    .target_to_host_data = netlink_audit_target_to_host,
    .host_to_target_data = netlink_audit_host_to_target,
};
3584 /* do_socket() Must return target values and target errnos. */
3585 static abi_long
do_socket(int domain
, int type
, int protocol
)
3587 int target_type
= type
;
3590 ret
= target_to_host_sock_type(&type
);
3595 if (domain
== PF_NETLINK
&& !(
3596 #ifdef CONFIG_RTNETLINK
3597 protocol
== NETLINK_ROUTE
||
3599 protocol
== NETLINK_KOBJECT_UEVENT
||
3600 protocol
== NETLINK_AUDIT
)) {
3601 return -EPFNOSUPPORT
;
3604 if (domain
== AF_PACKET
||
3605 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
3606 protocol
= tswap16(protocol
);
3609 ret
= get_errno(socket(domain
, type
, protocol
));
3611 ret
= sock_flags_fixup(ret
, target_type
);
3612 if (type
== SOCK_PACKET
) {
3613 /* Manage an obsolete case :
3614 * if socket type is SOCK_PACKET, bind by name
3616 fd_trans_register(ret
, &target_packet_trans
);
3617 } else if (domain
== PF_NETLINK
) {
3619 #ifdef CONFIG_RTNETLINK
3621 fd_trans_register(ret
, &target_netlink_route_trans
);
3624 case NETLINK_KOBJECT_UEVENT
:
3625 /* nothing to do: messages are strings */
3628 fd_trans_register(ret
, &target_netlink_audit_trans
);
3631 g_assert_not_reached();
3638 /* do_bind() Must return target values and target errnos. */
3639 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
3645 if ((int)addrlen
< 0) {
3646 return -TARGET_EINVAL
;
3649 addr
= alloca(addrlen
+1);
3651 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3655 return get_errno(bind(sockfd
, addr
, addrlen
));
3658 /* do_connect() Must return target values and target errnos. */
3659 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
3665 if ((int)addrlen
< 0) {
3666 return -TARGET_EINVAL
;
3669 addr
= alloca(addrlen
+1);
3671 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3675 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
3678 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3679 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
3680 int flags
, int send
)
3686 abi_ulong target_vec
;
3688 if (msgp
->msg_name
) {
3689 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
3690 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
3691 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
3692 tswapal(msgp
->msg_name
),
3694 if (ret
== -TARGET_EFAULT
) {
3695 /* For connected sockets msg_name and msg_namelen must
3696 * be ignored, so returning EFAULT immediately is wrong.
3697 * Instead, pass a bad msg_name to the host kernel, and
3698 * let it decide whether to return EFAULT or not.
3700 msg
.msg_name
= (void *)-1;
3705 msg
.msg_name
= NULL
;
3706 msg
.msg_namelen
= 0;
3708 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3709 msg
.msg_control
= alloca(msg
.msg_controllen
);
3710 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3712 count
= tswapal(msgp
->msg_iovlen
);
3713 target_vec
= tswapal(msgp
->msg_iov
);
3715 if (count
> IOV_MAX
) {
3716 /* sendrcvmsg returns a different errno for this condition than
3717 * readv/writev, so we must catch it here before lock_iovec() does.
3719 ret
= -TARGET_EMSGSIZE
;
3723 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3724 target_vec
, count
, send
);
3726 ret
= -host_to_target_errno(errno
);
3729 msg
.msg_iovlen
= count
;
3733 if (fd_trans_target_to_host_data(fd
)) {
3736 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3737 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3738 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3739 msg
.msg_iov
->iov_len
);
3741 msg
.msg_iov
->iov_base
= host_msg
;
3742 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3746 ret
= target_to_host_cmsg(&msg
, msgp
);
3748 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3752 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3753 if (!is_error(ret
)) {
3755 if (fd_trans_host_to_target_data(fd
)) {
3756 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3759 ret
= host_to_target_cmsg(msgp
, &msg
);
3761 if (!is_error(ret
)) {
3762 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3763 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
3764 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3765 msg
.msg_name
, msg
.msg_namelen
);
3777 unlock_iovec(vec
, target_vec
, count
, !send
);
3782 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3783 int flags
, int send
)
3786 struct target_msghdr
*msgp
;
3788 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3792 return -TARGET_EFAULT
;
3794 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3795 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3799 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3800 * so it might not have this *mmsg-specific flag either.
3802 #ifndef MSG_WAITFORONE
3803 #define MSG_WAITFORONE 0x10000
3806 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3807 unsigned int vlen
, unsigned int flags
,
3810 struct target_mmsghdr
*mmsgp
;
3814 if (vlen
> UIO_MAXIOV
) {
3818 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3820 return -TARGET_EFAULT
;
3823 for (i
= 0; i
< vlen
; i
++) {
3824 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3825 if (is_error(ret
)) {
3828 mmsgp
[i
].msg_len
= tswap32(ret
);
3829 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3830 if (flags
& MSG_WAITFORONE
) {
3831 flags
|= MSG_DONTWAIT
;
3835 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3837 /* Return number of datagrams sent if we sent any at all;
3838 * otherwise return the error.
3846 /* do_accept4() Must return target values and target errnos. */
3847 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3848 abi_ulong target_addrlen_addr
, int flags
)
3855 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3857 if (target_addr
== 0) {
3858 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3861 /* linux returns EINVAL if addrlen pointer is invalid */
3862 if (get_user_u32(addrlen
, target_addrlen_addr
))
3863 return -TARGET_EINVAL
;
3865 if ((int)addrlen
< 0) {
3866 return -TARGET_EINVAL
;
3869 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3870 return -TARGET_EINVAL
;
3872 addr
= alloca(addrlen
);
3874 ret
= get_errno(safe_accept4(fd
, addr
, &addrlen
, host_flags
));
3875 if (!is_error(ret
)) {
3876 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3877 if (put_user_u32(addrlen
, target_addrlen_addr
))
3878 ret
= -TARGET_EFAULT
;
3883 /* do_getpeername() Must return target values and target errnos. */
3884 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3885 abi_ulong target_addrlen_addr
)
3891 if (get_user_u32(addrlen
, target_addrlen_addr
))
3892 return -TARGET_EFAULT
;
3894 if ((int)addrlen
< 0) {
3895 return -TARGET_EINVAL
;
3898 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3899 return -TARGET_EFAULT
;
3901 addr
= alloca(addrlen
);
3903 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
3904 if (!is_error(ret
)) {
3905 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3906 if (put_user_u32(addrlen
, target_addrlen_addr
))
3907 ret
= -TARGET_EFAULT
;
3912 /* do_getsockname() Must return target values and target errnos. */
3913 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3914 abi_ulong target_addrlen_addr
)
3920 if (get_user_u32(addrlen
, target_addrlen_addr
))
3921 return -TARGET_EFAULT
;
3923 if ((int)addrlen
< 0) {
3924 return -TARGET_EINVAL
;
3927 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3928 return -TARGET_EFAULT
;
3930 addr
= alloca(addrlen
);
3932 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
3933 if (!is_error(ret
)) {
3934 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3935 if (put_user_u32(addrlen
, target_addrlen_addr
))
3936 ret
= -TARGET_EFAULT
;
3941 /* do_socketpair() Must return target values and target errnos. */
3942 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3943 abi_ulong target_tab_addr
)
3948 target_to_host_sock_type(&type
);
3950 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
3951 if (!is_error(ret
)) {
3952 if (put_user_s32(tab
[0], target_tab_addr
)
3953 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
3954 ret
= -TARGET_EFAULT
;
3959 /* do_sendto() Must return target values and target errnos. */
3960 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
3961 abi_ulong target_addr
, socklen_t addrlen
)
3965 void *copy_msg
= NULL
;
3968 if ((int)addrlen
< 0) {
3969 return -TARGET_EINVAL
;
3972 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
3974 return -TARGET_EFAULT
;
3975 if (fd_trans_target_to_host_data(fd
)) {
3976 copy_msg
= host_msg
;
3977 host_msg
= g_malloc(len
);
3978 memcpy(host_msg
, copy_msg
, len
);
3979 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
3985 addr
= alloca(addrlen
+1);
3986 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
3990 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
3992 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
3997 host_msg
= copy_msg
;
3999 unlock_user(host_msg
, msg
, 0);
4003 /* do_recvfrom() Must return target values and target errnos. */
4004 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
4005 abi_ulong target_addr
,
4006 abi_ulong target_addrlen
)
4013 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
4015 return -TARGET_EFAULT
;
4017 if (get_user_u32(addrlen
, target_addrlen
)) {
4018 ret
= -TARGET_EFAULT
;
4021 if ((int)addrlen
< 0) {
4022 ret
= -TARGET_EINVAL
;
4025 addr
= alloca(addrlen
);
4026 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
4029 addr
= NULL
; /* To keep compiler quiet. */
4030 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
4032 if (!is_error(ret
)) {
4033 if (fd_trans_host_to_target_data(fd
)) {
4034 ret
= fd_trans_host_to_target_data(fd
)(host_msg
, ret
);
4037 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
4038 if (put_user_u32(addrlen
, target_addrlen
)) {
4039 ret
= -TARGET_EFAULT
;
4043 unlock_user(host_msg
, msg
, len
);
4046 unlock_user(host_msg
, msg
, 0);
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
4144 #define N_SHM_REGIONS 32
4146 static struct shm_region
{
4150 } shm_regions
[N_SHM_REGIONS
];
4152 #ifndef TARGET_SEMID64_DS
4153 /* asm-generic version of this struct */
4154 struct target_semid64_ds
4156 struct target_ipc_perm sem_perm
;
4157 abi_ulong sem_otime
;
4158 #if TARGET_ABI_BITS == 32
4159 abi_ulong __unused1
;
4161 abi_ulong sem_ctime
;
4162 #if TARGET_ABI_BITS == 32
4163 abi_ulong __unused2
;
4165 abi_ulong sem_nsems
;
4166 abi_ulong __unused3
;
4167 abi_ulong __unused4
;
4171 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
4172 abi_ulong target_addr
)
4174 struct target_ipc_perm
*target_ip
;
4175 struct target_semid64_ds
*target_sd
;
4177 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4178 return -TARGET_EFAULT
;
4179 target_ip
= &(target_sd
->sem_perm
);
4180 host_ip
->__key
= tswap32(target_ip
->__key
);
4181 host_ip
->uid
= tswap32(target_ip
->uid
);
4182 host_ip
->gid
= tswap32(target_ip
->gid
);
4183 host_ip
->cuid
= tswap32(target_ip
->cuid
);
4184 host_ip
->cgid
= tswap32(target_ip
->cgid
);
4185 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4186 host_ip
->mode
= tswap32(target_ip
->mode
);
4188 host_ip
->mode
= tswap16(target_ip
->mode
);
4190 #if defined(TARGET_PPC)
4191 host_ip
->__seq
= tswap32(target_ip
->__seq
);
4193 host_ip
->__seq
= tswap16(target_ip
->__seq
);
4195 unlock_user_struct(target_sd
, target_addr
, 0);
4199 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
4200 struct ipc_perm
*host_ip
)
4202 struct target_ipc_perm
*target_ip
;
4203 struct target_semid64_ds
*target_sd
;
4205 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4206 return -TARGET_EFAULT
;
4207 target_ip
= &(target_sd
->sem_perm
);
4208 target_ip
->__key
= tswap32(host_ip
->__key
);
4209 target_ip
->uid
= tswap32(host_ip
->uid
);
4210 target_ip
->gid
= tswap32(host_ip
->gid
);
4211 target_ip
->cuid
= tswap32(host_ip
->cuid
);
4212 target_ip
->cgid
= tswap32(host_ip
->cgid
);
4213 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4214 target_ip
->mode
= tswap32(host_ip
->mode
);
4216 target_ip
->mode
= tswap16(host_ip
->mode
);
4218 #if defined(TARGET_PPC)
4219 target_ip
->__seq
= tswap32(host_ip
->__seq
);
4221 target_ip
->__seq
= tswap16(host_ip
->__seq
);
4223 unlock_user_struct(target_sd
, target_addr
, 1);
4227 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
4228 abi_ulong target_addr
)
4230 struct target_semid64_ds
*target_sd
;
4232 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4233 return -TARGET_EFAULT
;
4234 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
4235 return -TARGET_EFAULT
;
4236 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
4237 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
4238 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
4239 unlock_user_struct(target_sd
, target_addr
, 0);
4243 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
4244 struct semid_ds
*host_sd
)
4246 struct target_semid64_ds
*target_sd
;
4248 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4249 return -TARGET_EFAULT
;
4250 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
4251 return -TARGET_EFAULT
;
4252 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
4253 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
4254 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
4255 unlock_user_struct(target_sd
, target_addr
, 1);
4259 struct target_seminfo
{
4272 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
4273 struct seminfo
*host_seminfo
)
4275 struct target_seminfo
*target_seminfo
;
4276 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
4277 return -TARGET_EFAULT
;
4278 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
4279 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
4280 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
4281 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
4282 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
4283 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
4284 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
4285 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
4286 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
4287 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
4288 unlock_user_struct(target_seminfo
, target_addr
, 1);
4294 struct semid_ds
*buf
;
4295 unsigned short *array
;
4296 struct seminfo
*__buf
;
4299 union target_semun
{
4306 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
4307 abi_ulong target_addr
)
4310 unsigned short *array
;
4312 struct semid_ds semid_ds
;
4315 semun
.buf
= &semid_ds
;
4317 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4319 return get_errno(ret
);
4321 nsems
= semid_ds
.sem_nsems
;
4323 *host_array
= g_try_new(unsigned short, nsems
);
4325 return -TARGET_ENOMEM
;
4327 array
= lock_user(VERIFY_READ
, target_addr
,
4328 nsems
*sizeof(unsigned short), 1);
4330 g_free(*host_array
);
4331 return -TARGET_EFAULT
;
4334 for(i
=0; i
<nsems
; i
++) {
4335 __get_user((*host_array
)[i
], &array
[i
]);
4337 unlock_user(array
, target_addr
, 0);
4342 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
4343 unsigned short **host_array
)
4346 unsigned short *array
;
4348 struct semid_ds semid_ds
;
4351 semun
.buf
= &semid_ds
;
4353 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4355 return get_errno(ret
);
4357 nsems
= semid_ds
.sem_nsems
;
4359 array
= lock_user(VERIFY_WRITE
, target_addr
,
4360 nsems
*sizeof(unsigned short), 0);
4362 return -TARGET_EFAULT
;
4364 for(i
=0; i
<nsems
; i
++) {
4365 __put_user((*host_array
)[i
], &array
[i
]);
4367 g_free(*host_array
);
4368 unlock_user(array
, target_addr
, 1);
4373 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
4374 abi_ulong target_arg
)
4376 union target_semun target_su
= { .buf
= target_arg
};
4378 struct semid_ds dsarg
;
4379 unsigned short *array
= NULL
;
4380 struct seminfo seminfo
;
4381 abi_long ret
= -TARGET_EINVAL
;
4388 /* In 64 bit cross-endian situations, we will erroneously pick up
4389 * the wrong half of the union for the "val" element. To rectify
4390 * this, the entire 8-byte structure is byteswapped, followed by
4391 * a swap of the 4 byte val field. In other cases, the data is
4392 * already in proper host byte order. */
4393 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
4394 target_su
.buf
= tswapal(target_su
.buf
);
4395 arg
.val
= tswap32(target_su
.val
);
4397 arg
.val
= target_su
.val
;
4399 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4403 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
4407 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4408 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
4415 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
4419 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4420 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
4426 arg
.__buf
= &seminfo
;
4427 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4428 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
4436 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
4443 struct target_sembuf
{
4444 unsigned short sem_num
;
4449 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
4450 abi_ulong target_addr
,
4453 struct target_sembuf
*target_sembuf
;
4456 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
4457 nsops
*sizeof(struct target_sembuf
), 1);
4459 return -TARGET_EFAULT
;
4461 for(i
=0; i
<nsops
; i
++) {
4462 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
4463 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
4464 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
4467 unlock_user(target_sembuf
, target_addr
, 0);
4472 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
4474 struct sembuf sops
[nsops
];
4476 if (target_to_host_sembuf(sops
, ptr
, nsops
))
4477 return -TARGET_EFAULT
;
4479 return get_errno(safe_semtimedop(semid
, sops
, nsops
, NULL
));
4482 struct target_msqid_ds
4484 struct target_ipc_perm msg_perm
;
4485 abi_ulong msg_stime
;
4486 #if TARGET_ABI_BITS == 32
4487 abi_ulong __unused1
;
4489 abi_ulong msg_rtime
;
4490 #if TARGET_ABI_BITS == 32
4491 abi_ulong __unused2
;
4493 abi_ulong msg_ctime
;
4494 #if TARGET_ABI_BITS == 32
4495 abi_ulong __unused3
;
4497 abi_ulong __msg_cbytes
;
4499 abi_ulong msg_qbytes
;
4500 abi_ulong msg_lspid
;
4501 abi_ulong msg_lrpid
;
4502 abi_ulong __unused4
;
4503 abi_ulong __unused5
;
4506 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
4507 abi_ulong target_addr
)
4509 struct target_msqid_ds
*target_md
;
4511 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
4512 return -TARGET_EFAULT
;
4513 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
4514 return -TARGET_EFAULT
;
4515 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
4516 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
4517 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
4518 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
4519 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
4520 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
4521 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
4522 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
4523 unlock_user_struct(target_md
, target_addr
, 0);
4527 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
4528 struct msqid_ds
*host_md
)
4530 struct target_msqid_ds
*target_md
;
4532 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
4533 return -TARGET_EFAULT
;
4534 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
4535 return -TARGET_EFAULT
;
4536 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
4537 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
4538 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
4539 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
4540 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
4541 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
4542 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
4543 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
4544 unlock_user_struct(target_md
, target_addr
, 1);
4548 struct target_msginfo
{
4556 unsigned short int msgseg
;
4559 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
4560 struct msginfo
*host_msginfo
)
4562 struct target_msginfo
*target_msginfo
;
4563 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
4564 return -TARGET_EFAULT
;
4565 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
4566 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
4567 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
4568 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
4569 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
4570 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
4571 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
4572 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
4573 unlock_user_struct(target_msginfo
, target_addr
, 1);
4577 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
4579 struct msqid_ds dsarg
;
4580 struct msginfo msginfo
;
4581 abi_long ret
= -TARGET_EINVAL
;
4589 if (target_to_host_msqid_ds(&dsarg
,ptr
))
4590 return -TARGET_EFAULT
;
4591 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
4592 if (host_to_target_msqid_ds(ptr
,&dsarg
))
4593 return -TARGET_EFAULT
;
4596 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
4600 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
4601 if (host_to_target_msginfo(ptr
, &msginfo
))
4602 return -TARGET_EFAULT
;
4609 struct target_msgbuf
{
4614 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
4615 ssize_t msgsz
, int msgflg
)
4617 struct target_msgbuf
*target_mb
;
4618 struct msgbuf
*host_mb
;
4622 return -TARGET_EINVAL
;
4625 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
4626 return -TARGET_EFAULT
;
4627 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4629 unlock_user_struct(target_mb
, msgp
, 0);
4630 return -TARGET_ENOMEM
;
4632 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
4633 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
4634 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
4636 unlock_user_struct(target_mb
, msgp
, 0);
4641 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
4642 ssize_t msgsz
, abi_long msgtyp
,
4645 struct target_msgbuf
*target_mb
;
4647 struct msgbuf
*host_mb
;
4651 return -TARGET_EINVAL
;
4654 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
4655 return -TARGET_EFAULT
;
4657 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4659 ret
= -TARGET_ENOMEM
;
4662 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
4665 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
4666 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
4667 if (!target_mtext
) {
4668 ret
= -TARGET_EFAULT
;
4671 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
4672 unlock_user(target_mtext
, target_mtext_addr
, ret
);
4675 target_mb
->mtype
= tswapal(host_mb
->mtype
);
4679 unlock_user_struct(target_mb
, msgp
, 1);
4684 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
4685 abi_ulong target_addr
)
4687 struct target_shmid_ds
*target_sd
;
4689 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4690 return -TARGET_EFAULT
;
4691 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
4692 return -TARGET_EFAULT
;
4693 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4694 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4695 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4696 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4697 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4698 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4699 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4700 unlock_user_struct(target_sd
, target_addr
, 0);
4704 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
4705 struct shmid_ds
*host_sd
)
4707 struct target_shmid_ds
*target_sd
;
4709 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4710 return -TARGET_EFAULT
;
4711 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
4712 return -TARGET_EFAULT
;
4713 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4714 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4715 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4716 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4717 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4718 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4719 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4720 unlock_user_struct(target_sd
, target_addr
, 1);
4724 struct target_shminfo
{
4732 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
4733 struct shminfo
*host_shminfo
)
4735 struct target_shminfo
*target_shminfo
;
4736 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
4737 return -TARGET_EFAULT
;
4738 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
4739 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
4740 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
4741 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
4742 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
4743 unlock_user_struct(target_shminfo
, target_addr
, 1);
4747 struct target_shm_info
{
4752 abi_ulong swap_attempts
;
4753 abi_ulong swap_successes
;
4756 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
4757 struct shm_info
*host_shm_info
)
4759 struct target_shm_info
*target_shm_info
;
4760 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
4761 return -TARGET_EFAULT
;
4762 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
4763 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
4764 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
4765 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
4766 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
4767 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
4768 unlock_user_struct(target_shm_info
, target_addr
, 1);
4772 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
4774 struct shmid_ds dsarg
;
4775 struct shminfo shminfo
;
4776 struct shm_info shm_info
;
4777 abi_long ret
= -TARGET_EINVAL
;
4785 if (target_to_host_shmid_ds(&dsarg
, buf
))
4786 return -TARGET_EFAULT
;
4787 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
4788 if (host_to_target_shmid_ds(buf
, &dsarg
))
4789 return -TARGET_EFAULT
;
4792 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
4793 if (host_to_target_shminfo(buf
, &shminfo
))
4794 return -TARGET_EFAULT
;
4797 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
4798 if (host_to_target_shm_info(buf
, &shm_info
))
4799 return -TARGET_EFAULT
;
4804 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
4811 #ifndef TARGET_FORCE_SHMLBA
4812 /* For most architectures, SHMLBA is the same as the page size;
4813 * some architectures have larger values, in which case they should
4814 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4815 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4816 * and defining its own value for SHMLBA.
4818 * The kernel also permits SHMLBA to be set by the architecture to a
4819 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4820 * this means that addresses are rounded to the large size if
4821 * SHM_RND is set but addresses not aligned to that size are not rejected
4822 * as long as they are at least page-aligned. Since the only architecture
4823 * which uses this is ia64 this code doesn't provide for that oddity.
4825 static inline abi_ulong
target_shmlba(CPUArchState
*cpu_env
)
4827 return TARGET_PAGE_SIZE
;
4831 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
4832 int shmid
, abi_ulong shmaddr
, int shmflg
)
4836 struct shmid_ds shm_info
;
4840 /* find out the length of the shared memory segment */
4841 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4842 if (is_error(ret
)) {
4843 /* can't get length, bail out */
4847 shmlba
= target_shmlba(cpu_env
);
4849 if (shmaddr
& (shmlba
- 1)) {
4850 if (shmflg
& SHM_RND
) {
4851 shmaddr
&= ~(shmlba
- 1);
4853 return -TARGET_EINVAL
;
4860 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
4862 abi_ulong mmap_start
;
4864 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
4866 if (mmap_start
== -1) {
4868 host_raddr
= (void *)-1;
4870 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
4873 if (host_raddr
== (void *)-1) {
4875 return get_errno((long)host_raddr
);
4877 raddr
=h2g((unsigned long)host_raddr
);
4879 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
4880 PAGE_VALID
| PAGE_READ
|
4881 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
4883 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4884 if (!shm_regions
[i
].in_use
) {
4885 shm_regions
[i
].in_use
= true;
4886 shm_regions
[i
].start
= raddr
;
4887 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4897 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4901 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4902 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4903 shm_regions
[i
].in_use
= false;
4904 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
4909 return get_errno(shmdt(g2h(shmaddr
)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos.  */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    /* The multiplexed ipc(2) call packs an ABI version into the top
     * half of the call number. */
    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old-style msgrcv passes a pointer to this pair. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr)) {
                return get_errno(raddr);
            }
            if (put_user_ual(raddr, third)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;

    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
5019 /* kernel structure types definitions */
5021 #define STRUCT(name, ...) STRUCT_ ## name,
5022 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
5024 #include "syscall_types.h"
5028 #undef STRUCT_SPECIAL
5030 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
5031 #define STRUCT_SPECIAL(name)
5032 #include "syscall_types.h"
5034 #undef STRUCT_SPECIAL
5036 typedef struct IOCTLEntry IOCTLEntry
;
5038 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5039 int fd
, int cmd
, abi_long arg
);
5043 unsigned int host_cmd
;
5046 do_ioctl_fn
*do_ioctl
;
5047 const argtype arg_type
[5];
5050 #define IOC_R 0x0001
5051 #define IOC_W 0x0002
5052 #define IOC_RW (IOC_R | IOC_W)
5054 #define MAX_STRUCT_SIZE 4096
5056 #ifdef CONFIG_FIEMAP
5057 /* So fiemap access checks don't overflow on 32 bit systems.
5058 * This is very slightly smaller than the limit imposed by
5059 * the underlying kernel.
5061 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
5062 / sizeof(struct fiemap_extent))
5064 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5065 int fd
, int cmd
, abi_long arg
)
5067 /* The parameter for this ioctl is a struct fiemap followed
5068 * by an array of struct fiemap_extent whose size is set
5069 * in fiemap->fm_extent_count. The array is filled in by the
5072 int target_size_in
, target_size_out
;
5074 const argtype
*arg_type
= ie
->arg_type
;
5075 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
5078 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
5082 assert(arg_type
[0] == TYPE_PTR
);
5083 assert(ie
->access
== IOC_RW
);
5085 target_size_in
= thunk_type_size(arg_type
, 0);
5086 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
5088 return -TARGET_EFAULT
;
5090 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5091 unlock_user(argptr
, arg
, 0);
5092 fm
= (struct fiemap
*)buf_temp
;
5093 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
5094 return -TARGET_EINVAL
;
5097 outbufsz
= sizeof (*fm
) +
5098 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
5100 if (outbufsz
> MAX_STRUCT_SIZE
) {
5101 /* We can't fit all the extents into the fixed size buffer.
5102 * Allocate one that is large enough and use it instead.
5104 fm
= g_try_malloc(outbufsz
);
5106 return -TARGET_ENOMEM
;
5108 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
5111 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, fm
));
5112 if (!is_error(ret
)) {
5113 target_size_out
= target_size_in
;
5114 /* An extent_count of 0 means we were only counting the extents
5115 * so there are no structs to copy
5117 if (fm
->fm_extent_count
!= 0) {
5118 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
5120 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
5122 ret
= -TARGET_EFAULT
;
5124 /* Convert the struct fiemap */
5125 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
5126 if (fm
->fm_extent_count
!= 0) {
5127 p
= argptr
+ target_size_in
;
5128 /* ...and then all the struct fiemap_extents */
5129 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
5130 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
5135 unlock_user(argptr
, arg
, target_size_out
);
5145 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5146 int fd
, int cmd
, abi_long arg
)
5148 const argtype
*arg_type
= ie
->arg_type
;
5152 struct ifconf
*host_ifconf
;
5154 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
5155 int target_ifreq_size
;
5160 abi_long target_ifc_buf
;
5164 assert(arg_type
[0] == TYPE_PTR
);
5165 assert(ie
->access
== IOC_RW
);
5168 target_size
= thunk_type_size(arg_type
, 0);
5170 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5172 return -TARGET_EFAULT
;
5173 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5174 unlock_user(argptr
, arg
, 0);
5176 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
5177 target_ifc_len
= host_ifconf
->ifc_len
;
5178 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
5180 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
5181 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
5182 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
5184 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
5185 if (outbufsz
> MAX_STRUCT_SIZE
) {
5186 /* We can't fit all the extents into the fixed size buffer.
5187 * Allocate one that is large enough and use it instead.
5189 host_ifconf
= malloc(outbufsz
);
5191 return -TARGET_ENOMEM
;
5193 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
5196 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
5198 host_ifconf
->ifc_len
= host_ifc_len
;
5199 host_ifconf
->ifc_buf
= host_ifc_buf
;
5201 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
5202 if (!is_error(ret
)) {
5203 /* convert host ifc_len to target ifc_len */
5205 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
5206 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
5207 host_ifconf
->ifc_len
= target_ifc_len
;
5209 /* restore target ifc_buf */
5211 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
5213 /* copy struct ifconf to target user */
5215 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5217 return -TARGET_EFAULT
;
5218 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
5219 unlock_user(argptr
, arg
, target_size
);
5221 /* copy ifreq[] to target user */
5223 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
5224 for (i
= 0; i
< nb_ifreq
; i
++) {
5225 thunk_convert(argptr
+ i
* target_ifreq_size
,
5226 host_ifc_buf
+ i
* sizeof(struct ifreq
),
5227 ifreq_arg_type
, THUNK_TARGET
);
5229 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
5239 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5240 int cmd
, abi_long arg
)
5243 struct dm_ioctl
*host_dm
;
5244 abi_long guest_data
;
5245 uint32_t guest_data_size
;
5247 const argtype
*arg_type
= ie
->arg_type
;
5249 void *big_buf
= NULL
;
5253 target_size
= thunk_type_size(arg_type
, 0);
5254 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5256 ret
= -TARGET_EFAULT
;
5259 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5260 unlock_user(argptr
, arg
, 0);
5262 /* buf_temp is too small, so fetch things into a bigger buffer */
5263 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
5264 memcpy(big_buf
, buf_temp
, target_size
);
5268 guest_data
= arg
+ host_dm
->data_start
;
5269 if ((guest_data
- arg
) < 0) {
5270 ret
= -TARGET_EINVAL
;
5273 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5274 host_data
= (char*)host_dm
+ host_dm
->data_start
;
5276 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
5278 ret
= -TARGET_EFAULT
;
5282 switch (ie
->host_cmd
) {
5284 case DM_LIST_DEVICES
:
5287 case DM_DEV_SUSPEND
:
5290 case DM_TABLE_STATUS
:
5291 case DM_TABLE_CLEAR
:
5293 case DM_LIST_VERSIONS
:
5297 case DM_DEV_SET_GEOMETRY
:
5298 /* data contains only strings */
5299 memcpy(host_data
, argptr
, guest_data_size
);
5302 memcpy(host_data
, argptr
, guest_data_size
);
5303 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
5307 void *gspec
= argptr
;
5308 void *cur_data
= host_data
;
5309 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5310 int spec_size
= thunk_type_size(arg_type
, 0);
5313 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5314 struct dm_target_spec
*spec
= cur_data
;
5318 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
5319 slen
= strlen((char*)gspec
+ spec_size
) + 1;
5321 spec
->next
= sizeof(*spec
) + slen
;
5322 strcpy((char*)&spec
[1], gspec
+ spec_size
);
5324 cur_data
+= spec
->next
;
5329 ret
= -TARGET_EINVAL
;
5330 unlock_user(argptr
, guest_data
, 0);
5333 unlock_user(argptr
, guest_data
, 0);
5335 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5336 if (!is_error(ret
)) {
5337 guest_data
= arg
+ host_dm
->data_start
;
5338 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5339 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
5340 switch (ie
->host_cmd
) {
5345 case DM_DEV_SUSPEND
:
5348 case DM_TABLE_CLEAR
:
5350 case DM_DEV_SET_GEOMETRY
:
5351 /* no return data */
5353 case DM_LIST_DEVICES
:
5355 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
5356 uint32_t remaining_data
= guest_data_size
;
5357 void *cur_data
= argptr
;
5358 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
5359 int nl_size
= 12; /* can't use thunk_size due to alignment */
5362 uint32_t next
= nl
->next
;
5364 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
5366 if (remaining_data
< nl
->next
) {
5367 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5370 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
5371 strcpy(cur_data
+ nl_size
, nl
->name
);
5372 cur_data
+= nl
->next
;
5373 remaining_data
-= nl
->next
;
5377 nl
= (void*)nl
+ next
;
5382 case DM_TABLE_STATUS
:
5384 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
5385 void *cur_data
= argptr
;
5386 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5387 int spec_size
= thunk_type_size(arg_type
, 0);
5390 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5391 uint32_t next
= spec
->next
;
5392 int slen
= strlen((char*)&spec
[1]) + 1;
5393 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
5394 if (guest_data_size
< spec
->next
) {
5395 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5398 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
5399 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
5400 cur_data
= argptr
+ spec
->next
;
5401 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
5407 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
5408 int count
= *(uint32_t*)hdata
;
5409 uint64_t *hdev
= hdata
+ 8;
5410 uint64_t *gdev
= argptr
+ 8;
5413 *(uint32_t*)argptr
= tswap32(count
);
5414 for (i
= 0; i
< count
; i
++) {
5415 *gdev
= tswap64(*hdev
);
5421 case DM_LIST_VERSIONS
:
5423 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
5424 uint32_t remaining_data
= guest_data_size
;
5425 void *cur_data
= argptr
;
5426 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
5427 int vers_size
= thunk_type_size(arg_type
, 0);
5430 uint32_t next
= vers
->next
;
5432 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
5434 if (remaining_data
< vers
->next
) {
5435 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5438 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
5439 strcpy(cur_data
+ vers_size
, vers
->name
);
5440 cur_data
+= vers
->next
;
5441 remaining_data
-= vers
->next
;
5445 vers
= (void*)vers
+ next
;
5450 unlock_user(argptr
, guest_data
, 0);
5451 ret
= -TARGET_EINVAL
;
5454 unlock_user(argptr
, guest_data
, guest_data_size
);
5456 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5458 ret
= -TARGET_EFAULT
;
5461 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5462 unlock_user(argptr
, arg
, target_size
);
5469 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5470 int cmd
, abi_long arg
)
5474 const argtype
*arg_type
= ie
->arg_type
;
5475 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
5478 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
5479 struct blkpg_partition host_part
;
5481 /* Read and convert blkpg */
5483 target_size
= thunk_type_size(arg_type
, 0);
5484 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5486 ret
= -TARGET_EFAULT
;
5489 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5490 unlock_user(argptr
, arg
, 0);
5492 switch (host_blkpg
->op
) {
5493 case BLKPG_ADD_PARTITION
:
5494 case BLKPG_DEL_PARTITION
:
5495 /* payload is struct blkpg_partition */
5498 /* Unknown opcode */
5499 ret
= -TARGET_EINVAL
;
5503 /* Read and convert blkpg->data */
5504 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
5505 target_size
= thunk_type_size(part_arg_type
, 0);
5506 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5508 ret
= -TARGET_EFAULT
;
5511 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
5512 unlock_user(argptr
, arg
, 0);
5514 /* Swizzle the data pointer to our local copy and call! */
5515 host_blkpg
->data
= &host_part
;
5516 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
5522 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5523 int fd
, int cmd
, abi_long arg
)
5525 const argtype
*arg_type
= ie
->arg_type
;
5526 const StructEntry
*se
;
5527 const argtype
*field_types
;
5528 const int *dst_offsets
, *src_offsets
;
5531 abi_ulong
*target_rt_dev_ptr
;
5532 unsigned long *host_rt_dev_ptr
;
5536 assert(ie
->access
== IOC_W
);
5537 assert(*arg_type
== TYPE_PTR
);
5539 assert(*arg_type
== TYPE_STRUCT
);
5540 target_size
= thunk_type_size(arg_type
, 0);
5541 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5543 return -TARGET_EFAULT
;
5546 assert(*arg_type
== (int)STRUCT_rtentry
);
5547 se
= struct_entries
+ *arg_type
++;
5548 assert(se
->convert
[0] == NULL
);
5549 /* convert struct here to be able to catch rt_dev string */
5550 field_types
= se
->field_types
;
5551 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
5552 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
5553 for (i
= 0; i
< se
->nb_fields
; i
++) {
5554 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
5555 assert(*field_types
== TYPE_PTRVOID
);
5556 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
5557 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
5558 if (*target_rt_dev_ptr
!= 0) {
5559 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
5560 tswapal(*target_rt_dev_ptr
));
5561 if (!*host_rt_dev_ptr
) {
5562 unlock_user(argptr
, arg
, 0);
5563 return -TARGET_EFAULT
;
5566 *host_rt_dev_ptr
= 0;
5571 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
5572 argptr
+ src_offsets
[i
],
5573 field_types
, THUNK_HOST
);
5575 unlock_user(argptr
, arg
, 0);
5577 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5578 if (*host_rt_dev_ptr
!= 0) {
5579 unlock_user((void *)*host_rt_dev_ptr
,
5580 *target_rt_dev_ptr
, 0);
5585 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5586 int fd
, int cmd
, abi_long arg
)
5588 int sig
= target_to_host_signal(arg
);
5589 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
5592 static IOCTLEntry ioctl_entries
[] = {
5593 #define IOCTL(cmd, access, ...) \
5594 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5595 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5596 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5597 #define IOCTL_IGNORE(cmd) \
5598 { TARGET_ ## cmd, 0, #cmd },
5603 /* ??? Implement proper locking for ioctls. */
5604 /* do_ioctl() Must return target values and target errnos. */
5605 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
5607 const IOCTLEntry
*ie
;
5608 const argtype
*arg_type
;
5610 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
5616 if (ie
->target_cmd
== 0) {
5617 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
5618 return -TARGET_ENOSYS
;
5620 if (ie
->target_cmd
== cmd
)
5624 arg_type
= ie
->arg_type
;
5626 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
5629 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
5630 } else if (!ie
->host_cmd
) {
5631 /* Some architectures define BSD ioctls in their headers
5632 that are not implemented in Linux. */
5633 return -TARGET_ENOSYS
;
5636 switch(arg_type
[0]) {
5639 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
5643 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
5647 target_size
= thunk_type_size(arg_type
, 0);
5648 switch(ie
->access
) {
5650 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5651 if (!is_error(ret
)) {
5652 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5654 return -TARGET_EFAULT
;
5655 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5656 unlock_user(argptr
, arg
, target_size
);
5660 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5662 return -TARGET_EFAULT
;
5663 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5664 unlock_user(argptr
, arg
, 0);
5665 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5669 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5671 return -TARGET_EFAULT
;
5672 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5673 unlock_user(argptr
, arg
, 0);
5674 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5675 if (!is_error(ret
)) {
5676 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5678 return -TARGET_EFAULT
;
5679 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5680 unlock_user(argptr
, arg
, target_size
);
5686 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5687 (long)cmd
, arg_type
[0]);
5688 ret
= -TARGET_ENOSYS
;
5694 static const bitmask_transtbl iflag_tbl
[] = {
5695 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
5696 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
5697 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
5698 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
5699 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
5700 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
5701 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
5702 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
5703 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
5704 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
5705 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
5706 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
5707 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
5708 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
5712 static const bitmask_transtbl oflag_tbl
[] = {
5713 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
5714 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
5715 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
5716 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
5717 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
5718 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
5719 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
5720 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
5721 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
5722 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
5723 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
5724 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
5725 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
5726 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
5727 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
5728 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
5729 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
5730 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
5731 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
5732 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
5733 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
5734 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
5735 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
5736 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
5740 static const bitmask_transtbl cflag_tbl
[] = {
5741 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
5742 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
5743 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
5744 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
5745 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
5746 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
5747 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
5748 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
5749 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
5750 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
5751 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
5752 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
5753 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
5754 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
5755 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
5756 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
5757 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
5758 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
5759 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
5760 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
5761 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
5762 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
5763 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
5764 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
5765 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
5766 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
5767 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
5768 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
5769 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
5770 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
5771 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
5775 static const bitmask_transtbl lflag_tbl
[] = {
5776 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
5777 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
5778 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
5779 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
5780 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
5781 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
5782 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
5783 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
5784 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
5785 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
5786 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
5787 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
5788 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
5789 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
5790 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5794 static void target_to_host_termios (void *dst
, const void *src
)
5796 struct host_termios
*host
= dst
;
5797 const struct target_termios
*target
= src
;
5800 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
5802 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
5804 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
5806 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
5807 host
->c_line
= target
->c_line
;
5809 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
5810 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
5811 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
5812 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
5813 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
5814 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
5815 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
5816 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
5817 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
5818 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
5819 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
5820 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
5821 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
5822 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
5823 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
5824 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
5825 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
5826 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
5829 static void host_to_target_termios (void *dst
, const void *src
)
5831 struct target_termios
*target
= dst
;
5832 const struct host_termios
*host
= src
;
5835 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
5837 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
5839 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
5841 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
5842 target
->c_line
= host
->c_line
;
5844 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
5845 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
5846 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
5847 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
5848 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
5849 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
5850 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
5851 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
5852 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
5853 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
5854 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
5855 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
5856 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
5857 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
5858 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
5859 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
5860 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
5861 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
5864 static const StructEntry struct_termios_def
= {
5865 .convert
= { host_to_target_termios
, target_to_host_termios
},
5866 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
5867 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
5870 static bitmask_transtbl mmap_flags_tbl
[] = {
5871 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
5872 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
5873 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
5874 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
, MAP_ANONYMOUS
, MAP_ANONYMOUS
},
5875 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
, MAP_GROWSDOWN
, MAP_GROWSDOWN
},
5876 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
, MAP_DENYWRITE
, MAP_DENYWRITE
},
5877 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
, MAP_EXECUTABLE
, MAP_EXECUTABLE
},
5878 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
5879 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
, MAP_NORESERVE
,
5884 #if defined(TARGET_I386)
5886 /* NOTE: there is really one LDT for all the threads */
5887 static uint8_t *ldt_table
;
5889 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
5896 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
5897 if (size
> bytecount
)
5899 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
5901 return -TARGET_EFAULT
;
5902 /* ??? Should this by byteswapped? */
5903 memcpy(p
, ldt_table
, size
);
5904 unlock_user(p
, ptr
, size
);
5908 /* XXX: add locking support */
5909 static abi_long
write_ldt(CPUX86State
*env
,
5910 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
5912 struct target_modify_ldt_ldt_s ldt_info
;
5913 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5914 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5915 int seg_not_present
, useable
, lm
;
5916 uint32_t *lp
, entry_1
, entry_2
;
5918 if (bytecount
!= sizeof(ldt_info
))
5919 return -TARGET_EINVAL
;
5920 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
5921 return -TARGET_EFAULT
;
5922 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5923 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5924 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5925 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5926 unlock_user_struct(target_ldt_info
, ptr
, 0);
5928 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
5929 return -TARGET_EINVAL
;
5930 seg_32bit
= ldt_info
.flags
& 1;
5931 contents
= (ldt_info
.flags
>> 1) & 3;
5932 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5933 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5934 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5935 useable
= (ldt_info
.flags
>> 6) & 1;
5939 lm
= (ldt_info
.flags
>> 7) & 1;
5941 if (contents
== 3) {
5943 return -TARGET_EINVAL
;
5944 if (seg_not_present
== 0)
5945 return -TARGET_EINVAL
;
5947 /* allocate the LDT */
5949 env
->ldt
.base
= target_mmap(0,
5950 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
5951 PROT_READ
|PROT_WRITE
,
5952 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
5953 if (env
->ldt
.base
== -1)
5954 return -TARGET_ENOMEM
;
5955 memset(g2h(env
->ldt
.base
), 0,
5956 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
5957 env
->ldt
.limit
= 0xffff;
5958 ldt_table
= g2h(env
->ldt
.base
);
5961 /* NOTE: same code as Linux kernel */
5962 /* Allow LDTs to be cleared by the user. */
5963 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
5966 read_exec_only
== 1 &&
5968 limit_in_pages
== 0 &&
5969 seg_not_present
== 1 &&
5977 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
5978 (ldt_info
.limit
& 0x0ffff);
5979 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
5980 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
5981 (ldt_info
.limit
& 0xf0000) |
5982 ((read_exec_only
^ 1) << 9) |
5984 ((seg_not_present
^ 1) << 15) |
5986 (limit_in_pages
<< 23) |
5990 entry_2
|= (useable
<< 20);
5992 /* Install the new entry ... */
5994 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
5995 lp
[0] = tswap32(entry_1
);
5996 lp
[1] = tswap32(entry_2
);
6000 /* specific and weird i386 syscalls */
6001 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
6002 unsigned long bytecount
)
6008 ret
= read_ldt(ptr
, bytecount
);
6011 ret
= write_ldt(env
, ptr
, bytecount
, 1);
6014 ret
= write_ldt(env
, ptr
, bytecount
, 0);
6017 ret
= -TARGET_ENOSYS
;
6023 #if defined(TARGET_I386) && defined(TARGET_ABI32)
6024 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6026 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
6027 struct target_modify_ldt_ldt_s ldt_info
;
6028 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6029 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
6030 int seg_not_present
, useable
, lm
;
6031 uint32_t *lp
, entry_1
, entry_2
;
6034 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6035 if (!target_ldt_info
)
6036 return -TARGET_EFAULT
;
6037 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
6038 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
6039 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
6040 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
6041 if (ldt_info
.entry_number
== -1) {
6042 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
6043 if (gdt_table
[i
] == 0) {
6044 ldt_info
.entry_number
= i
;
6045 target_ldt_info
->entry_number
= tswap32(i
);
6050 unlock_user_struct(target_ldt_info
, ptr
, 1);
6052 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
6053 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
6054 return -TARGET_EINVAL
;
6055 seg_32bit
= ldt_info
.flags
& 1;
6056 contents
= (ldt_info
.flags
>> 1) & 3;
6057 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
6058 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
6059 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
6060 useable
= (ldt_info
.flags
>> 6) & 1;
6064 lm
= (ldt_info
.flags
>> 7) & 1;
6067 if (contents
== 3) {
6068 if (seg_not_present
== 0)
6069 return -TARGET_EINVAL
;
6072 /* NOTE: same code as Linux kernel */
6073 /* Allow LDTs to be cleared by the user. */
6074 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
6075 if ((contents
== 0 &&
6076 read_exec_only
== 1 &&
6078 limit_in_pages
== 0 &&
6079 seg_not_present
== 1 &&
6087 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
6088 (ldt_info
.limit
& 0x0ffff);
6089 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
6090 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
6091 (ldt_info
.limit
& 0xf0000) |
6092 ((read_exec_only
^ 1) << 9) |
6094 ((seg_not_present
^ 1) << 15) |
6096 (limit_in_pages
<< 23) |
6101 /* Install the new entry ... */
6103 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
6104 lp
[0] = tswap32(entry_1
);
6105 lp
[1] = tswap32(entry_2
);
6109 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6111 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6112 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
6113 uint32_t base_addr
, limit
, flags
;
6114 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
6115 int seg_not_present
, useable
, lm
;
6116 uint32_t *lp
, entry_1
, entry_2
;
6118 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6119 if (!target_ldt_info
)
6120 return -TARGET_EFAULT
;
6121 idx
= tswap32(target_ldt_info
->entry_number
);
6122 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
6123 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
6124 unlock_user_struct(target_ldt_info
, ptr
, 1);
6125 return -TARGET_EINVAL
;
6127 lp
= (uint32_t *)(gdt_table
+ idx
);
6128 entry_1
= tswap32(lp
[0]);
6129 entry_2
= tswap32(lp
[1]);
6131 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
6132 contents
= (entry_2
>> 10) & 3;
6133 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
6134 seg_32bit
= (entry_2
>> 22) & 1;
6135 limit_in_pages
= (entry_2
>> 23) & 1;
6136 useable
= (entry_2
>> 20) & 1;
6140 lm
= (entry_2
>> 21) & 1;
6142 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
6143 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
6144 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
6145 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
6146 base_addr
= (entry_1
>> 16) |
6147 (entry_2
& 0xff000000) |
6148 ((entry_2
& 0xff) << 16);
6149 target_ldt_info
->base_addr
= tswapal(base_addr
);
6150 target_ldt_info
->limit
= tswap32(limit
);
6151 target_ldt_info
->flags
= tswap32(flags
);
6152 unlock_user_struct(target_ldt_info
, ptr
, 1);
6155 #endif /* TARGET_I386 && TARGET_ABI32 */
6157 #ifndef TARGET_ABI32
6158 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
6165 case TARGET_ARCH_SET_GS
:
6166 case TARGET_ARCH_SET_FS
:
6167 if (code
== TARGET_ARCH_SET_GS
)
6171 cpu_x86_load_seg(env
, idx
, 0);
6172 env
->segs
[idx
].base
= addr
;
6174 case TARGET_ARCH_GET_GS
:
6175 case TARGET_ARCH_GET_FS
:
6176 if (code
== TARGET_ARCH_GET_GS
)
6180 val
= env
->segs
[idx
].base
;
6181 if (put_user(val
, addr
, abi_ulong
))
6182 ret
= -TARGET_EFAULT
;
6185 ret
= -TARGET_EINVAL
;
6192 #endif /* defined(TARGET_I386) */
6194 #define NEW_STACK_SIZE 0x40000
6197 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
6200 pthread_mutex_t mutex
;
6201 pthread_cond_t cond
;
6204 abi_ulong child_tidptr
;
6205 abi_ulong parent_tidptr
;
6209 static void *clone_func(void *arg
)
6211 new_thread_info
*info
= arg
;
6216 rcu_register_thread();
6218 cpu
= ENV_GET_CPU(env
);
6220 ts
= (TaskState
*)cpu
->opaque
;
6221 info
->tid
= gettid();
6223 if (info
->child_tidptr
)
6224 put_user_u32(info
->tid
, info
->child_tidptr
);
6225 if (info
->parent_tidptr
)
6226 put_user_u32(info
->tid
, info
->parent_tidptr
);
6227 /* Enable signals. */
6228 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
6229 /* Signal to the parent that we're ready. */
6230 pthread_mutex_lock(&info
->mutex
);
6231 pthread_cond_broadcast(&info
->cond
);
6232 pthread_mutex_unlock(&info
->mutex
);
6233 /* Wait until the parent has finshed initializing the tls state. */
6234 pthread_mutex_lock(&clone_lock
);
6235 pthread_mutex_unlock(&clone_lock
);
6241 /* do_fork() Must return host values and target errnos (unlike most
6242 do_*() functions). */
6243 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
6244 abi_ulong parent_tidptr
, target_ulong newtls
,
6245 abi_ulong child_tidptr
)
6247 CPUState
*cpu
= ENV_GET_CPU(env
);
6251 CPUArchState
*new_env
;
6254 flags
&= ~CLONE_IGNORED_FLAGS
;
6256 /* Emulate vfork() with fork() */
6257 if (flags
& CLONE_VFORK
)
6258 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
6260 if (flags
& CLONE_VM
) {
6261 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
6262 new_thread_info info
;
6263 pthread_attr_t attr
;
6265 if (((flags
& CLONE_THREAD_FLAGS
) != CLONE_THREAD_FLAGS
) ||
6266 (flags
& CLONE_INVALID_THREAD_FLAGS
)) {
6267 return -TARGET_EINVAL
;
6270 ts
= g_new0(TaskState
, 1);
6271 init_task_state(ts
);
6272 /* we create a new CPU instance. */
6273 new_env
= cpu_copy(env
);
6274 /* Init regs that differ from the parent. */
6275 cpu_clone_regs(new_env
, newsp
);
6276 new_cpu
= ENV_GET_CPU(new_env
);
6277 new_cpu
->opaque
= ts
;
6278 ts
->bprm
= parent_ts
->bprm
;
6279 ts
->info
= parent_ts
->info
;
6280 ts
->signal_mask
= parent_ts
->signal_mask
;
6282 if (flags
& CLONE_CHILD_CLEARTID
) {
6283 ts
->child_tidptr
= child_tidptr
;
6286 if (flags
& CLONE_SETTLS
) {
6287 cpu_set_tls (new_env
, newtls
);
6290 /* Grab a mutex so that thread setup appears atomic. */
6291 pthread_mutex_lock(&clone_lock
);
6293 memset(&info
, 0, sizeof(info
));
6294 pthread_mutex_init(&info
.mutex
, NULL
);
6295 pthread_mutex_lock(&info
.mutex
);
6296 pthread_cond_init(&info
.cond
, NULL
);
6298 if (flags
& CLONE_CHILD_SETTID
) {
6299 info
.child_tidptr
= child_tidptr
;
6301 if (flags
& CLONE_PARENT_SETTID
) {
6302 info
.parent_tidptr
= parent_tidptr
;
6305 ret
= pthread_attr_init(&attr
);
6306 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
6307 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
6308 /* It is not safe to deliver signals until the child has finished
6309 initializing, so temporarily block all signals. */
6310 sigfillset(&sigmask
);
6311 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
6313 /* If this is our first additional thread, we need to ensure we
6314 * generate code for parallel execution and flush old translations.
6316 if (!parallel_cpus
) {
6317 parallel_cpus
= true;
6321 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
6322 /* TODO: Free new CPU state if thread creation failed. */
6324 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
6325 pthread_attr_destroy(&attr
);
6327 /* Wait for the child to initialize. */
6328 pthread_cond_wait(&info
.cond
, &info
.mutex
);
6333 pthread_mutex_unlock(&info
.mutex
);
6334 pthread_cond_destroy(&info
.cond
);
6335 pthread_mutex_destroy(&info
.mutex
);
6336 pthread_mutex_unlock(&clone_lock
);
6338 /* if no CLONE_VM, we consider it is a fork */
6339 if (flags
& CLONE_INVALID_FORK_FLAGS
) {
6340 return -TARGET_EINVAL
;
6343 /* We can't support custom termination signals */
6344 if ((flags
& CSIGNAL
) != TARGET_SIGCHLD
) {
6345 return -TARGET_EINVAL
;
6348 if (block_signals()) {
6349 return -TARGET_ERESTARTSYS
;
6355 /* Child Process. */
6357 cpu_clone_regs(env
, newsp
);
6359 /* There is a race condition here. The parent process could
6360 theoretically read the TID in the child process before the child
6361 tid is set. This would require using either ptrace
6362 (not implemented) or having *_tidptr to point at a shared memory
6363 mapping. We can't repeat the spinlock hack used above because
6364 the child process gets its own copy of the lock. */
6365 if (flags
& CLONE_CHILD_SETTID
)
6366 put_user_u32(gettid(), child_tidptr
);
6367 if (flags
& CLONE_PARENT_SETTID
)
6368 put_user_u32(gettid(), parent_tidptr
);
6369 ts
= (TaskState
*)cpu
->opaque
;
6370 if (flags
& CLONE_SETTLS
)
6371 cpu_set_tls (env
, newtls
);
6372 if (flags
& CLONE_CHILD_CLEARTID
)
6373 ts
->child_tidptr
= child_tidptr
;
6381 /* warning : doesn't handle linux specific flags... */
6382 static int target_to_host_fcntl_cmd(int cmd
)
6385 case TARGET_F_DUPFD
:
6386 case TARGET_F_GETFD
:
6387 case TARGET_F_SETFD
:
6388 case TARGET_F_GETFL
:
6389 case TARGET_F_SETFL
:
6391 case TARGET_F_GETLK
:
6393 case TARGET_F_SETLK
:
6395 case TARGET_F_SETLKW
:
6397 case TARGET_F_GETOWN
:
6399 case TARGET_F_SETOWN
:
6401 case TARGET_F_GETSIG
:
6403 case TARGET_F_SETSIG
:
6405 #if TARGET_ABI_BITS == 32
6406 case TARGET_F_GETLK64
:
6408 case TARGET_F_SETLK64
:
6410 case TARGET_F_SETLKW64
:
6413 case TARGET_F_SETLEASE
:
6415 case TARGET_F_GETLEASE
:
6417 #ifdef F_DUPFD_CLOEXEC
6418 case TARGET_F_DUPFD_CLOEXEC
:
6419 return F_DUPFD_CLOEXEC
;
6421 case TARGET_F_NOTIFY
:
6424 case TARGET_F_GETOWN_EX
:
6428 case TARGET_F_SETOWN_EX
:
6432 case TARGET_F_SETPIPE_SZ
:
6433 return F_SETPIPE_SZ
;
6434 case TARGET_F_GETPIPE_SZ
:
6435 return F_GETPIPE_SZ
;
6438 return -TARGET_EINVAL
;
6440 return -TARGET_EINVAL
;
6443 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
6444 static const bitmask_transtbl flock_tbl
[] = {
6445 TRANSTBL_CONVERT(F_RDLCK
),
6446 TRANSTBL_CONVERT(F_WRLCK
),
6447 TRANSTBL_CONVERT(F_UNLCK
),
6448 TRANSTBL_CONVERT(F_EXLCK
),
6449 TRANSTBL_CONVERT(F_SHLCK
),
6453 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
6454 abi_ulong target_flock_addr
)
6456 struct target_flock
*target_fl
;
6459 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6460 return -TARGET_EFAULT
;
6463 __get_user(l_type
, &target_fl
->l_type
);
6464 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6465 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6466 __get_user(fl
->l_start
, &target_fl
->l_start
);
6467 __get_user(fl
->l_len
, &target_fl
->l_len
);
6468 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6469 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6473 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
6474 const struct flock64
*fl
)
6476 struct target_flock
*target_fl
;
6479 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6480 return -TARGET_EFAULT
;
6483 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6484 __put_user(l_type
, &target_fl
->l_type
);
6485 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6486 __put_user(fl
->l_start
, &target_fl
->l_start
);
6487 __put_user(fl
->l_len
, &target_fl
->l_len
);
6488 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6489 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6493 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
6494 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
6496 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6497 static inline abi_long
copy_from_user_eabi_flock64(struct flock64
*fl
,
6498 abi_ulong target_flock_addr
)
6500 struct target_eabi_flock64
*target_fl
;
6503 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6504 return -TARGET_EFAULT
;
6507 __get_user(l_type
, &target_fl
->l_type
);
6508 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6509 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6510 __get_user(fl
->l_start
, &target_fl
->l_start
);
6511 __get_user(fl
->l_len
, &target_fl
->l_len
);
6512 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6513 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6517 static inline abi_long
copy_to_user_eabi_flock64(abi_ulong target_flock_addr
,
6518 const struct flock64
*fl
)
6520 struct target_eabi_flock64
*target_fl
;
6523 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6524 return -TARGET_EFAULT
;
6527 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6528 __put_user(l_type
, &target_fl
->l_type
);
6529 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6530 __put_user(fl
->l_start
, &target_fl
->l_start
);
6531 __put_user(fl
->l_len
, &target_fl
->l_len
);
6532 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6533 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6538 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
6539 abi_ulong target_flock_addr
)
6541 struct target_flock64
*target_fl
;
6544 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6545 return -TARGET_EFAULT
;
6548 __get_user(l_type
, &target_fl
->l_type
);
6549 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6550 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6551 __get_user(fl
->l_start
, &target_fl
->l_start
);
6552 __get_user(fl
->l_len
, &target_fl
->l_len
);
6553 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6554 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6558 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
6559 const struct flock64
*fl
)
6561 struct target_flock64
*target_fl
;
6564 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6565 return -TARGET_EFAULT
;
6568 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6569 __put_user(l_type
, &target_fl
->l_type
);
6570 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6571 __put_user(fl
->l_start
, &target_fl
->l_start
);
6572 __put_user(fl
->l_len
, &target_fl
->l_len
);
6573 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6574 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6578 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
6580 struct flock64 fl64
;
6582 struct f_owner_ex fox
;
6583 struct target_f_owner_ex
*target_fox
;
6586 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
6588 if (host_cmd
== -TARGET_EINVAL
)
6592 case TARGET_F_GETLK
:
6593 ret
= copy_from_user_flock(&fl64
, arg
);
6597 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6599 ret
= copy_to_user_flock(arg
, &fl64
);
6603 case TARGET_F_SETLK
:
6604 case TARGET_F_SETLKW
:
6605 ret
= copy_from_user_flock(&fl64
, arg
);
6609 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6612 case TARGET_F_GETLK64
:
6613 ret
= copy_from_user_flock64(&fl64
, arg
);
6617 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6619 ret
= copy_to_user_flock64(arg
, &fl64
);
6622 case TARGET_F_SETLK64
:
6623 case TARGET_F_SETLKW64
:
6624 ret
= copy_from_user_flock64(&fl64
, arg
);
6628 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6631 case TARGET_F_GETFL
:
6632 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6634 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
6638 case TARGET_F_SETFL
:
6639 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
6640 target_to_host_bitmask(arg
,
6645 case TARGET_F_GETOWN_EX
:
6646 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6648 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
6649 return -TARGET_EFAULT
;
6650 target_fox
->type
= tswap32(fox
.type
);
6651 target_fox
->pid
= tswap32(fox
.pid
);
6652 unlock_user_struct(target_fox
, arg
, 1);
6658 case TARGET_F_SETOWN_EX
:
6659 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
6660 return -TARGET_EFAULT
;
6661 fox
.type
= tswap32(target_fox
->type
);
6662 fox
.pid
= tswap32(target_fox
->pid
);
6663 unlock_user_struct(target_fox
, arg
, 0);
6664 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6668 case TARGET_F_SETOWN
:
6669 case TARGET_F_GETOWN
:
6670 case TARGET_F_SETSIG
:
6671 case TARGET_F_GETSIG
:
6672 case TARGET_F_SETLEASE
:
6673 case TARGET_F_GETLEASE
:
6674 case TARGET_F_SETPIPE_SZ
:
6675 case TARGET_F_GETPIPE_SZ
:
6676 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6680 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
/* 16-bit UID/GID helpers (USE_UID16 build): clamp 32-bit host ids into
 * the guest's 16-bit range, and widen guest 16-bit ids preserving the
 * -1 "no change" sentinel. */
static inline int high2lowuid(int uid)
{
    return (uid > 65535) ? 65535 : uid;
}

static inline int high2lowgid(int gid)
{
    return (gid > 65535) ? 65535 : gid;
}

static inline int low2highuid(int uid)
{
    return ((int16_t)uid == -1) ? -1 : uid;
}

static inline int low2highgid(int gid)
{
    return ((int16_t)gid == -1) ? -1 : gid;
}

static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6726 #else /* !USE_UID16 */
6727 static inline int high2lowuid(int uid
)
6731 static inline int high2lowgid(int gid
)
6735 static inline int low2highuid(int uid
)
6739 static inline int low2highgid(int gid
)
6743 static inline int tswapid(int id
)
6748 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6750 #endif /* USE_UID16 */
6752 /* We must do direct syscalls for setting UID/GID, because we want to
6753 * implement the Linux system call semantics of "change only for this thread",
6754 * not the libc/POSIX semantics of "change for all threads in process".
6755 * (See http://ewontfix.com/17/ for more details.)
6756 * We use the 32-bit version of the syscalls if present; if it is not
6757 * then either the host architecture supports 32-bit UIDs natively with
6758 * the standard syscall, or the 16-bit UID is the best we can do.
6760 #ifdef __NR_setuid32
6761 #define __NR_sys_setuid __NR_setuid32
6763 #define __NR_sys_setuid __NR_setuid
6765 #ifdef __NR_setgid32
6766 #define __NR_sys_setgid __NR_setgid32
6768 #define __NR_sys_setgid __NR_setgid
6770 #ifdef __NR_setresuid32
6771 #define __NR_sys_setresuid __NR_setresuid32
6773 #define __NR_sys_setresuid __NR_setresuid
6775 #ifdef __NR_setresgid32
6776 #define __NR_sys_setresgid __NR_setresgid32
6778 #define __NR_sys_setresgid __NR_setresgid
6781 _syscall1(int, sys_setuid
, uid_t
, uid
)
6782 _syscall1(int, sys_setgid
, gid_t
, gid
)
6783 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
6784 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
6786 void syscall_init(void)
6789 const argtype
*arg_type
;
6793 thunk_init(STRUCT_MAX
);
6795 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6796 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6797 #include "syscall_types.h"
6799 #undef STRUCT_SPECIAL
6801 /* Build target_to_host_errno_table[] table from
6802 * host_to_target_errno_table[]. */
6803 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
6804 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
6807 /* we patch the ioctl size if necessary. We rely on the fact that
6808 no ioctl has all the bits at '1' in the size field */
6810 while (ie
->target_cmd
!= 0) {
6811 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
6812 TARGET_IOC_SIZEMASK
) {
6813 arg_type
= ie
->arg_type
;
6814 if (arg_type
[0] != TYPE_PTR
) {
6815 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
6820 size
= thunk_type_size(arg_type
, 0);
6821 ie
->target_cmd
= (ie
->target_cmd
&
6822 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
6823 (size
<< TARGET_IOC_SIZESHIFT
);
6826 /* automatic consistency check if same arch */
6827 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6828 (defined(__x86_64__) && defined(TARGET_X86_64))
6829 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
6830 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6831 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#if TARGET_ABI_BITS == 32
/* Combine the two 32-bit register halves of a 64-bit syscall argument,
 * honouring the guest's endianness for register pairs. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABI: the value already arrives whole; the second word is unused. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* truncate64: reassemble the 64-bit length from the guest's register
 * pair, skipping the alignment padding register on ABIs that need it. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair reassembly as target_truncate64. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
6882 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
6883 abi_ulong target_addr
)
6885 struct target_timespec
*target_ts
;
6887 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
6888 return -TARGET_EFAULT
;
6889 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6890 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6891 unlock_user_struct(target_ts
, target_addr
, 0);
6895 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
6896 struct timespec
*host_ts
)
6898 struct target_timespec
*target_ts
;
6900 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
6901 return -TARGET_EFAULT
;
6902 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6903 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6904 unlock_user_struct(target_ts
, target_addr
, 1);
6908 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
6909 abi_ulong target_addr
)
6911 struct target_itimerspec
*target_itspec
;
6913 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
6914 return -TARGET_EFAULT
;
6917 host_itspec
->it_interval
.tv_sec
=
6918 tswapal(target_itspec
->it_interval
.tv_sec
);
6919 host_itspec
->it_interval
.tv_nsec
=
6920 tswapal(target_itspec
->it_interval
.tv_nsec
);
6921 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
6922 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
6924 unlock_user_struct(target_itspec
, target_addr
, 1);
6928 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
6929 struct itimerspec
*host_its
)
6931 struct target_itimerspec
*target_itspec
;
6933 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
6934 return -TARGET_EFAULT
;
6937 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
6938 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
6940 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
6941 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
6943 unlock_user_struct(target_itspec
, target_addr
, 0);
6947 static inline abi_long
target_to_host_timex(struct timex
*host_tx
,
6948 abi_long target_addr
)
6950 struct target_timex
*target_tx
;
6952 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
6953 return -TARGET_EFAULT
;
6956 __get_user(host_tx
->modes
, &target_tx
->modes
);
6957 __get_user(host_tx
->offset
, &target_tx
->offset
);
6958 __get_user(host_tx
->freq
, &target_tx
->freq
);
6959 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6960 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
6961 __get_user(host_tx
->status
, &target_tx
->status
);
6962 __get_user(host_tx
->constant
, &target_tx
->constant
);
6963 __get_user(host_tx
->precision
, &target_tx
->precision
);
6964 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
6965 __get_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
6966 __get_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
6967 __get_user(host_tx
->tick
, &target_tx
->tick
);
6968 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
6969 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
6970 __get_user(host_tx
->shift
, &target_tx
->shift
);
6971 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
6972 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
6973 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
6974 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
6975 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
6976 __get_user(host_tx
->tai
, &target_tx
->tai
);
6978 unlock_user_struct(target_tx
, target_addr
, 0);
6982 static inline abi_long
host_to_target_timex(abi_long target_addr
,
6983 struct timex
*host_tx
)
6985 struct target_timex
*target_tx
;
6987 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
6988 return -TARGET_EFAULT
;
6991 __put_user(host_tx
->modes
, &target_tx
->modes
);
6992 __put_user(host_tx
->offset
, &target_tx
->offset
);
6993 __put_user(host_tx
->freq
, &target_tx
->freq
);
6994 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
6995 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
6996 __put_user(host_tx
->status
, &target_tx
->status
);
6997 __put_user(host_tx
->constant
, &target_tx
->constant
);
6998 __put_user(host_tx
->precision
, &target_tx
->precision
);
6999 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7000 __put_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
7001 __put_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
7002 __put_user(host_tx
->tick
, &target_tx
->tick
);
7003 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7004 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
7005 __put_user(host_tx
->shift
, &target_tx
->shift
);
7006 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
7007 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7008 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7009 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7010 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7011 __put_user(host_tx
->tai
, &target_tx
->tai
);
7013 unlock_user_struct(target_tx
, target_addr
, 1);
7018 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
7019 abi_ulong target_addr
)
7021 struct target_sigevent
*target_sevp
;
7023 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
7024 return -TARGET_EFAULT
;
7027 /* This union is awkward on 64 bit systems because it has a 32 bit
7028 * integer and a pointer in it; we follow the conversion approach
7029 * used for handling sigval types in signal.c so the guest should get
7030 * the correct value back even if we did a 64 bit byteswap and it's
7031 * using the 32 bit integer.
7033 host_sevp
->sigev_value
.sival_ptr
=
7034 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
7035 host_sevp
->sigev_signo
=
7036 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
7037 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
7038 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
7040 unlock_user_struct(target_sevp
, target_addr
, 1);
#if defined(TARGET_NR_mlockall)
/* Translate the guest's mlockall(2) flag bits into host MCL_* flags. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int result = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        result |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        result |= MCL_FUTURE;
    }

    return result;
}
#endif
7059 static inline abi_long
host_to_target_stat64(void *cpu_env
,
7060 abi_ulong target_addr
,
7061 struct stat
*host_st
)
7063 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7064 if (((CPUARMState
*)cpu_env
)->eabi
) {
7065 struct target_eabi_stat64
*target_st
;
7067 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
7068 return -TARGET_EFAULT
;
7069 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
7070 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
7071 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
7072 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7073 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
7075 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
7076 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
7077 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
7078 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
7079 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
7080 __put_user(host_st
->st_size
, &target_st
->st_size
);
7081 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
7082 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
7083 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
7084 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
7085 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
7086 unlock_user_struct(target_st
, target_addr
, 1);
7090 #if defined(TARGET_HAS_STRUCT_STAT64)
7091 struct target_stat64
*target_st
;
7093 struct target_stat
*target_st
;
7096 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
7097 return -TARGET_EFAULT
;
7098 memset(target_st
, 0, sizeof(*target_st
));
7099 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
7100 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
7101 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7102 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
7104 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
7105 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
7106 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
7107 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
7108 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
7109 /* XXX: better use of kernel struct */
7110 __put_user(host_st
->st_size
, &target_st
->st_size
);
7111 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
7112 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
7113 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
7114 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
7115 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
7116 unlock_user_struct(target_st
, target_addr
, 1);
7122 /* ??? Using host futex calls even when target atomic operations
7123 are not really atomic probably breaks things. However implementing
7124 futexes locally would make futexes shared between multiple processes
7125 tricky. However they're probably useless because guest atomic
7126 operations won't work either. */
7127 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
7128 target_ulong uaddr2
, int val3
)
7130 struct timespec ts
, *pts
;
7133 /* ??? We assume FUTEX_* constants are the same on both host
7135 #ifdef FUTEX_CMD_MASK
7136 base_op
= op
& FUTEX_CMD_MASK
;
7142 case FUTEX_WAIT_BITSET
:
7145 target_to_host_timespec(pts
, timeout
);
7149 return get_errno(safe_futex(g2h(uaddr
), op
, tswap32(val
),
7152 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
7154 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
7156 case FUTEX_CMP_REQUEUE
:
7158 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7159 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7160 But the prototype takes a `struct timespec *'; insert casts
7161 to satisfy the compiler. We do not need to tswap TIMEOUT
7162 since it's not compared to guest memory. */
7163 pts
= (struct timespec
*)(uintptr_t) timeout
;
7164 return get_errno(safe_futex(g2h(uaddr
), op
, val
, pts
,
7166 (base_op
== FUTEX_CMP_REQUEUE
7170 return -TARGET_ENOSYS
;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate name_to_handle_at(2): size the handle from the guest's
 * handle_bytes field, call the host syscall into a private buffer, then
 * copy the (opaque) handle and mount id back to guest memory. */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate open_by_handle_at(2): copy the guest's file_handle into a
 * host buffer (fixing up byte order of the header fields) and open it. */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     int flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
7261 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
/* signalfd siginfo conversion */

/*
 * Byte-swap a struct signalfd_siginfo from host to target order and remap
 * the host signal number to the target numbering.  The caller invokes this
 * with the same buffer for tinfo and info, i.e. the swap is done in place.
 */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    if (tinfo->ssi_signo == SIGBUS &&
        (tinfo->ssi_code == BUS_MCEERR_AR ||
         tinfo->ssi_code == BUS_MCEERR_AO)) {
        /* ssi_addr_lsb lives immediately after ssi_addr in the record */
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    tinfo->ssi_errno = tswap32(tinfo->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
7303 static abi_long
host_to_target_data_signalfd(void *buf
, size_t len
)
7307 for (i
= 0; i
< len
; i
+= sizeof(struct signalfd_siginfo
)) {
7308 host_to_target_signalfd_siginfo(buf
+ i
, buf
+ i
);
7314 static TargetFdTrans target_signalfd_trans
= {
7315 .host_to_target_data
= host_to_target_data_signalfd
,
7318 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
7321 target_sigset_t
*target_mask
;
7325 if (flags
& ~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
)) {
7326 return -TARGET_EINVAL
;
7328 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
7329 return -TARGET_EFAULT
;
7332 target_to_host_sigset(&host_mask
, target_mask
);
7334 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
7336 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
7338 fd_trans_register(ret
, &target_signalfd_trans
);
7341 unlock_user_struct(target_mask
, mask
, 0);
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* remap the terminating signal, keep the remaining bits */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* stop signal occupies bits 8..15 of the status word */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
7361 static int open_self_cmdline(void *cpu_env
, int fd
)
7363 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7364 struct linux_binprm
*bprm
= ((TaskState
*)cpu
->opaque
)->bprm
;
7367 for (i
= 0; i
< bprm
->argc
; i
++) {
7368 size_t len
= strlen(bprm
->argv
[i
]) + 1;
7370 if (write(fd
, bprm
->argv
[i
], len
) != len
) {
7378 static int open_self_maps(void *cpu_env
, int fd
)
7380 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7381 TaskState
*ts
= cpu
->opaque
;
7387 fp
= fopen("/proc/self/maps", "r");
7392 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7393 int fields
, dev_maj
, dev_min
, inode
;
7394 uint64_t min
, max
, offset
;
7395 char flag_r
, flag_w
, flag_x
, flag_p
;
7396 char path
[512] = "";
7397 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
7398 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
7399 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
7401 if ((fields
< 10) || (fields
> 11)) {
7404 if (h2g_valid(min
)) {
7405 int flags
= page_get_flags(h2g(min
));
7406 max
= h2g_valid(max
- 1) ? max
: (uintptr_t)g2h(GUEST_ADDR_MAX
);
7407 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
7410 if (h2g(min
) == ts
->info
->stack_limit
) {
7411 pstrcpy(path
, sizeof(path
), " [stack]");
7413 dprintf(fd
, TARGET_ABI_FMT_lx
"-" TARGET_ABI_FMT_lx
7414 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
7415 h2g(min
), h2g(max
- 1) + 1, flag_r
, flag_w
,
7416 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
7417 path
[0] ? " " : "", path
);
7427 static int open_self_stat(void *cpu_env
, int fd
)
7429 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7430 TaskState
*ts
= cpu
->opaque
;
7431 abi_ulong start_stack
= ts
->info
->start_stack
;
7434 for (i
= 0; i
< 44; i
++) {
7442 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
7443 } else if (i
== 1) {
7445 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
7446 } else if (i
== 27) {
7449 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
7451 /* for the rest, there is MasterCard */
7452 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
7456 if (write(fd
, buf
, len
) != len
) {
7464 static int open_self_auxv(void *cpu_env
, int fd
)
7466 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7467 TaskState
*ts
= cpu
->opaque
;
7468 abi_ulong auxv
= ts
->info
->saved_auxv
;
7469 abi_ulong len
= ts
->info
->auxv_len
;
7473 * Auxiliary vector is stored in target process stack.
7474 * read in whole auxv vector and copy it to file
7476 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
7480 r
= write(fd
, ptr
, len
);
7487 lseek(fd
, 0, SEEK_SET
);
7488 unlock_user(ptr
, auxv
, len
);
/*
 * Return nonzero when 'filename' names the given /proc entry of the
 * current process, i.e. "/proc/self/<entry>" or "/proc/<our-pid>/<entry>".
 * Anything else (other pids, other paths) returns 0.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    const char *rest = filename;

    if (strncmp(rest, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    rest += strlen("/proc/");

    if (strncmp(rest, "self/", strlen("self/")) == 0) {
        rest += strlen("self/");
    } else if (*rest >= '1' && *rest <= '9') {
        char pid_prefix[80];

        /* numeric form: only our own pid counts as "myself" */
        snprintf(pid_prefix, sizeof(pid_prefix), "%d/", getpid());
        if (strncmp(rest, pid_prefix, strlen(pid_prefix)) != 0) {
            return 0;
        }
        rest += strlen(pid_prefix);
    } else {
        return 0;
    }

    return strcmp(rest, entry) == 0;
}
7518 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-match comparator used by the fake_open table for absolute paths. */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
7524 static int open_net_route(void *cpu_env
, int fd
)
7531 fp
= fopen("/proc/net/route", "r");
7538 read
= getline(&line
, &len
, fp
);
7539 dprintf(fd
, "%s", line
);
7543 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7545 uint32_t dest
, gw
, mask
;
7546 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
7547 sscanf(line
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7548 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
7549 &mask
, &mtu
, &window
, &irtt
);
7550 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7551 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
7552 metric
, tswap32(mask
), mtu
, window
, irtt
);
7562 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
7565 const char *filename
;
7566 int (*fill
)(void *cpu_env
, int fd
);
7567 int (*cmp
)(const char *s1
, const char *s2
);
7569 const struct fake_open
*fake_open
;
7570 static const struct fake_open fakes
[] = {
7571 { "maps", open_self_maps
, is_proc_myself
},
7572 { "stat", open_self_stat
, is_proc_myself
},
7573 { "auxv", open_self_auxv
, is_proc_myself
},
7574 { "cmdline", open_self_cmdline
, is_proc_myself
},
7575 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7576 { "/proc/net/route", open_net_route
, is_proc
},
7578 { NULL
, NULL
, NULL
}
7581 if (is_proc_myself(pathname
, "exe")) {
7582 int execfd
= qemu_getauxval(AT_EXECFD
);
7583 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
7586 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
7587 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
7592 if (fake_open
->filename
) {
7594 char filename
[PATH_MAX
];
7597 /* create temporary file to map stat to */
7598 tmpdir
= getenv("TMPDIR");
7601 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
7602 fd
= mkstemp(filename
);
7608 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
7614 lseek(fd
, 0, SEEK_SET
);
7619 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
7622 #define TIMER_MAGIC 0x0caf0000
7623 #define TIMER_MAGIC_MASK 0xffff0000
7625 /* Convert QEMU provided timer ID back to internal 16bit index format */
7626 static target_timer_t
get_timer_id(abi_long arg
)
7628 target_timer_t timerid
= arg
;
7630 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
7631 return -TARGET_EINVAL
;
7636 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
7637 return -TARGET_EINVAL
;
7643 static abi_long
swap_data_eventfd(void *buf
, size_t len
)
7645 uint64_t *counter
= buf
;
7648 if (len
< sizeof(uint64_t)) {
7652 for (i
= 0; i
< len
; i
+= sizeof(uint64_t)) {
7653 *counter
= tswap64(*counter
);
7660 static TargetFdTrans target_eventfd_trans
= {
7661 .host_to_target_data
= swap_data_eventfd
,
7662 .target_to_host_data
= swap_data_eventfd
,
7665 #if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
7666 (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
7667 defined(__NR_inotify_init1))
7668 static abi_long
host_to_target_data_inotify(void *buf
, size_t len
)
7670 struct inotify_event
*ev
;
7674 for (i
= 0; i
< len
; i
+= sizeof(struct inotify_event
) + name_len
) {
7675 ev
= (struct inotify_event
*)((char *)buf
+ i
);
7678 ev
->wd
= tswap32(ev
->wd
);
7679 ev
->mask
= tswap32(ev
->mask
);
7680 ev
->cookie
= tswap32(ev
->cookie
);
7681 ev
->len
= tswap32(name_len
);
7687 static TargetFdTrans target_inotify_trans
= {
7688 .host_to_target_data
= host_to_target_data_inotify
,
7692 /* do_syscall() should always have a single exit point at the end so
7693 that actions, such as logging of syscall results, can be performed.
7694 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7695 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
7696 abi_long arg2
, abi_long arg3
, abi_long arg4
,
7697 abi_long arg5
, abi_long arg6
, abi_long arg7
,
7700 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
7706 #if defined(DEBUG_ERESTARTSYS)
7707 /* Debug-only code for exercising the syscall-restart code paths
7708 * in the per-architecture cpu main loops: restart every syscall
7709 * the guest makes once before letting it through.
7716 return -TARGET_ERESTARTSYS
;
7722 gemu_log("syscall %d", num
);
7724 trace_guest_user_syscall(cpu
, num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
7726 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7729 case TARGET_NR_exit
:
7730 /* In old applications this may be used to implement _exit(2).
7731 However in threaded applictions it is used for thread termination,
7732 and _exit_group is used for application termination.
7733 Do thread termination if we have more then one thread. */
7735 if (block_signals()) {
7736 ret
= -TARGET_ERESTARTSYS
;
7742 if (CPU_NEXT(first_cpu
)) {
7745 /* Remove the CPU from the list. */
7746 QTAILQ_REMOVE(&cpus
, cpu
, node
);
7751 if (ts
->child_tidptr
) {
7752 put_user_u32(0, ts
->child_tidptr
);
7753 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
7757 object_unref(OBJECT(cpu
));
7759 rcu_unregister_thread();
7767 gdb_exit(cpu_env
, arg1
);
7769 ret
= 0; /* avoid warning */
7771 case TARGET_NR_read
:
7775 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7777 ret
= get_errno(safe_read(arg1
, p
, arg3
));
7779 fd_trans_host_to_target_data(arg1
)) {
7780 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
7782 unlock_user(p
, arg2
, ret
);
7785 case TARGET_NR_write
:
7786 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
7788 if (fd_trans_target_to_host_data(arg1
)) {
7789 void *copy
= g_malloc(arg3
);
7790 memcpy(copy
, p
, arg3
);
7791 ret
= fd_trans_target_to_host_data(arg1
)(copy
, arg3
);
7793 ret
= get_errno(safe_write(arg1
, copy
, ret
));
7797 ret
= get_errno(safe_write(arg1
, p
, arg3
));
7799 unlock_user(p
, arg2
, 0);
7801 #ifdef TARGET_NR_open
7802 case TARGET_NR_open
:
7803 if (!(p
= lock_user_string(arg1
)))
7805 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
7806 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
7808 fd_trans_unregister(ret
);
7809 unlock_user(p
, arg1
, 0);
7812 case TARGET_NR_openat
:
7813 if (!(p
= lock_user_string(arg2
)))
7815 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
7816 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
7818 fd_trans_unregister(ret
);
7819 unlock_user(p
, arg2
, 0);
7821 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7822 case TARGET_NR_name_to_handle_at
:
7823 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
7826 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7827 case TARGET_NR_open_by_handle_at
:
7828 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
7829 fd_trans_unregister(ret
);
7832 case TARGET_NR_close
:
7833 fd_trans_unregister(arg1
);
7834 ret
= get_errno(close(arg1
));
7839 #ifdef TARGET_NR_fork
7840 case TARGET_NR_fork
:
7841 ret
= get_errno(do_fork(cpu_env
, TARGET_SIGCHLD
, 0, 0, 0, 0));
7844 #ifdef TARGET_NR_waitpid
7845 case TARGET_NR_waitpid
:
7848 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
7849 if (!is_error(ret
) && arg2
&& ret
7850 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
7855 #ifdef TARGET_NR_waitid
7856 case TARGET_NR_waitid
:
7860 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
7861 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
7862 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
7864 host_to_target_siginfo(p
, &info
);
7865 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
7870 #ifdef TARGET_NR_creat /* not on alpha */
7871 case TARGET_NR_creat
:
7872 if (!(p
= lock_user_string(arg1
)))
7874 ret
= get_errno(creat(p
, arg2
));
7875 fd_trans_unregister(ret
);
7876 unlock_user(p
, arg1
, 0);
7879 #ifdef TARGET_NR_link
7880 case TARGET_NR_link
:
7883 p
= lock_user_string(arg1
);
7884 p2
= lock_user_string(arg2
);
7886 ret
= -TARGET_EFAULT
;
7888 ret
= get_errno(link(p
, p2
));
7889 unlock_user(p2
, arg2
, 0);
7890 unlock_user(p
, arg1
, 0);
7894 #if defined(TARGET_NR_linkat)
7895 case TARGET_NR_linkat
:
7900 p
= lock_user_string(arg2
);
7901 p2
= lock_user_string(arg4
);
7903 ret
= -TARGET_EFAULT
;
7905 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
7906 unlock_user(p
, arg2
, 0);
7907 unlock_user(p2
, arg4
, 0);
7911 #ifdef TARGET_NR_unlink
7912 case TARGET_NR_unlink
:
7913 if (!(p
= lock_user_string(arg1
)))
7915 ret
= get_errno(unlink(p
));
7916 unlock_user(p
, arg1
, 0);
7919 #if defined(TARGET_NR_unlinkat)
7920 case TARGET_NR_unlinkat
:
7921 if (!(p
= lock_user_string(arg2
)))
7923 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
7924 unlock_user(p
, arg2
, 0);
7927 case TARGET_NR_execve
:
7929 char **argp
, **envp
;
7932 abi_ulong guest_argp
;
7933 abi_ulong guest_envp
;
7940 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
7941 if (get_user_ual(addr
, gp
))
7949 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
7950 if (get_user_ual(addr
, gp
))
7957 argp
= g_new0(char *, argc
+ 1);
7958 envp
= g_new0(char *, envc
+ 1);
7960 for (gp
= guest_argp
, q
= argp
; gp
;
7961 gp
+= sizeof(abi_ulong
), q
++) {
7962 if (get_user_ual(addr
, gp
))
7966 if (!(*q
= lock_user_string(addr
)))
7968 total_size
+= strlen(*q
) + 1;
7972 for (gp
= guest_envp
, q
= envp
; gp
;
7973 gp
+= sizeof(abi_ulong
), q
++) {
7974 if (get_user_ual(addr
, gp
))
7978 if (!(*q
= lock_user_string(addr
)))
7980 total_size
+= strlen(*q
) + 1;
7984 if (!(p
= lock_user_string(arg1
)))
7986 /* Although execve() is not an interruptible syscall it is
7987 * a special case where we must use the safe_syscall wrapper:
7988 * if we allow a signal to happen before we make the host
7989 * syscall then we will 'lose' it, because at the point of
7990 * execve the process leaves QEMU's control. So we use the
7991 * safe syscall wrapper to ensure that we either take the
7992 * signal as a guest signal, or else it does not happen
7993 * before the execve completes and makes it the other
7994 * program's problem.
7996 ret
= get_errno(safe_execve(p
, argp
, envp
));
7997 unlock_user(p
, arg1
, 0);
8002 ret
= -TARGET_EFAULT
;
8005 for (gp
= guest_argp
, q
= argp
; *q
;
8006 gp
+= sizeof(abi_ulong
), q
++) {
8007 if (get_user_ual(addr
, gp
)
8010 unlock_user(*q
, addr
, 0);
8012 for (gp
= guest_envp
, q
= envp
; *q
;
8013 gp
+= sizeof(abi_ulong
), q
++) {
8014 if (get_user_ual(addr
, gp
)
8017 unlock_user(*q
, addr
, 0);
8024 case TARGET_NR_chdir
:
8025 if (!(p
= lock_user_string(arg1
)))
8027 ret
= get_errno(chdir(p
));
8028 unlock_user(p
, arg1
, 0);
8030 #ifdef TARGET_NR_time
8031 case TARGET_NR_time
:
8034 ret
= get_errno(time(&host_time
));
8037 && put_user_sal(host_time
, arg1
))
8042 #ifdef TARGET_NR_mknod
8043 case TARGET_NR_mknod
:
8044 if (!(p
= lock_user_string(arg1
)))
8046 ret
= get_errno(mknod(p
, arg2
, arg3
));
8047 unlock_user(p
, arg1
, 0);
8050 #if defined(TARGET_NR_mknodat)
8051 case TARGET_NR_mknodat
:
8052 if (!(p
= lock_user_string(arg2
)))
8054 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
8055 unlock_user(p
, arg2
, 0);
8058 #ifdef TARGET_NR_chmod
8059 case TARGET_NR_chmod
:
8060 if (!(p
= lock_user_string(arg1
)))
8062 ret
= get_errno(chmod(p
, arg2
));
8063 unlock_user(p
, arg1
, 0);
8066 #ifdef TARGET_NR_break
8067 case TARGET_NR_break
:
8070 #ifdef TARGET_NR_oldstat
8071 case TARGET_NR_oldstat
:
8074 case TARGET_NR_lseek
:
8075 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
8077 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8078 /* Alpha specific */
8079 case TARGET_NR_getxpid
:
8080 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
8081 ret
= get_errno(getpid());
8084 #ifdef TARGET_NR_getpid
8085 case TARGET_NR_getpid
:
8086 ret
= get_errno(getpid());
8089 case TARGET_NR_mount
:
8091 /* need to look at the data field */
8095 p
= lock_user_string(arg1
);
8103 p2
= lock_user_string(arg2
);
8106 unlock_user(p
, arg1
, 0);
8112 p3
= lock_user_string(arg3
);
8115 unlock_user(p
, arg1
, 0);
8117 unlock_user(p2
, arg2
, 0);
8124 /* FIXME - arg5 should be locked, but it isn't clear how to
8125 * do that since it's not guaranteed to be a NULL-terminated
8129 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
8131 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
8133 ret
= get_errno(ret
);
8136 unlock_user(p
, arg1
, 0);
8138 unlock_user(p2
, arg2
, 0);
8140 unlock_user(p3
, arg3
, 0);
8144 #ifdef TARGET_NR_umount
8145 case TARGET_NR_umount
:
8146 if (!(p
= lock_user_string(arg1
)))
8148 ret
= get_errno(umount(p
));
8149 unlock_user(p
, arg1
, 0);
8152 #ifdef TARGET_NR_stime /* not on alpha */
8153 case TARGET_NR_stime
:
8156 if (get_user_sal(host_time
, arg1
))
8158 ret
= get_errno(stime(&host_time
));
8162 case TARGET_NR_ptrace
:
8164 #ifdef TARGET_NR_alarm /* not on alpha */
8165 case TARGET_NR_alarm
:
8169 #ifdef TARGET_NR_oldfstat
8170 case TARGET_NR_oldfstat
:
8173 #ifdef TARGET_NR_pause /* not on alpha */
8174 case TARGET_NR_pause
:
8175 if (!block_signals()) {
8176 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
8178 ret
= -TARGET_EINTR
;
8181 #ifdef TARGET_NR_utime
8182 case TARGET_NR_utime
:
8184 struct utimbuf tbuf
, *host_tbuf
;
8185 struct target_utimbuf
*target_tbuf
;
8187 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
8189 tbuf
.actime
= tswapal(target_tbuf
->actime
);
8190 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
8191 unlock_user_struct(target_tbuf
, arg2
, 0);
8196 if (!(p
= lock_user_string(arg1
)))
8198 ret
= get_errno(utime(p
, host_tbuf
));
8199 unlock_user(p
, arg1
, 0);
8203 #ifdef TARGET_NR_utimes
8204 case TARGET_NR_utimes
:
8206 struct timeval
*tvp
, tv
[2];
8208 if (copy_from_user_timeval(&tv
[0], arg2
)
8209 || copy_from_user_timeval(&tv
[1],
8210 arg2
+ sizeof(struct target_timeval
)))
8216 if (!(p
= lock_user_string(arg1
)))
8218 ret
= get_errno(utimes(p
, tvp
));
8219 unlock_user(p
, arg1
, 0);
8223 #if defined(TARGET_NR_futimesat)
8224 case TARGET_NR_futimesat
:
8226 struct timeval
*tvp
, tv
[2];
8228 if (copy_from_user_timeval(&tv
[0], arg3
)
8229 || copy_from_user_timeval(&tv
[1],
8230 arg3
+ sizeof(struct target_timeval
)))
8236 if (!(p
= lock_user_string(arg2
)))
8238 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
8239 unlock_user(p
, arg2
, 0);
8243 #ifdef TARGET_NR_stty
8244 case TARGET_NR_stty
:
8247 #ifdef TARGET_NR_gtty
8248 case TARGET_NR_gtty
:
8251 #ifdef TARGET_NR_access
8252 case TARGET_NR_access
:
8253 if (!(p
= lock_user_string(arg1
)))
8255 ret
= get_errno(access(path(p
), arg2
));
8256 unlock_user(p
, arg1
, 0);
8259 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8260 case TARGET_NR_faccessat
:
8261 if (!(p
= lock_user_string(arg2
)))
8263 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
8264 unlock_user(p
, arg2
, 0);
8267 #ifdef TARGET_NR_nice /* not on alpha */
8268 case TARGET_NR_nice
:
8269 ret
= get_errno(nice(arg1
));
8272 #ifdef TARGET_NR_ftime
8273 case TARGET_NR_ftime
:
8276 case TARGET_NR_sync
:
8280 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8281 case TARGET_NR_syncfs
:
8282 ret
= get_errno(syncfs(arg1
));
8285 case TARGET_NR_kill
:
8286 ret
= get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
8288 #ifdef TARGET_NR_rename
8289 case TARGET_NR_rename
:
8292 p
= lock_user_string(arg1
);
8293 p2
= lock_user_string(arg2
);
8295 ret
= -TARGET_EFAULT
;
8297 ret
= get_errno(rename(p
, p2
));
8298 unlock_user(p2
, arg2
, 0);
8299 unlock_user(p
, arg1
, 0);
8303 #if defined(TARGET_NR_renameat)
8304 case TARGET_NR_renameat
:
8307 p
= lock_user_string(arg2
);
8308 p2
= lock_user_string(arg4
);
8310 ret
= -TARGET_EFAULT
;
8312 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
8313 unlock_user(p2
, arg4
, 0);
8314 unlock_user(p
, arg2
, 0);
8318 #ifdef TARGET_NR_mkdir
8319 case TARGET_NR_mkdir
:
8320 if (!(p
= lock_user_string(arg1
)))
8322 ret
= get_errno(mkdir(p
, arg2
));
8323 unlock_user(p
, arg1
, 0);
8326 #if defined(TARGET_NR_mkdirat)
8327 case TARGET_NR_mkdirat
:
8328 if (!(p
= lock_user_string(arg2
)))
8330 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
8331 unlock_user(p
, arg2
, 0);
8334 #ifdef TARGET_NR_rmdir
8335 case TARGET_NR_rmdir
:
8336 if (!(p
= lock_user_string(arg1
)))
8338 ret
= get_errno(rmdir(p
));
8339 unlock_user(p
, arg1
, 0);
8343 ret
= get_errno(dup(arg1
));
8345 fd_trans_dup(arg1
, ret
);
8348 #ifdef TARGET_NR_pipe
8349 case TARGET_NR_pipe
:
8350 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
8353 #ifdef TARGET_NR_pipe2
8354 case TARGET_NR_pipe2
:
8355 ret
= do_pipe(cpu_env
, arg1
,
8356 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
8359 case TARGET_NR_times
:
8361 struct target_tms
*tmsp
;
8363 ret
= get_errno(times(&tms
));
8365 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
8368 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
8369 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
8370 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
8371 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
8374 ret
= host_to_target_clock_t(ret
);
8377 #ifdef TARGET_NR_prof
8378 case TARGET_NR_prof
:
8381 #ifdef TARGET_NR_signal
8382 case TARGET_NR_signal
:
8385 case TARGET_NR_acct
:
8387 ret
= get_errno(acct(NULL
));
8389 if (!(p
= lock_user_string(arg1
)))
8391 ret
= get_errno(acct(path(p
)));
8392 unlock_user(p
, arg1
, 0);
8395 #ifdef TARGET_NR_umount2
8396 case TARGET_NR_umount2
:
8397 if (!(p
= lock_user_string(arg1
)))
8399 ret
= get_errno(umount2(p
, arg2
));
8400 unlock_user(p
, arg1
, 0);
8403 #ifdef TARGET_NR_lock
8404 case TARGET_NR_lock
:
8407 case TARGET_NR_ioctl
:
8408 ret
= do_ioctl(arg1
, arg2
, arg3
);
8410 case TARGET_NR_fcntl
:
8411 ret
= do_fcntl(arg1
, arg2
, arg3
);
8413 #ifdef TARGET_NR_mpx
8417 case TARGET_NR_setpgid
:
8418 ret
= get_errno(setpgid(arg1
, arg2
));
8420 #ifdef TARGET_NR_ulimit
8421 case TARGET_NR_ulimit
:
8424 #ifdef TARGET_NR_oldolduname
8425 case TARGET_NR_oldolduname
:
8428 case TARGET_NR_umask
:
8429 ret
= get_errno(umask(arg1
));
8431 case TARGET_NR_chroot
:
8432 if (!(p
= lock_user_string(arg1
)))
8434 ret
= get_errno(chroot(p
));
8435 unlock_user(p
, arg1
, 0);
8437 #ifdef TARGET_NR_ustat
8438 case TARGET_NR_ustat
:
8441 #ifdef TARGET_NR_dup2
8442 case TARGET_NR_dup2
:
8443 ret
= get_errno(dup2(arg1
, arg2
));
8445 fd_trans_dup(arg1
, arg2
);
8449 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8450 case TARGET_NR_dup3
:
8451 ret
= get_errno(dup3(arg1
, arg2
, arg3
));
8453 fd_trans_dup(arg1
, arg2
);
8457 #ifdef TARGET_NR_getppid /* not on alpha */
8458 case TARGET_NR_getppid
:
8459 ret
= get_errno(getppid());
8462 #ifdef TARGET_NR_getpgrp
8463 case TARGET_NR_getpgrp
:
8464 ret
= get_errno(getpgrp());
8467 case TARGET_NR_setsid
:
8468 ret
= get_errno(setsid());
8470 #ifdef TARGET_NR_sigaction
8471 case TARGET_NR_sigaction
:
8473 #if defined(TARGET_ALPHA)
8474 struct target_sigaction act
, oact
, *pact
= 0;
8475 struct target_old_sigaction
*old_act
;
8477 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8479 act
._sa_handler
= old_act
->_sa_handler
;
8480 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8481 act
.sa_flags
= old_act
->sa_flags
;
8482 act
.sa_restorer
= 0;
8483 unlock_user_struct(old_act
, arg2
, 0);
8486 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8487 if (!is_error(ret
) && arg3
) {
8488 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8490 old_act
->_sa_handler
= oact
._sa_handler
;
8491 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8492 old_act
->sa_flags
= oact
.sa_flags
;
8493 unlock_user_struct(old_act
, arg3
, 1);
8495 #elif defined(TARGET_MIPS)
8496 struct target_sigaction act
, oact
, *pact
, *old_act
;
8499 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8501 act
._sa_handler
= old_act
->_sa_handler
;
8502 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
8503 act
.sa_flags
= old_act
->sa_flags
;
8504 unlock_user_struct(old_act
, arg2
, 0);
8510 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8512 if (!is_error(ret
) && arg3
) {
8513 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8515 old_act
->_sa_handler
= oact
._sa_handler
;
8516 old_act
->sa_flags
= oact
.sa_flags
;
8517 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
8518 old_act
->sa_mask
.sig
[1] = 0;
8519 old_act
->sa_mask
.sig
[2] = 0;
8520 old_act
->sa_mask
.sig
[3] = 0;
8521 unlock_user_struct(old_act
, arg3
, 1);
8524 struct target_old_sigaction
*old_act
;
8525 struct target_sigaction act
, oact
, *pact
;
8527 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8529 act
._sa_handler
= old_act
->_sa_handler
;
8530 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8531 act
.sa_flags
= old_act
->sa_flags
;
8532 act
.sa_restorer
= old_act
->sa_restorer
;
8533 unlock_user_struct(old_act
, arg2
, 0);
8538 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8539 if (!is_error(ret
) && arg3
) {
8540 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8542 old_act
->_sa_handler
= oact
._sa_handler
;
8543 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8544 old_act
->sa_flags
= oact
.sa_flags
;
8545 old_act
->sa_restorer
= oact
.sa_restorer
;
8546 unlock_user_struct(old_act
, arg3
, 1);
8552 case TARGET_NR_rt_sigaction
:
8554 #if defined(TARGET_ALPHA)
8555 struct target_sigaction act
, oact
, *pact
= 0;
8556 struct target_rt_sigaction
*rt_act
;
8558 if (arg4
!= sizeof(target_sigset_t
)) {
8559 ret
= -TARGET_EINVAL
;
8563 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
8565 act
._sa_handler
= rt_act
->_sa_handler
;
8566 act
.sa_mask
= rt_act
->sa_mask
;
8567 act
.sa_flags
= rt_act
->sa_flags
;
8568 act
.sa_restorer
= arg5
;
8569 unlock_user_struct(rt_act
, arg2
, 0);
8572 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8573 if (!is_error(ret
) && arg3
) {
8574 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
8576 rt_act
->_sa_handler
= oact
._sa_handler
;
8577 rt_act
->sa_mask
= oact
.sa_mask
;
8578 rt_act
->sa_flags
= oact
.sa_flags
;
8579 unlock_user_struct(rt_act
, arg3
, 1);
8582 struct target_sigaction
*act
;
8583 struct target_sigaction
*oact
;
8585 if (arg4
!= sizeof(target_sigset_t
)) {
8586 ret
= -TARGET_EINVAL
;
8590 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1))
8595 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
8596 ret
= -TARGET_EFAULT
;
8597 goto rt_sigaction_fail
;
8601 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
8604 unlock_user_struct(act
, arg2
, 0);
8606 unlock_user_struct(oact
, arg3
, 1);
8610 #ifdef TARGET_NR_sgetmask /* not on alpha */
8611 case TARGET_NR_sgetmask
:
8614 abi_ulong target_set
;
8615 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8617 host_to_target_old_sigset(&target_set
, &cur_set
);
8623 #ifdef TARGET_NR_ssetmask /* not on alpha */
8624 case TARGET_NR_ssetmask
:
8627 abi_ulong target_set
= arg1
;
8628 target_to_host_old_sigset(&set
, &target_set
);
8629 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
8631 host_to_target_old_sigset(&target_set
, &oset
);
8637 #ifdef TARGET_NR_sigprocmask
8638 case TARGET_NR_sigprocmask
:
8640 #if defined(TARGET_ALPHA)
8641 sigset_t set
, oldset
;
8646 case TARGET_SIG_BLOCK
:
8649 case TARGET_SIG_UNBLOCK
:
8652 case TARGET_SIG_SETMASK
:
8656 ret
= -TARGET_EINVAL
;
8660 target_to_host_old_sigset(&set
, &mask
);
8662 ret
= do_sigprocmask(how
, &set
, &oldset
);
8663 if (!is_error(ret
)) {
8664 host_to_target_old_sigset(&mask
, &oldset
);
8666 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
8669 sigset_t set
, oldset
, *set_ptr
;
8674 case TARGET_SIG_BLOCK
:
8677 case TARGET_SIG_UNBLOCK
:
8680 case TARGET_SIG_SETMASK
:
8684 ret
= -TARGET_EINVAL
;
8687 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8689 target_to_host_old_sigset(&set
, p
);
8690 unlock_user(p
, arg2
, 0);
8696 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8697 if (!is_error(ret
) && arg3
) {
8698 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8700 host_to_target_old_sigset(p
, &oldset
);
8701 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8707 case TARGET_NR_rt_sigprocmask
:
8710 sigset_t set
, oldset
, *set_ptr
;
8712 if (arg4
!= sizeof(target_sigset_t
)) {
8713 ret
= -TARGET_EINVAL
;
8719 case TARGET_SIG_BLOCK
:
8722 case TARGET_SIG_UNBLOCK
:
8725 case TARGET_SIG_SETMASK
:
8729 ret
= -TARGET_EINVAL
;
8732 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8734 target_to_host_sigset(&set
, p
);
8735 unlock_user(p
, arg2
, 0);
8741 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8742 if (!is_error(ret
) && arg3
) {
8743 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8745 host_to_target_sigset(p
, &oldset
);
8746 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8750 #ifdef TARGET_NR_sigpending
8751 case TARGET_NR_sigpending
:
8754 ret
= get_errno(sigpending(&set
));
8755 if (!is_error(ret
)) {
8756 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8758 host_to_target_old_sigset(p
, &set
);
8759 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8764 case TARGET_NR_rt_sigpending
:
8768 /* Yes, this check is >, not != like most. We follow the kernel's
8769 * logic and it does it like this because it implements
8770 * NR_sigpending through the same code path, and in that case
8771 * the old_sigset_t is smaller in size.
8773 if (arg2
> sizeof(target_sigset_t
)) {
8774 ret
= -TARGET_EINVAL
;
8778 ret
= get_errno(sigpending(&set
));
8779 if (!is_error(ret
)) {
8780 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8782 host_to_target_sigset(p
, &set
);
8783 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8787 #ifdef TARGET_NR_sigsuspend
8788 case TARGET_NR_sigsuspend
:
8790 TaskState
*ts
= cpu
->opaque
;
8791 #if defined(TARGET_ALPHA)
8792 abi_ulong mask
= arg1
;
8793 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
8795 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8797 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
8798 unlock_user(p
, arg1
, 0);
8800 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8802 if (ret
!= -TARGET_ERESTARTSYS
) {
8803 ts
->in_sigsuspend
= 1;
8808 case TARGET_NR_rt_sigsuspend
:
8810 TaskState
*ts
= cpu
->opaque
;
8812 if (arg2
!= sizeof(target_sigset_t
)) {
8813 ret
= -TARGET_EINVAL
;
8816 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8818 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
8819 unlock_user(p
, arg1
, 0);
8820 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8822 if (ret
!= -TARGET_ERESTARTSYS
) {
8823 ts
->in_sigsuspend
= 1;
8827 case TARGET_NR_rt_sigtimedwait
:
8830 struct timespec uts
, *puts
;
8833 if (arg4
!= sizeof(target_sigset_t
)) {
8834 ret
= -TARGET_EINVAL
;
8838 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8840 target_to_host_sigset(&set
, p
);
8841 unlock_user(p
, arg1
, 0);
8844 target_to_host_timespec(puts
, arg3
);
8848 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
8850 if (!is_error(ret
)) {
8852 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
8857 host_to_target_siginfo(p
, &uinfo
);
8858 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
8860 ret
= host_to_target_signal(ret
);
8864 case TARGET_NR_rt_sigqueueinfo
:
8868 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
8872 target_to_host_siginfo(&uinfo
, p
);
8873 unlock_user(p
, arg3
, 0);
8874 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
8877 case TARGET_NR_rt_tgsigqueueinfo
:
8881 p
= lock_user(VERIFY_READ
, arg4
, sizeof(target_siginfo_t
), 1);
8885 target_to_host_siginfo(&uinfo
, p
);
8886 unlock_user(p
, arg4
, 0);
8887 ret
= get_errno(sys_rt_tgsigqueueinfo(arg1
, arg2
, arg3
, &uinfo
));
8890 #ifdef TARGET_NR_sigreturn
8891 case TARGET_NR_sigreturn
:
8892 if (block_signals()) {
8893 ret
= -TARGET_ERESTARTSYS
;
8895 ret
= do_sigreturn(cpu_env
);
8899 case TARGET_NR_rt_sigreturn
:
8900 if (block_signals()) {
8901 ret
= -TARGET_ERESTARTSYS
;
8903 ret
= do_rt_sigreturn(cpu_env
);
8906 case TARGET_NR_sethostname
:
8907 if (!(p
= lock_user_string(arg1
)))
8909 ret
= get_errno(sethostname(p
, arg2
));
8910 unlock_user(p
, arg1
, 0);
8912 case TARGET_NR_setrlimit
:
8914 int resource
= target_to_host_resource(arg1
);
8915 struct target_rlimit
*target_rlim
;
8917 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
8919 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
8920 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
8921 unlock_user_struct(target_rlim
, arg2
, 0);
8922 ret
= get_errno(setrlimit(resource
, &rlim
));
8925 case TARGET_NR_getrlimit
:
8927 int resource
= target_to_host_resource(arg1
);
8928 struct target_rlimit
*target_rlim
;
8931 ret
= get_errno(getrlimit(resource
, &rlim
));
8932 if (!is_error(ret
)) {
8933 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
8935 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
8936 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
8937 unlock_user_struct(target_rlim
, arg2
, 1);
8941 case TARGET_NR_getrusage
:
8943 struct rusage rusage
;
8944 ret
= get_errno(getrusage(arg1
, &rusage
));
8945 if (!is_error(ret
)) {
8946 ret
= host_to_target_rusage(arg2
, &rusage
);
8950 case TARGET_NR_gettimeofday
:
8953 ret
= get_errno(gettimeofday(&tv
, NULL
));
8954 if (!is_error(ret
)) {
8955 if (copy_to_user_timeval(arg1
, &tv
))
8960 case TARGET_NR_settimeofday
:
8962 struct timeval tv
, *ptv
= NULL
;
8963 struct timezone tz
, *ptz
= NULL
;
8966 if (copy_from_user_timeval(&tv
, arg1
)) {
8973 if (copy_from_user_timezone(&tz
, arg2
)) {
8979 ret
= get_errno(settimeofday(ptv
, ptz
));
8982 #if defined(TARGET_NR_select)
8983 case TARGET_NR_select
:
8984 #if defined(TARGET_WANT_NI_OLD_SELECT)
8985 /* some architectures used to have old_select here
8986 * but now ENOSYS it.
8988 ret
= -TARGET_ENOSYS
;
8989 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8990 ret
= do_old_select(arg1
);
8992 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
8996 #ifdef TARGET_NR_pselect6
8997 case TARGET_NR_pselect6
:
8999 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
9000 fd_set rfds
, wfds
, efds
;
9001 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
9002 struct timespec ts
, *ts_ptr
;
9005 * The 6th arg is actually two args smashed together,
9006 * so we cannot use the C library.
9014 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
9015 target_sigset_t
*target_sigset
;
9023 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
9027 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
9031 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
9037 * This takes a timespec, and not a timeval, so we cannot
9038 * use the do_select() helper ...
9041 if (target_to_host_timespec(&ts
, ts_addr
)) {
9049 /* Extract the two packed args for the sigset */
9052 sig
.size
= SIGSET_T_SIZE
;
9054 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
9058 arg_sigset
= tswapal(arg7
[0]);
9059 arg_sigsize
= tswapal(arg7
[1]);
9060 unlock_user(arg7
, arg6
, 0);
9064 if (arg_sigsize
!= sizeof(*target_sigset
)) {
9065 /* Like the kernel, we enforce correct size sigsets */
9066 ret
= -TARGET_EINVAL
;
9069 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
9070 sizeof(*target_sigset
), 1);
9071 if (!target_sigset
) {
9074 target_to_host_sigset(&set
, target_sigset
);
9075 unlock_user(target_sigset
, arg_sigset
, 0);
9083 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
9086 if (!is_error(ret
)) {
9087 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
9089 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
9091 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
9094 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
9100 #ifdef TARGET_NR_symlink
9101 case TARGET_NR_symlink
:
9104 p
= lock_user_string(arg1
);
9105 p2
= lock_user_string(arg2
);
9107 ret
= -TARGET_EFAULT
;
9109 ret
= get_errno(symlink(p
, p2
));
9110 unlock_user(p2
, arg2
, 0);
9111 unlock_user(p
, arg1
, 0);
9115 #if defined(TARGET_NR_symlinkat)
9116 case TARGET_NR_symlinkat
:
9119 p
= lock_user_string(arg1
);
9120 p2
= lock_user_string(arg3
);
9122 ret
= -TARGET_EFAULT
;
9124 ret
= get_errno(symlinkat(p
, arg2
, p2
));
9125 unlock_user(p2
, arg3
, 0);
9126 unlock_user(p
, arg1
, 0);
9130 #ifdef TARGET_NR_oldlstat
9131 case TARGET_NR_oldlstat
:
9134 #ifdef TARGET_NR_readlink
9135 case TARGET_NR_readlink
:
9138 p
= lock_user_string(arg1
);
9139 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9141 ret
= -TARGET_EFAULT
;
9143 /* Short circuit this for the magic exe check. */
9144 ret
= -TARGET_EINVAL
;
9145 } else if (is_proc_myself((const char *)p
, "exe")) {
9146 char real
[PATH_MAX
], *temp
;
9147 temp
= realpath(exec_path
, real
);
9148 /* Return value is # of bytes that we wrote to the buffer. */
9150 ret
= get_errno(-1);
9152 /* Don't worry about sign mismatch as earlier mapping
9153 * logic would have thrown a bad address error. */
9154 ret
= MIN(strlen(real
), arg3
);
9155 /* We cannot NUL terminate the string. */
9156 memcpy(p2
, real
, ret
);
9159 ret
= get_errno(readlink(path(p
), p2
, arg3
));
9161 unlock_user(p2
, arg2
, ret
);
9162 unlock_user(p
, arg1
, 0);
9166 #if defined(TARGET_NR_readlinkat)
9167 case TARGET_NR_readlinkat
:
9170 p
= lock_user_string(arg2
);
9171 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
9173 ret
= -TARGET_EFAULT
;
9174 } else if (is_proc_myself((const char *)p
, "exe")) {
9175 char real
[PATH_MAX
], *temp
;
9176 temp
= realpath(exec_path
, real
);
9177 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
9178 snprintf((char *)p2
, arg4
, "%s", real
);
9180 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
9182 unlock_user(p2
, arg3
, ret
);
9183 unlock_user(p
, arg2
, 0);
9187 #ifdef TARGET_NR_uselib
9188 case TARGET_NR_uselib
:
9191 #ifdef TARGET_NR_swapon
9192 case TARGET_NR_swapon
:
9193 if (!(p
= lock_user_string(arg1
)))
9195 ret
= get_errno(swapon(p
, arg2
));
9196 unlock_user(p
, arg1
, 0);
9199 case TARGET_NR_reboot
:
9200 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
9201 /* arg4 must be ignored in all other cases */
9202 p
= lock_user_string(arg4
);
9206 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
9207 unlock_user(p
, arg4
, 0);
9209 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
9212 #ifdef TARGET_NR_readdir
9213 case TARGET_NR_readdir
:
9216 #ifdef TARGET_NR_mmap
9217 case TARGET_NR_mmap
:
9218 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9219 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9220 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9221 || defined(TARGET_S390X)
9224 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
9225 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
9233 unlock_user(v
, arg1
, 0);
9234 ret
= get_errno(target_mmap(v1
, v2
, v3
,
9235 target_to_host_bitmask(v4
, mmap_flags_tbl
),
9239 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9240 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9246 #ifdef TARGET_NR_mmap2
9247 case TARGET_NR_mmap2
:
9249 #define MMAP_SHIFT 12
9251 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9252 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9254 arg6
<< MMAP_SHIFT
));
9257 case TARGET_NR_munmap
:
9258 ret
= get_errno(target_munmap(arg1
, arg2
));
9260 case TARGET_NR_mprotect
:
9262 TaskState
*ts
= cpu
->opaque
;
9263 /* Special hack to detect libc making the stack executable. */
9264 if ((arg3
& PROT_GROWSDOWN
)
9265 && arg1
>= ts
->info
->stack_limit
9266 && arg1
<= ts
->info
->start_stack
) {
9267 arg3
&= ~PROT_GROWSDOWN
;
9268 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
9269 arg1
= ts
->info
->stack_limit
;
9272 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
9274 #ifdef TARGET_NR_mremap
9275 case TARGET_NR_mremap
:
9276 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
9279 /* ??? msync/mlock/munlock are broken for softmmu. */
9280 #ifdef TARGET_NR_msync
9281 case TARGET_NR_msync
:
9282 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
9285 #ifdef TARGET_NR_mlock
9286 case TARGET_NR_mlock
:
9287 ret
= get_errno(mlock(g2h(arg1
), arg2
));
9290 #ifdef TARGET_NR_munlock
9291 case TARGET_NR_munlock
:
9292 ret
= get_errno(munlock(g2h(arg1
), arg2
));
9295 #ifdef TARGET_NR_mlockall
9296 case TARGET_NR_mlockall
:
9297 ret
= get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
9300 #ifdef TARGET_NR_munlockall
9301 case TARGET_NR_munlockall
:
9302 ret
= get_errno(munlockall());
9305 case TARGET_NR_truncate
:
9306 if (!(p
= lock_user_string(arg1
)))
9308 ret
= get_errno(truncate(p
, arg2
));
9309 unlock_user(p
, arg1
, 0);
9311 case TARGET_NR_ftruncate
:
9312 ret
= get_errno(ftruncate(arg1
, arg2
));
9314 case TARGET_NR_fchmod
:
9315 ret
= get_errno(fchmod(arg1
, arg2
));
9317 #if defined(TARGET_NR_fchmodat)
9318 case TARGET_NR_fchmodat
:
9319 if (!(p
= lock_user_string(arg2
)))
9321 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
9322 unlock_user(p
, arg2
, 0);
9325 case TARGET_NR_getpriority
:
9326 /* Note that negative values are valid for getpriority, so we must
9327 differentiate based on errno settings. */
9329 ret
= getpriority(arg1
, arg2
);
9330 if (ret
== -1 && errno
!= 0) {
9331 ret
= -host_to_target_errno(errno
);
9335 /* Return value is the unbiased priority. Signal no error. */
9336 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
9338 /* Return value is a biased priority to avoid negative numbers. */
9342 case TARGET_NR_setpriority
:
9343 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
9345 #ifdef TARGET_NR_profil
9346 case TARGET_NR_profil
:
9349 case TARGET_NR_statfs
:
9350 if (!(p
= lock_user_string(arg1
)))
9352 ret
= get_errno(statfs(path(p
), &stfs
));
9353 unlock_user(p
, arg1
, 0);
9355 if (!is_error(ret
)) {
9356 struct target_statfs
*target_stfs
;
9358 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
9360 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9361 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9362 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9363 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9364 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9365 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9366 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9367 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9368 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9369 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9370 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9371 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9372 unlock_user_struct(target_stfs
, arg2
, 1);
9375 case TARGET_NR_fstatfs
:
9376 ret
= get_errno(fstatfs(arg1
, &stfs
));
9377 goto convert_statfs
;
9378 #ifdef TARGET_NR_statfs64
9379 case TARGET_NR_statfs64
:
9380 if (!(p
= lock_user_string(arg1
)))
9382 ret
= get_errno(statfs(path(p
), &stfs
));
9383 unlock_user(p
, arg1
, 0);
9385 if (!is_error(ret
)) {
9386 struct target_statfs64
*target_stfs
;
9388 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
9390 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9391 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9392 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9393 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9394 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9395 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9396 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9397 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9398 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9399 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9400 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9401 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9402 unlock_user_struct(target_stfs
, arg3
, 1);
9405 case TARGET_NR_fstatfs64
:
9406 ret
= get_errno(fstatfs(arg1
, &stfs
));
9407 goto convert_statfs64
;
9409 #ifdef TARGET_NR_ioperm
9410 case TARGET_NR_ioperm
:
9413 #ifdef TARGET_NR_socketcall
9414 case TARGET_NR_socketcall
:
9415 ret
= do_socketcall(arg1
, arg2
);
9418 #ifdef TARGET_NR_accept
9419 case TARGET_NR_accept
:
9420 ret
= do_accept4(arg1
, arg2
, arg3
, 0);
9423 #ifdef TARGET_NR_accept4
9424 case TARGET_NR_accept4
:
9425 ret
= do_accept4(arg1
, arg2
, arg3
, arg4
);
9428 #ifdef TARGET_NR_bind
9429 case TARGET_NR_bind
:
9430 ret
= do_bind(arg1
, arg2
, arg3
);
9433 #ifdef TARGET_NR_connect
9434 case TARGET_NR_connect
:
9435 ret
= do_connect(arg1
, arg2
, arg3
);
9438 #ifdef TARGET_NR_getpeername
9439 case TARGET_NR_getpeername
:
9440 ret
= do_getpeername(arg1
, arg2
, arg3
);
9443 #ifdef TARGET_NR_getsockname
9444 case TARGET_NR_getsockname
:
9445 ret
= do_getsockname(arg1
, arg2
, arg3
);
9448 #ifdef TARGET_NR_getsockopt
9449 case TARGET_NR_getsockopt
:
9450 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
9453 #ifdef TARGET_NR_listen
9454 case TARGET_NR_listen
:
9455 ret
= get_errno(listen(arg1
, arg2
));
9458 #ifdef TARGET_NR_recv
9459 case TARGET_NR_recv
:
9460 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
9463 #ifdef TARGET_NR_recvfrom
9464 case TARGET_NR_recvfrom
:
9465 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9468 #ifdef TARGET_NR_recvmsg
9469 case TARGET_NR_recvmsg
:
9470 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
9473 #ifdef TARGET_NR_send
9474 case TARGET_NR_send
:
9475 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
9478 #ifdef TARGET_NR_sendmsg
9479 case TARGET_NR_sendmsg
:
9480 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
9483 #ifdef TARGET_NR_sendmmsg
9484 case TARGET_NR_sendmmsg
:
9485 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
9487 case TARGET_NR_recvmmsg
:
9488 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
9491 #ifdef TARGET_NR_sendto
9492 case TARGET_NR_sendto
:
9493 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9496 #ifdef TARGET_NR_shutdown
9497 case TARGET_NR_shutdown
:
9498 ret
= get_errno(shutdown(arg1
, arg2
));
9501 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9502 case TARGET_NR_getrandom
:
9503 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
9507 ret
= get_errno(getrandom(p
, arg2
, arg3
));
9508 unlock_user(p
, arg1
, ret
);
9511 #ifdef TARGET_NR_socket
9512 case TARGET_NR_socket
:
9513 ret
= do_socket(arg1
, arg2
, arg3
);
9516 #ifdef TARGET_NR_socketpair
9517 case TARGET_NR_socketpair
:
9518 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
9521 #ifdef TARGET_NR_setsockopt
9522 case TARGET_NR_setsockopt
:
9523 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
9526 #if defined(TARGET_NR_syslog)
9527 case TARGET_NR_syslog
:
9532 case TARGET_SYSLOG_ACTION_CLOSE
: /* Close log */
9533 case TARGET_SYSLOG_ACTION_OPEN
: /* Open log */
9534 case TARGET_SYSLOG_ACTION_CLEAR
: /* Clear ring buffer */
9535 case TARGET_SYSLOG_ACTION_CONSOLE_OFF
: /* Disable logging */
9536 case TARGET_SYSLOG_ACTION_CONSOLE_ON
: /* Enable logging */
9537 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL
: /* Set messages level */
9538 case TARGET_SYSLOG_ACTION_SIZE_UNREAD
: /* Number of chars */
9539 case TARGET_SYSLOG_ACTION_SIZE_BUFFER
: /* Size of the buffer */
9541 ret
= get_errno(sys_syslog((int)arg1
, NULL
, (int)arg3
));
9544 case TARGET_SYSLOG_ACTION_READ
: /* Read from log */
9545 case TARGET_SYSLOG_ACTION_READ_CLEAR
: /* Read/clear msgs */
9546 case TARGET_SYSLOG_ACTION_READ_ALL
: /* Read last messages */
9548 ret
= -TARGET_EINVAL
;
9556 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9558 ret
= -TARGET_EFAULT
;
9561 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
9562 unlock_user(p
, arg2
, arg3
);
9572 case TARGET_NR_setitimer
:
9574 struct itimerval value
, ovalue
, *pvalue
;
9578 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
9579 || copy_from_user_timeval(&pvalue
->it_value
,
9580 arg2
+ sizeof(struct target_timeval
)))
9585 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
9586 if (!is_error(ret
) && arg3
) {
9587 if (copy_to_user_timeval(arg3
,
9588 &ovalue
.it_interval
)
9589 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
9595 case TARGET_NR_getitimer
:
9597 struct itimerval value
;
9599 ret
= get_errno(getitimer(arg1
, &value
));
9600 if (!is_error(ret
) && arg2
) {
9601 if (copy_to_user_timeval(arg2
,
9603 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
9609 #ifdef TARGET_NR_stat
9610 case TARGET_NR_stat
:
9611 if (!(p
= lock_user_string(arg1
)))
9613 ret
= get_errno(stat(path(p
), &st
));
9614 unlock_user(p
, arg1
, 0);
9617 #ifdef TARGET_NR_lstat
9618 case TARGET_NR_lstat
:
9619 if (!(p
= lock_user_string(arg1
)))
9621 ret
= get_errno(lstat(path(p
), &st
));
9622 unlock_user(p
, arg1
, 0);
9625 case TARGET_NR_fstat
:
9627 ret
= get_errno(fstat(arg1
, &st
));
9628 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9631 if (!is_error(ret
)) {
9632 struct target_stat
*target_st
;
9634 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
9636 memset(target_st
, 0, sizeof(*target_st
));
9637 __put_user(st
.st_dev
, &target_st
->st_dev
);
9638 __put_user(st
.st_ino
, &target_st
->st_ino
);
9639 __put_user(st
.st_mode
, &target_st
->st_mode
);
9640 __put_user(st
.st_uid
, &target_st
->st_uid
);
9641 __put_user(st
.st_gid
, &target_st
->st_gid
);
9642 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
9643 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
9644 __put_user(st
.st_size
, &target_st
->st_size
);
9645 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
9646 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
9647 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
9648 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
9649 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
9650 unlock_user_struct(target_st
, arg2
, 1);
9654 #ifdef TARGET_NR_olduname
9655 case TARGET_NR_olduname
:
9658 #ifdef TARGET_NR_iopl
9659 case TARGET_NR_iopl
:
9662 case TARGET_NR_vhangup
:
9663 ret
= get_errno(vhangup());
9665 #ifdef TARGET_NR_idle
9666 case TARGET_NR_idle
:
9669 #ifdef TARGET_NR_syscall
9670 case TARGET_NR_syscall
:
9671 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
9672 arg6
, arg7
, arg8
, 0);
9675 case TARGET_NR_wait4
:
9678 abi_long status_ptr
= arg2
;
9679 struct rusage rusage
, *rusage_ptr
;
9680 abi_ulong target_rusage
= arg4
;
9681 abi_long rusage_err
;
9683 rusage_ptr
= &rusage
;
9686 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
9687 if (!is_error(ret
)) {
9688 if (status_ptr
&& ret
) {
9689 status
= host_to_target_waitstatus(status
);
9690 if (put_user_s32(status
, status_ptr
))
9693 if (target_rusage
) {
9694 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
9702 #ifdef TARGET_NR_swapoff
9703 case TARGET_NR_swapoff
:
9704 if (!(p
= lock_user_string(arg1
)))
9706 ret
= get_errno(swapoff(p
));
9707 unlock_user(p
, arg1
, 0);
9710 case TARGET_NR_sysinfo
:
9712 struct target_sysinfo
*target_value
;
9713 struct sysinfo value
;
9714 ret
= get_errno(sysinfo(&value
));
9715 if (!is_error(ret
) && arg1
)
9717 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
9719 __put_user(value
.uptime
, &target_value
->uptime
);
9720 __put_user(value
.loads
[0], &target_value
->loads
[0]);
9721 __put_user(value
.loads
[1], &target_value
->loads
[1]);
9722 __put_user(value
.loads
[2], &target_value
->loads
[2]);
9723 __put_user(value
.totalram
, &target_value
->totalram
);
9724 __put_user(value
.freeram
, &target_value
->freeram
);
9725 __put_user(value
.sharedram
, &target_value
->sharedram
);
9726 __put_user(value
.bufferram
, &target_value
->bufferram
);
9727 __put_user(value
.totalswap
, &target_value
->totalswap
);
9728 __put_user(value
.freeswap
, &target_value
->freeswap
);
9729 __put_user(value
.procs
, &target_value
->procs
);
9730 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
9731 __put_user(value
.freehigh
, &target_value
->freehigh
);
9732 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
9733 unlock_user_struct(target_value
, arg1
, 1);
9737 #ifdef TARGET_NR_ipc
9739 ret
= do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9742 #ifdef TARGET_NR_semget
9743 case TARGET_NR_semget
:
9744 ret
= get_errno(semget(arg1
, arg2
, arg3
));
9747 #ifdef TARGET_NR_semop
9748 case TARGET_NR_semop
:
9749 ret
= do_semop(arg1
, arg2
, arg3
);
9752 #ifdef TARGET_NR_semctl
9753 case TARGET_NR_semctl
:
9754 ret
= do_semctl(arg1
, arg2
, arg3
, arg4
);
9757 #ifdef TARGET_NR_msgctl
9758 case TARGET_NR_msgctl
:
9759 ret
= do_msgctl(arg1
, arg2
, arg3
);
9762 #ifdef TARGET_NR_msgget
9763 case TARGET_NR_msgget
:
9764 ret
= get_errno(msgget(arg1
, arg2
));
9767 #ifdef TARGET_NR_msgrcv
9768 case TARGET_NR_msgrcv
:
9769 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
9772 #ifdef TARGET_NR_msgsnd
9773 case TARGET_NR_msgsnd
:
9774 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
9777 #ifdef TARGET_NR_shmget
9778 case TARGET_NR_shmget
:
9779 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
9782 #ifdef TARGET_NR_shmctl
9783 case TARGET_NR_shmctl
:
9784 ret
= do_shmctl(arg1
, arg2
, arg3
);
9787 #ifdef TARGET_NR_shmat
9788 case TARGET_NR_shmat
:
9789 ret
= do_shmat(cpu_env
, arg1
, arg2
, arg3
);
9792 #ifdef TARGET_NR_shmdt
9793 case TARGET_NR_shmdt
:
9794 ret
= do_shmdt(arg1
);
9797 case TARGET_NR_fsync
:
9798 ret
= get_errno(fsync(arg1
));
9800 case TARGET_NR_clone
:
9801 /* Linux manages to have three different orderings for its
9802 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9803 * match the kernel's CONFIG_CLONE_* settings.
9804 * Microblaze is further special in that it uses a sixth
9805 * implicit argument to clone for the TLS pointer.
9807 #if defined(TARGET_MICROBLAZE)
9808 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
9809 #elif defined(TARGET_CLONE_BACKWARDS)
9810 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
9811 #elif defined(TARGET_CLONE_BACKWARDS2)
9812 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
9814 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
9817 #ifdef __NR_exit_group
9818 /* new thread calls */
9819 case TARGET_NR_exit_group
:
9823 gdb_exit(cpu_env
, arg1
);
9824 ret
= get_errno(exit_group(arg1
));
9827 case TARGET_NR_setdomainname
:
9828 if (!(p
= lock_user_string(arg1
)))
9830 ret
= get_errno(setdomainname(p
, arg2
));
9831 unlock_user(p
, arg1
, 0);
9833 case TARGET_NR_uname
:
9834 /* no need to transcode because we use the linux syscall */
9836 struct new_utsname
* buf
;
9838 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
9840 ret
= get_errno(sys_uname(buf
));
9841 if (!is_error(ret
)) {
9842 /* Overwrite the native machine name with whatever is being
9844 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
9845 /* Allow the user to override the reported release. */
9846 if (qemu_uname_release
&& *qemu_uname_release
) {
9847 g_strlcpy(buf
->release
, qemu_uname_release
,
9848 sizeof(buf
->release
));
9851 unlock_user_struct(buf
, arg1
, 1);
9855 case TARGET_NR_modify_ldt
:
9856 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
9858 #if !defined(TARGET_X86_64)
9859 case TARGET_NR_vm86old
:
9861 case TARGET_NR_vm86
:
9862 ret
= do_vm86(cpu_env
, arg1
, arg2
);
9866 case TARGET_NR_adjtimex
:
9868 struct timex host_buf
;
9870 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
9873 ret
= get_errno(adjtimex(&host_buf
));
9874 if (!is_error(ret
)) {
9875 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
9881 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9882 case TARGET_NR_clock_adjtime
:
9884 struct timex htx
, *phtx
= &htx
;
9886 if (target_to_host_timex(phtx
, arg2
) != 0) {
9889 ret
= get_errno(clock_adjtime(arg1
, phtx
));
9890 if (!is_error(ret
) && phtx
) {
9891 if (host_to_target_timex(arg2
, phtx
) != 0) {
9898 #ifdef TARGET_NR_create_module
9899 case TARGET_NR_create_module
:
9901 case TARGET_NR_init_module
:
9902 case TARGET_NR_delete_module
:
9903 #ifdef TARGET_NR_get_kernel_syms
9904 case TARGET_NR_get_kernel_syms
:
9907 case TARGET_NR_quotactl
:
9909 case TARGET_NR_getpgid
:
9910 ret
= get_errno(getpgid(arg1
));
9912 case TARGET_NR_fchdir
:
9913 ret
= get_errno(fchdir(arg1
));
9915 #ifdef TARGET_NR_bdflush /* not on x86_64 */
9916 case TARGET_NR_bdflush
:
9919 #ifdef TARGET_NR_sysfs
9920 case TARGET_NR_sysfs
:
9923 case TARGET_NR_personality
:
9924 ret
= get_errno(personality(arg1
));
9926 #ifdef TARGET_NR_afs_syscall
9927 case TARGET_NR_afs_syscall
:
9930 #ifdef TARGET_NR__llseek /* Not on alpha */
9931 case TARGET_NR__llseek
:
9934 #if !defined(__NR_llseek)
9935 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
9937 ret
= get_errno(res
);
9942 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
9944 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
9950 #ifdef TARGET_NR_getdents
9951 case TARGET_NR_getdents
:
9952 #ifdef __NR_getdents
9953 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9955 struct target_dirent
*target_dirp
;
9956 struct linux_dirent
*dirp
;
9957 abi_long count
= arg3
;
9959 dirp
= g_try_malloc(count
);
9961 ret
= -TARGET_ENOMEM
;
9965 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
9966 if (!is_error(ret
)) {
9967 struct linux_dirent
*de
;
9968 struct target_dirent
*tde
;
9970 int reclen
, treclen
;
9971 int count1
, tnamelen
;
9975 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
9979 reclen
= de
->d_reclen
;
9980 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
9981 assert(tnamelen
>= 0);
9982 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
9983 assert(count1
+ treclen
<= count
);
9984 tde
->d_reclen
= tswap16(treclen
);
9985 tde
->d_ino
= tswapal(de
->d_ino
);
9986 tde
->d_off
= tswapal(de
->d_off
);
9987 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
9988 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
9990 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
9994 unlock_user(target_dirp
, arg2
, ret
);
10000 struct linux_dirent
*dirp
;
10001 abi_long count
= arg3
;
10003 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10005 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
10006 if (!is_error(ret
)) {
10007 struct linux_dirent
*de
;
10012 reclen
= de
->d_reclen
;
10015 de
->d_reclen
= tswap16(reclen
);
10016 tswapls(&de
->d_ino
);
10017 tswapls(&de
->d_off
);
10018 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
10022 unlock_user(dirp
, arg2
, ret
);
10026 /* Implement getdents in terms of getdents64 */
10028 struct linux_dirent64
*dirp
;
10029 abi_long count
= arg3
;
10031 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
10035 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
10036 if (!is_error(ret
)) {
10037 /* Convert the dirent64 structs to target dirent. We do this
10038 * in-place, since we can guarantee that a target_dirent is no
10039 * larger than a dirent64; however this means we have to be
10040 * careful to read everything before writing in the new format.
10042 struct linux_dirent64
*de
;
10043 struct target_dirent
*tde
;
10048 tde
= (struct target_dirent
*)dirp
;
10050 int namelen
, treclen
;
10051 int reclen
= de
->d_reclen
;
10052 uint64_t ino
= de
->d_ino
;
10053 int64_t off
= de
->d_off
;
10054 uint8_t type
= de
->d_type
;
10056 namelen
= strlen(de
->d_name
);
10057 treclen
= offsetof(struct target_dirent
, d_name
)
10059 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
10061 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
10062 tde
->d_ino
= tswapal(ino
);
10063 tde
->d_off
= tswapal(off
);
10064 tde
->d_reclen
= tswap16(treclen
);
10065 /* The target_dirent type is in what was formerly a padding
10066 * byte at the end of the structure:
10068 *(((char *)tde
) + treclen
- 1) = type
;
10070 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
10071 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
10077 unlock_user(dirp
, arg2
, ret
);
10081 #endif /* TARGET_NR_getdents */
10082 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10083 case TARGET_NR_getdents64
:
10085 struct linux_dirent64
*dirp
;
10086 abi_long count
= arg3
;
10087 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10089 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
10090 if (!is_error(ret
)) {
10091 struct linux_dirent64
*de
;
10096 reclen
= de
->d_reclen
;
10099 de
->d_reclen
= tswap16(reclen
);
10100 tswap64s((uint64_t *)&de
->d_ino
);
10101 tswap64s((uint64_t *)&de
->d_off
);
10102 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
10106 unlock_user(dirp
, arg2
, ret
);
10109 #endif /* TARGET_NR_getdents64 */
10110 #if defined(TARGET_NR__newselect)
10111 case TARGET_NR__newselect
:
10112 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
10115 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10116 # ifdef TARGET_NR_poll
10117 case TARGET_NR_poll
:
10119 # ifdef TARGET_NR_ppoll
10120 case TARGET_NR_ppoll
:
10123 struct target_pollfd
*target_pfd
;
10124 unsigned int nfds
= arg2
;
10125 struct pollfd
*pfd
;
10131 if (nfds
> (INT_MAX
/ sizeof(struct target_pollfd
))) {
10132 ret
= -TARGET_EINVAL
;
10136 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
10137 sizeof(struct target_pollfd
) * nfds
, 1);
10142 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
10143 for (i
= 0; i
< nfds
; i
++) {
10144 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
10145 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
10150 # ifdef TARGET_NR_ppoll
10151 case TARGET_NR_ppoll
:
10153 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
10154 target_sigset_t
*target_set
;
10155 sigset_t _set
, *set
= &_set
;
10158 if (target_to_host_timespec(timeout_ts
, arg3
)) {
10159 unlock_user(target_pfd
, arg1
, 0);
10167 if (arg5
!= sizeof(target_sigset_t
)) {
10168 unlock_user(target_pfd
, arg1
, 0);
10169 ret
= -TARGET_EINVAL
;
10173 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
10175 unlock_user(target_pfd
, arg1
, 0);
10178 target_to_host_sigset(set
, target_set
);
10183 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
10184 set
, SIGSET_T_SIZE
));
10186 if (!is_error(ret
) && arg3
) {
10187 host_to_target_timespec(arg3
, timeout_ts
);
10190 unlock_user(target_set
, arg4
, 0);
10195 # ifdef TARGET_NR_poll
10196 case TARGET_NR_poll
:
10198 struct timespec ts
, *pts
;
10201 /* Convert ms to secs, ns */
10202 ts
.tv_sec
= arg3
/ 1000;
10203 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
10206 /* -ve poll() timeout means "infinite" */
10209 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
10214 g_assert_not_reached();
10217 if (!is_error(ret
)) {
10218 for(i
= 0; i
< nfds
; i
++) {
10219 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
10222 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
10226 case TARGET_NR_flock
:
10227 /* NOTE: the flock constant seems to be the same for every
10229 ret
= get_errno(safe_flock(arg1
, arg2
));
10231 case TARGET_NR_readv
:
10233 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10235 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
10236 unlock_iovec(vec
, arg2
, arg3
, 1);
10238 ret
= -host_to_target_errno(errno
);
10242 case TARGET_NR_writev
:
10244 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10246 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
10247 unlock_iovec(vec
, arg2
, arg3
, 0);
10249 ret
= -host_to_target_errno(errno
);
10253 #if defined(TARGET_NR_preadv)
10254 case TARGET_NR_preadv
:
10256 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10258 ret
= get_errno(safe_preadv(arg1
, vec
, arg3
, arg4
, arg5
));
10259 unlock_iovec(vec
, arg2
, arg3
, 1);
10261 ret
= -host_to_target_errno(errno
);
10266 #if defined(TARGET_NR_pwritev)
10267 case TARGET_NR_pwritev
:
10269 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10271 ret
= get_errno(safe_pwritev(arg1
, vec
, arg3
, arg4
, arg5
));
10272 unlock_iovec(vec
, arg2
, arg3
, 0);
10274 ret
= -host_to_target_errno(errno
);
10279 case TARGET_NR_getsid
:
10280 ret
= get_errno(getsid(arg1
));
10282 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10283 case TARGET_NR_fdatasync
:
10284 ret
= get_errno(fdatasync(arg1
));
10287 #ifdef TARGET_NR__sysctl
10288 case TARGET_NR__sysctl
:
10289 /* We don't implement this, but ENOTDIR is always a safe
10291 ret
= -TARGET_ENOTDIR
;
10294 case TARGET_NR_sched_getaffinity
:
10296 unsigned int mask_size
;
10297 unsigned long *mask
;
10300 * sched_getaffinity needs multiples of ulong, so need to take
10301 * care of mismatches between target ulong and host ulong sizes.
10303 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10304 ret
= -TARGET_EINVAL
;
10307 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10309 mask
= alloca(mask_size
);
10310 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
10312 if (!is_error(ret
)) {
10314 /* More data returned than the caller's buffer will fit.
10315 * This only happens if sizeof(abi_long) < sizeof(long)
10316 * and the caller passed us a buffer holding an odd number
10317 * of abi_longs. If the host kernel is actually using the
10318 * extra 4 bytes then fail EINVAL; otherwise we can just
10319 * ignore them and only copy the interesting part.
10321 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
10322 if (numcpus
> arg2
* 8) {
10323 ret
= -TARGET_EINVAL
;
10329 if (copy_to_user(arg3
, mask
, ret
)) {
10335 case TARGET_NR_sched_setaffinity
:
10337 unsigned int mask_size
;
10338 unsigned long *mask
;
10341 * sched_setaffinity needs multiples of ulong, so need to take
10342 * care of mismatches between target ulong and host ulong sizes.
10344 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10345 ret
= -TARGET_EINVAL
;
10348 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10350 mask
= alloca(mask_size
);
10351 if (!lock_user_struct(VERIFY_READ
, p
, arg3
, 1)) {
10354 memcpy(mask
, p
, arg2
);
10355 unlock_user_struct(p
, arg2
, 0);
10357 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
10360 case TARGET_NR_sched_setparam
:
10362 struct sched_param
*target_schp
;
10363 struct sched_param schp
;
10366 return -TARGET_EINVAL
;
10368 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
10370 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10371 unlock_user_struct(target_schp
, arg2
, 0);
10372 ret
= get_errno(sched_setparam(arg1
, &schp
));
10375 case TARGET_NR_sched_getparam
:
10377 struct sched_param
*target_schp
;
10378 struct sched_param schp
;
10381 return -TARGET_EINVAL
;
10383 ret
= get_errno(sched_getparam(arg1
, &schp
));
10384 if (!is_error(ret
)) {
10385 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
10387 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
10388 unlock_user_struct(target_schp
, arg2
, 1);
10392 case TARGET_NR_sched_setscheduler
:
10394 struct sched_param
*target_schp
;
10395 struct sched_param schp
;
10397 return -TARGET_EINVAL
;
10399 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
10401 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10402 unlock_user_struct(target_schp
, arg3
, 0);
10403 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
10406 case TARGET_NR_sched_getscheduler
:
10407 ret
= get_errno(sched_getscheduler(arg1
));
10409 case TARGET_NR_sched_yield
:
10410 ret
= get_errno(sched_yield());
10412 case TARGET_NR_sched_get_priority_max
:
10413 ret
= get_errno(sched_get_priority_max(arg1
));
10415 case TARGET_NR_sched_get_priority_min
:
10416 ret
= get_errno(sched_get_priority_min(arg1
));
10418 case TARGET_NR_sched_rr_get_interval
:
10420 struct timespec ts
;
10421 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10422 if (!is_error(ret
)) {
10423 ret
= host_to_target_timespec(arg2
, &ts
);
10427 case TARGET_NR_nanosleep
:
10429 struct timespec req
, rem
;
10430 target_to_host_timespec(&req
, arg1
);
10431 ret
= get_errno(safe_nanosleep(&req
, &rem
));
10432 if (is_error(ret
) && arg2
) {
10433 host_to_target_timespec(arg2
, &rem
);
10437 #ifdef TARGET_NR_query_module
10438 case TARGET_NR_query_module
:
10439 goto unimplemented
;
10441 #ifdef TARGET_NR_nfsservctl
10442 case TARGET_NR_nfsservctl
:
10443 goto unimplemented
;
10445 case TARGET_NR_prctl
:
10447 case PR_GET_PDEATHSIG
:
10450 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
10451 if (!is_error(ret
) && arg2
10452 && put_user_ual(deathsig
, arg2
)) {
10460 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
10464 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10465 arg3
, arg4
, arg5
));
10466 unlock_user(name
, arg2
, 16);
10471 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
10475 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10476 arg3
, arg4
, arg5
));
10477 unlock_user(name
, arg2
, 0);
10482 /* Most prctl options have no pointer arguments */
10483 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
10487 #ifdef TARGET_NR_arch_prctl
10488 case TARGET_NR_arch_prctl
:
10489 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10490 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
10493 goto unimplemented
;
10496 #ifdef TARGET_NR_pread64
10497 case TARGET_NR_pread64
:
10498 if (regpairs_aligned(cpu_env
)) {
10502 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
10504 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10505 unlock_user(p
, arg2
, ret
);
10507 case TARGET_NR_pwrite64
:
10508 if (regpairs_aligned(cpu_env
)) {
10512 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
10514 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10515 unlock_user(p
, arg2
, 0);
10518 case TARGET_NR_getcwd
:
10519 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
10521 ret
= get_errno(sys_getcwd1(p
, arg2
));
10522 unlock_user(p
, arg1
, ret
);
10524 case TARGET_NR_capget
:
10525 case TARGET_NR_capset
:
10527 struct target_user_cap_header
*target_header
;
10528 struct target_user_cap_data
*target_data
= NULL
;
10529 struct __user_cap_header_struct header
;
10530 struct __user_cap_data_struct data
[2];
10531 struct __user_cap_data_struct
*dataptr
= NULL
;
10532 int i
, target_datalen
;
10533 int data_items
= 1;
10535 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
10538 header
.version
= tswap32(target_header
->version
);
10539 header
.pid
= tswap32(target_header
->pid
);
10541 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
10542 /* Version 2 and up takes pointer to two user_data structs */
10546 target_datalen
= sizeof(*target_data
) * data_items
;
10549 if (num
== TARGET_NR_capget
) {
10550 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
10552 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
10554 if (!target_data
) {
10555 unlock_user_struct(target_header
, arg1
, 0);
10559 if (num
== TARGET_NR_capset
) {
10560 for (i
= 0; i
< data_items
; i
++) {
10561 data
[i
].effective
= tswap32(target_data
[i
].effective
);
10562 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
10563 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
10570 if (num
== TARGET_NR_capget
) {
10571 ret
= get_errno(capget(&header
, dataptr
));
10573 ret
= get_errno(capset(&header
, dataptr
));
10576 /* The kernel always updates version for both capget and capset */
10577 target_header
->version
= tswap32(header
.version
);
10578 unlock_user_struct(target_header
, arg1
, 1);
10581 if (num
== TARGET_NR_capget
) {
10582 for (i
= 0; i
< data_items
; i
++) {
10583 target_data
[i
].effective
= tswap32(data
[i
].effective
);
10584 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
10585 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
10587 unlock_user(target_data
, arg2
, target_datalen
);
10589 unlock_user(target_data
, arg2
, 0);
10594 case TARGET_NR_sigaltstack
:
10595 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
10598 #ifdef CONFIG_SENDFILE
10599 case TARGET_NR_sendfile
:
10601 off_t
*offp
= NULL
;
10604 ret
= get_user_sal(off
, arg3
);
10605 if (is_error(ret
)) {
10610 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10611 if (!is_error(ret
) && arg3
) {
10612 abi_long ret2
= put_user_sal(off
, arg3
);
10613 if (is_error(ret2
)) {
10619 #ifdef TARGET_NR_sendfile64
10620 case TARGET_NR_sendfile64
:
10622 off_t
*offp
= NULL
;
10625 ret
= get_user_s64(off
, arg3
);
10626 if (is_error(ret
)) {
10631 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10632 if (!is_error(ret
) && arg3
) {
10633 abi_long ret2
= put_user_s64(off
, arg3
);
10634 if (is_error(ret2
)) {
10642 case TARGET_NR_sendfile
:
10643 #ifdef TARGET_NR_sendfile64
10644 case TARGET_NR_sendfile64
:
10646 goto unimplemented
;
10649 #ifdef TARGET_NR_getpmsg
10650 case TARGET_NR_getpmsg
:
10651 goto unimplemented
;
10653 #ifdef TARGET_NR_putpmsg
10654 case TARGET_NR_putpmsg
:
10655 goto unimplemented
;
10657 #ifdef TARGET_NR_vfork
10658 case TARGET_NR_vfork
:
10659 ret
= get_errno(do_fork(cpu_env
,
10660 CLONE_VFORK
| CLONE_VM
| TARGET_SIGCHLD
,
10664 #ifdef TARGET_NR_ugetrlimit
10665 case TARGET_NR_ugetrlimit
:
10667 struct rlimit rlim
;
10668 int resource
= target_to_host_resource(arg1
);
10669 ret
= get_errno(getrlimit(resource
, &rlim
));
10670 if (!is_error(ret
)) {
10671 struct target_rlimit
*target_rlim
;
10672 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
10674 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
10675 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
10676 unlock_user_struct(target_rlim
, arg2
, 1);
10681 #ifdef TARGET_NR_truncate64
10682 case TARGET_NR_truncate64
:
10683 if (!(p
= lock_user_string(arg1
)))
10685 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
10686 unlock_user(p
, arg1
, 0);
10689 #ifdef TARGET_NR_ftruncate64
10690 case TARGET_NR_ftruncate64
:
10691 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
10694 #ifdef TARGET_NR_stat64
10695 case TARGET_NR_stat64
:
10696 if (!(p
= lock_user_string(arg1
)))
10698 ret
= get_errno(stat(path(p
), &st
));
10699 unlock_user(p
, arg1
, 0);
10700 if (!is_error(ret
))
10701 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10704 #ifdef TARGET_NR_lstat64
10705 case TARGET_NR_lstat64
:
10706 if (!(p
= lock_user_string(arg1
)))
10708 ret
= get_errno(lstat(path(p
), &st
));
10709 unlock_user(p
, arg1
, 0);
10710 if (!is_error(ret
))
10711 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10714 #ifdef TARGET_NR_fstat64
10715 case TARGET_NR_fstat64
:
10716 ret
= get_errno(fstat(arg1
, &st
));
10717 if (!is_error(ret
))
10718 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10721 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10722 #ifdef TARGET_NR_fstatat64
10723 case TARGET_NR_fstatat64
:
10725 #ifdef TARGET_NR_newfstatat
10726 case TARGET_NR_newfstatat
:
10728 if (!(p
= lock_user_string(arg2
)))
10730 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
10731 if (!is_error(ret
))
10732 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
10735 #ifdef TARGET_NR_lchown
10736 case TARGET_NR_lchown
:
10737 if (!(p
= lock_user_string(arg1
)))
10739 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10740 unlock_user(p
, arg1
, 0);
10743 #ifdef TARGET_NR_getuid
10744 case TARGET_NR_getuid
:
10745 ret
= get_errno(high2lowuid(getuid()));
10748 #ifdef TARGET_NR_getgid
10749 case TARGET_NR_getgid
:
10750 ret
= get_errno(high2lowgid(getgid()));
10753 #ifdef TARGET_NR_geteuid
10754 case TARGET_NR_geteuid
:
10755 ret
= get_errno(high2lowuid(geteuid()));
10758 #ifdef TARGET_NR_getegid
10759 case TARGET_NR_getegid
:
10760 ret
= get_errno(high2lowgid(getegid()));
10763 case TARGET_NR_setreuid
:
10764 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
10766 case TARGET_NR_setregid
:
10767 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
10769 case TARGET_NR_getgroups
:
10771 int gidsetsize
= arg1
;
10772 target_id
*target_grouplist
;
10776 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10777 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10778 if (gidsetsize
== 0)
10780 if (!is_error(ret
)) {
10781 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
10782 if (!target_grouplist
)
10784 for(i
= 0;i
< ret
; i
++)
10785 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
10786 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
10790 case TARGET_NR_setgroups
:
10792 int gidsetsize
= arg1
;
10793 target_id
*target_grouplist
;
10794 gid_t
*grouplist
= NULL
;
10797 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10798 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
10799 if (!target_grouplist
) {
10800 ret
= -TARGET_EFAULT
;
10803 for (i
= 0; i
< gidsetsize
; i
++) {
10804 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
10806 unlock_user(target_grouplist
, arg2
, 0);
10808 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
10811 case TARGET_NR_fchown
:
10812 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
10814 #if defined(TARGET_NR_fchownat)
10815 case TARGET_NR_fchownat
:
10816 if (!(p
= lock_user_string(arg2
)))
10818 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
10819 low2highgid(arg4
), arg5
));
10820 unlock_user(p
, arg2
, 0);
10823 #ifdef TARGET_NR_setresuid
10824 case TARGET_NR_setresuid
:
10825 ret
= get_errno(sys_setresuid(low2highuid(arg1
),
10827 low2highuid(arg3
)));
10830 #ifdef TARGET_NR_getresuid
10831 case TARGET_NR_getresuid
:
10833 uid_t ruid
, euid
, suid
;
10834 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
10835 if (!is_error(ret
)) {
10836 if (put_user_id(high2lowuid(ruid
), arg1
)
10837 || put_user_id(high2lowuid(euid
), arg2
)
10838 || put_user_id(high2lowuid(suid
), arg3
))
10844 #ifdef TARGET_NR_getresgid
10845 case TARGET_NR_setresgid
:
10846 ret
= get_errno(sys_setresgid(low2highgid(arg1
),
10848 low2highgid(arg3
)));
10851 #ifdef TARGET_NR_getresgid
10852 case TARGET_NR_getresgid
:
10854 gid_t rgid
, egid
, sgid
;
10855 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
10856 if (!is_error(ret
)) {
10857 if (put_user_id(high2lowgid(rgid
), arg1
)
10858 || put_user_id(high2lowgid(egid
), arg2
)
10859 || put_user_id(high2lowgid(sgid
), arg3
))
10865 #ifdef TARGET_NR_chown
10866 case TARGET_NR_chown
:
10867 if (!(p
= lock_user_string(arg1
)))
10869 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10870 unlock_user(p
, arg1
, 0);
10873 case TARGET_NR_setuid
:
10874 ret
= get_errno(sys_setuid(low2highuid(arg1
)));
10876 case TARGET_NR_setgid
:
10877 ret
= get_errno(sys_setgid(low2highgid(arg1
)));
10879 case TARGET_NR_setfsuid
:
10880 ret
= get_errno(setfsuid(arg1
));
10882 case TARGET_NR_setfsgid
:
10883 ret
= get_errno(setfsgid(arg1
));
10886 #ifdef TARGET_NR_lchown32
10887 case TARGET_NR_lchown32
:
10888 if (!(p
= lock_user_string(arg1
)))
10890 ret
= get_errno(lchown(p
, arg2
, arg3
));
10891 unlock_user(p
, arg1
, 0);
10894 #ifdef TARGET_NR_getuid32
10895 case TARGET_NR_getuid32
:
10896 ret
= get_errno(getuid());
10900 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10901 /* Alpha specific */
10902 case TARGET_NR_getxuid
:
10906 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
10908 ret
= get_errno(getuid());
10911 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10912 /* Alpha specific */
10913 case TARGET_NR_getxgid
:
10917 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
10919 ret
= get_errno(getgid());
10922 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10923 /* Alpha specific */
10924 case TARGET_NR_osf_getsysinfo
:
10925 ret
= -TARGET_EOPNOTSUPP
;
10927 case TARGET_GSI_IEEE_FP_CONTROL
:
10929 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
10931 /* Copied from linux ieee_fpcr_to_swcr. */
10932 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
10933 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
10934 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
10935 | SWCR_TRAP_ENABLE_DZE
10936 | SWCR_TRAP_ENABLE_OVF
);
10937 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
10938 | SWCR_TRAP_ENABLE_INE
);
10939 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
10940 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
10942 if (put_user_u64 (swcr
, arg2
))
10948 /* case GSI_IEEE_STATE_AT_SIGNAL:
10949 -- Not implemented in linux kernel.
10951 -- Retrieves current unaligned access state; not much used.
10952 case GSI_PROC_TYPE:
10953 -- Retrieves implver information; surely not used.
10954 case GSI_GET_HWRPB:
10955 -- Grabs a copy of the HWRPB; surely not used.
10960 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10961 /* Alpha specific */
10962 case TARGET_NR_osf_setsysinfo
:
10963 ret
= -TARGET_EOPNOTSUPP
;
10965 case TARGET_SSI_IEEE_FP_CONTROL
:
10967 uint64_t swcr
, fpcr
, orig_fpcr
;
10969 if (get_user_u64 (swcr
, arg2
)) {
10972 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
10973 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
10975 /* Copied from linux ieee_swcr_to_fpcr. */
10976 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
10977 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
10978 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
10979 | SWCR_TRAP_ENABLE_DZE
10980 | SWCR_TRAP_ENABLE_OVF
)) << 48;
10981 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
10982 | SWCR_TRAP_ENABLE_INE
)) << 57;
10983 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
10984 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
10986 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
10991 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
10993 uint64_t exc
, fpcr
, orig_fpcr
;
10996 if (get_user_u64(exc
, arg2
)) {
11000 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11002 /* We only add to the exception status here. */
11003 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
11005 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11008 /* Old exceptions are not signaled. */
11009 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
11011 /* If any exceptions set by this call,
11012 and are unmasked, send a signal. */
11014 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
11015 si_code
= TARGET_FPE_FLTRES
;
11017 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
11018 si_code
= TARGET_FPE_FLTUND
;
11020 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
11021 si_code
= TARGET_FPE_FLTOVF
;
11023 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
11024 si_code
= TARGET_FPE_FLTDIV
;
11026 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
11027 si_code
= TARGET_FPE_FLTINV
;
11029 if (si_code
!= 0) {
11030 target_siginfo_t info
;
11031 info
.si_signo
= SIGFPE
;
11033 info
.si_code
= si_code
;
11034 info
._sifields
._sigfault
._addr
11035 = ((CPUArchState
*)cpu_env
)->pc
;
11036 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
11037 QEMU_SI_FAULT
, &info
);
11042 /* case SSI_NVPAIRS:
11043 -- Used with SSIN_UACPROC to enable unaligned accesses.
11044 case SSI_IEEE_STATE_AT_SIGNAL:
11045 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11046 -- Not implemented in linux kernel
11051 #ifdef TARGET_NR_osf_sigprocmask
11052 /* Alpha specific. */
11053 case TARGET_NR_osf_sigprocmask
:
11057 sigset_t set
, oldset
;
11060 case TARGET_SIG_BLOCK
:
11063 case TARGET_SIG_UNBLOCK
:
11066 case TARGET_SIG_SETMASK
:
11070 ret
= -TARGET_EINVAL
;
11074 target_to_host_old_sigset(&set
, &mask
);
11075 ret
= do_sigprocmask(how
, &set
, &oldset
);
11077 host_to_target_old_sigset(&mask
, &oldset
);
11084 #ifdef TARGET_NR_getgid32
11085 case TARGET_NR_getgid32
:
11086 ret
= get_errno(getgid());
11089 #ifdef TARGET_NR_geteuid32
11090 case TARGET_NR_geteuid32
:
11091 ret
= get_errno(geteuid());
11094 #ifdef TARGET_NR_getegid32
11095 case TARGET_NR_getegid32
:
11096 ret
= get_errno(getegid());
11099 #ifdef TARGET_NR_setreuid32
11100 case TARGET_NR_setreuid32
:
11101 ret
= get_errno(setreuid(arg1
, arg2
));
11104 #ifdef TARGET_NR_setregid32
11105 case TARGET_NR_setregid32
:
11106 ret
= get_errno(setregid(arg1
, arg2
));
11109 #ifdef TARGET_NR_getgroups32
11110 case TARGET_NR_getgroups32
:
11112 int gidsetsize
= arg1
;
11113 uint32_t *target_grouplist
;
11117 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11118 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11119 if (gidsetsize
== 0)
11121 if (!is_error(ret
)) {
11122 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
11123 if (!target_grouplist
) {
11124 ret
= -TARGET_EFAULT
;
11127 for(i
= 0;i
< ret
; i
++)
11128 target_grouplist
[i
] = tswap32(grouplist
[i
]);
11129 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
11134 #ifdef TARGET_NR_setgroups32
11135 case TARGET_NR_setgroups32
:
11137 int gidsetsize
= arg1
;
11138 uint32_t *target_grouplist
;
11142 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11143 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
11144 if (!target_grouplist
) {
11145 ret
= -TARGET_EFAULT
;
11148 for(i
= 0;i
< gidsetsize
; i
++)
11149 grouplist
[i
] = tswap32(target_grouplist
[i
]);
11150 unlock_user(target_grouplist
, arg2
, 0);
11151 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
11155 #ifdef TARGET_NR_fchown32
11156 case TARGET_NR_fchown32
:
11157 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
11160 #ifdef TARGET_NR_setresuid32
11161 case TARGET_NR_setresuid32
:
11162 ret
= get_errno(sys_setresuid(arg1
, arg2
, arg3
));
11165 #ifdef TARGET_NR_getresuid32
11166 case TARGET_NR_getresuid32
:
11168 uid_t ruid
, euid
, suid
;
11169 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11170 if (!is_error(ret
)) {
11171 if (put_user_u32(ruid
, arg1
)
11172 || put_user_u32(euid
, arg2
)
11173 || put_user_u32(suid
, arg3
))
11179 #ifdef TARGET_NR_setresgid32
11180 case TARGET_NR_setresgid32
:
11181 ret
= get_errno(sys_setresgid(arg1
, arg2
, arg3
));
11184 #ifdef TARGET_NR_getresgid32
11185 case TARGET_NR_getresgid32
:
11187 gid_t rgid
, egid
, sgid
;
11188 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11189 if (!is_error(ret
)) {
11190 if (put_user_u32(rgid
, arg1
)
11191 || put_user_u32(egid
, arg2
)
11192 || put_user_u32(sgid
, arg3
))
11198 #ifdef TARGET_NR_chown32
11199 case TARGET_NR_chown32
:
11200 if (!(p
= lock_user_string(arg1
)))
11202 ret
= get_errno(chown(p
, arg2
, arg3
));
11203 unlock_user(p
, arg1
, 0);
11206 #ifdef TARGET_NR_setuid32
11207 case TARGET_NR_setuid32
:
11208 ret
= get_errno(sys_setuid(arg1
));
11211 #ifdef TARGET_NR_setgid32
11212 case TARGET_NR_setgid32
:
11213 ret
= get_errno(sys_setgid(arg1
));
11216 #ifdef TARGET_NR_setfsuid32
11217 case TARGET_NR_setfsuid32
:
11218 ret
= get_errno(setfsuid(arg1
));
11221 #ifdef TARGET_NR_setfsgid32
11222 case TARGET_NR_setfsgid32
:
11223 ret
= get_errno(setfsgid(arg1
));
11227 case TARGET_NR_pivot_root
:
11228 goto unimplemented
;
11229 #ifdef TARGET_NR_mincore
11230 case TARGET_NR_mincore
:
11233 ret
= -TARGET_ENOMEM
;
11234 a
= lock_user(VERIFY_READ
, arg1
, arg2
, 0);
11238 ret
= -TARGET_EFAULT
;
11239 p
= lock_user_string(arg3
);
11243 ret
= get_errno(mincore(a
, arg2
, p
));
11244 unlock_user(p
, arg3
, ret
);
11246 unlock_user(a
, arg1
, 0);
11250 #ifdef TARGET_NR_arm_fadvise64_64
11251 case TARGET_NR_arm_fadvise64_64
:
11252 /* arm_fadvise64_64 looks like fadvise64_64 but
11253 * with different argument order: fd, advice, offset, len
11254 * rather than the usual fd, offset, len, advice.
11255 * Note that offset and len are both 64-bit so appear as
11256 * pairs of 32-bit registers.
11258 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
11259 target_offset64(arg5
, arg6
), arg2
);
11260 ret
= -host_to_target_errno(ret
);
11264 #if TARGET_ABI_BITS == 32
11266 #ifdef TARGET_NR_fadvise64_64
11267 case TARGET_NR_fadvise64_64
:
11268 #if defined(TARGET_PPC)
11269 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11277 /* 6 args: fd, offset (high, low), len (high, low), advice */
11278 if (regpairs_aligned(cpu_env
)) {
11279 /* offset is in (3,4), len in (5,6) and advice in 7 */
11287 ret
= -host_to_target_errno(posix_fadvise(arg1
,
11288 target_offset64(arg2
, arg3
),
11289 target_offset64(arg4
, arg5
),
11294 #ifdef TARGET_NR_fadvise64
11295 case TARGET_NR_fadvise64
:
11296 /* 5 args: fd, offset (high, low), len, advice */
11297 if (regpairs_aligned(cpu_env
)) {
11298 /* offset is in (3,4), len in 5 and advice in 6 */
11304 ret
= -host_to_target_errno(posix_fadvise(arg1
,
11305 target_offset64(arg2
, arg3
),
11310 #else /* not a 32-bit ABI */
11311 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11312 #ifdef TARGET_NR_fadvise64_64
11313 case TARGET_NR_fadvise64_64
:
11315 #ifdef TARGET_NR_fadvise64
11316 case TARGET_NR_fadvise64
:
11318 #ifdef TARGET_S390X
11320 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
11321 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
11322 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
11323 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
11327 ret
= -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
11330 #endif /* end of 64-bit ABI fadvise handling */
11332 #ifdef TARGET_NR_madvise
11333 case TARGET_NR_madvise
:
11334 /* A straight passthrough may not be safe because qemu sometimes
11335 turns private file-backed mappings into anonymous mappings.
11336 This will break MADV_DONTNEED.
11337 This is a hint, so ignoring and returning success is ok. */
11338 ret
= get_errno(0);
11341 #if TARGET_ABI_BITS == 32
11342 case TARGET_NR_fcntl64
:
11346 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
11347 to_flock64_fn
*copyto
= copy_to_user_flock64
;
11350 if (((CPUARMState
*)cpu_env
)->eabi
) {
11351 copyfrom
= copy_from_user_eabi_flock64
;
11352 copyto
= copy_to_user_eabi_flock64
;
11356 cmd
= target_to_host_fcntl_cmd(arg2
);
11357 if (cmd
== -TARGET_EINVAL
) {
11363 case TARGET_F_GETLK64
:
11364 ret
= copyfrom(&fl
, arg3
);
11368 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
11370 ret
= copyto(arg3
, &fl
);
11374 case TARGET_F_SETLK64
:
11375 case TARGET_F_SETLKW64
:
11376 ret
= copyfrom(&fl
, arg3
);
11380 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11383 ret
= do_fcntl(arg1
, arg2
, arg3
);
11389 #ifdef TARGET_NR_cacheflush
11390 case TARGET_NR_cacheflush
:
11391 /* self-modifying code is handled automatically, so nothing needed */
11395 #ifdef TARGET_NR_security
11396 case TARGET_NR_security
:
11397 goto unimplemented
;
11399 #ifdef TARGET_NR_getpagesize
11400 case TARGET_NR_getpagesize
:
11401 ret
= TARGET_PAGE_SIZE
;
11404 case TARGET_NR_gettid
:
11405 ret
= get_errno(gettid());
11407 #ifdef TARGET_NR_readahead
11408 case TARGET_NR_readahead
:
11409 #if TARGET_ABI_BITS == 32
11410 if (regpairs_aligned(cpu_env
)) {
11415 ret
= get_errno(readahead(arg1
, target_offset64(arg2
, arg3
) , arg4
));
11417 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
11422 #ifdef TARGET_NR_setxattr
11423 case TARGET_NR_listxattr
:
11424 case TARGET_NR_llistxattr
:
11428 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11430 ret
= -TARGET_EFAULT
;
11434 p
= lock_user_string(arg1
);
11436 if (num
== TARGET_NR_listxattr
) {
11437 ret
= get_errno(listxattr(p
, b
, arg3
));
11439 ret
= get_errno(llistxattr(p
, b
, arg3
));
11442 ret
= -TARGET_EFAULT
;
11444 unlock_user(p
, arg1
, 0);
11445 unlock_user(b
, arg2
, arg3
);
11448 case TARGET_NR_flistxattr
:
11452 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11454 ret
= -TARGET_EFAULT
;
11458 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
11459 unlock_user(b
, arg2
, arg3
);
11462 case TARGET_NR_setxattr
:
11463 case TARGET_NR_lsetxattr
:
11465 void *p
, *n
, *v
= 0;
11467 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11469 ret
= -TARGET_EFAULT
;
11473 p
= lock_user_string(arg1
);
11474 n
= lock_user_string(arg2
);
11476 if (num
== TARGET_NR_setxattr
) {
11477 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
11479 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
11482 ret
= -TARGET_EFAULT
;
11484 unlock_user(p
, arg1
, 0);
11485 unlock_user(n
, arg2
, 0);
11486 unlock_user(v
, arg3
, 0);
11489 case TARGET_NR_fsetxattr
:
11493 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11495 ret
= -TARGET_EFAULT
;
11499 n
= lock_user_string(arg2
);
11501 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
11503 ret
= -TARGET_EFAULT
;
11505 unlock_user(n
, arg2
, 0);
11506 unlock_user(v
, arg3
, 0);
11509 case TARGET_NR_getxattr
:
11510 case TARGET_NR_lgetxattr
:
11512 void *p
, *n
, *v
= 0;
11514 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11516 ret
= -TARGET_EFAULT
;
11520 p
= lock_user_string(arg1
);
11521 n
= lock_user_string(arg2
);
11523 if (num
== TARGET_NR_getxattr
) {
11524 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
11526 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
11529 ret
= -TARGET_EFAULT
;
11531 unlock_user(p
, arg1
, 0);
11532 unlock_user(n
, arg2
, 0);
11533 unlock_user(v
, arg3
, arg4
);
11536 case TARGET_NR_fgetxattr
:
11540 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11542 ret
= -TARGET_EFAULT
;
11546 n
= lock_user_string(arg2
);
11548 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
11550 ret
= -TARGET_EFAULT
;
11552 unlock_user(n
, arg2
, 0);
11553 unlock_user(v
, arg3
, arg4
);
11556 case TARGET_NR_removexattr
:
11557 case TARGET_NR_lremovexattr
:
11560 p
= lock_user_string(arg1
);
11561 n
= lock_user_string(arg2
);
11563 if (num
== TARGET_NR_removexattr
) {
11564 ret
= get_errno(removexattr(p
, n
));
11566 ret
= get_errno(lremovexattr(p
, n
));
11569 ret
= -TARGET_EFAULT
;
11571 unlock_user(p
, arg1
, 0);
11572 unlock_user(n
, arg2
, 0);
11575 case TARGET_NR_fremovexattr
:
11578 n
= lock_user_string(arg2
);
11580 ret
= get_errno(fremovexattr(arg1
, n
));
11582 ret
= -TARGET_EFAULT
;
11584 unlock_user(n
, arg2
, 0);
11588 #endif /* CONFIG_ATTR */
11589 #ifdef TARGET_NR_set_thread_area
11590 case TARGET_NR_set_thread_area
:
11591 #if defined(TARGET_MIPS)
11592 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
11595 #elif defined(TARGET_CRIS)
11597 ret
= -TARGET_EINVAL
;
11599 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
11603 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11604 ret
= do_set_thread_area(cpu_env
, arg1
);
11606 #elif defined(TARGET_M68K)
11608 TaskState
*ts
= cpu
->opaque
;
11609 ts
->tp_value
= arg1
;
11614 goto unimplemented_nowarn
;
11617 #ifdef TARGET_NR_get_thread_area
11618 case TARGET_NR_get_thread_area
:
11619 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11620 ret
= do_get_thread_area(cpu_env
, arg1
);
11622 #elif defined(TARGET_M68K)
11624 TaskState
*ts
= cpu
->opaque
;
11625 ret
= ts
->tp_value
;
11629 goto unimplemented_nowarn
;
11632 #ifdef TARGET_NR_getdomainname
11633 case TARGET_NR_getdomainname
:
11634 goto unimplemented_nowarn
;
11637 #ifdef TARGET_NR_clock_gettime
11638 case TARGET_NR_clock_gettime
:
11640 struct timespec ts
;
11641 ret
= get_errno(clock_gettime(arg1
, &ts
));
11642 if (!is_error(ret
)) {
11643 host_to_target_timespec(arg2
, &ts
);
11648 #ifdef TARGET_NR_clock_getres
11649 case TARGET_NR_clock_getres
:
11651 struct timespec ts
;
11652 ret
= get_errno(clock_getres(arg1
, &ts
));
11653 if (!is_error(ret
)) {
11654 host_to_target_timespec(arg2
, &ts
);
11659 #ifdef TARGET_NR_clock_nanosleep
11660 case TARGET_NR_clock_nanosleep
:
11662 struct timespec ts
;
11663 target_to_host_timespec(&ts
, arg3
);
11664 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
11665 &ts
, arg4
? &ts
: NULL
));
11667 host_to_target_timespec(arg4
, &ts
);
11669 #if defined(TARGET_PPC)
11670 /* clock_nanosleep is odd in that it returns positive errno values.
11671 * On PPC, CR0 bit 3 should be set in such a situation. */
11672 if (ret
&& ret
!= -TARGET_ERESTARTSYS
) {
11673 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
11680 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11681 case TARGET_NR_set_tid_address
:
11682 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
11686 case TARGET_NR_tkill
:
11687 ret
= get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
11690 case TARGET_NR_tgkill
:
11691 ret
= get_errno(safe_tgkill((int)arg1
, (int)arg2
,
11692 target_to_host_signal(arg3
)));
11695 #ifdef TARGET_NR_set_robust_list
11696 case TARGET_NR_set_robust_list
:
11697 case TARGET_NR_get_robust_list
:
11698 /* The ABI for supporting robust futexes has userspace pass
11699 * the kernel a pointer to a linked list which is updated by
11700 * userspace after the syscall; the list is walked by the kernel
11701 * when the thread exits. Since the linked list in QEMU guest
11702 * memory isn't a valid linked list for the host and we have
11703 * no way to reliably intercept the thread-death event, we can't
11704 * support these. Silently return ENOSYS so that guest userspace
11705 * falls back to a non-robust futex implementation (which should
11706 * be OK except in the corner case of the guest crashing while
11707 * holding a mutex that is shared with another process via
11710 goto unimplemented_nowarn
;
11713 #if defined(TARGET_NR_utimensat)
11714 case TARGET_NR_utimensat
:
11716 struct timespec
*tsp
, ts
[2];
11720 target_to_host_timespec(ts
, arg3
);
11721 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
11725 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
11727 if (!(p
= lock_user_string(arg2
))) {
11728 ret
= -TARGET_EFAULT
;
11731 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
11732 unlock_user(p
, arg2
, 0);
11737 case TARGET_NR_futex
:
11738 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11740 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11741 case TARGET_NR_inotify_init
:
11742 ret
= get_errno(sys_inotify_init());
11743 fd_trans_register(ret
, &target_inotify_trans
);
11746 #ifdef CONFIG_INOTIFY1
11747 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11748 case TARGET_NR_inotify_init1
:
11749 ret
= get_errno(sys_inotify_init1(target_to_host_bitmask(arg1
,
11750 fcntl_flags_tbl
)));
11751 fd_trans_register(ret
, &target_inotify_trans
);
11755 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11756 case TARGET_NR_inotify_add_watch
:
11757 p
= lock_user_string(arg2
);
11758 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
11759 unlock_user(p
, arg2
, 0);
11762 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11763 case TARGET_NR_inotify_rm_watch
:
11764 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
11768 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11769 case TARGET_NR_mq_open
:
11771 struct mq_attr posix_mq_attr
;
11772 struct mq_attr
*pposix_mq_attr
;
11775 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
11776 pposix_mq_attr
= NULL
;
11778 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
11781 pposix_mq_attr
= &posix_mq_attr
;
11783 p
= lock_user_string(arg1
- 1);
11787 ret
= get_errno(mq_open(p
, host_flags
, arg3
, pposix_mq_attr
));
11788 unlock_user (p
, arg1
, 0);
11792 case TARGET_NR_mq_unlink
:
11793 p
= lock_user_string(arg1
- 1);
11795 ret
= -TARGET_EFAULT
;
11798 ret
= get_errno(mq_unlink(p
));
11799 unlock_user (p
, arg1
, 0);
11802 case TARGET_NR_mq_timedsend
:
11804 struct timespec ts
;
11806 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11808 target_to_host_timespec(&ts
, arg5
);
11809 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
11810 host_to_target_timespec(arg5
, &ts
);
11812 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
11814 unlock_user (p
, arg2
, arg3
);
11818 case TARGET_NR_mq_timedreceive
:
11820 struct timespec ts
;
11823 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
11825 target_to_host_timespec(&ts
, arg5
);
11826 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11828 host_to_target_timespec(arg5
, &ts
);
11830 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
11833 unlock_user (p
, arg2
, arg3
);
11835 put_user_u32(prio
, arg4
);
11839 /* Not implemented for now... */
11840 /* case TARGET_NR_mq_notify: */
11843 case TARGET_NR_mq_getsetattr
:
11845 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
11848 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
11849 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
11852 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
11853 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
11860 #ifdef CONFIG_SPLICE
11861 #ifdef TARGET_NR_tee
11862 case TARGET_NR_tee
:
11864 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
11868 #ifdef TARGET_NR_splice
11869 case TARGET_NR_splice
:
11871 loff_t loff_in
, loff_out
;
11872 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
11874 if (get_user_u64(loff_in
, arg2
)) {
11877 ploff_in
= &loff_in
;
11880 if (get_user_u64(loff_out
, arg4
)) {
11883 ploff_out
= &loff_out
;
11885 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
11887 if (put_user_u64(loff_in
, arg2
)) {
11892 if (put_user_u64(loff_out
, arg4
)) {
11899 #ifdef TARGET_NR_vmsplice
11900 case TARGET_NR_vmsplice
:
11902 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
11904 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
11905 unlock_iovec(vec
, arg2
, arg3
, 0);
11907 ret
= -host_to_target_errno(errno
);
11912 #endif /* CONFIG_SPLICE */
11913 #ifdef CONFIG_EVENTFD
11914 #if defined(TARGET_NR_eventfd)
11915 case TARGET_NR_eventfd
:
11916 ret
= get_errno(eventfd(arg1
, 0));
11917 fd_trans_register(ret
, &target_eventfd_trans
);
11920 #if defined(TARGET_NR_eventfd2)
11921 case TARGET_NR_eventfd2
:
11923 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
11924 if (arg2
& TARGET_O_NONBLOCK
) {
11925 host_flags
|= O_NONBLOCK
;
11927 if (arg2
& TARGET_O_CLOEXEC
) {
11928 host_flags
|= O_CLOEXEC
;
11930 ret
= get_errno(eventfd(arg1
, host_flags
));
11931 fd_trans_register(ret
, &target_eventfd_trans
);
11935 #endif /* CONFIG_EVENTFD */
11936 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11937 case TARGET_NR_fallocate
:
11938 #if TARGET_ABI_BITS == 32
11939 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
11940 target_offset64(arg5
, arg6
)));
11942 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
11946 #if defined(CONFIG_SYNC_FILE_RANGE)
11947 #if defined(TARGET_NR_sync_file_range)
11948 case TARGET_NR_sync_file_range
:
11949 #if TARGET_ABI_BITS == 32
11950 #if defined(TARGET_MIPS)
11951 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11952 target_offset64(arg5
, arg6
), arg7
));
11954 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
11955 target_offset64(arg4
, arg5
), arg6
));
11956 #endif /* !TARGET_MIPS */
11958 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
11962 #if defined(TARGET_NR_sync_file_range2)
11963 case TARGET_NR_sync_file_range2
:
11964 /* This is like sync_file_range but the arguments are reordered */
11965 #if TARGET_ABI_BITS == 32
11966 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
11967 target_offset64(arg5
, arg6
), arg2
));
11969 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
11974 #if defined(TARGET_NR_signalfd4)
11975 case TARGET_NR_signalfd4
:
11976 ret
= do_signalfd4(arg1
, arg2
, arg4
);
11979 #if defined(TARGET_NR_signalfd)
11980 case TARGET_NR_signalfd
:
11981 ret
= do_signalfd4(arg1
, arg2
, 0);
11984 #if defined(CONFIG_EPOLL)
11985 #if defined(TARGET_NR_epoll_create)
11986 case TARGET_NR_epoll_create
:
11987 ret
= get_errno(epoll_create(arg1
));
11990 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11991 case TARGET_NR_epoll_create1
:
11992 ret
= get_errno(epoll_create1(arg1
));
11995 #if defined(TARGET_NR_epoll_ctl)
11996 case TARGET_NR_epoll_ctl
:
11998 struct epoll_event ep
;
11999 struct epoll_event
*epp
= 0;
12001 struct target_epoll_event
*target_ep
;
12002 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
12005 ep
.events
= tswap32(target_ep
->events
);
12006 /* The epoll_data_t union is just opaque data to the kernel,
12007 * so we transfer all 64 bits across and need not worry what
12008 * actual data type it is.
12010 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
12011 unlock_user_struct(target_ep
, arg4
, 0);
12014 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
12019 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12020 #if defined(TARGET_NR_epoll_wait)
12021 case TARGET_NR_epoll_wait
:
12023 #if defined(TARGET_NR_epoll_pwait)
12024 case TARGET_NR_epoll_pwait
:
12027 struct target_epoll_event
*target_ep
;
12028 struct epoll_event
*ep
;
12030 int maxevents
= arg3
;
12031 int timeout
= arg4
;
12033 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
12034 ret
= -TARGET_EINVAL
;
12038 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
12039 maxevents
* sizeof(struct target_epoll_event
), 1);
12044 ep
= g_try_new(struct epoll_event
, maxevents
);
12046 unlock_user(target_ep
, arg2
, 0);
12047 ret
= -TARGET_ENOMEM
;
12052 #if defined(TARGET_NR_epoll_pwait)
12053 case TARGET_NR_epoll_pwait
:
12055 target_sigset_t
*target_set
;
12056 sigset_t _set
, *set
= &_set
;
12059 if (arg6
!= sizeof(target_sigset_t
)) {
12060 ret
= -TARGET_EINVAL
;
12064 target_set
= lock_user(VERIFY_READ
, arg5
,
12065 sizeof(target_sigset_t
), 1);
12067 ret
= -TARGET_EFAULT
;
12070 target_to_host_sigset(set
, target_set
);
12071 unlock_user(target_set
, arg5
, 0);
12076 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12077 set
, SIGSET_T_SIZE
));
12081 #if defined(TARGET_NR_epoll_wait)
12082 case TARGET_NR_epoll_wait
:
12083 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12088 ret
= -TARGET_ENOSYS
;
12090 if (!is_error(ret
)) {
12092 for (i
= 0; i
< ret
; i
++) {
12093 target_ep
[i
].events
= tswap32(ep
[i
].events
);
12094 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
12096 unlock_user(target_ep
, arg2
,
12097 ret
* sizeof(struct target_epoll_event
));
12099 unlock_user(target_ep
, arg2
, 0);
12106 #ifdef TARGET_NR_prlimit64
12107 case TARGET_NR_prlimit64
:
12109 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12110 struct target_rlimit64
*target_rnew
, *target_rold
;
12111 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
12112 int resource
= target_to_host_resource(arg2
);
12114 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
12117 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
12118 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
12119 unlock_user_struct(target_rnew
, arg3
, 0);
12123 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
12124 if (!is_error(ret
) && arg4
) {
12125 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
12128 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
12129 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
12130 unlock_user_struct(target_rold
, arg4
, 1);
12135 #ifdef TARGET_NR_gethostname
12136 case TARGET_NR_gethostname
:
12138 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
12140 ret
= get_errno(gethostname(name
, arg2
));
12141 unlock_user(name
, arg1
, arg2
);
12143 ret
= -TARGET_EFAULT
;
12148 #ifdef TARGET_NR_atomic_cmpxchg_32
12149 case TARGET_NR_atomic_cmpxchg_32
:
12151 /* should use start_exclusive from main.c */
12152 abi_ulong mem_value
;
12153 if (get_user_u32(mem_value
, arg6
)) {
12154 target_siginfo_t info
;
12155 info
.si_signo
= SIGSEGV
;
12157 info
.si_code
= TARGET_SEGV_MAPERR
;
12158 info
._sifields
._sigfault
._addr
= arg6
;
12159 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
12160 QEMU_SI_FAULT
, &info
);
12164 if (mem_value
== arg2
)
12165 put_user_u32(arg1
, arg6
);
12170 #ifdef TARGET_NR_atomic_barrier
12171 case TARGET_NR_atomic_barrier
:
12173 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
12179 #ifdef TARGET_NR_timer_create
12180 case TARGET_NR_timer_create
:
12182 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12184 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
12187 int timer_index
= next_free_host_timer();
12189 if (timer_index
< 0) {
12190 ret
= -TARGET_EAGAIN
;
12192 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
12195 phost_sevp
= &host_sevp
;
12196 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
12202 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
12206 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
12215 #ifdef TARGET_NR_timer_settime
12216 case TARGET_NR_timer_settime
:
12218 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12219 * struct itimerspec * old_value */
12220 target_timer_t timerid
= get_timer_id(arg1
);
12224 } else if (arg3
== 0) {
12225 ret
= -TARGET_EINVAL
;
12227 timer_t htimer
= g_posix_timers
[timerid
];
12228 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12230 if (target_to_host_itimerspec(&hspec_new
, arg3
)) {
12234 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12235 if (arg4
&& host_to_target_itimerspec(arg4
, &hspec_old
)) {
12243 #ifdef TARGET_NR_timer_gettime
12244 case TARGET_NR_timer_gettime
:
12246 /* args: timer_t timerid, struct itimerspec *curr_value */
12247 target_timer_t timerid
= get_timer_id(arg1
);
12251 } else if (!arg2
) {
12252 ret
= -TARGET_EFAULT
;
12254 timer_t htimer
= g_posix_timers
[timerid
];
12255 struct itimerspec hspec
;
12256 ret
= get_errno(timer_gettime(htimer
, &hspec
));
12258 if (host_to_target_itimerspec(arg2
, &hspec
)) {
12259 ret
= -TARGET_EFAULT
;
12266 #ifdef TARGET_NR_timer_getoverrun
12267 case TARGET_NR_timer_getoverrun
:
12269 /* args: timer_t timerid */
12270 target_timer_t timerid
= get_timer_id(arg1
);
12275 timer_t htimer
= g_posix_timers
[timerid
];
12276 ret
= get_errno(timer_getoverrun(htimer
));
12278 fd_trans_unregister(ret
);
12283 #ifdef TARGET_NR_timer_delete
12284 case TARGET_NR_timer_delete
:
12286 /* args: timer_t timerid */
12287 target_timer_t timerid
= get_timer_id(arg1
);
12292 timer_t htimer
= g_posix_timers
[timerid
];
12293 ret
= get_errno(timer_delete(htimer
));
12294 g_posix_timers
[timerid
] = 0;
12300 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12301 case TARGET_NR_timerfd_create
:
12302 ret
= get_errno(timerfd_create(arg1
,
12303 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
12307 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12308 case TARGET_NR_timerfd_gettime
:
12310 struct itimerspec its_curr
;
12312 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
12314 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
12321 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12322 case TARGET_NR_timerfd_settime
:
12324 struct itimerspec its_new
, its_old
, *p_new
;
12327 if (target_to_host_itimerspec(&its_new
, arg3
)) {
12335 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
12337 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
12344 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12345 case TARGET_NR_ioprio_get
:
12346 ret
= get_errno(ioprio_get(arg1
, arg2
));
12350 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12351 case TARGET_NR_ioprio_set
:
12352 ret
= get_errno(ioprio_set(arg1
, arg2
, arg3
));
12356 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12357 case TARGET_NR_setns
:
12358 ret
= get_errno(setns(arg1
, arg2
));
12361 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12362 case TARGET_NR_unshare
:
12363 ret
= get_errno(unshare(arg1
));
12366 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12367 case TARGET_NR_kcmp
:
12368 ret
= get_errno(kcmp(arg1
, arg2
, arg3
, arg4
, arg5
));
12374 gemu_log("qemu: Unsupported syscall: %d\n", num
);
12375 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
12376 unimplemented_nowarn
:
12378 ret
= -TARGET_ENOSYS
;
12383 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
12386 print_syscall_ret(num
, ret
);
12387 trace_guest_user_syscall_ret(cpu
, num
, ret
);
12390 ret
= -TARGET_EFAULT
;