4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
36 #include <linux/capability.h>
38 #include <sys/timex.h>
40 int __clone2(int (*fn
)(void *), void *child_stack_base
,
41 size_t stack_size
, int flags
, void *arg
, ...);
43 #include <sys/socket.h>
47 #include <sys/times.h>
50 #include <sys/statfs.h>
53 #include <sys/sysinfo.h>
54 #include <sys/signalfd.h>
55 //#include <sys/user.h>
56 #include <netinet/ip.h>
57 #include <netinet/tcp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/errqueue.h>
62 #include <linux/random.h>
63 #include "qemu-common.h"
65 #include <sys/timerfd.h>
71 #include <sys/eventfd.h>
74 #include <sys/epoll.h>
77 #include "qemu/xattr.h"
79 #ifdef CONFIG_SENDFILE
80 #include <sys/sendfile.h>
83 #define termios host_termios
84 #define winsize host_winsize
85 #define termio host_termio
86 #define sgttyb host_sgttyb /* same as target */
87 #define tchars host_tchars /* same as target */
88 #define ltchars host_ltchars /* same as target */
90 #include <linux/termios.h>
91 #include <linux/unistd.h>
92 #include <linux/cdrom.h>
93 #include <linux/hdreg.h>
94 #include <linux/soundcard.h>
96 #include <linux/mtio.h>
98 #if defined(CONFIG_FIEMAP)
99 #include <linux/fiemap.h>
101 #include <linux/fb.h>
102 #include <linux/vt.h>
103 #include <linux/dm-ioctl.h>
104 #include <linux/reboot.h>
105 #include <linux/route.h>
106 #include <linux/filter.h>
107 #include <linux/blkpg.h>
108 #include <netpacket/packet.h>
109 #include <linux/netlink.h>
110 #ifdef CONFIG_RTNETLINK
111 #include <linux/rtnetlink.h>
112 #include <linux/if_bridge.h>
114 #include <linux/audit.h>
115 #include "linux_loop.h"
121 #define CLONE_IO 0x80000000 /* Clone io context */
124 /* We can't directly call the host clone syscall, because this will
125 * badly confuse libc (breaking mutexes, for example). So we must
126 * divide clone flags into:
127 * * flag combinations that look like pthread_create()
128 * * flag combinations that look like fork()
129 * * flags we can implement within QEMU itself
130 * * flags we can't support and will return an error for
132 /* For thread creation, all these flags must be present; for
133 * fork, none must be present.
135 #define CLONE_THREAD_FLAGS \
136 (CLONE_VM | CLONE_FS | CLONE_FILES | \
137 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
139 /* These flags are ignored:
140 * CLONE_DETACHED is now ignored by the kernel;
141 * CLONE_IO is just an optimisation hint to the I/O scheduler
143 #define CLONE_IGNORED_FLAGS \
144 (CLONE_DETACHED | CLONE_IO)
146 /* Flags for fork which we can implement within QEMU itself */
147 #define CLONE_OPTIONAL_FORK_FLAGS \
148 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
149 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
151 /* Flags for thread creation which we can implement within QEMU itself */
152 #define CLONE_OPTIONAL_THREAD_FLAGS \
153 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
154 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
156 #define CLONE_INVALID_FORK_FLAGS \
157 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
159 #define CLONE_INVALID_THREAD_FLAGS \
160 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
161 CLONE_IGNORED_FLAGS))
163 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
164 * have almost all been allocated. We cannot support any of
165 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
166 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
167 * The checks against the invalid thread masks above will catch these.
168 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
172 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
173 * once. This exercises the codepaths for restart.
175 //#define DEBUG_ERESTARTSYS
177 //#include <linux/msdos_fs.h>
178 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
179 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
189 #define _syscall0(type,name) \
190 static type name (void) \
192 return syscall(__NR_##name); \
195 #define _syscall1(type,name,type1,arg1) \
196 static type name (type1 arg1) \
198 return syscall(__NR_##name, arg1); \
201 #define _syscall2(type,name,type1,arg1,type2,arg2) \
202 static type name (type1 arg1,type2 arg2) \
204 return syscall(__NR_##name, arg1, arg2); \
207 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
208 static type name (type1 arg1,type2 arg2,type3 arg3) \
210 return syscall(__NR_##name, arg1, arg2, arg3); \
213 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
214 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
216 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
219 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
221 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
223 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
227 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
228 type5,arg5,type6,arg6) \
229 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
232 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
236 #define __NR_sys_uname __NR_uname
237 #define __NR_sys_getcwd1 __NR_getcwd
238 #define __NR_sys_getdents __NR_getdents
239 #define __NR_sys_getdents64 __NR_getdents64
240 #define __NR_sys_getpriority __NR_getpriority
241 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
242 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
243 #define __NR_sys_syslog __NR_syslog
244 #define __NR_sys_futex __NR_futex
245 #define __NR_sys_inotify_init __NR_inotify_init
246 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
247 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
249 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
251 #define __NR__llseek __NR_lseek
254 /* Newer kernel ports have llseek() instead of _llseek() */
255 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
256 #define TARGET_NR__llseek TARGET_NR_llseek
260 _syscall0(int, gettid
)
262 /* This is a replacement for the host gettid() and must return a host
264 static int gettid(void) {
268 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
269 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
271 #if !defined(__NR_getdents) || \
272 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
273 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
275 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
276 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
277 loff_t
*, res
, uint
, wh
);
279 _syscall3(int, sys_rt_sigqueueinfo
, pid_t
, pid
, int, sig
, siginfo_t
*, uinfo
)
280 _syscall4(int, sys_rt_tgsigqueueinfo
, pid_t
, pid
, pid_t
, tid
, int, sig
,
282 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
283 #ifdef __NR_exit_group
284 _syscall1(int,exit_group
,int,error_code
)
286 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
287 _syscall1(int,set_tid_address
,int *,tidptr
)
289 #if defined(TARGET_NR_futex) && defined(__NR_futex)
290 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
291 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
293 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
294 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
295 unsigned long *, user_mask_ptr
);
296 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
297 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
298 unsigned long *, user_mask_ptr
);
299 #define __NR_sys_getcpu __NR_getcpu
300 _syscall3(int, sys_getcpu
, unsigned *, cpu
, unsigned *, node
, void *, tcache
);
301 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
303 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
304 struct __user_cap_data_struct
*, data
);
305 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
306 struct __user_cap_data_struct
*, data
);
307 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
308 _syscall2(int, ioprio_get
, int, which
, int, who
)
310 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
311 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
313 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
314 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
317 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
318 _syscall5(int, kcmp
, pid_t
, pid1
, pid_t
, pid2
, int, type
,
319 unsigned long, idx1
, unsigned long, idx2
)
322 static bitmask_transtbl fcntl_flags_tbl
[] = {
323 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
324 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
325 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
326 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
327 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
328 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
329 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
330 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
331 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
332 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
333 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
334 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
335 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
336 #if defined(O_DIRECT)
337 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
339 #if defined(O_NOATIME)
340 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
342 #if defined(O_CLOEXEC)
343 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
346 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
348 #if defined(O_TMPFILE)
349 { TARGET_O_TMPFILE
, TARGET_O_TMPFILE
, O_TMPFILE
, O_TMPFILE
},
351 /* Don't terminate the list prematurely on 64-bit host+guest. */
352 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
353 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
360 QEMU_IFLA_BR_FORWARD_DELAY
,
361 QEMU_IFLA_BR_HELLO_TIME
,
362 QEMU_IFLA_BR_MAX_AGE
,
363 QEMU_IFLA_BR_AGEING_TIME
,
364 QEMU_IFLA_BR_STP_STATE
,
365 QEMU_IFLA_BR_PRIORITY
,
366 QEMU_IFLA_BR_VLAN_FILTERING
,
367 QEMU_IFLA_BR_VLAN_PROTOCOL
,
368 QEMU_IFLA_BR_GROUP_FWD_MASK
,
369 QEMU_IFLA_BR_ROOT_ID
,
370 QEMU_IFLA_BR_BRIDGE_ID
,
371 QEMU_IFLA_BR_ROOT_PORT
,
372 QEMU_IFLA_BR_ROOT_PATH_COST
,
373 QEMU_IFLA_BR_TOPOLOGY_CHANGE
,
374 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED
,
375 QEMU_IFLA_BR_HELLO_TIMER
,
376 QEMU_IFLA_BR_TCN_TIMER
,
377 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER
,
378 QEMU_IFLA_BR_GC_TIMER
,
379 QEMU_IFLA_BR_GROUP_ADDR
,
380 QEMU_IFLA_BR_FDB_FLUSH
,
381 QEMU_IFLA_BR_MCAST_ROUTER
,
382 QEMU_IFLA_BR_MCAST_SNOOPING
,
383 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR
,
384 QEMU_IFLA_BR_MCAST_QUERIER
,
385 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY
,
386 QEMU_IFLA_BR_MCAST_HASH_MAX
,
387 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT
,
388 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT
,
389 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL
,
390 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL
,
391 QEMU_IFLA_BR_MCAST_QUERIER_INTVL
,
392 QEMU_IFLA_BR_MCAST_QUERY_INTVL
,
393 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
,
394 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL
,
395 QEMU_IFLA_BR_NF_CALL_IPTABLES
,
396 QEMU_IFLA_BR_NF_CALL_IP6TABLES
,
397 QEMU_IFLA_BR_NF_CALL_ARPTABLES
,
398 QEMU_IFLA_BR_VLAN_DEFAULT_PVID
,
400 QEMU_IFLA_BR_VLAN_STATS_ENABLED
,
401 QEMU_IFLA_BR_MCAST_STATS_ENABLED
,
425 QEMU_IFLA_NET_NS_PID
,
428 QEMU_IFLA_VFINFO_LIST
,
436 QEMU_IFLA_PROMISCUITY
,
437 QEMU_IFLA_NUM_TX_QUEUES
,
438 QEMU_IFLA_NUM_RX_QUEUES
,
440 QEMU_IFLA_PHYS_PORT_ID
,
441 QEMU_IFLA_CARRIER_CHANGES
,
442 QEMU_IFLA_PHYS_SWITCH_ID
,
443 QEMU_IFLA_LINK_NETNSID
,
444 QEMU_IFLA_PHYS_PORT_NAME
,
445 QEMU_IFLA_PROTO_DOWN
,
446 QEMU_IFLA_GSO_MAX_SEGS
,
447 QEMU_IFLA_GSO_MAX_SIZE
,
454 QEMU_IFLA_BRPORT_UNSPEC
,
455 QEMU_IFLA_BRPORT_STATE
,
456 QEMU_IFLA_BRPORT_PRIORITY
,
457 QEMU_IFLA_BRPORT_COST
,
458 QEMU_IFLA_BRPORT_MODE
,
459 QEMU_IFLA_BRPORT_GUARD
,
460 QEMU_IFLA_BRPORT_PROTECT
,
461 QEMU_IFLA_BRPORT_FAST_LEAVE
,
462 QEMU_IFLA_BRPORT_LEARNING
,
463 QEMU_IFLA_BRPORT_UNICAST_FLOOD
,
464 QEMU_IFLA_BRPORT_PROXYARP
,
465 QEMU_IFLA_BRPORT_LEARNING_SYNC
,
466 QEMU_IFLA_BRPORT_PROXYARP_WIFI
,
467 QEMU_IFLA_BRPORT_ROOT_ID
,
468 QEMU_IFLA_BRPORT_BRIDGE_ID
,
469 QEMU_IFLA_BRPORT_DESIGNATED_PORT
,
470 QEMU_IFLA_BRPORT_DESIGNATED_COST
,
473 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
,
474 QEMU_IFLA_BRPORT_CONFIG_PENDING
,
475 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER
,
476 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER
,
477 QEMU_IFLA_BRPORT_HOLD_TIMER
,
478 QEMU_IFLA_BRPORT_FLUSH
,
479 QEMU_IFLA_BRPORT_MULTICAST_ROUTER
,
480 QEMU_IFLA_BRPORT_PAD
,
481 QEMU___IFLA_BRPORT_MAX
485 QEMU_IFLA_INFO_UNSPEC
,
488 QEMU_IFLA_INFO_XSTATS
,
489 QEMU_IFLA_INFO_SLAVE_KIND
,
490 QEMU_IFLA_INFO_SLAVE_DATA
,
491 QEMU___IFLA_INFO_MAX
,
495 QEMU_IFLA_INET_UNSPEC
,
497 QEMU___IFLA_INET_MAX
,
501 QEMU_IFLA_INET6_UNSPEC
,
502 QEMU_IFLA_INET6_FLAGS
,
503 QEMU_IFLA_INET6_CONF
,
504 QEMU_IFLA_INET6_STATS
,
505 QEMU_IFLA_INET6_MCAST
,
506 QEMU_IFLA_INET6_CACHEINFO
,
507 QEMU_IFLA_INET6_ICMP6STATS
,
508 QEMU_IFLA_INET6_TOKEN
,
509 QEMU_IFLA_INET6_ADDR_GEN_MODE
,
510 QEMU___IFLA_INET6_MAX
513 typedef abi_long (*TargetFdDataFunc
)(void *, size_t);
514 typedef abi_long (*TargetFdAddrFunc
)(void *, abi_ulong
, socklen_t
);
515 typedef struct TargetFdTrans
{
516 TargetFdDataFunc host_to_target_data
;
517 TargetFdDataFunc target_to_host_data
;
518 TargetFdAddrFunc target_to_host_addr
;
521 static TargetFdTrans
**target_fd_trans
;
523 static unsigned int target_fd_max
;
525 static TargetFdDataFunc
fd_trans_target_to_host_data(int fd
)
527 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
528 return target_fd_trans
[fd
]->target_to_host_data
;
533 static TargetFdDataFunc
fd_trans_host_to_target_data(int fd
)
535 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
536 return target_fd_trans
[fd
]->host_to_target_data
;
541 static TargetFdAddrFunc
fd_trans_target_to_host_addr(int fd
)
543 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
544 return target_fd_trans
[fd
]->target_to_host_addr
;
549 static void fd_trans_register(int fd
, TargetFdTrans
*trans
)
553 if (fd
>= target_fd_max
) {
554 oldmax
= target_fd_max
;
555 target_fd_max
= ((fd
>> 6) + 1) << 6; /* by slice of 64 entries */
556 target_fd_trans
= g_renew(TargetFdTrans
*,
557 target_fd_trans
, target_fd_max
);
558 memset((void *)(target_fd_trans
+ oldmax
), 0,
559 (target_fd_max
- oldmax
) * sizeof(TargetFdTrans
*));
561 target_fd_trans
[fd
] = trans
;
564 static void fd_trans_unregister(int fd
)
566 if (fd
>= 0 && fd
< target_fd_max
) {
567 target_fd_trans
[fd
] = NULL
;
571 static void fd_trans_dup(int oldfd
, int newfd
)
573 fd_trans_unregister(newfd
);
574 if (oldfd
< target_fd_max
&& target_fd_trans
[oldfd
]) {
575 fd_trans_register(newfd
, target_fd_trans
[oldfd
]);
579 static int sys_getcwd1(char *buf
, size_t size
)
581 if (getcwd(buf
, size
) == NULL
) {
582 /* getcwd() sets errno */
585 return strlen(buf
)+1;
588 #ifdef TARGET_NR_utimensat
589 #if defined(__NR_utimensat)
590 #define __NR_sys_utimensat __NR_utimensat
591 _syscall4(int,sys_utimensat
,int,dirfd
,const char *,pathname
,
592 const struct timespec
*,tsp
,int,flags
)
594 static int sys_utimensat(int dirfd
, const char *pathname
,
595 const struct timespec times
[2], int flags
)
601 #endif /* TARGET_NR_utimensat */
603 #ifdef TARGET_NR_renameat2
604 #if defined(__NR_renameat2)
605 #define __NR_sys_renameat2 __NR_renameat2
606 _syscall5(int, sys_renameat2
, int, oldfd
, const char *, old
, int, newfd
,
607 const char *, new, unsigned int, flags
)
609 static int sys_renameat2(int oldfd
, const char *old
,
610 int newfd
, const char *new, int flags
)
613 return renameat(oldfd
, old
, newfd
, new);
619 #endif /* TARGET_NR_renameat2 */
621 #ifdef CONFIG_INOTIFY
622 #include <sys/inotify.h>
624 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
625 static int sys_inotify_init(void)
627 return (inotify_init());
630 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
631 static int sys_inotify_add_watch(int fd
,const char *pathname
, int32_t mask
)
633 return (inotify_add_watch(fd
, pathname
, mask
));
636 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
637 static int sys_inotify_rm_watch(int fd
, int32_t wd
)
639 return (inotify_rm_watch(fd
, wd
));
642 #ifdef CONFIG_INOTIFY1
643 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
644 static int sys_inotify_init1(int flags
)
646 return (inotify_init1(flags
));
651 /* Userspace can usually survive runtime without inotify */
652 #undef TARGET_NR_inotify_init
653 #undef TARGET_NR_inotify_init1
654 #undef TARGET_NR_inotify_add_watch
655 #undef TARGET_NR_inotify_rm_watch
656 #endif /* CONFIG_INOTIFY */
658 #if defined(TARGET_NR_prlimit64)
659 #ifndef __NR_prlimit64
660 # define __NR_prlimit64 -1
662 #define __NR_sys_prlimit64 __NR_prlimit64
663 /* The glibc rlimit structure may not be that used by the underlying syscall */
664 struct host_rlimit64
{
668 _syscall4(int, sys_prlimit64
, pid_t
, pid
, int, resource
,
669 const struct host_rlimit64
*, new_limit
,
670 struct host_rlimit64
*, old_limit
)
674 #if defined(TARGET_NR_timer_create)
675 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
676 static timer_t g_posix_timers
[32] = { 0, } ;
678 static inline int next_free_host_timer(void)
681 /* FIXME: Does finding the next free slot require a lock? */
682 for (k
= 0; k
< ARRAY_SIZE(g_posix_timers
); k
++) {
683 if (g_posix_timers
[k
] == 0) {
684 g_posix_timers
[k
] = (timer_t
) 1;
692 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
694 static inline int regpairs_aligned(void *cpu_env
, int num
)
696 return ((((CPUARMState
*)cpu_env
)->eabi
) == 1) ;
698 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
699 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
700 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
701 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
702 * of registers which translates to the same as ARM/MIPS, because we start with
704 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
705 #elif defined(TARGET_SH4)
706 /* SH4 doesn't align register pairs, except for p{read,write}64 */
707 static inline int regpairs_aligned(void *cpu_env
, int num
)
710 case TARGET_NR_pread64
:
711 case TARGET_NR_pwrite64
:
719 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 0; }
722 #define ERRNO_TABLE_SIZE 1200
724 /* target_to_host_errno_table[] is initialized from
725 * host_to_target_errno_table[] in syscall_init(). */
726 static uint16_t target_to_host_errno_table
[ERRNO_TABLE_SIZE
] = {
730 * This list is the union of errno values overridden in asm-<arch>/errno.h
731 * minus the errnos that are not actually generic to all archs.
733 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
734 [EAGAIN
] = TARGET_EAGAIN
,
735 [EIDRM
] = TARGET_EIDRM
,
736 [ECHRNG
] = TARGET_ECHRNG
,
737 [EL2NSYNC
] = TARGET_EL2NSYNC
,
738 [EL3HLT
] = TARGET_EL3HLT
,
739 [EL3RST
] = TARGET_EL3RST
,
740 [ELNRNG
] = TARGET_ELNRNG
,
741 [EUNATCH
] = TARGET_EUNATCH
,
742 [ENOCSI
] = TARGET_ENOCSI
,
743 [EL2HLT
] = TARGET_EL2HLT
,
744 [EDEADLK
] = TARGET_EDEADLK
,
745 [ENOLCK
] = TARGET_ENOLCK
,
746 [EBADE
] = TARGET_EBADE
,
747 [EBADR
] = TARGET_EBADR
,
748 [EXFULL
] = TARGET_EXFULL
,
749 [ENOANO
] = TARGET_ENOANO
,
750 [EBADRQC
] = TARGET_EBADRQC
,
751 [EBADSLT
] = TARGET_EBADSLT
,
752 [EBFONT
] = TARGET_EBFONT
,
753 [ENOSTR
] = TARGET_ENOSTR
,
754 [ENODATA
] = TARGET_ENODATA
,
755 [ETIME
] = TARGET_ETIME
,
756 [ENOSR
] = TARGET_ENOSR
,
757 [ENONET
] = TARGET_ENONET
,
758 [ENOPKG
] = TARGET_ENOPKG
,
759 [EREMOTE
] = TARGET_EREMOTE
,
760 [ENOLINK
] = TARGET_ENOLINK
,
761 [EADV
] = TARGET_EADV
,
762 [ESRMNT
] = TARGET_ESRMNT
,
763 [ECOMM
] = TARGET_ECOMM
,
764 [EPROTO
] = TARGET_EPROTO
,
765 [EDOTDOT
] = TARGET_EDOTDOT
,
766 [EMULTIHOP
] = TARGET_EMULTIHOP
,
767 [EBADMSG
] = TARGET_EBADMSG
,
768 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
769 [EOVERFLOW
] = TARGET_EOVERFLOW
,
770 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
771 [EBADFD
] = TARGET_EBADFD
,
772 [EREMCHG
] = TARGET_EREMCHG
,
773 [ELIBACC
] = TARGET_ELIBACC
,
774 [ELIBBAD
] = TARGET_ELIBBAD
,
775 [ELIBSCN
] = TARGET_ELIBSCN
,
776 [ELIBMAX
] = TARGET_ELIBMAX
,
777 [ELIBEXEC
] = TARGET_ELIBEXEC
,
778 [EILSEQ
] = TARGET_EILSEQ
,
779 [ENOSYS
] = TARGET_ENOSYS
,
780 [ELOOP
] = TARGET_ELOOP
,
781 [ERESTART
] = TARGET_ERESTART
,
782 [ESTRPIPE
] = TARGET_ESTRPIPE
,
783 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
784 [EUSERS
] = TARGET_EUSERS
,
785 [ENOTSOCK
] = TARGET_ENOTSOCK
,
786 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
787 [EMSGSIZE
] = TARGET_EMSGSIZE
,
788 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
789 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
790 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
791 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
792 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
793 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
794 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
795 [EADDRINUSE
] = TARGET_EADDRINUSE
,
796 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
797 [ENETDOWN
] = TARGET_ENETDOWN
,
798 [ENETUNREACH
] = TARGET_ENETUNREACH
,
799 [ENETRESET
] = TARGET_ENETRESET
,
800 [ECONNABORTED
] = TARGET_ECONNABORTED
,
801 [ECONNRESET
] = TARGET_ECONNRESET
,
802 [ENOBUFS
] = TARGET_ENOBUFS
,
803 [EISCONN
] = TARGET_EISCONN
,
804 [ENOTCONN
] = TARGET_ENOTCONN
,
805 [EUCLEAN
] = TARGET_EUCLEAN
,
806 [ENOTNAM
] = TARGET_ENOTNAM
,
807 [ENAVAIL
] = TARGET_ENAVAIL
,
808 [EISNAM
] = TARGET_EISNAM
,
809 [EREMOTEIO
] = TARGET_EREMOTEIO
,
810 [EDQUOT
] = TARGET_EDQUOT
,
811 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
812 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
813 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
814 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
815 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
816 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
817 [EALREADY
] = TARGET_EALREADY
,
818 [EINPROGRESS
] = TARGET_EINPROGRESS
,
819 [ESTALE
] = TARGET_ESTALE
,
820 [ECANCELED
] = TARGET_ECANCELED
,
821 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
822 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
824 [ENOKEY
] = TARGET_ENOKEY
,
827 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
830 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
833 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
836 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
838 #ifdef ENOTRECOVERABLE
839 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
842 [ENOMSG
] = TARGET_ENOMSG
,
845 [ERFKILL
] = TARGET_ERFKILL
,
848 [EHWPOISON
] = TARGET_EHWPOISON
,
852 static inline int host_to_target_errno(int err
)
854 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
855 host_to_target_errno_table
[err
]) {
856 return host_to_target_errno_table
[err
];
861 static inline int target_to_host_errno(int err
)
863 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
864 target_to_host_errno_table
[err
]) {
865 return target_to_host_errno_table
[err
];
870 static inline abi_long
get_errno(abi_long ret
)
873 return -host_to_target_errno(errno
);
878 static inline int is_error(abi_long ret
)
880 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
883 const char *target_strerror(int err
)
885 if (err
== TARGET_ERESTARTSYS
) {
886 return "To be restarted";
888 if (err
== TARGET_QEMU_ESIGRETURN
) {
889 return "Successful exit from sigreturn";
892 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
895 return strerror(target_to_host_errno(err
));
898 #define safe_syscall0(type, name) \
899 static type safe_##name(void) \
901 return safe_syscall(__NR_##name); \
904 #define safe_syscall1(type, name, type1, arg1) \
905 static type safe_##name(type1 arg1) \
907 return safe_syscall(__NR_##name, arg1); \
910 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
911 static type safe_##name(type1 arg1, type2 arg2) \
913 return safe_syscall(__NR_##name, arg1, arg2); \
916 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
917 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
919 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
922 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
924 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
926 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
929 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
930 type4, arg4, type5, arg5) \
931 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
934 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
937 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
938 type4, arg4, type5, arg5, type6, arg6) \
939 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
940 type5 arg5, type6 arg6) \
942 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
945 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
946 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
947 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
948 int, flags
, mode_t
, mode
)
949 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
950 struct rusage
*, rusage
)
951 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
952 int, options
, struct rusage
*, rusage
)
953 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
954 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
955 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
956 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
957 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
959 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
960 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
962 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
963 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
964 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
965 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
966 safe_syscall2(int, tkill
, int, tid
, int, sig
)
967 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
968 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
969 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
970 safe_syscall5(ssize_t
, preadv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
971 unsigned long, pos_l
, unsigned long, pos_h
)
972 safe_syscall5(ssize_t
, pwritev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
973 unsigned long, pos_l
, unsigned long, pos_h
)
974 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
976 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
977 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
978 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
979 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
980 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
981 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
982 safe_syscall2(int, flock
, int, fd
, int, operation
)
983 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
984 const struct timespec
*, uts
, size_t, sigsetsize
)
985 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
987 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
988 struct timespec
*, rem
)
989 #ifdef TARGET_NR_clock_nanosleep
990 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
991 const struct timespec
*, req
, struct timespec
*, rem
)
994 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
996 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
997 long, msgtype
, int, flags
)
998 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
999 unsigned, nsops
, const struct timespec
*, timeout
)
1001 /* This host kernel architecture uses a single ipc syscall; fake up
1002 * wrappers for the sub-operations to hide this implementation detail.
1003 * Annoyingly we can't include linux/ipc.h to get the constant definitions
1004 * for the call parameter because some structs in there conflict with the
1005 * sys/ipc.h ones. So we just define them here, and rely on them being
1006 * the same for all host architectures.
1008 #define Q_SEMTIMEDOP 4
1011 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
1013 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
1014 void *, ptr
, long, fifth
)
1015 static int safe_msgsnd(int msgid
, const void *msgp
, size_t sz
, int flags
)
1017 return safe_ipc(Q_IPCCALL(0, Q_MSGSND
), msgid
, sz
, flags
, (void *)msgp
, 0);
1019 static int safe_msgrcv(int msgid
, void *msgp
, size_t sz
, long type
, int flags
)
1021 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV
), msgid
, sz
, flags
, msgp
, type
);
1023 static int safe_semtimedop(int semid
, struct sembuf
*tsops
, unsigned nsops
,
1024 const struct timespec
*timeout
)
1026 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP
), semid
, nsops
, 0, tsops
,
1030 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1031 safe_syscall5(int, mq_timedsend
, int, mqdes
, const char *, msg_ptr
,
1032 size_t, len
, unsigned, prio
, const struct timespec
*, timeout
)
1033 safe_syscall5(int, mq_timedreceive
, int, mqdes
, char *, msg_ptr
,
1034 size_t, len
, unsigned *, prio
, const struct timespec
*, timeout
)
1036 /* We do ioctl like this rather than via safe_syscall3 to preserve the
1037 * "third argument might be integer or pointer or not present" behaviour of
1038 * the libc function.
1040 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
1041 /* Similarly for fcntl. Note that callers must always:
1042 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
1043 * use the flock64 struct rather than unsuffixed flock
1044 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
1047 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
1049 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
1052 static inline int host_to_target_sock_type(int host_type
)
1056 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
1058 target_type
= TARGET_SOCK_DGRAM
;
1061 target_type
= TARGET_SOCK_STREAM
;
1064 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
1068 #if defined(SOCK_CLOEXEC)
1069 if (host_type
& SOCK_CLOEXEC
) {
1070 target_type
|= TARGET_SOCK_CLOEXEC
;
1074 #if defined(SOCK_NONBLOCK)
1075 if (host_type
& SOCK_NONBLOCK
) {
1076 target_type
|= TARGET_SOCK_NONBLOCK
;
1083 static abi_ulong target_brk
;
1084 static abi_ulong target_original_brk
;
1085 static abi_ulong brk_page
;
1087 void target_set_brk(abi_ulong new_brk
)
1089 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
1090 brk_page
= HOST_PAGE_ALIGN(target_brk
);
/* Flip the comment below to trace brk handling on stderr. */
//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)
1096 /* do_brk() must return target values and target errnos. */
1097 abi_long
do_brk(abi_ulong new_brk
)
1099 abi_long mapped_addr
;
1100 abi_ulong new_alloc_size
;
1102 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
1105 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
1108 if (new_brk
< target_original_brk
) {
1109 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
1114 /* If the new brk is less than the highest page reserved to the
1115 * target heap allocation, set it and we're almost done... */
1116 if (new_brk
<= brk_page
) {
1117 /* Heap contents are initialized to zero, as for anonymous
1119 if (new_brk
> target_brk
) {
1120 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
1122 target_brk
= new_brk
;
1123 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
1127 /* We need to allocate more memory after the brk... Note that
1128 * we don't use MAP_FIXED because that will map over the top of
1129 * any existing mapping (like the one with the host libc or qemu
1130 * itself); instead we treat "mapped but at wrong address" as
1131 * a failure and unmap again.
1133 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
1134 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
1135 PROT_READ
|PROT_WRITE
,
1136 MAP_ANON
|MAP_PRIVATE
, 0, 0));
1138 if (mapped_addr
== brk_page
) {
1139 /* Heap contents are initialized to zero, as for anonymous
1140 * mapped pages. Technically the new pages are already
1141 * initialized to zero since they *are* anonymous mapped
1142 * pages, however we have to take care with the contents that
1143 * come from the remaining part of the previous page: it may
1144 * contains garbage data due to a previous heap usage (grown
1145 * then shrunken). */
1146 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
1148 target_brk
= new_brk
;
1149 brk_page
= HOST_PAGE_ALIGN(target_brk
);
1150 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
1153 } else if (mapped_addr
!= -1) {
1154 /* Mapped but at wrong address, meaning there wasn't actually
1155 * enough space for this brk.
1157 target_munmap(mapped_addr
, new_alloc_size
);
1159 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
1162 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
1165 #if defined(TARGET_ALPHA)
1166 /* We (partially) emulate OSF/1 on Alpha, which requires we
1167 return a proper errno, not an unchanged brk value. */
1168 return -TARGET_ENOMEM
;
1170 /* For everything else, return the previous break. */
1174 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
1175 abi_ulong target_fds_addr
,
1179 abi_ulong b
, *target_fds
;
1181 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1182 if (!(target_fds
= lock_user(VERIFY_READ
,
1184 sizeof(abi_ulong
) * nw
,
1186 return -TARGET_EFAULT
;
1190 for (i
= 0; i
< nw
; i
++) {
1191 /* grab the abi_ulong */
1192 __get_user(b
, &target_fds
[i
]);
1193 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1194 /* check the bit inside the abi_ulong */
1201 unlock_user(target_fds
, target_fds_addr
, 0);
1206 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
1207 abi_ulong target_fds_addr
,
1210 if (target_fds_addr
) {
1211 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
1212 return -TARGET_EFAULT
;
1220 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
1226 abi_ulong
*target_fds
;
1228 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1229 if (!(target_fds
= lock_user(VERIFY_WRITE
,
1231 sizeof(abi_ulong
) * nw
,
1233 return -TARGET_EFAULT
;
1236 for (i
= 0; i
< nw
; i
++) {
1238 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1239 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
1242 __put_user(v
, &target_fds
[i
]);
1245 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
1250 #if defined(__alpha__)
1251 #define HOST_HZ 1024
1256 static inline abi_long
host_to_target_clock_t(long ticks
)
1258 #if HOST_HZ == TARGET_HZ
1261 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1265 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1266 const struct rusage
*rusage
)
1268 struct target_rusage
*target_rusage
;
1270 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1271 return -TARGET_EFAULT
;
1272 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1273 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1274 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1275 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1276 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1277 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1278 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1279 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1280 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1281 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1282 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1283 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1284 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1285 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1286 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1287 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1288 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1289 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1290 unlock_user_struct(target_rusage
, target_addr
, 1);
1295 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1297 abi_ulong target_rlim_swap
;
1300 target_rlim_swap
= tswapal(target_rlim
);
1301 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1302 return RLIM_INFINITY
;
1304 result
= target_rlim_swap
;
1305 if (target_rlim_swap
!= (rlim_t
)result
)
1306 return RLIM_INFINITY
;
1311 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1313 abi_ulong target_rlim_swap
;
1316 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1317 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1319 target_rlim_swap
= rlim
;
1320 result
= tswapal(target_rlim_swap
);
1325 static inline int target_to_host_resource(int code
)
1328 case TARGET_RLIMIT_AS
:
1330 case TARGET_RLIMIT_CORE
:
1332 case TARGET_RLIMIT_CPU
:
1334 case TARGET_RLIMIT_DATA
:
1336 case TARGET_RLIMIT_FSIZE
:
1337 return RLIMIT_FSIZE
;
1338 case TARGET_RLIMIT_LOCKS
:
1339 return RLIMIT_LOCKS
;
1340 case TARGET_RLIMIT_MEMLOCK
:
1341 return RLIMIT_MEMLOCK
;
1342 case TARGET_RLIMIT_MSGQUEUE
:
1343 return RLIMIT_MSGQUEUE
;
1344 case TARGET_RLIMIT_NICE
:
1346 case TARGET_RLIMIT_NOFILE
:
1347 return RLIMIT_NOFILE
;
1348 case TARGET_RLIMIT_NPROC
:
1349 return RLIMIT_NPROC
;
1350 case TARGET_RLIMIT_RSS
:
1352 case TARGET_RLIMIT_RTPRIO
:
1353 return RLIMIT_RTPRIO
;
1354 case TARGET_RLIMIT_SIGPENDING
:
1355 return RLIMIT_SIGPENDING
;
1356 case TARGET_RLIMIT_STACK
:
1357 return RLIMIT_STACK
;
1363 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1364 abi_ulong target_tv_addr
)
1366 struct target_timeval
*target_tv
;
1368 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
1369 return -TARGET_EFAULT
;
1371 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1372 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1374 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1379 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1380 const struct timeval
*tv
)
1382 struct target_timeval
*target_tv
;
1384 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
1385 return -TARGET_EFAULT
;
1387 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1388 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1390 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1395 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1396 abi_ulong target_tz_addr
)
1398 struct target_timezone
*target_tz
;
1400 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1401 return -TARGET_EFAULT
;
1404 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1405 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1407 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1412 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1415 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1416 abi_ulong target_mq_attr_addr
)
1418 struct target_mq_attr
*target_mq_attr
;
1420 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1421 target_mq_attr_addr
, 1))
1422 return -TARGET_EFAULT
;
1424 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1425 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1426 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1427 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1429 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1434 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1435 const struct mq_attr
*attr
)
1437 struct target_mq_attr
*target_mq_attr
;
1439 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1440 target_mq_attr_addr
, 0))
1441 return -TARGET_EFAULT
;
1443 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1444 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1445 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1446 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1448 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1454 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1455 /* do_select() must return target values and target errnos. */
1456 static abi_long
do_select(int n
,
1457 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1458 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1460 fd_set rfds
, wfds
, efds
;
1461 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1463 struct timespec ts
, *ts_ptr
;
1466 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1470 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1474 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1479 if (target_tv_addr
) {
1480 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1481 return -TARGET_EFAULT
;
1482 ts
.tv_sec
= tv
.tv_sec
;
1483 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1489 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1492 if (!is_error(ret
)) {
1493 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1494 return -TARGET_EFAULT
;
1495 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1496 return -TARGET_EFAULT
;
1497 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1498 return -TARGET_EFAULT
;
1500 if (target_tv_addr
) {
1501 tv
.tv_sec
= ts
.tv_sec
;
1502 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1503 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1504 return -TARGET_EFAULT
;
1512 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1513 static abi_long
do_old_select(abi_ulong arg1
)
1515 struct target_sel_arg_struct
*sel
;
1516 abi_ulong inp
, outp
, exp
, tvp
;
1519 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1)) {
1520 return -TARGET_EFAULT
;
1523 nsel
= tswapal(sel
->n
);
1524 inp
= tswapal(sel
->inp
);
1525 outp
= tswapal(sel
->outp
);
1526 exp
= tswapal(sel
->exp
);
1527 tvp
= tswapal(sel
->tvp
);
1529 unlock_user_struct(sel
, arg1
, 0);
1531 return do_select(nsel
, inp
, outp
, exp
, tvp
);
1536 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1539 return pipe2(host_pipe
, flags
);
1545 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1546 int flags
, int is_pipe2
)
1550 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1553 return get_errno(ret
);
1555 /* Several targets have special calling conventions for the original
1556 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1558 #if defined(TARGET_ALPHA)
1559 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1560 return host_pipe
[0];
1561 #elif defined(TARGET_MIPS)
1562 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1563 return host_pipe
[0];
1564 #elif defined(TARGET_SH4)
1565 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1566 return host_pipe
[0];
1567 #elif defined(TARGET_SPARC)
1568 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1569 return host_pipe
[0];
1573 if (put_user_s32(host_pipe
[0], pipedes
)
1574 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1575 return -TARGET_EFAULT
;
1576 return get_errno(ret
);
1579 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1580 abi_ulong target_addr
,
1583 struct target_ip_mreqn
*target_smreqn
;
1585 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1587 return -TARGET_EFAULT
;
1588 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1589 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1590 if (len
== sizeof(struct target_ip_mreqn
))
1591 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1592 unlock_user(target_smreqn
, target_addr
, 0);
1597 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1598 abi_ulong target_addr
,
1601 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1602 sa_family_t sa_family
;
1603 struct target_sockaddr
*target_saddr
;
1605 if (fd_trans_target_to_host_addr(fd
)) {
1606 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1609 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1611 return -TARGET_EFAULT
;
1613 sa_family
= tswap16(target_saddr
->sa_family
);
1615 /* Oops. The caller might send a incomplete sun_path; sun_path
1616 * must be terminated by \0 (see the manual page), but
1617 * unfortunately it is quite common to specify sockaddr_un
1618 * length as "strlen(x->sun_path)" while it should be
1619 * "strlen(...) + 1". We'll fix that here if needed.
1620 * Linux kernel has a similar feature.
1623 if (sa_family
== AF_UNIX
) {
1624 if (len
< unix_maxlen
&& len
> 0) {
1625 char *cp
= (char*)target_saddr
;
1627 if ( cp
[len
-1] && !cp
[len
] )
1630 if (len
> unix_maxlen
)
1634 memcpy(addr
, target_saddr
, len
);
1635 addr
->sa_family
= sa_family
;
1636 if (sa_family
== AF_NETLINK
) {
1637 struct sockaddr_nl
*nladdr
;
1639 nladdr
= (struct sockaddr_nl
*)addr
;
1640 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1641 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1642 } else if (sa_family
== AF_PACKET
) {
1643 struct target_sockaddr_ll
*lladdr
;
1645 lladdr
= (struct target_sockaddr_ll
*)addr
;
1646 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1647 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1649 unlock_user(target_saddr
, target_addr
, 0);
1654 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1655 struct sockaddr
*addr
,
1658 struct target_sockaddr
*target_saddr
;
1665 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1667 return -TARGET_EFAULT
;
1668 memcpy(target_saddr
, addr
, len
);
1669 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1670 sizeof(target_saddr
->sa_family
)) {
1671 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1673 if (addr
->sa_family
== AF_NETLINK
&& len
>= sizeof(struct sockaddr_nl
)) {
1674 struct sockaddr_nl
*target_nl
= (struct sockaddr_nl
*)target_saddr
;
1675 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1676 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1677 } else if (addr
->sa_family
== AF_PACKET
) {
1678 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1679 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1680 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1681 } else if (addr
->sa_family
== AF_INET6
&&
1682 len
>= sizeof(struct target_sockaddr_in6
)) {
1683 struct target_sockaddr_in6
*target_in6
=
1684 (struct target_sockaddr_in6
*)target_saddr
;
1685 target_in6
->sin6_scope_id
= tswap16(target_in6
->sin6_scope_id
);
1687 unlock_user(target_saddr
, target_addr
, len
);
1692 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1693 struct target_msghdr
*target_msgh
)
1695 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1696 abi_long msg_controllen
;
1697 abi_ulong target_cmsg_addr
;
1698 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1699 socklen_t space
= 0;
1701 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1702 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1704 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1705 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1706 target_cmsg_start
= target_cmsg
;
1708 return -TARGET_EFAULT
;
1710 while (cmsg
&& target_cmsg
) {
1711 void *data
= CMSG_DATA(cmsg
);
1712 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1714 int len
= tswapal(target_cmsg
->cmsg_len
)
1715 - sizeof(struct target_cmsghdr
);
1717 space
+= CMSG_SPACE(len
);
1718 if (space
> msgh
->msg_controllen
) {
1719 space
-= CMSG_SPACE(len
);
1720 /* This is a QEMU bug, since we allocated the payload
1721 * area ourselves (unlike overflow in host-to-target
1722 * conversion, which is just the guest giving us a buffer
1723 * that's too small). It can't happen for the payload types
1724 * we currently support; if it becomes an issue in future
1725 * we would need to improve our allocation strategy to
1726 * something more intelligent than "twice the size of the
1727 * target buffer we're reading from".
1729 gemu_log("Host cmsg overflow\n");
1733 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1734 cmsg
->cmsg_level
= SOL_SOCKET
;
1736 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1738 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1739 cmsg
->cmsg_len
= CMSG_LEN(len
);
1741 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1742 int *fd
= (int *)data
;
1743 int *target_fd
= (int *)target_data
;
1744 int i
, numfds
= len
/ sizeof(int);
1746 for (i
= 0; i
< numfds
; i
++) {
1747 __get_user(fd
[i
], target_fd
+ i
);
1749 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1750 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1751 struct ucred
*cred
= (struct ucred
*)data
;
1752 struct target_ucred
*target_cred
=
1753 (struct target_ucred
*)target_data
;
1755 __get_user(cred
->pid
, &target_cred
->pid
);
1756 __get_user(cred
->uid
, &target_cred
->uid
);
1757 __get_user(cred
->gid
, &target_cred
->gid
);
1759 gemu_log("Unsupported ancillary data: %d/%d\n",
1760 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1761 memcpy(data
, target_data
, len
);
1764 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1765 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1768 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1770 msgh
->msg_controllen
= space
;
1774 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1775 struct msghdr
*msgh
)
1777 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1778 abi_long msg_controllen
;
1779 abi_ulong target_cmsg_addr
;
1780 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1781 socklen_t space
= 0;
1783 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1784 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1786 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1787 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1788 target_cmsg_start
= target_cmsg
;
1790 return -TARGET_EFAULT
;
1792 while (cmsg
&& target_cmsg
) {
1793 void *data
= CMSG_DATA(cmsg
);
1794 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1796 int len
= cmsg
->cmsg_len
- sizeof(struct cmsghdr
);
1797 int tgt_len
, tgt_space
;
1799 /* We never copy a half-header but may copy half-data;
1800 * this is Linux's behaviour in put_cmsg(). Note that
1801 * truncation here is a guest problem (which we report
1802 * to the guest via the CTRUNC bit), unlike truncation
1803 * in target_to_host_cmsg, which is a QEMU bug.
1805 if (msg_controllen
< sizeof(struct target_cmsghdr
)) {
1806 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1810 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1811 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1813 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1815 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1817 /* Payload types which need a different size of payload on
1818 * the target must adjust tgt_len here.
1820 switch (cmsg
->cmsg_level
) {
1822 switch (cmsg
->cmsg_type
) {
1824 tgt_len
= sizeof(struct target_timeval
);
1834 if (msg_controllen
< TARGET_CMSG_LEN(tgt_len
)) {
1835 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1836 tgt_len
= msg_controllen
- sizeof(struct target_cmsghdr
);
1839 /* We must now copy-and-convert len bytes of payload
1840 * into tgt_len bytes of destination space. Bear in mind
1841 * that in both source and destination we may be dealing
1842 * with a truncated value!
1844 switch (cmsg
->cmsg_level
) {
1846 switch (cmsg
->cmsg_type
) {
1849 int *fd
= (int *)data
;
1850 int *target_fd
= (int *)target_data
;
1851 int i
, numfds
= tgt_len
/ sizeof(int);
1853 for (i
= 0; i
< numfds
; i
++) {
1854 __put_user(fd
[i
], target_fd
+ i
);
1860 struct timeval
*tv
= (struct timeval
*)data
;
1861 struct target_timeval
*target_tv
=
1862 (struct target_timeval
*)target_data
;
1864 if (len
!= sizeof(struct timeval
) ||
1865 tgt_len
!= sizeof(struct target_timeval
)) {
1869 /* copy struct timeval to target */
1870 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1871 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1874 case SCM_CREDENTIALS
:
1876 struct ucred
*cred
= (struct ucred
*)data
;
1877 struct target_ucred
*target_cred
=
1878 (struct target_ucred
*)target_data
;
1880 __put_user(cred
->pid
, &target_cred
->pid
);
1881 __put_user(cred
->uid
, &target_cred
->uid
);
1882 __put_user(cred
->gid
, &target_cred
->gid
);
1891 switch (cmsg
->cmsg_type
) {
1894 uint32_t *v
= (uint32_t *)data
;
1895 uint32_t *t_int
= (uint32_t *)target_data
;
1897 if (len
!= sizeof(uint32_t) ||
1898 tgt_len
!= sizeof(uint32_t)) {
1901 __put_user(*v
, t_int
);
1907 struct sock_extended_err ee
;
1908 struct sockaddr_in offender
;
1910 struct errhdr_t
*errh
= (struct errhdr_t
*)data
;
1911 struct errhdr_t
*target_errh
=
1912 (struct errhdr_t
*)target_data
;
1914 if (len
!= sizeof(struct errhdr_t
) ||
1915 tgt_len
!= sizeof(struct errhdr_t
)) {
1918 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1919 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1920 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1921 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1922 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1923 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1924 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1925 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1926 (void *) &errh
->offender
, sizeof(errh
->offender
));
1935 switch (cmsg
->cmsg_type
) {
1938 uint32_t *v
= (uint32_t *)data
;
1939 uint32_t *t_int
= (uint32_t *)target_data
;
1941 if (len
!= sizeof(uint32_t) ||
1942 tgt_len
!= sizeof(uint32_t)) {
1945 __put_user(*v
, t_int
);
1951 struct sock_extended_err ee
;
1952 struct sockaddr_in6 offender
;
1954 struct errhdr6_t
*errh
= (struct errhdr6_t
*)data
;
1955 struct errhdr6_t
*target_errh
=
1956 (struct errhdr6_t
*)target_data
;
1958 if (len
!= sizeof(struct errhdr6_t
) ||
1959 tgt_len
!= sizeof(struct errhdr6_t
)) {
1962 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1963 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1964 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1965 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1966 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1967 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1968 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1969 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1970 (void *) &errh
->offender
, sizeof(errh
->offender
));
1980 gemu_log("Unsupported ancillary data: %d/%d\n",
1981 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1982 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1983 if (tgt_len
> len
) {
1984 memset(target_data
+ len
, 0, tgt_len
- len
);
1988 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(tgt_len
));
1989 tgt_space
= TARGET_CMSG_SPACE(tgt_len
);
1990 if (msg_controllen
< tgt_space
) {
1991 tgt_space
= msg_controllen
;
1993 msg_controllen
-= tgt_space
;
1995 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1996 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1999 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
2001 target_msgh
->msg_controllen
= tswapal(space
);
/* Byte-swap every field of a netlink message header in place. */
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
    nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
    nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
}
2014 static abi_long
host_to_target_for_each_nlmsg(struct nlmsghdr
*nlh
,
2016 abi_long (*host_to_target_nlmsg
)
2017 (struct nlmsghdr
*))
2022 while (len
> sizeof(struct nlmsghdr
)) {
2024 nlmsg_len
= nlh
->nlmsg_len
;
2025 if (nlmsg_len
< sizeof(struct nlmsghdr
) ||
2030 switch (nlh
->nlmsg_type
) {
2032 tswap_nlmsghdr(nlh
);
2038 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
2039 e
->error
= tswap32(e
->error
);
2040 tswap_nlmsghdr(&e
->msg
);
2041 tswap_nlmsghdr(nlh
);
2045 ret
= host_to_target_nlmsg(nlh
);
2047 tswap_nlmsghdr(nlh
);
2052 tswap_nlmsghdr(nlh
);
2053 len
-= NLMSG_ALIGN(nlmsg_len
);
2054 nlh
= (struct nlmsghdr
*)(((char*)nlh
) + NLMSG_ALIGN(nlmsg_len
));
2059 static abi_long
target_to_host_for_each_nlmsg(struct nlmsghdr
*nlh
,
2061 abi_long (*target_to_host_nlmsg
)
2062 (struct nlmsghdr
*))
2066 while (len
> sizeof(struct nlmsghdr
)) {
2067 if (tswap32(nlh
->nlmsg_len
) < sizeof(struct nlmsghdr
) ||
2068 tswap32(nlh
->nlmsg_len
) > len
) {
2071 tswap_nlmsghdr(nlh
);
2072 switch (nlh
->nlmsg_type
) {
2079 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
2080 e
->error
= tswap32(e
->error
);
2081 tswap_nlmsghdr(&e
->msg
);
2085 ret
= target_to_host_nlmsg(nlh
);
2090 len
-= NLMSG_ALIGN(nlh
->nlmsg_len
);
2091 nlh
= (struct nlmsghdr
*)(((char *)nlh
) + NLMSG_ALIGN(nlh
->nlmsg_len
));
2096 #ifdef CONFIG_RTNETLINK
2097 static abi_long
host_to_target_for_each_nlattr(struct nlattr
*nlattr
,
2098 size_t len
, void *context
,
2099 abi_long (*host_to_target_nlattr
)
2103 unsigned short nla_len
;
2106 while (len
> sizeof(struct nlattr
)) {
2107 nla_len
= nlattr
->nla_len
;
2108 if (nla_len
< sizeof(struct nlattr
) ||
2112 ret
= host_to_target_nlattr(nlattr
, context
);
2113 nlattr
->nla_len
= tswap16(nlattr
->nla_len
);
2114 nlattr
->nla_type
= tswap16(nlattr
->nla_type
);
2118 len
-= NLA_ALIGN(nla_len
);
2119 nlattr
= (struct nlattr
*)(((char *)nlattr
) + NLA_ALIGN(nla_len
));
2124 static abi_long
host_to_target_for_each_rtattr(struct rtattr
*rtattr
,
2126 abi_long (*host_to_target_rtattr
)
2129 unsigned short rta_len
;
2132 while (len
> sizeof(struct rtattr
)) {
2133 rta_len
= rtattr
->rta_len
;
2134 if (rta_len
< sizeof(struct rtattr
) ||
2138 ret
= host_to_target_rtattr(rtattr
);
2139 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
2140 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
2144 len
-= RTA_ALIGN(rta_len
);
2145 rtattr
= (struct rtattr
*)(((char *)rtattr
) + RTA_ALIGN(rta_len
));
/* Pointer to the payload that follows a netlink attribute header. */
#define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
2152 static abi_long
host_to_target_data_bridge_nlattr(struct nlattr
*nlattr
,
2159 switch (nlattr
->nla_type
) {
2161 case QEMU_IFLA_BR_FDB_FLUSH
:
2164 case QEMU_IFLA_BR_GROUP_ADDR
:
2167 case QEMU_IFLA_BR_VLAN_FILTERING
:
2168 case QEMU_IFLA_BR_TOPOLOGY_CHANGE
:
2169 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED
:
2170 case QEMU_IFLA_BR_MCAST_ROUTER
:
2171 case QEMU_IFLA_BR_MCAST_SNOOPING
:
2172 case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR
:
2173 case QEMU_IFLA_BR_MCAST_QUERIER
:
2174 case QEMU_IFLA_BR_NF_CALL_IPTABLES
:
2175 case QEMU_IFLA_BR_NF_CALL_IP6TABLES
:
2176 case QEMU_IFLA_BR_NF_CALL_ARPTABLES
:
2179 case QEMU_IFLA_BR_PRIORITY
:
2180 case QEMU_IFLA_BR_VLAN_PROTOCOL
:
2181 case QEMU_IFLA_BR_GROUP_FWD_MASK
:
2182 case QEMU_IFLA_BR_ROOT_PORT
:
2183 case QEMU_IFLA_BR_VLAN_DEFAULT_PVID
:
2184 u16
= NLA_DATA(nlattr
);
2185 *u16
= tswap16(*u16
);
2188 case QEMU_IFLA_BR_FORWARD_DELAY
:
2189 case QEMU_IFLA_BR_HELLO_TIME
:
2190 case QEMU_IFLA_BR_MAX_AGE
:
2191 case QEMU_IFLA_BR_AGEING_TIME
:
2192 case QEMU_IFLA_BR_STP_STATE
:
2193 case QEMU_IFLA_BR_ROOT_PATH_COST
:
2194 case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY
:
2195 case QEMU_IFLA_BR_MCAST_HASH_MAX
:
2196 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT
:
2197 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT
:
2198 u32
= NLA_DATA(nlattr
);
2199 *u32
= tswap32(*u32
);
2202 case QEMU_IFLA_BR_HELLO_TIMER
:
2203 case QEMU_IFLA_BR_TCN_TIMER
:
2204 case QEMU_IFLA_BR_GC_TIMER
:
2205 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER
:
2206 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL
:
2207 case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL
:
2208 case QEMU_IFLA_BR_MCAST_QUERIER_INTVL
:
2209 case QEMU_IFLA_BR_MCAST_QUERY_INTVL
:
2210 case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
:
2211 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL
:
2212 u64
= NLA_DATA(nlattr
);
2213 *u64
= tswap64(*u64
);
2215 /* ifla_bridge_id: uin8_t[] */
2216 case QEMU_IFLA_BR_ROOT_ID
:
2217 case QEMU_IFLA_BR_BRIDGE_ID
:
2220 gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr
->nla_type
);
2226 static abi_long
host_to_target_slave_data_bridge_nlattr(struct nlattr
*nlattr
,
2233 switch (nlattr
->nla_type
) {
2235 case QEMU_IFLA_BRPORT_STATE
:
2236 case QEMU_IFLA_BRPORT_MODE
:
2237 case QEMU_IFLA_BRPORT_GUARD
:
2238 case QEMU_IFLA_BRPORT_PROTECT
:
2239 case QEMU_IFLA_BRPORT_FAST_LEAVE
:
2240 case QEMU_IFLA_BRPORT_LEARNING
:
2241 case QEMU_IFLA_BRPORT_UNICAST_FLOOD
:
2242 case QEMU_IFLA_BRPORT_PROXYARP
:
2243 case QEMU_IFLA_BRPORT_LEARNING_SYNC
:
2244 case QEMU_IFLA_BRPORT_PROXYARP_WIFI
:
2245 case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
:
2246 case QEMU_IFLA_BRPORT_CONFIG_PENDING
:
2247 case QEMU_IFLA_BRPORT_MULTICAST_ROUTER
:
2250 case QEMU_IFLA_BRPORT_PRIORITY
:
2251 case QEMU_IFLA_BRPORT_DESIGNATED_PORT
:
2252 case QEMU_IFLA_BRPORT_DESIGNATED_COST
:
2253 case QEMU_IFLA_BRPORT_ID
:
2254 case QEMU_IFLA_BRPORT_NO
:
2255 u16
= NLA_DATA(nlattr
);
2256 *u16
= tswap16(*u16
);
2259 case QEMU_IFLA_BRPORT_COST
:
2260 u32
= NLA_DATA(nlattr
);
2261 *u32
= tswap32(*u32
);
2264 case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER
:
2265 case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER
:
2266 case QEMU_IFLA_BRPORT_HOLD_TIMER
:
2267 u64
= NLA_DATA(nlattr
);
2268 *u64
= tswap64(*u64
);
2270 /* ifla_bridge_id: uint8_t[] */
2271 case QEMU_IFLA_BRPORT_ROOT_ID
:
2272 case QEMU_IFLA_BRPORT_BRIDGE_ID
:
2275 gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr
->nla_type
);
/* Context passed through the IFLA_LINKINFO attribute walk: remembers the
 * (unterminated) kind strings so the nested DATA attributes can be
 * dispatched to the right converter.
 * NOTE(review): field list reconstructed — only the struct header was
 * visible in the damaged source; verify against the original file.
 */
struct linkinfo_context {
    int len;
    char *name;
    int slave_len;
    char *slave_name;
};
2288 static abi_long
host_to_target_data_linkinfo_nlattr(struct nlattr
*nlattr
,
2291 struct linkinfo_context
*li_context
= context
;
2293 switch (nlattr
->nla_type
) {
2295 case QEMU_IFLA_INFO_KIND
:
2296 li_context
->name
= NLA_DATA(nlattr
);
2297 li_context
->len
= nlattr
->nla_len
- NLA_HDRLEN
;
2299 case QEMU_IFLA_INFO_SLAVE_KIND
:
2300 li_context
->slave_name
= NLA_DATA(nlattr
);
2301 li_context
->slave_len
= nlattr
->nla_len
- NLA_HDRLEN
;
2304 case QEMU_IFLA_INFO_XSTATS
:
2305 /* FIXME: only used by CAN */
2308 case QEMU_IFLA_INFO_DATA
:
2309 if (strncmp(li_context
->name
, "bridge",
2310 li_context
->len
) == 0) {
2311 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
2314 host_to_target_data_bridge_nlattr
);
2316 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context
->name
);
2319 case QEMU_IFLA_INFO_SLAVE_DATA
:
2320 if (strncmp(li_context
->slave_name
, "bridge",
2321 li_context
->slave_len
) == 0) {
2322 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
2325 host_to_target_slave_data_bridge_nlattr
);
2327 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2328 li_context
->slave_name
);
2332 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr
->nla_type
);
2339 static abi_long
host_to_target_data_inet_nlattr(struct nlattr
*nlattr
,
2345 switch (nlattr
->nla_type
) {
2346 case QEMU_IFLA_INET_CONF
:
2347 u32
= NLA_DATA(nlattr
);
2348 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u32
);
2350 u32
[i
] = tswap32(u32
[i
]);
2354 gemu_log("Unknown host AF_INET type: %d\n", nlattr
->nla_type
);
2359 static abi_long
host_to_target_data_inet6_nlattr(struct nlattr
*nlattr
,
2364 struct ifla_cacheinfo
*ci
;
2367 switch (nlattr
->nla_type
) {
2369 case QEMU_IFLA_INET6_TOKEN
:
2372 case QEMU_IFLA_INET6_ADDR_GEN_MODE
:
2375 case QEMU_IFLA_INET6_FLAGS
:
2376 u32
= NLA_DATA(nlattr
);
2377 *u32
= tswap32(*u32
);
2380 case QEMU_IFLA_INET6_CONF
:
2381 u32
= NLA_DATA(nlattr
);
2382 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u32
);
2384 u32
[i
] = tswap32(u32
[i
]);
2387 /* ifla_cacheinfo */
2388 case QEMU_IFLA_INET6_CACHEINFO
:
2389 ci
= NLA_DATA(nlattr
);
2390 ci
->max_reasm_len
= tswap32(ci
->max_reasm_len
);
2391 ci
->tstamp
= tswap32(ci
->tstamp
);
2392 ci
->reachable_time
= tswap32(ci
->reachable_time
);
2393 ci
->retrans_time
= tswap32(ci
->retrans_time
);
2396 case QEMU_IFLA_INET6_STATS
:
2397 case QEMU_IFLA_INET6_ICMP6STATS
:
2398 u64
= NLA_DATA(nlattr
);
2399 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u64
);
2401 u64
[i
] = tswap64(u64
[i
]);
2405 gemu_log("Unknown host AF_INET6 type: %d\n", nlattr
->nla_type
);
2410 static abi_long
host_to_target_data_spec_nlattr(struct nlattr
*nlattr
,
2413 switch (nlattr
->nla_type
) {
2415 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2417 host_to_target_data_inet_nlattr
);
2419 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2421 host_to_target_data_inet6_nlattr
);
2423 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr
->nla_type
);
2429 static abi_long
host_to_target_data_link_rtattr(struct rtattr
*rtattr
)
2432 struct rtnl_link_stats
*st
;
2433 struct rtnl_link_stats64
*st64
;
2434 struct rtnl_link_ifmap
*map
;
2435 struct linkinfo_context li_context
;
2437 switch (rtattr
->rta_type
) {
2439 case QEMU_IFLA_ADDRESS
:
2440 case QEMU_IFLA_BROADCAST
:
2442 case QEMU_IFLA_IFNAME
:
2443 case QEMU_IFLA_QDISC
:
2446 case QEMU_IFLA_OPERSTATE
:
2447 case QEMU_IFLA_LINKMODE
:
2448 case QEMU_IFLA_CARRIER
:
2449 case QEMU_IFLA_PROTO_DOWN
:
2453 case QEMU_IFLA_LINK
:
2454 case QEMU_IFLA_WEIGHT
:
2455 case QEMU_IFLA_TXQLEN
:
2456 case QEMU_IFLA_CARRIER_CHANGES
:
2457 case QEMU_IFLA_NUM_RX_QUEUES
:
2458 case QEMU_IFLA_NUM_TX_QUEUES
:
2459 case QEMU_IFLA_PROMISCUITY
:
2460 case QEMU_IFLA_EXT_MASK
:
2461 case QEMU_IFLA_LINK_NETNSID
:
2462 case QEMU_IFLA_GROUP
:
2463 case QEMU_IFLA_MASTER
:
2464 case QEMU_IFLA_NUM_VF
:
2465 case QEMU_IFLA_GSO_MAX_SEGS
:
2466 case QEMU_IFLA_GSO_MAX_SIZE
:
2467 u32
= RTA_DATA(rtattr
);
2468 *u32
= tswap32(*u32
);
2470 /* struct rtnl_link_stats */
2471 case QEMU_IFLA_STATS
:
2472 st
= RTA_DATA(rtattr
);
2473 st
->rx_packets
= tswap32(st
->rx_packets
);
2474 st
->tx_packets
= tswap32(st
->tx_packets
);
2475 st
->rx_bytes
= tswap32(st
->rx_bytes
);
2476 st
->tx_bytes
= tswap32(st
->tx_bytes
);
2477 st
->rx_errors
= tswap32(st
->rx_errors
);
2478 st
->tx_errors
= tswap32(st
->tx_errors
);
2479 st
->rx_dropped
= tswap32(st
->rx_dropped
);
2480 st
->tx_dropped
= tswap32(st
->tx_dropped
);
2481 st
->multicast
= tswap32(st
->multicast
);
2482 st
->collisions
= tswap32(st
->collisions
);
2484 /* detailed rx_errors: */
2485 st
->rx_length_errors
= tswap32(st
->rx_length_errors
);
2486 st
->rx_over_errors
= tswap32(st
->rx_over_errors
);
2487 st
->rx_crc_errors
= tswap32(st
->rx_crc_errors
);
2488 st
->rx_frame_errors
= tswap32(st
->rx_frame_errors
);
2489 st
->rx_fifo_errors
= tswap32(st
->rx_fifo_errors
);
2490 st
->rx_missed_errors
= tswap32(st
->rx_missed_errors
);
2492 /* detailed tx_errors */
2493 st
->tx_aborted_errors
= tswap32(st
->tx_aborted_errors
);
2494 st
->tx_carrier_errors
= tswap32(st
->tx_carrier_errors
);
2495 st
->tx_fifo_errors
= tswap32(st
->tx_fifo_errors
);
2496 st
->tx_heartbeat_errors
= tswap32(st
->tx_heartbeat_errors
);
2497 st
->tx_window_errors
= tswap32(st
->tx_window_errors
);
2500 st
->rx_compressed
= tswap32(st
->rx_compressed
);
2501 st
->tx_compressed
= tswap32(st
->tx_compressed
);
2503 /* struct rtnl_link_stats64 */
2504 case QEMU_IFLA_STATS64
:
2505 st64
= RTA_DATA(rtattr
);
2506 st64
->rx_packets
= tswap64(st64
->rx_packets
);
2507 st64
->tx_packets
= tswap64(st64
->tx_packets
);
2508 st64
->rx_bytes
= tswap64(st64
->rx_bytes
);
2509 st64
->tx_bytes
= tswap64(st64
->tx_bytes
);
2510 st64
->rx_errors
= tswap64(st64
->rx_errors
);
2511 st64
->tx_errors
= tswap64(st64
->tx_errors
);
2512 st64
->rx_dropped
= tswap64(st64
->rx_dropped
);
2513 st64
->tx_dropped
= tswap64(st64
->tx_dropped
);
2514 st64
->multicast
= tswap64(st64
->multicast
);
2515 st64
->collisions
= tswap64(st64
->collisions
);
2517 /* detailed rx_errors: */
2518 st64
->rx_length_errors
= tswap64(st64
->rx_length_errors
);
2519 st64
->rx_over_errors
= tswap64(st64
->rx_over_errors
);
2520 st64
->rx_crc_errors
= tswap64(st64
->rx_crc_errors
);
2521 st64
->rx_frame_errors
= tswap64(st64
->rx_frame_errors
);
2522 st64
->rx_fifo_errors
= tswap64(st64
->rx_fifo_errors
);
2523 st64
->rx_missed_errors
= tswap64(st64
->rx_missed_errors
);
2525 /* detailed tx_errors */
2526 st64
->tx_aborted_errors
= tswap64(st64
->tx_aborted_errors
);
2527 st64
->tx_carrier_errors
= tswap64(st64
->tx_carrier_errors
);
2528 st64
->tx_fifo_errors
= tswap64(st64
->tx_fifo_errors
);
2529 st64
->tx_heartbeat_errors
= tswap64(st64
->tx_heartbeat_errors
);
2530 st64
->tx_window_errors
= tswap64(st64
->tx_window_errors
);
2533 st64
->rx_compressed
= tswap64(st64
->rx_compressed
);
2534 st64
->tx_compressed
= tswap64(st64
->tx_compressed
);
2536 /* struct rtnl_link_ifmap */
2538 map
= RTA_DATA(rtattr
);
2539 map
->mem_start
= tswap64(map
->mem_start
);
2540 map
->mem_end
= tswap64(map
->mem_end
);
2541 map
->base_addr
= tswap64(map
->base_addr
);
2542 map
->irq
= tswap16(map
->irq
);
2545 case QEMU_IFLA_LINKINFO
:
2546 memset(&li_context
, 0, sizeof(li_context
));
2547 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2549 host_to_target_data_linkinfo_nlattr
);
2550 case QEMU_IFLA_AF_SPEC
:
2551 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2553 host_to_target_data_spec_nlattr
);
2555 gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr
->rta_type
);
2561 static abi_long
host_to_target_data_addr_rtattr(struct rtattr
*rtattr
)
2564 struct ifa_cacheinfo
*ci
;
2566 switch (rtattr
->rta_type
) {
2567 /* binary: depends on family type */
2577 u32
= RTA_DATA(rtattr
);
2578 *u32
= tswap32(*u32
);
2580 /* struct ifa_cacheinfo */
2582 ci
= RTA_DATA(rtattr
);
2583 ci
->ifa_prefered
= tswap32(ci
->ifa_prefered
);
2584 ci
->ifa_valid
= tswap32(ci
->ifa_valid
);
2585 ci
->cstamp
= tswap32(ci
->cstamp
);
2586 ci
->tstamp
= tswap32(ci
->tstamp
);
2589 gemu_log("Unknown host IFA type: %d\n", rtattr
->rta_type
);
2595 static abi_long
host_to_target_data_route_rtattr(struct rtattr
*rtattr
)
2598 switch (rtattr
->rta_type
) {
2599 /* binary: depends on family type */
2608 u32
= RTA_DATA(rtattr
);
2609 *u32
= tswap32(*u32
);
2612 gemu_log("Unknown host RTA type: %d\n", rtattr
->rta_type
);
2618 static abi_long
host_to_target_link_rtattr(struct rtattr
*rtattr
,
2619 uint32_t rtattr_len
)
2621 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2622 host_to_target_data_link_rtattr
);
2625 static abi_long
host_to_target_addr_rtattr(struct rtattr
*rtattr
,
2626 uint32_t rtattr_len
)
2628 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2629 host_to_target_data_addr_rtattr
);
2632 static abi_long
host_to_target_route_rtattr(struct rtattr
*rtattr
,
2633 uint32_t rtattr_len
)
2635 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2636 host_to_target_data_route_rtattr
);
2639 static abi_long
host_to_target_data_route(struct nlmsghdr
*nlh
)
2642 struct ifinfomsg
*ifi
;
2643 struct ifaddrmsg
*ifa
;
2646 nlmsg_len
= nlh
->nlmsg_len
;
2647 switch (nlh
->nlmsg_type
) {
2651 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2652 ifi
= NLMSG_DATA(nlh
);
2653 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2654 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2655 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2656 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2657 host_to_target_link_rtattr(IFLA_RTA(ifi
),
2658 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifi
)));
2664 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2665 ifa
= NLMSG_DATA(nlh
);
2666 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2667 host_to_target_addr_rtattr(IFA_RTA(ifa
),
2668 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifa
)));
2674 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2675 rtm
= NLMSG_DATA(nlh
);
2676 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2677 host_to_target_route_rtattr(RTM_RTA(rtm
),
2678 nlmsg_len
- NLMSG_LENGTH(sizeof(*rtm
)));
2682 return -TARGET_EINVAL
;
2687 static inline abi_long
host_to_target_nlmsg_route(struct nlmsghdr
*nlh
,
2690 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_route
);
2693 static abi_long
target_to_host_for_each_rtattr(struct rtattr
*rtattr
,
2695 abi_long (*target_to_host_rtattr
)
2700 while (len
>= sizeof(struct rtattr
)) {
2701 if (tswap16(rtattr
->rta_len
) < sizeof(struct rtattr
) ||
2702 tswap16(rtattr
->rta_len
) > len
) {
2705 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
2706 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
2707 ret
= target_to_host_rtattr(rtattr
);
2711 len
-= RTA_ALIGN(rtattr
->rta_len
);
2712 rtattr
= (struct rtattr
*)(((char *)rtattr
) +
2713 RTA_ALIGN(rtattr
->rta_len
));
2718 static abi_long
target_to_host_data_link_rtattr(struct rtattr
*rtattr
)
2720 switch (rtattr
->rta_type
) {
2722 gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr
->rta_type
);
2728 static abi_long
target_to_host_data_addr_rtattr(struct rtattr
*rtattr
)
2730 switch (rtattr
->rta_type
) {
2731 /* binary: depends on family type */
2736 gemu_log("Unknown target IFA type: %d\n", rtattr
->rta_type
);
2742 static abi_long
target_to_host_data_route_rtattr(struct rtattr
*rtattr
)
2745 switch (rtattr
->rta_type
) {
2746 /* binary: depends on family type */
2754 u32
= RTA_DATA(rtattr
);
2755 *u32
= tswap32(*u32
);
2758 gemu_log("Unknown target RTA type: %d\n", rtattr
->rta_type
);
2764 static void target_to_host_link_rtattr(struct rtattr
*rtattr
,
2765 uint32_t rtattr_len
)
2767 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2768 target_to_host_data_link_rtattr
);
2771 static void target_to_host_addr_rtattr(struct rtattr
*rtattr
,
2772 uint32_t rtattr_len
)
2774 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2775 target_to_host_data_addr_rtattr
);
2778 static void target_to_host_route_rtattr(struct rtattr
*rtattr
,
2779 uint32_t rtattr_len
)
2781 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2782 target_to_host_data_route_rtattr
);
2785 static abi_long
target_to_host_data_route(struct nlmsghdr
*nlh
)
2787 struct ifinfomsg
*ifi
;
2788 struct ifaddrmsg
*ifa
;
2791 switch (nlh
->nlmsg_type
) {
2796 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2797 ifi
= NLMSG_DATA(nlh
);
2798 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2799 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2800 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2801 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2802 target_to_host_link_rtattr(IFLA_RTA(ifi
), nlh
->nlmsg_len
-
2803 NLMSG_LENGTH(sizeof(*ifi
)));
2809 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2810 ifa
= NLMSG_DATA(nlh
);
2811 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2812 target_to_host_addr_rtattr(IFA_RTA(ifa
), nlh
->nlmsg_len
-
2813 NLMSG_LENGTH(sizeof(*ifa
)));
2820 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2821 rtm
= NLMSG_DATA(nlh
);
2822 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2823 target_to_host_route_rtattr(RTM_RTA(rtm
), nlh
->nlmsg_len
-
2824 NLMSG_LENGTH(sizeof(*rtm
)));
2828 return -TARGET_EOPNOTSUPP
;
2833 static abi_long
target_to_host_nlmsg_route(struct nlmsghdr
*nlh
, size_t len
)
2835 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_route
);
2837 #endif /* CONFIG_RTNETLINK */
2839 static abi_long
host_to_target_data_audit(struct nlmsghdr
*nlh
)
2841 switch (nlh
->nlmsg_type
) {
2843 gemu_log("Unknown host audit message type %d\n",
2845 return -TARGET_EINVAL
;
2850 static inline abi_long
host_to_target_nlmsg_audit(struct nlmsghdr
*nlh
,
2853 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_audit
);
2856 static abi_long
target_to_host_data_audit(struct nlmsghdr
*nlh
)
2858 switch (nlh
->nlmsg_type
) {
2860 case AUDIT_FIRST_USER_MSG
... AUDIT_LAST_USER_MSG
:
2861 case AUDIT_FIRST_USER_MSG2
... AUDIT_LAST_USER_MSG2
:
2864 gemu_log("Unknown target audit message type %d\n",
2866 return -TARGET_EINVAL
;
2872 static abi_long
target_to_host_nlmsg_audit(struct nlmsghdr
*nlh
, size_t len
)
2874 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_audit
);
2877 /* do_setsockopt() Must return target values and target errnos. */
2878 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
2879 abi_ulong optval_addr
, socklen_t optlen
)
2883 struct ip_mreqn
*ip_mreq
;
2884 struct ip_mreq_source
*ip_mreq_source
;
2888 /* TCP options all take an 'int' value. */
2889 if (optlen
< sizeof(uint32_t))
2890 return -TARGET_EINVAL
;
2892 if (get_user_u32(val
, optval_addr
))
2893 return -TARGET_EFAULT
;
2894 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2901 case IP_ROUTER_ALERT
:
2905 case IP_MTU_DISCOVER
:
2912 case IP_MULTICAST_TTL
:
2913 case IP_MULTICAST_LOOP
:
2915 if (optlen
>= sizeof(uint32_t)) {
2916 if (get_user_u32(val
, optval_addr
))
2917 return -TARGET_EFAULT
;
2918 } else if (optlen
>= 1) {
2919 if (get_user_u8(val
, optval_addr
))
2920 return -TARGET_EFAULT
;
2922 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2924 case IP_ADD_MEMBERSHIP
:
2925 case IP_DROP_MEMBERSHIP
:
2926 if (optlen
< sizeof (struct target_ip_mreq
) ||
2927 optlen
> sizeof (struct target_ip_mreqn
))
2928 return -TARGET_EINVAL
;
2930 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
2931 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
2932 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
2935 case IP_BLOCK_SOURCE
:
2936 case IP_UNBLOCK_SOURCE
:
2937 case IP_ADD_SOURCE_MEMBERSHIP
:
2938 case IP_DROP_SOURCE_MEMBERSHIP
:
2939 if (optlen
!= sizeof (struct target_ip_mreq_source
))
2940 return -TARGET_EINVAL
;
2942 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2943 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2944 unlock_user (ip_mreq_source
, optval_addr
, 0);
2953 case IPV6_MTU_DISCOVER
:
2956 case IPV6_RECVPKTINFO
:
2957 case IPV6_UNICAST_HOPS
:
2959 case IPV6_RECVHOPLIMIT
:
2960 case IPV6_2292HOPLIMIT
:
2963 if (optlen
< sizeof(uint32_t)) {
2964 return -TARGET_EINVAL
;
2966 if (get_user_u32(val
, optval_addr
)) {
2967 return -TARGET_EFAULT
;
2969 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2970 &val
, sizeof(val
)));
2974 struct in6_pktinfo pki
;
2976 if (optlen
< sizeof(pki
)) {
2977 return -TARGET_EINVAL
;
2980 if (copy_from_user(&pki
, optval_addr
, sizeof(pki
))) {
2981 return -TARGET_EFAULT
;
2984 pki
.ipi6_ifindex
= tswap32(pki
.ipi6_ifindex
);
2986 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2987 &pki
, sizeof(pki
)));
2998 struct icmp6_filter icmp6f
;
3000 if (optlen
> sizeof(icmp6f
)) {
3001 optlen
= sizeof(icmp6f
);
3004 if (copy_from_user(&icmp6f
, optval_addr
, optlen
)) {
3005 return -TARGET_EFAULT
;
3008 for (val
= 0; val
< 8; val
++) {
3009 icmp6f
.data
[val
] = tswap32(icmp6f
.data
[val
]);
3012 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
3024 /* those take an u32 value */
3025 if (optlen
< sizeof(uint32_t)) {
3026 return -TARGET_EINVAL
;
3029 if (get_user_u32(val
, optval_addr
)) {
3030 return -TARGET_EFAULT
;
3032 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
3033 &val
, sizeof(val
)));
3040 case TARGET_SOL_SOCKET
:
3042 case TARGET_SO_RCVTIMEO
:
3046 optname
= SO_RCVTIMEO
;
3049 if (optlen
!= sizeof(struct target_timeval
)) {
3050 return -TARGET_EINVAL
;
3053 if (copy_from_user_timeval(&tv
, optval_addr
)) {
3054 return -TARGET_EFAULT
;
3057 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
3061 case TARGET_SO_SNDTIMEO
:
3062 optname
= SO_SNDTIMEO
;
3064 case TARGET_SO_ATTACH_FILTER
:
3066 struct target_sock_fprog
*tfprog
;
3067 struct target_sock_filter
*tfilter
;
3068 struct sock_fprog fprog
;
3069 struct sock_filter
*filter
;
3072 if (optlen
!= sizeof(*tfprog
)) {
3073 return -TARGET_EINVAL
;
3075 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
3076 return -TARGET_EFAULT
;
3078 if (!lock_user_struct(VERIFY_READ
, tfilter
,
3079 tswapal(tfprog
->filter
), 0)) {
3080 unlock_user_struct(tfprog
, optval_addr
, 1);
3081 return -TARGET_EFAULT
;
3084 fprog
.len
= tswap16(tfprog
->len
);
3085 filter
= g_try_new(struct sock_filter
, fprog
.len
);
3086 if (filter
== NULL
) {
3087 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
3088 unlock_user_struct(tfprog
, optval_addr
, 1);
3089 return -TARGET_ENOMEM
;
3091 for (i
= 0; i
< fprog
.len
; i
++) {
3092 filter
[i
].code
= tswap16(tfilter
[i
].code
);
3093 filter
[i
].jt
= tfilter
[i
].jt
;
3094 filter
[i
].jf
= tfilter
[i
].jf
;
3095 filter
[i
].k
= tswap32(tfilter
[i
].k
);
3097 fprog
.filter
= filter
;
3099 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
3100 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
3103 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
3104 unlock_user_struct(tfprog
, optval_addr
, 1);
3107 case TARGET_SO_BINDTODEVICE
:
3109 char *dev_ifname
, *addr_ifname
;
3111 if (optlen
> IFNAMSIZ
- 1) {
3112 optlen
= IFNAMSIZ
- 1;
3114 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
3116 return -TARGET_EFAULT
;
3118 optname
= SO_BINDTODEVICE
;
3119 addr_ifname
= alloca(IFNAMSIZ
);
3120 memcpy(addr_ifname
, dev_ifname
, optlen
);
3121 addr_ifname
[optlen
] = 0;
3122 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
3123 addr_ifname
, optlen
));
3124 unlock_user (dev_ifname
, optval_addr
, 0);
3127 /* Options with 'int' argument. */
3128 case TARGET_SO_DEBUG
:
3131 case TARGET_SO_REUSEADDR
:
3132 optname
= SO_REUSEADDR
;
3134 case TARGET_SO_TYPE
:
3137 case TARGET_SO_ERROR
:
3140 case TARGET_SO_DONTROUTE
:
3141 optname
= SO_DONTROUTE
;
3143 case TARGET_SO_BROADCAST
:
3144 optname
= SO_BROADCAST
;
3146 case TARGET_SO_SNDBUF
:
3147 optname
= SO_SNDBUF
;
3149 case TARGET_SO_SNDBUFFORCE
:
3150 optname
= SO_SNDBUFFORCE
;
3152 case TARGET_SO_RCVBUF
:
3153 optname
= SO_RCVBUF
;
3155 case TARGET_SO_RCVBUFFORCE
:
3156 optname
= SO_RCVBUFFORCE
;
3158 case TARGET_SO_KEEPALIVE
:
3159 optname
= SO_KEEPALIVE
;
3161 case TARGET_SO_OOBINLINE
:
3162 optname
= SO_OOBINLINE
;
3164 case TARGET_SO_NO_CHECK
:
3165 optname
= SO_NO_CHECK
;
3167 case TARGET_SO_PRIORITY
:
3168 optname
= SO_PRIORITY
;
3171 case TARGET_SO_BSDCOMPAT
:
3172 optname
= SO_BSDCOMPAT
;
3175 case TARGET_SO_PASSCRED
:
3176 optname
= SO_PASSCRED
;
3178 case TARGET_SO_PASSSEC
:
3179 optname
= SO_PASSSEC
;
3181 case TARGET_SO_TIMESTAMP
:
3182 optname
= SO_TIMESTAMP
;
3184 case TARGET_SO_RCVLOWAT
:
3185 optname
= SO_RCVLOWAT
;
3190 if (optlen
< sizeof(uint32_t))
3191 return -TARGET_EINVAL
;
3193 if (get_user_u32(val
, optval_addr
))
3194 return -TARGET_EFAULT
;
3195 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
3199 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
3200 ret
= -TARGET_ENOPROTOOPT
;
3205 /* do_getsockopt() Must return target values and target errnos. */
3206 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
3207 abi_ulong optval_addr
, abi_ulong optlen
)
3214 case TARGET_SOL_SOCKET
:
3217 /* These don't just return a single integer */
3218 case TARGET_SO_LINGER
:
3219 case TARGET_SO_RCVTIMEO
:
3220 case TARGET_SO_SNDTIMEO
:
3221 case TARGET_SO_PEERNAME
:
3223 case TARGET_SO_PEERCRED
: {
3226 struct target_ucred
*tcr
;
3228 if (get_user_u32(len
, optlen
)) {
3229 return -TARGET_EFAULT
;
3232 return -TARGET_EINVAL
;
3236 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
3244 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
3245 return -TARGET_EFAULT
;
3247 __put_user(cr
.pid
, &tcr
->pid
);
3248 __put_user(cr
.uid
, &tcr
->uid
);
3249 __put_user(cr
.gid
, &tcr
->gid
);
3250 unlock_user_struct(tcr
, optval_addr
, 1);
3251 if (put_user_u32(len
, optlen
)) {
3252 return -TARGET_EFAULT
;
3256 /* Options with 'int' argument. */
3257 case TARGET_SO_DEBUG
:
3260 case TARGET_SO_REUSEADDR
:
3261 optname
= SO_REUSEADDR
;
3263 case TARGET_SO_TYPE
:
3266 case TARGET_SO_ERROR
:
3269 case TARGET_SO_DONTROUTE
:
3270 optname
= SO_DONTROUTE
;
3272 case TARGET_SO_BROADCAST
:
3273 optname
= SO_BROADCAST
;
3275 case TARGET_SO_SNDBUF
:
3276 optname
= SO_SNDBUF
;
3278 case TARGET_SO_RCVBUF
:
3279 optname
= SO_RCVBUF
;
3281 case TARGET_SO_KEEPALIVE
:
3282 optname
= SO_KEEPALIVE
;
3284 case TARGET_SO_OOBINLINE
:
3285 optname
= SO_OOBINLINE
;
3287 case TARGET_SO_NO_CHECK
:
3288 optname
= SO_NO_CHECK
;
3290 case TARGET_SO_PRIORITY
:
3291 optname
= SO_PRIORITY
;
3294 case TARGET_SO_BSDCOMPAT
:
3295 optname
= SO_BSDCOMPAT
;
3298 case TARGET_SO_PASSCRED
:
3299 optname
= SO_PASSCRED
;
3301 case TARGET_SO_TIMESTAMP
:
3302 optname
= SO_TIMESTAMP
;
3304 case TARGET_SO_RCVLOWAT
:
3305 optname
= SO_RCVLOWAT
;
3307 case TARGET_SO_ACCEPTCONN
:
3308 optname
= SO_ACCEPTCONN
;
3315 /* TCP options all take an 'int' value. */
3317 if (get_user_u32(len
, optlen
))
3318 return -TARGET_EFAULT
;
3320 return -TARGET_EINVAL
;
3322 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3325 if (optname
== SO_TYPE
) {
3326 val
= host_to_target_sock_type(val
);
3331 if (put_user_u32(val
, optval_addr
))
3332 return -TARGET_EFAULT
;
3334 if (put_user_u8(val
, optval_addr
))
3335 return -TARGET_EFAULT
;
3337 if (put_user_u32(len
, optlen
))
3338 return -TARGET_EFAULT
;
3345 case IP_ROUTER_ALERT
:
3349 case IP_MTU_DISCOVER
:
3355 case IP_MULTICAST_TTL
:
3356 case IP_MULTICAST_LOOP
:
3357 if (get_user_u32(len
, optlen
))
3358 return -TARGET_EFAULT
;
3360 return -TARGET_EINVAL
;
3362 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3365 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
3367 if (put_user_u32(len
, optlen
)
3368 || put_user_u8(val
, optval_addr
))
3369 return -TARGET_EFAULT
;
3371 if (len
> sizeof(int))
3373 if (put_user_u32(len
, optlen
)
3374 || put_user_u32(val
, optval_addr
))
3375 return -TARGET_EFAULT
;
3379 ret
= -TARGET_ENOPROTOOPT
;
3385 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
3387 ret
= -TARGET_EOPNOTSUPP
;
3393 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
3394 abi_ulong count
, int copy
)
3396 struct target_iovec
*target_vec
;
3398 abi_ulong total_len
, max_len
;
3401 bool bad_address
= false;
3407 if (count
> IOV_MAX
) {
3412 vec
= g_try_new0(struct iovec
, count
);
3418 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3419 count
* sizeof(struct target_iovec
), 1);
3420 if (target_vec
== NULL
) {
3425 /* ??? If host page size > target page size, this will result in a
3426 value larger than what we can actually support. */
3427 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
3430 for (i
= 0; i
< count
; i
++) {
3431 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3432 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3437 } else if (len
== 0) {
3438 /* Zero length pointer is ignored. */
3439 vec
[i
].iov_base
= 0;
3441 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
3442 /* If the first buffer pointer is bad, this is a fault. But
3443 * subsequent bad buffers will result in a partial write; this
3444 * is realized by filling the vector with null pointers and
3446 if (!vec
[i
].iov_base
) {
3457 if (len
> max_len
- total_len
) {
3458 len
= max_len
- total_len
;
3461 vec
[i
].iov_len
= len
;
3465 unlock_user(target_vec
, target_addr
, 0);
3470 if (tswapal(target_vec
[i
].iov_len
) > 0) {
3471 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
3474 unlock_user(target_vec
, target_addr
, 0);
3481 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
3482 abi_ulong count
, int copy
)
3484 struct target_iovec
*target_vec
;
3487 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3488 count
* sizeof(struct target_iovec
), 1);
3490 for (i
= 0; i
< count
; i
++) {
3491 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3492 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3496 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
3498 unlock_user(target_vec
, target_addr
, 0);
3504 static inline int target_to_host_sock_type(int *type
)
3507 int target_type
= *type
;
3509 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
3510 case TARGET_SOCK_DGRAM
:
3511 host_type
= SOCK_DGRAM
;
3513 case TARGET_SOCK_STREAM
:
3514 host_type
= SOCK_STREAM
;
3517 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
3520 if (target_type
& TARGET_SOCK_CLOEXEC
) {
3521 #if defined(SOCK_CLOEXEC)
3522 host_type
|= SOCK_CLOEXEC
;
3524 return -TARGET_EINVAL
;
3527 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3528 #if defined(SOCK_NONBLOCK)
3529 host_type
|= SOCK_NONBLOCK
;
3530 #elif !defined(O_NONBLOCK)
3531 return -TARGET_EINVAL
;
3538 /* Try to emulate socket type flags after socket creation. */
3539 static int sock_flags_fixup(int fd
, int target_type
)
3541 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3542 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3543 int flags
= fcntl(fd
, F_GETFL
);
3544 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
3546 return -TARGET_EINVAL
;
3553 static abi_long
packet_target_to_host_sockaddr(void *host_addr
,
3554 abi_ulong target_addr
,
3557 struct sockaddr
*addr
= host_addr
;
3558 struct target_sockaddr
*target_saddr
;
3560 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
3561 if (!target_saddr
) {
3562 return -TARGET_EFAULT
;
3565 memcpy(addr
, target_saddr
, len
);
3566 addr
->sa_family
= tswap16(target_saddr
->sa_family
);
3567 /* spkt_protocol is big-endian */
3569 unlock_user(target_saddr
, target_addr
, 0);
3573 static TargetFdTrans target_packet_trans
= {
3574 .target_to_host_addr
= packet_target_to_host_sockaddr
,
3577 #ifdef CONFIG_RTNETLINK
3578 static abi_long
netlink_route_target_to_host(void *buf
, size_t len
)
3582 ret
= target_to_host_nlmsg_route(buf
, len
);
3590 static abi_long
netlink_route_host_to_target(void *buf
, size_t len
)
3594 ret
= host_to_target_nlmsg_route(buf
, len
);
3602 static TargetFdTrans target_netlink_route_trans
= {
3603 .target_to_host_data
= netlink_route_target_to_host
,
3604 .host_to_target_data
= netlink_route_host_to_target
,
3606 #endif /* CONFIG_RTNETLINK */
3608 static abi_long
netlink_audit_target_to_host(void *buf
, size_t len
)
3612 ret
= target_to_host_nlmsg_audit(buf
, len
);
3620 static abi_long
netlink_audit_host_to_target(void *buf
, size_t len
)
3624 ret
= host_to_target_nlmsg_audit(buf
, len
);
3632 static TargetFdTrans target_netlink_audit_trans
= {
3633 .target_to_host_data
= netlink_audit_target_to_host
,
3634 .host_to_target_data
= netlink_audit_host_to_target
,
3637 /* do_socket() Must return target values and target errnos. */
3638 static abi_long
do_socket(int domain
, int type
, int protocol
)
3640 int target_type
= type
;
3643 ret
= target_to_host_sock_type(&type
);
3648 if (domain
== PF_NETLINK
&& !(
3649 #ifdef CONFIG_RTNETLINK
3650 protocol
== NETLINK_ROUTE
||
3652 protocol
== NETLINK_KOBJECT_UEVENT
||
3653 protocol
== NETLINK_AUDIT
)) {
3654 return -EPFNOSUPPORT
;
3657 if (domain
== AF_PACKET
||
3658 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
3659 protocol
= tswap16(protocol
);
3662 ret
= get_errno(socket(domain
, type
, protocol
));
3664 ret
= sock_flags_fixup(ret
, target_type
);
3665 if (type
== SOCK_PACKET
) {
3666 /* Manage an obsolete case :
3667 * if socket type is SOCK_PACKET, bind by name
3669 fd_trans_register(ret
, &target_packet_trans
);
3670 } else if (domain
== PF_NETLINK
) {
3672 #ifdef CONFIG_RTNETLINK
3674 fd_trans_register(ret
, &target_netlink_route_trans
);
3677 case NETLINK_KOBJECT_UEVENT
:
3678 /* nothing to do: messages are strings */
3681 fd_trans_register(ret
, &target_netlink_audit_trans
);
3684 g_assert_not_reached();
3691 /* do_bind() Must return target values and target errnos. */
3692 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
3698 if ((int)addrlen
< 0) {
3699 return -TARGET_EINVAL
;
3702 addr
= alloca(addrlen
+1);
3704 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3708 return get_errno(bind(sockfd
, addr
, addrlen
));
3711 /* do_connect() Must return target values and target errnos. */
3712 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
3718 if ((int)addrlen
< 0) {
3719 return -TARGET_EINVAL
;
3722 addr
= alloca(addrlen
+1);
3724 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3728 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
3731 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3732 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
3733 int flags
, int send
)
3739 abi_ulong target_vec
;
3741 if (msgp
->msg_name
) {
3742 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
3743 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
3744 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
3745 tswapal(msgp
->msg_name
),
3747 if (ret
== -TARGET_EFAULT
) {
3748 /* For connected sockets msg_name and msg_namelen must
3749 * be ignored, so returning EFAULT immediately is wrong.
3750 * Instead, pass a bad msg_name to the host kernel, and
3751 * let it decide whether to return EFAULT or not.
3753 msg
.msg_name
= (void *)-1;
3758 msg
.msg_name
= NULL
;
3759 msg
.msg_namelen
= 0;
3761 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3762 msg
.msg_control
= alloca(msg
.msg_controllen
);
3763 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3765 count
= tswapal(msgp
->msg_iovlen
);
3766 target_vec
= tswapal(msgp
->msg_iov
);
3768 if (count
> IOV_MAX
) {
3769 /* sendrcvmsg returns a different errno for this condition than
3770 * readv/writev, so we must catch it here before lock_iovec() does.
3772 ret
= -TARGET_EMSGSIZE
;
3776 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3777 target_vec
, count
, send
);
3779 ret
= -host_to_target_errno(errno
);
3782 msg
.msg_iovlen
= count
;
3786 if (fd_trans_target_to_host_data(fd
)) {
3789 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3790 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3791 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3792 msg
.msg_iov
->iov_len
);
3794 msg
.msg_iov
->iov_base
= host_msg
;
3795 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3799 ret
= target_to_host_cmsg(&msg
, msgp
);
3801 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3805 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3806 if (!is_error(ret
)) {
3808 if (fd_trans_host_to_target_data(fd
)) {
3809 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3812 ret
= host_to_target_cmsg(msgp
, &msg
);
3814 if (!is_error(ret
)) {
3815 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3816 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
3817 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3818 msg
.msg_name
, msg
.msg_namelen
);
3830 unlock_iovec(vec
, target_vec
, count
, !send
);
3835 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3836 int flags
, int send
)
3839 struct target_msghdr
*msgp
;
3841 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3845 return -TARGET_EFAULT
;
3847 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3848 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3852 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3853 * so it might not have this *mmsg-specific flag either.
3855 #ifndef MSG_WAITFORONE
3856 #define MSG_WAITFORONE 0x10000
3859 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3860 unsigned int vlen
, unsigned int flags
,
3863 struct target_mmsghdr
*mmsgp
;
3867 if (vlen
> UIO_MAXIOV
) {
3871 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3873 return -TARGET_EFAULT
;
3876 for (i
= 0; i
< vlen
; i
++) {
3877 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3878 if (is_error(ret
)) {
3881 mmsgp
[i
].msg_len
= tswap32(ret
);
3882 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3883 if (flags
& MSG_WAITFORONE
) {
3884 flags
|= MSG_DONTWAIT
;
3888 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3890 /* Return number of datagrams sent if we sent any at all;
3891 * otherwise return the error.
3899 /* do_accept4() Must return target values and target errnos. */
3900 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3901 abi_ulong target_addrlen_addr
, int flags
)
3908 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3910 if (target_addr
== 0) {
3911 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3914 /* linux returns EINVAL if addrlen pointer is invalid */
3915 if (get_user_u32(addrlen
, target_addrlen_addr
))
3916 return -TARGET_EINVAL
;
3918 if ((int)addrlen
< 0) {
3919 return -TARGET_EINVAL
;
3922 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3923 return -TARGET_EINVAL
;
3925 addr
= alloca(addrlen
);
3927 ret
= get_errno(safe_accept4(fd
, addr
, &addrlen
, host_flags
));
3928 if (!is_error(ret
)) {
3929 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3930 if (put_user_u32(addrlen
, target_addrlen_addr
))
3931 ret
= -TARGET_EFAULT
;
3936 /* do_getpeername() Must return target values and target errnos. */
3937 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3938 abi_ulong target_addrlen_addr
)
3944 if (get_user_u32(addrlen
, target_addrlen_addr
))
3945 return -TARGET_EFAULT
;
3947 if ((int)addrlen
< 0) {
3948 return -TARGET_EINVAL
;
3951 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3952 return -TARGET_EFAULT
;
3954 addr
= alloca(addrlen
);
3956 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
3957 if (!is_error(ret
)) {
3958 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3959 if (put_user_u32(addrlen
, target_addrlen_addr
))
3960 ret
= -TARGET_EFAULT
;
3965 /* do_getsockname() Must return target values and target errnos. */
3966 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3967 abi_ulong target_addrlen_addr
)
3973 if (get_user_u32(addrlen
, target_addrlen_addr
))
3974 return -TARGET_EFAULT
;
3976 if ((int)addrlen
< 0) {
3977 return -TARGET_EINVAL
;
3980 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3981 return -TARGET_EFAULT
;
3983 addr
= alloca(addrlen
);
3985 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
3986 if (!is_error(ret
)) {
3987 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3988 if (put_user_u32(addrlen
, target_addrlen_addr
))
3989 ret
= -TARGET_EFAULT
;
3994 /* do_socketpair() Must return target values and target errnos. */
3995 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
3996 abi_ulong target_tab_addr
)
4001 target_to_host_sock_type(&type
);
4003 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
4004 if (!is_error(ret
)) {
4005 if (put_user_s32(tab
[0], target_tab_addr
)
4006 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
4007 ret
= -TARGET_EFAULT
;
4012 /* do_sendto() Must return target values and target errnos. */
4013 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
4014 abi_ulong target_addr
, socklen_t addrlen
)
4018 void *copy_msg
= NULL
;
4021 if ((int)addrlen
< 0) {
4022 return -TARGET_EINVAL
;
4025 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
4027 return -TARGET_EFAULT
;
4028 if (fd_trans_target_to_host_data(fd
)) {
4029 copy_msg
= host_msg
;
4030 host_msg
= g_malloc(len
);
4031 memcpy(host_msg
, copy_msg
, len
);
4032 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
4038 addr
= alloca(addrlen
+1);
4039 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
4043 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
4045 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
4050 host_msg
= copy_msg
;
4052 unlock_user(host_msg
, msg
, 0);
4056 /* do_recvfrom() Must return target values and target errnos. */
4057 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
4058 abi_ulong target_addr
,
4059 abi_ulong target_addrlen
)
4066 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
4068 return -TARGET_EFAULT
;
4070 if (get_user_u32(addrlen
, target_addrlen
)) {
4071 ret
= -TARGET_EFAULT
;
4074 if ((int)addrlen
< 0) {
4075 ret
= -TARGET_EINVAL
;
4078 addr
= alloca(addrlen
);
4079 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
4082 addr
= NULL
; /* To keep compiler quiet. */
4083 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
4085 if (!is_error(ret
)) {
4086 if (fd_trans_host_to_target_data(fd
)) {
4087 ret
= fd_trans_host_to_target_data(fd
)(host_msg
, ret
);
4090 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
4091 if (put_user_u32(addrlen
, target_addrlen
)) {
4092 ret
= -TARGET_EFAULT
;
4096 unlock_user(host_msg
, msg
, len
);
4099 unlock_user(host_msg
, msg
, 0);
4104 #ifdef TARGET_NR_socketcall
4105 /* do_socketcall() must return target values and target errnos. */
4106 static abi_long
do_socketcall(int num
, abi_ulong vptr
)
4108 static const unsigned nargs
[] = { /* number of arguments per operation */
4109 [TARGET_SYS_SOCKET
] = 3, /* domain, type, protocol */
4110 [TARGET_SYS_BIND
] = 3, /* fd, addr, addrlen */
4111 [TARGET_SYS_CONNECT
] = 3, /* fd, addr, addrlen */
4112 [TARGET_SYS_LISTEN
] = 2, /* fd, backlog */
4113 [TARGET_SYS_ACCEPT
] = 3, /* fd, addr, addrlen */
4114 [TARGET_SYS_GETSOCKNAME
] = 3, /* fd, addr, addrlen */
4115 [TARGET_SYS_GETPEERNAME
] = 3, /* fd, addr, addrlen */
4116 [TARGET_SYS_SOCKETPAIR
] = 4, /* domain, type, protocol, tab */
4117 [TARGET_SYS_SEND
] = 4, /* fd, msg, len, flags */
4118 [TARGET_SYS_RECV
] = 4, /* fd, msg, len, flags */
4119 [TARGET_SYS_SENDTO
] = 6, /* fd, msg, len, flags, addr, addrlen */
4120 [TARGET_SYS_RECVFROM
] = 6, /* fd, msg, len, flags, addr, addrlen */
4121 [TARGET_SYS_SHUTDOWN
] = 2, /* fd, how */
4122 [TARGET_SYS_SETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
4123 [TARGET_SYS_GETSOCKOPT
] = 5, /* fd, level, optname, optval, optlen */
4124 [TARGET_SYS_SENDMSG
] = 3, /* fd, msg, flags */
4125 [TARGET_SYS_RECVMSG
] = 3, /* fd, msg, flags */
4126 [TARGET_SYS_ACCEPT4
] = 4, /* fd, addr, addrlen, flags */
4127 [TARGET_SYS_RECVMMSG
] = 4, /* fd, msgvec, vlen, flags */
4128 [TARGET_SYS_SENDMMSG
] = 4, /* fd, msgvec, vlen, flags */
4130 abi_long a
[6]; /* max 6 args */
4133 /* check the range of the first argument num */
4134 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
4135 if (num
< 1 || num
> TARGET_SYS_SENDMMSG
) {
4136 return -TARGET_EINVAL
;
4138 /* ensure we have space for args */
4139 if (nargs
[num
] > ARRAY_SIZE(a
)) {
4140 return -TARGET_EINVAL
;
4142 /* collect the arguments in a[] according to nargs[] */
4143 for (i
= 0; i
< nargs
[num
]; ++i
) {
4144 if (get_user_ual(a
[i
], vptr
+ i
* sizeof(abi_long
)) != 0) {
4145 return -TARGET_EFAULT
;
4148 /* now when we have the args, invoke the appropriate underlying function */
4150 case TARGET_SYS_SOCKET
: /* domain, type, protocol */
4151 return do_socket(a
[0], a
[1], a
[2]);
4152 case TARGET_SYS_BIND
: /* sockfd, addr, addrlen */
4153 return do_bind(a
[0], a
[1], a
[2]);
4154 case TARGET_SYS_CONNECT
: /* sockfd, addr, addrlen */
4155 return do_connect(a
[0], a
[1], a
[2]);
4156 case TARGET_SYS_LISTEN
: /* sockfd, backlog */
4157 return get_errno(listen(a
[0], a
[1]));
4158 case TARGET_SYS_ACCEPT
: /* sockfd, addr, addrlen */
4159 return do_accept4(a
[0], a
[1], a
[2], 0);
4160 case TARGET_SYS_GETSOCKNAME
: /* sockfd, addr, addrlen */
4161 return do_getsockname(a
[0], a
[1], a
[2]);
4162 case TARGET_SYS_GETPEERNAME
: /* sockfd, addr, addrlen */
4163 return do_getpeername(a
[0], a
[1], a
[2]);
4164 case TARGET_SYS_SOCKETPAIR
: /* domain, type, protocol, tab */
4165 return do_socketpair(a
[0], a
[1], a
[2], a
[3]);
4166 case TARGET_SYS_SEND
: /* sockfd, msg, len, flags */
4167 return do_sendto(a
[0], a
[1], a
[2], a
[3], 0, 0);
4168 case TARGET_SYS_RECV
: /* sockfd, msg, len, flags */
4169 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], 0, 0);
4170 case TARGET_SYS_SENDTO
: /* sockfd, msg, len, flags, addr, addrlen */
4171 return do_sendto(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
4172 case TARGET_SYS_RECVFROM
: /* sockfd, msg, len, flags, addr, addrlen */
4173 return do_recvfrom(a
[0], a
[1], a
[2], a
[3], a
[4], a
[5]);
4174 case TARGET_SYS_SHUTDOWN
: /* sockfd, how */
4175 return get_errno(shutdown(a
[0], a
[1]));
4176 case TARGET_SYS_SETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
4177 return do_setsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
4178 case TARGET_SYS_GETSOCKOPT
: /* sockfd, level, optname, optval, optlen */
4179 return do_getsockopt(a
[0], a
[1], a
[2], a
[3], a
[4]);
4180 case TARGET_SYS_SENDMSG
: /* sockfd, msg, flags */
4181 return do_sendrecvmsg(a
[0], a
[1], a
[2], 1);
4182 case TARGET_SYS_RECVMSG
: /* sockfd, msg, flags */
4183 return do_sendrecvmsg(a
[0], a
[1], a
[2], 0);
4184 case TARGET_SYS_ACCEPT4
: /* sockfd, addr, addrlen, flags */
4185 return do_accept4(a
[0], a
[1], a
[2], a
[3]);
4186 case TARGET_SYS_RECVMMSG
: /* sockfd, msgvec, vlen, flags */
4187 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 0);
4188 case TARGET_SYS_SENDMMSG
: /* sockfd, msgvec, vlen, flags */
4189 return do_sendrecvmmsg(a
[0], a
[1], a
[2], a
[3], 1);
4191 gemu_log("Unsupported socketcall: %d\n", num
);
4192 return -TARGET_EINVAL
;
4197 #define N_SHM_REGIONS 32
4199 static struct shm_region
{
4203 } shm_regions
[N_SHM_REGIONS
];
4205 #ifndef TARGET_SEMID64_DS
4206 /* asm-generic version of this struct */
4207 struct target_semid64_ds
4209 struct target_ipc_perm sem_perm
;
4210 abi_ulong sem_otime
;
4211 #if TARGET_ABI_BITS == 32
4212 abi_ulong __unused1
;
4214 abi_ulong sem_ctime
;
4215 #if TARGET_ABI_BITS == 32
4216 abi_ulong __unused2
;
4218 abi_ulong sem_nsems
;
4219 abi_ulong __unused3
;
4220 abi_ulong __unused4
;
4224 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
4225 abi_ulong target_addr
)
4227 struct target_ipc_perm
*target_ip
;
4228 struct target_semid64_ds
*target_sd
;
4230 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4231 return -TARGET_EFAULT
;
4232 target_ip
= &(target_sd
->sem_perm
);
4233 host_ip
->__key
= tswap32(target_ip
->__key
);
4234 host_ip
->uid
= tswap32(target_ip
->uid
);
4235 host_ip
->gid
= tswap32(target_ip
->gid
);
4236 host_ip
->cuid
= tswap32(target_ip
->cuid
);
4237 host_ip
->cgid
= tswap32(target_ip
->cgid
);
4238 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4239 host_ip
->mode
= tswap32(target_ip
->mode
);
4241 host_ip
->mode
= tswap16(target_ip
->mode
);
4243 #if defined(TARGET_PPC)
4244 host_ip
->__seq
= tswap32(target_ip
->__seq
);
4246 host_ip
->__seq
= tswap16(target_ip
->__seq
);
4248 unlock_user_struct(target_sd
, target_addr
, 0);
4252 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
4253 struct ipc_perm
*host_ip
)
4255 struct target_ipc_perm
*target_ip
;
4256 struct target_semid64_ds
*target_sd
;
4258 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4259 return -TARGET_EFAULT
;
4260 target_ip
= &(target_sd
->sem_perm
);
4261 target_ip
->__key
= tswap32(host_ip
->__key
);
4262 target_ip
->uid
= tswap32(host_ip
->uid
);
4263 target_ip
->gid
= tswap32(host_ip
->gid
);
4264 target_ip
->cuid
= tswap32(host_ip
->cuid
);
4265 target_ip
->cgid
= tswap32(host_ip
->cgid
);
4266 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4267 target_ip
->mode
= tswap32(host_ip
->mode
);
4269 target_ip
->mode
= tswap16(host_ip
->mode
);
4271 #if defined(TARGET_PPC)
4272 target_ip
->__seq
= tswap32(host_ip
->__seq
);
4274 target_ip
->__seq
= tswap16(host_ip
->__seq
);
4276 unlock_user_struct(target_sd
, target_addr
, 1);
4280 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
4281 abi_ulong target_addr
)
4283 struct target_semid64_ds
*target_sd
;
4285 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4286 return -TARGET_EFAULT
;
4287 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
4288 return -TARGET_EFAULT
;
4289 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
4290 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
4291 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
4292 unlock_user_struct(target_sd
, target_addr
, 0);
4296 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
4297 struct semid_ds
*host_sd
)
4299 struct target_semid64_ds
*target_sd
;
4301 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4302 return -TARGET_EFAULT
;
4303 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
4304 return -TARGET_EFAULT
;
4305 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
4306 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
4307 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
4308 unlock_user_struct(target_sd
, target_addr
, 1);
4312 struct target_seminfo
{
4325 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
4326 struct seminfo
*host_seminfo
)
4328 struct target_seminfo
*target_seminfo
;
4329 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
4330 return -TARGET_EFAULT
;
4331 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
4332 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
4333 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
4334 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
4335 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
4336 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
4337 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
4338 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
4339 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
4340 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
4341 unlock_user_struct(target_seminfo
, target_addr
, 1);
4347 struct semid_ds
*buf
;
4348 unsigned short *array
;
4349 struct seminfo
*__buf
;
4352 union target_semun
{
4359 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
4360 abi_ulong target_addr
)
4363 unsigned short *array
;
4365 struct semid_ds semid_ds
;
4368 semun
.buf
= &semid_ds
;
4370 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4372 return get_errno(ret
);
4374 nsems
= semid_ds
.sem_nsems
;
4376 *host_array
= g_try_new(unsigned short, nsems
);
4378 return -TARGET_ENOMEM
;
4380 array
= lock_user(VERIFY_READ
, target_addr
,
4381 nsems
*sizeof(unsigned short), 1);
4383 g_free(*host_array
);
4384 return -TARGET_EFAULT
;
4387 for(i
=0; i
<nsems
; i
++) {
4388 __get_user((*host_array
)[i
], &array
[i
]);
4390 unlock_user(array
, target_addr
, 0);
4395 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
4396 unsigned short **host_array
)
4399 unsigned short *array
;
4401 struct semid_ds semid_ds
;
4404 semun
.buf
= &semid_ds
;
4406 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4408 return get_errno(ret
);
4410 nsems
= semid_ds
.sem_nsems
;
4412 array
= lock_user(VERIFY_WRITE
, target_addr
,
4413 nsems
*sizeof(unsigned short), 0);
4415 return -TARGET_EFAULT
;
4417 for(i
=0; i
<nsems
; i
++) {
4418 __put_user((*host_array
)[i
], &array
[i
]);
4420 g_free(*host_array
);
4421 unlock_user(array
, target_addr
, 1);
4426 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
4427 abi_ulong target_arg
)
4429 union target_semun target_su
= { .buf
= target_arg
};
4431 struct semid_ds dsarg
;
4432 unsigned short *array
= NULL
;
4433 struct seminfo seminfo
;
4434 abi_long ret
= -TARGET_EINVAL
;
4441 /* In 64 bit cross-endian situations, we will erroneously pick up
4442 * the wrong half of the union for the "val" element. To rectify
4443 * this, the entire 8-byte structure is byteswapped, followed by
4444 * a swap of the 4 byte val field. In other cases, the data is
4445 * already in proper host byte order. */
4446 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
4447 target_su
.buf
= tswapal(target_su
.buf
);
4448 arg
.val
= tswap32(target_su
.val
);
4450 arg
.val
= target_su
.val
;
4452 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4456 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
4460 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4461 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
4468 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
4472 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4473 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
4479 arg
.__buf
= &seminfo
;
4480 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4481 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
4489 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
4496 struct target_sembuf
{
4497 unsigned short sem_num
;
4502 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
4503 abi_ulong target_addr
,
4506 struct target_sembuf
*target_sembuf
;
4509 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
4510 nsops
*sizeof(struct target_sembuf
), 1);
4512 return -TARGET_EFAULT
;
4514 for(i
=0; i
<nsops
; i
++) {
4515 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
4516 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
4517 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
4520 unlock_user(target_sembuf
, target_addr
, 0);
4525 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
4527 struct sembuf sops
[nsops
];
4529 if (target_to_host_sembuf(sops
, ptr
, nsops
))
4530 return -TARGET_EFAULT
;
4532 return get_errno(safe_semtimedop(semid
, sops
, nsops
, NULL
));
4535 struct target_msqid_ds
4537 struct target_ipc_perm msg_perm
;
4538 abi_ulong msg_stime
;
4539 #if TARGET_ABI_BITS == 32
4540 abi_ulong __unused1
;
4542 abi_ulong msg_rtime
;
4543 #if TARGET_ABI_BITS == 32
4544 abi_ulong __unused2
;
4546 abi_ulong msg_ctime
;
4547 #if TARGET_ABI_BITS == 32
4548 abi_ulong __unused3
;
4550 abi_ulong __msg_cbytes
;
4552 abi_ulong msg_qbytes
;
4553 abi_ulong msg_lspid
;
4554 abi_ulong msg_lrpid
;
4555 abi_ulong __unused4
;
4556 abi_ulong __unused5
;
4559 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
4560 abi_ulong target_addr
)
4562 struct target_msqid_ds
*target_md
;
4564 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
4565 return -TARGET_EFAULT
;
4566 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
4567 return -TARGET_EFAULT
;
4568 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
4569 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
4570 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
4571 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
4572 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
4573 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
4574 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
4575 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
4576 unlock_user_struct(target_md
, target_addr
, 0);
4580 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
4581 struct msqid_ds
*host_md
)
4583 struct target_msqid_ds
*target_md
;
4585 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
4586 return -TARGET_EFAULT
;
4587 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
4588 return -TARGET_EFAULT
;
4589 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
4590 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
4591 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
4592 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
4593 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
4594 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
4595 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
4596 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
4597 unlock_user_struct(target_md
, target_addr
, 1);
4601 struct target_msginfo
{
4609 unsigned short int msgseg
;
4612 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
4613 struct msginfo
*host_msginfo
)
4615 struct target_msginfo
*target_msginfo
;
4616 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
4617 return -TARGET_EFAULT
;
4618 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
4619 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
4620 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
4621 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
4622 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
4623 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
4624 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
4625 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
4626 unlock_user_struct(target_msginfo
, target_addr
, 1);
4630 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
4632 struct msqid_ds dsarg
;
4633 struct msginfo msginfo
;
4634 abi_long ret
= -TARGET_EINVAL
;
4642 if (target_to_host_msqid_ds(&dsarg
,ptr
))
4643 return -TARGET_EFAULT
;
4644 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
4645 if (host_to_target_msqid_ds(ptr
,&dsarg
))
4646 return -TARGET_EFAULT
;
4649 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
4653 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
4654 if (host_to_target_msginfo(ptr
, &msginfo
))
4655 return -TARGET_EFAULT
;
4662 struct target_msgbuf
{
4667 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
4668 ssize_t msgsz
, int msgflg
)
4670 struct target_msgbuf
*target_mb
;
4671 struct msgbuf
*host_mb
;
4675 return -TARGET_EINVAL
;
4678 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
4679 return -TARGET_EFAULT
;
4680 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4682 unlock_user_struct(target_mb
, msgp
, 0);
4683 return -TARGET_ENOMEM
;
4685 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
4686 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
4687 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
4689 unlock_user_struct(target_mb
, msgp
, 0);
4694 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
4695 ssize_t msgsz
, abi_long msgtyp
,
4698 struct target_msgbuf
*target_mb
;
4700 struct msgbuf
*host_mb
;
4704 return -TARGET_EINVAL
;
4707 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
4708 return -TARGET_EFAULT
;
4710 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4712 ret
= -TARGET_ENOMEM
;
4715 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
4718 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
4719 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
4720 if (!target_mtext
) {
4721 ret
= -TARGET_EFAULT
;
4724 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
4725 unlock_user(target_mtext
, target_mtext_addr
, ret
);
4728 target_mb
->mtype
= tswapal(host_mb
->mtype
);
4732 unlock_user_struct(target_mb
, msgp
, 1);
4737 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
4738 abi_ulong target_addr
)
4740 struct target_shmid_ds
*target_sd
;
4742 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4743 return -TARGET_EFAULT
;
4744 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
4745 return -TARGET_EFAULT
;
4746 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4747 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4748 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4749 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4750 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4751 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4752 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4753 unlock_user_struct(target_sd
, target_addr
, 0);
4757 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
4758 struct shmid_ds
*host_sd
)
4760 struct target_shmid_ds
*target_sd
;
4762 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4763 return -TARGET_EFAULT
;
4764 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
4765 return -TARGET_EFAULT
;
4766 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4767 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4768 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4769 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4770 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4771 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4772 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4773 unlock_user_struct(target_sd
, target_addr
, 1);
4777 struct target_shminfo
{
4785 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
4786 struct shminfo
*host_shminfo
)
4788 struct target_shminfo
*target_shminfo
;
4789 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
4790 return -TARGET_EFAULT
;
4791 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
4792 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
4793 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
4794 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
4795 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
4796 unlock_user_struct(target_shminfo
, target_addr
, 1);
4800 struct target_shm_info
{
4805 abi_ulong swap_attempts
;
4806 abi_ulong swap_successes
;
4809 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
4810 struct shm_info
*host_shm_info
)
4812 struct target_shm_info
*target_shm_info
;
4813 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
4814 return -TARGET_EFAULT
;
4815 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
4816 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
4817 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
4818 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
4819 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
4820 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
4821 unlock_user_struct(target_shm_info
, target_addr
, 1);
4825 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
4827 struct shmid_ds dsarg
;
4828 struct shminfo shminfo
;
4829 struct shm_info shm_info
;
4830 abi_long ret
= -TARGET_EINVAL
;
4838 if (target_to_host_shmid_ds(&dsarg
, buf
))
4839 return -TARGET_EFAULT
;
4840 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
4841 if (host_to_target_shmid_ds(buf
, &dsarg
))
4842 return -TARGET_EFAULT
;
4845 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
4846 if (host_to_target_shminfo(buf
, &shminfo
))
4847 return -TARGET_EFAULT
;
4850 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
4851 if (host_to_target_shm_info(buf
, &shm_info
))
4852 return -TARGET_EFAULT
;
4857 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
4864 #ifndef TARGET_FORCE_SHMLBA
4865 /* For most architectures, SHMLBA is the same as the page size;
4866 * some architectures have larger values, in which case they should
4867 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4868 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4869 * and defining its own value for SHMLBA.
4871 * The kernel also permits SHMLBA to be set by the architecture to a
4872 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4873 * this means that addresses are rounded to the large size if
4874 * SHM_RND is set but addresses not aligned to that size are not rejected
4875 * as long as they are at least page-aligned. Since the only architecture
4876 * which uses this is ia64 this code doesn't provide for that oddity.
/* Default SHMLBA for targets that do not define TARGET_FORCE_SHMLBA:
 * one target page (see the comment block above this function).
 */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
4884 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
4885 int shmid
, abi_ulong shmaddr
, int shmflg
)
4889 struct shmid_ds shm_info
;
4893 /* find out the length of the shared memory segment */
4894 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4895 if (is_error(ret
)) {
4896 /* can't get length, bail out */
4900 shmlba
= target_shmlba(cpu_env
);
4902 if (shmaddr
& (shmlba
- 1)) {
4903 if (shmflg
& SHM_RND
) {
4904 shmaddr
&= ~(shmlba
- 1);
4906 return -TARGET_EINVAL
;
4913 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
4915 abi_ulong mmap_start
;
4917 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
4919 if (mmap_start
== -1) {
4921 host_raddr
= (void *)-1;
4923 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
4926 if (host_raddr
== (void *)-1) {
4928 return get_errno((long)host_raddr
);
4930 raddr
=h2g((unsigned long)host_raddr
);
4932 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
4933 PAGE_VALID
| PAGE_READ
|
4934 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
4936 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4937 if (!shm_regions
[i
].in_use
) {
4938 shm_regions
[i
].in_use
= true;
4939 shm_regions
[i
].start
= raddr
;
4940 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4950 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4954 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4955 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4956 shm_regions
[i
].in_use
= false;
4957 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
4962 return get_errno(shmdt(g2h(shmaddr
)));
4965 #ifdef TARGET_NR_ipc
4966 /* ??? This only works with linear mappings. */
4967 /* do_ipc() must return target values and target errnos. */
4968 static abi_long
do_ipc(CPUArchState
*cpu_env
,
4969 unsigned int call
, abi_long first
,
4970 abi_long second
, abi_long third
,
4971 abi_long ptr
, abi_long fifth
)
4976 version
= call
>> 16;
4981 ret
= do_semop(first
, ptr
, second
);
4985 ret
= get_errno(semget(first
, second
, third
));
4988 case IPCOP_semctl
: {
4989 /* The semun argument to semctl is passed by value, so dereference the
4992 get_user_ual(atptr
, ptr
);
4993 ret
= do_semctl(first
, second
, third
, atptr
);
4998 ret
= get_errno(msgget(first
, second
));
5002 ret
= do_msgsnd(first
, ptr
, second
, third
);
5006 ret
= do_msgctl(first
, second
, ptr
);
5013 struct target_ipc_kludge
{
5018 if (!lock_user_struct(VERIFY_READ
, tmp
, ptr
, 1)) {
5019 ret
= -TARGET_EFAULT
;
5023 ret
= do_msgrcv(first
, tswapal(tmp
->msgp
), second
, tswapal(tmp
->msgtyp
), third
);
5025 unlock_user_struct(tmp
, ptr
, 0);
5029 ret
= do_msgrcv(first
, ptr
, second
, fifth
, third
);
5038 raddr
= do_shmat(cpu_env
, first
, ptr
, second
);
5039 if (is_error(raddr
))
5040 return get_errno(raddr
);
5041 if (put_user_ual(raddr
, third
))
5042 return -TARGET_EFAULT
;
5046 ret
= -TARGET_EINVAL
;
5051 ret
= do_shmdt(ptr
);
5055 /* IPC_* flag values are the same on all linux platforms */
5056 ret
= get_errno(shmget(first
, second
, third
));
5059 /* IPC_* and SHM_* command values are the same on all linux platforms */
5061 ret
= do_shmctl(first
, second
, ptr
);
5064 gemu_log("Unsupported ipc call: %d (version %d)\n", call
, version
);
5065 ret
= -TARGET_ENOSYS
;
5072 /* kernel structure types definitions */
5074 #define STRUCT(name, ...) STRUCT_ ## name,
5075 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
5077 #include "syscall_types.h"
5081 #undef STRUCT_SPECIAL
5083 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
5084 #define STRUCT_SPECIAL(name)
5085 #include "syscall_types.h"
5087 #undef STRUCT_SPECIAL
5089 typedef struct IOCTLEntry IOCTLEntry
;
5091 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5092 int fd
, int cmd
, abi_long arg
);
5096 unsigned int host_cmd
;
5099 do_ioctl_fn
*do_ioctl
;
5100 const argtype arg_type
[5];
5103 #define IOC_R 0x0001
5104 #define IOC_W 0x0002
5105 #define IOC_RW (IOC_R | IOC_W)
5107 #define MAX_STRUCT_SIZE 4096
5109 #ifdef CONFIG_FIEMAP
5110 /* So fiemap access checks don't overflow on 32 bit systems.
5111 * This is very slightly smaller than the limit imposed by
5112 * the underlying kernel.
5114 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
5115 / sizeof(struct fiemap_extent))
5117 static abi_long
do_ioctl_fs_ioc_fiemap(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5118 int fd
, int cmd
, abi_long arg
)
5120 /* The parameter for this ioctl is a struct fiemap followed
5121 * by an array of struct fiemap_extent whose size is set
5122 * in fiemap->fm_extent_count. The array is filled in by the
5125 int target_size_in
, target_size_out
;
5127 const argtype
*arg_type
= ie
->arg_type
;
5128 const argtype extent_arg_type
[] = { MK_STRUCT(STRUCT_fiemap_extent
) };
5131 int i
, extent_size
= thunk_type_size(extent_arg_type
, 0);
5135 assert(arg_type
[0] == TYPE_PTR
);
5136 assert(ie
->access
== IOC_RW
);
5138 target_size_in
= thunk_type_size(arg_type
, 0);
5139 argptr
= lock_user(VERIFY_READ
, arg
, target_size_in
, 1);
5141 return -TARGET_EFAULT
;
5143 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5144 unlock_user(argptr
, arg
, 0);
5145 fm
= (struct fiemap
*)buf_temp
;
5146 if (fm
->fm_extent_count
> FIEMAP_MAX_EXTENTS
) {
5147 return -TARGET_EINVAL
;
5150 outbufsz
= sizeof (*fm
) +
5151 (sizeof(struct fiemap_extent
) * fm
->fm_extent_count
);
5153 if (outbufsz
> MAX_STRUCT_SIZE
) {
5154 /* We can't fit all the extents into the fixed size buffer.
5155 * Allocate one that is large enough and use it instead.
5157 fm
= g_try_malloc(outbufsz
);
5159 return -TARGET_ENOMEM
;
5161 memcpy(fm
, buf_temp
, sizeof(struct fiemap
));
5164 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, fm
));
5165 if (!is_error(ret
)) {
5166 target_size_out
= target_size_in
;
5167 /* An extent_count of 0 means we were only counting the extents
5168 * so there are no structs to copy
5170 if (fm
->fm_extent_count
!= 0) {
5171 target_size_out
+= fm
->fm_mapped_extents
* extent_size
;
5173 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size_out
, 0);
5175 ret
= -TARGET_EFAULT
;
5177 /* Convert the struct fiemap */
5178 thunk_convert(argptr
, fm
, arg_type
, THUNK_TARGET
);
5179 if (fm
->fm_extent_count
!= 0) {
5180 p
= argptr
+ target_size_in
;
5181 /* ...and then all the struct fiemap_extents */
5182 for (i
= 0; i
< fm
->fm_mapped_extents
; i
++) {
5183 thunk_convert(p
, &fm
->fm_extents
[i
], extent_arg_type
,
5188 unlock_user(argptr
, arg
, target_size_out
);
5198 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5199 int fd
, int cmd
, abi_long arg
)
5201 const argtype
*arg_type
= ie
->arg_type
;
5205 struct ifconf
*host_ifconf
;
5207 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
5208 int target_ifreq_size
;
5213 abi_long target_ifc_buf
;
5217 assert(arg_type
[0] == TYPE_PTR
);
5218 assert(ie
->access
== IOC_RW
);
5221 target_size
= thunk_type_size(arg_type
, 0);
5223 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5225 return -TARGET_EFAULT
;
5226 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5227 unlock_user(argptr
, arg
, 0);
5229 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
5230 target_ifc_len
= host_ifconf
->ifc_len
;
5231 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
5233 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
5234 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
5235 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
5237 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
5238 if (outbufsz
> MAX_STRUCT_SIZE
) {
5239 /* We can't fit all the extents into the fixed size buffer.
5240 * Allocate one that is large enough and use it instead.
5242 host_ifconf
= malloc(outbufsz
);
5244 return -TARGET_ENOMEM
;
5246 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
5249 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
5251 host_ifconf
->ifc_len
= host_ifc_len
;
5252 host_ifconf
->ifc_buf
= host_ifc_buf
;
5254 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
5255 if (!is_error(ret
)) {
5256 /* convert host ifc_len to target ifc_len */
5258 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
5259 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
5260 host_ifconf
->ifc_len
= target_ifc_len
;
5262 /* restore target ifc_buf */
5264 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
5266 /* copy struct ifconf to target user */
5268 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5270 return -TARGET_EFAULT
;
5271 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
5272 unlock_user(argptr
, arg
, target_size
);
5274 /* copy ifreq[] to target user */
5276 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
5277 for (i
= 0; i
< nb_ifreq
; i
++) {
5278 thunk_convert(argptr
+ i
* target_ifreq_size
,
5279 host_ifc_buf
+ i
* sizeof(struct ifreq
),
5280 ifreq_arg_type
, THUNK_TARGET
);
5282 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
5292 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5293 int cmd
, abi_long arg
)
5296 struct dm_ioctl
*host_dm
;
5297 abi_long guest_data
;
5298 uint32_t guest_data_size
;
5300 const argtype
*arg_type
= ie
->arg_type
;
5302 void *big_buf
= NULL
;
5306 target_size
= thunk_type_size(arg_type
, 0);
5307 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5309 ret
= -TARGET_EFAULT
;
5312 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5313 unlock_user(argptr
, arg
, 0);
5315 /* buf_temp is too small, so fetch things into a bigger buffer */
5316 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
5317 memcpy(big_buf
, buf_temp
, target_size
);
5321 guest_data
= arg
+ host_dm
->data_start
;
5322 if ((guest_data
- arg
) < 0) {
5323 ret
= -TARGET_EINVAL
;
5326 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5327 host_data
= (char*)host_dm
+ host_dm
->data_start
;
5329 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
5331 ret
= -TARGET_EFAULT
;
5335 switch (ie
->host_cmd
) {
5337 case DM_LIST_DEVICES
:
5340 case DM_DEV_SUSPEND
:
5343 case DM_TABLE_STATUS
:
5344 case DM_TABLE_CLEAR
:
5346 case DM_LIST_VERSIONS
:
5350 case DM_DEV_SET_GEOMETRY
:
5351 /* data contains only strings */
5352 memcpy(host_data
, argptr
, guest_data_size
);
5355 memcpy(host_data
, argptr
, guest_data_size
);
5356 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
5360 void *gspec
= argptr
;
5361 void *cur_data
= host_data
;
5362 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5363 int spec_size
= thunk_type_size(arg_type
, 0);
5366 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5367 struct dm_target_spec
*spec
= cur_data
;
5371 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
5372 slen
= strlen((char*)gspec
+ spec_size
) + 1;
5374 spec
->next
= sizeof(*spec
) + slen
;
5375 strcpy((char*)&spec
[1], gspec
+ spec_size
);
5377 cur_data
+= spec
->next
;
5382 ret
= -TARGET_EINVAL
;
5383 unlock_user(argptr
, guest_data
, 0);
5386 unlock_user(argptr
, guest_data
, 0);
5388 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5389 if (!is_error(ret
)) {
5390 guest_data
= arg
+ host_dm
->data_start
;
5391 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5392 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
5393 switch (ie
->host_cmd
) {
5398 case DM_DEV_SUSPEND
:
5401 case DM_TABLE_CLEAR
:
5403 case DM_DEV_SET_GEOMETRY
:
5404 /* no return data */
5406 case DM_LIST_DEVICES
:
5408 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
5409 uint32_t remaining_data
= guest_data_size
;
5410 void *cur_data
= argptr
;
5411 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
5412 int nl_size
= 12; /* can't use thunk_size due to alignment */
5415 uint32_t next
= nl
->next
;
5417 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
5419 if (remaining_data
< nl
->next
) {
5420 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5423 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
5424 strcpy(cur_data
+ nl_size
, nl
->name
);
5425 cur_data
+= nl
->next
;
5426 remaining_data
-= nl
->next
;
5430 nl
= (void*)nl
+ next
;
5435 case DM_TABLE_STATUS
:
5437 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
5438 void *cur_data
= argptr
;
5439 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5440 int spec_size
= thunk_type_size(arg_type
, 0);
5443 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5444 uint32_t next
= spec
->next
;
5445 int slen
= strlen((char*)&spec
[1]) + 1;
5446 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
5447 if (guest_data_size
< spec
->next
) {
5448 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5451 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
5452 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
5453 cur_data
= argptr
+ spec
->next
;
5454 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
5460 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
5461 int count
= *(uint32_t*)hdata
;
5462 uint64_t *hdev
= hdata
+ 8;
5463 uint64_t *gdev
= argptr
+ 8;
5466 *(uint32_t*)argptr
= tswap32(count
);
5467 for (i
= 0; i
< count
; i
++) {
5468 *gdev
= tswap64(*hdev
);
5474 case DM_LIST_VERSIONS
:
5476 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
5477 uint32_t remaining_data
= guest_data_size
;
5478 void *cur_data
= argptr
;
5479 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
5480 int vers_size
= thunk_type_size(arg_type
, 0);
5483 uint32_t next
= vers
->next
;
5485 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
5487 if (remaining_data
< vers
->next
) {
5488 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5491 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
5492 strcpy(cur_data
+ vers_size
, vers
->name
);
5493 cur_data
+= vers
->next
;
5494 remaining_data
-= vers
->next
;
5498 vers
= (void*)vers
+ next
;
5503 unlock_user(argptr
, guest_data
, 0);
5504 ret
= -TARGET_EINVAL
;
5507 unlock_user(argptr
, guest_data
, guest_data_size
);
5509 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5511 ret
= -TARGET_EFAULT
;
5514 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5515 unlock_user(argptr
, arg
, target_size
);
5522 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5523 int cmd
, abi_long arg
)
5527 const argtype
*arg_type
= ie
->arg_type
;
5528 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
5531 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
5532 struct blkpg_partition host_part
;
5534 /* Read and convert blkpg */
5536 target_size
= thunk_type_size(arg_type
, 0);
5537 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5539 ret
= -TARGET_EFAULT
;
5542 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5543 unlock_user(argptr
, arg
, 0);
5545 switch (host_blkpg
->op
) {
5546 case BLKPG_ADD_PARTITION
:
5547 case BLKPG_DEL_PARTITION
:
5548 /* payload is struct blkpg_partition */
5551 /* Unknown opcode */
5552 ret
= -TARGET_EINVAL
;
5556 /* Read and convert blkpg->data */
5557 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
5558 target_size
= thunk_type_size(part_arg_type
, 0);
5559 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5561 ret
= -TARGET_EFAULT
;
5564 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
5565 unlock_user(argptr
, arg
, 0);
5567 /* Swizzle the data pointer to our local copy and call! */
5568 host_blkpg
->data
= &host_part
;
5569 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
5575 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5576 int fd
, int cmd
, abi_long arg
)
5578 const argtype
*arg_type
= ie
->arg_type
;
5579 const StructEntry
*se
;
5580 const argtype
*field_types
;
5581 const int *dst_offsets
, *src_offsets
;
5584 abi_ulong
*target_rt_dev_ptr
;
5585 unsigned long *host_rt_dev_ptr
;
5589 assert(ie
->access
== IOC_W
);
5590 assert(*arg_type
== TYPE_PTR
);
5592 assert(*arg_type
== TYPE_STRUCT
);
5593 target_size
= thunk_type_size(arg_type
, 0);
5594 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5596 return -TARGET_EFAULT
;
5599 assert(*arg_type
== (int)STRUCT_rtentry
);
5600 se
= struct_entries
+ *arg_type
++;
5601 assert(se
->convert
[0] == NULL
);
5602 /* convert struct here to be able to catch rt_dev string */
5603 field_types
= se
->field_types
;
5604 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
5605 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
5606 for (i
= 0; i
< se
->nb_fields
; i
++) {
5607 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
5608 assert(*field_types
== TYPE_PTRVOID
);
5609 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
5610 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
5611 if (*target_rt_dev_ptr
!= 0) {
5612 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
5613 tswapal(*target_rt_dev_ptr
));
5614 if (!*host_rt_dev_ptr
) {
5615 unlock_user(argptr
, arg
, 0);
5616 return -TARGET_EFAULT
;
5619 *host_rt_dev_ptr
= 0;
5624 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
5625 argptr
+ src_offsets
[i
],
5626 field_types
, THUNK_HOST
);
5628 unlock_user(argptr
, arg
, 0);
5630 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5631 if (*host_rt_dev_ptr
!= 0) {
5632 unlock_user((void *)*host_rt_dev_ptr
,
5633 *target_rt_dev_ptr
, 0);
5638 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5639 int fd
, int cmd
, abi_long arg
)
5641 int sig
= target_to_host_signal(arg
);
5642 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
5645 static IOCTLEntry ioctl_entries
[] = {
5646 #define IOCTL(cmd, access, ...) \
5647 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5648 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5649 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5650 #define IOCTL_IGNORE(cmd) \
5651 { TARGET_ ## cmd, 0, #cmd },
5656 /* ??? Implement proper locking for ioctls. */
5657 /* do_ioctl() Must return target values and target errnos. */
5658 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
5660 const IOCTLEntry
*ie
;
5661 const argtype
*arg_type
;
5663 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
5669 if (ie
->target_cmd
== 0) {
5670 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
5671 return -TARGET_ENOSYS
;
5673 if (ie
->target_cmd
== cmd
)
5677 arg_type
= ie
->arg_type
;
5679 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
5682 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
5683 } else if (!ie
->host_cmd
) {
5684 /* Some architectures define BSD ioctls in their headers
5685 that are not implemented in Linux. */
5686 return -TARGET_ENOSYS
;
5689 switch(arg_type
[0]) {
5692 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
5696 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
5700 target_size
= thunk_type_size(arg_type
, 0);
5701 switch(ie
->access
) {
5703 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5704 if (!is_error(ret
)) {
5705 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5707 return -TARGET_EFAULT
;
5708 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5709 unlock_user(argptr
, arg
, target_size
);
5713 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5715 return -TARGET_EFAULT
;
5716 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5717 unlock_user(argptr
, arg
, 0);
5718 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5722 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5724 return -TARGET_EFAULT
;
5725 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5726 unlock_user(argptr
, arg
, 0);
5727 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5728 if (!is_error(ret
)) {
5729 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5731 return -TARGET_EFAULT
;
5732 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5733 unlock_user(argptr
, arg
, target_size
);
5739 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5740 (long)cmd
, arg_type
[0]);
5741 ret
= -TARGET_ENOSYS
;
5747 static const bitmask_transtbl iflag_tbl
[] = {
5748 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
5749 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
5750 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
5751 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
5752 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
5753 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
5754 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
5755 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
5756 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
5757 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
5758 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
5759 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
5760 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
5761 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
5765 static const bitmask_transtbl oflag_tbl
[] = {
5766 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
5767 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
5768 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
5769 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
5770 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
5771 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
5772 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
5773 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
5774 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
5775 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
5776 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
5777 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
5778 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
5779 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
5780 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
5781 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
5782 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
5783 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
5784 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
5785 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
5786 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
5787 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
5788 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
5789 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
5793 static const bitmask_transtbl cflag_tbl
[] = {
5794 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
5795 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
5796 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
5797 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
5798 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
5799 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
5800 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
5801 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
5802 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
5803 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
5804 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
5805 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
5806 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
5807 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
5808 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
5809 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
5810 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
5811 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
5812 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
5813 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
5814 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
5815 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
5816 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
5817 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
5818 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
5819 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
5820 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
5821 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
5822 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
5823 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
5824 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
5828 static const bitmask_transtbl lflag_tbl
[] = {
5829 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
5830 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
5831 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
5832 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
5833 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
5834 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
5835 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
5836 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
5837 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
5838 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
5839 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
5840 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
5841 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
5842 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
5843 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5847 static void target_to_host_termios (void *dst
, const void *src
)
5849 struct host_termios
*host
= dst
;
5850 const struct target_termios
*target
= src
;
5853 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
5855 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
5857 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
5859 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
5860 host
->c_line
= target
->c_line
;
5862 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
5863 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
5864 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
5865 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
5866 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
5867 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
5868 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
5869 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
5870 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
5871 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
5872 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
5873 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
5874 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
5875 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
5876 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
5877 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
5878 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
5879 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
5882 static void host_to_target_termios (void *dst
, const void *src
)
5884 struct target_termios
*target
= dst
;
5885 const struct host_termios
*host
= src
;
5888 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
5890 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
5892 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
5894 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
5895 target
->c_line
= host
->c_line
;
5897 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
5898 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
5899 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
5900 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
5901 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
5902 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
5903 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
5904 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
5905 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
5906 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
5907 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
5908 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
5909 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
5910 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
5911 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
5912 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
5913 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
5914 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
5917 static const StructEntry struct_termios_def
= {
5918 .convert
= { host_to_target_termios
, target_to_host_termios
},
5919 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
5920 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
5923 static bitmask_transtbl mmap_flags_tbl
[] = {
5924 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
5925 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
5926 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
5927 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
,
5928 MAP_ANONYMOUS
, MAP_ANONYMOUS
},
5929 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
,
5930 MAP_GROWSDOWN
, MAP_GROWSDOWN
},
5931 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
,
5932 MAP_DENYWRITE
, MAP_DENYWRITE
},
5933 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
,
5934 MAP_EXECUTABLE
, MAP_EXECUTABLE
},
5935 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
5936 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
,
5937 MAP_NORESERVE
, MAP_NORESERVE
},
5938 { TARGET_MAP_HUGETLB
, TARGET_MAP_HUGETLB
, MAP_HUGETLB
, MAP_HUGETLB
},
5939 /* MAP_STACK had been ignored by the kernel for quite some time.
5940 Recognize it for the target insofar as we do not want to pass
5941 it through to the host. */
5942 { TARGET_MAP_STACK
, TARGET_MAP_STACK
, 0, 0 },
5946 #if defined(TARGET_I386)
5948 /* NOTE: there is really one LDT for all the threads */
5949 static uint8_t *ldt_table
;
5951 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
5958 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
5959 if (size
> bytecount
)
5961 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
5963 return -TARGET_EFAULT
;
5964 /* ??? Should this by byteswapped? */
5965 memcpy(p
, ldt_table
, size
);
5966 unlock_user(p
, ptr
, size
);
5970 /* XXX: add locking support */
5971 static abi_long
write_ldt(CPUX86State
*env
,
5972 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
5974 struct target_modify_ldt_ldt_s ldt_info
;
5975 struct target_modify_ldt_ldt_s
*target_ldt_info
;
5976 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
5977 int seg_not_present
, useable
, lm
;
5978 uint32_t *lp
, entry_1
, entry_2
;
5980 if (bytecount
!= sizeof(ldt_info
))
5981 return -TARGET_EINVAL
;
5982 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
5983 return -TARGET_EFAULT
;
5984 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
5985 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
5986 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
5987 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
5988 unlock_user_struct(target_ldt_info
, ptr
, 0);
5990 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
5991 return -TARGET_EINVAL
;
5992 seg_32bit
= ldt_info
.flags
& 1;
5993 contents
= (ldt_info
.flags
>> 1) & 3;
5994 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
5995 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
5996 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
5997 useable
= (ldt_info
.flags
>> 6) & 1;
6001 lm
= (ldt_info
.flags
>> 7) & 1;
6003 if (contents
== 3) {
6005 return -TARGET_EINVAL
;
6006 if (seg_not_present
== 0)
6007 return -TARGET_EINVAL
;
6009 /* allocate the LDT */
6011 env
->ldt
.base
= target_mmap(0,
6012 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
6013 PROT_READ
|PROT_WRITE
,
6014 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
6015 if (env
->ldt
.base
== -1)
6016 return -TARGET_ENOMEM
;
6017 memset(g2h(env
->ldt
.base
), 0,
6018 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
6019 env
->ldt
.limit
= 0xffff;
6020 ldt_table
= g2h(env
->ldt
.base
);
6023 /* NOTE: same code as Linux kernel */
6024 /* Allow LDTs to be cleared by the user. */
6025 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
6028 read_exec_only
== 1 &&
6030 limit_in_pages
== 0 &&
6031 seg_not_present
== 1 &&
6039 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
6040 (ldt_info
.limit
& 0x0ffff);
6041 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
6042 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
6043 (ldt_info
.limit
& 0xf0000) |
6044 ((read_exec_only
^ 1) << 9) |
6046 ((seg_not_present
^ 1) << 15) |
6048 (limit_in_pages
<< 23) |
6052 entry_2
|= (useable
<< 20);
6054 /* Install the new entry ... */
6056 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
6057 lp
[0] = tswap32(entry_1
);
6058 lp
[1] = tswap32(entry_2
);
6062 /* specific and weird i386 syscalls */
6063 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
6064 unsigned long bytecount
)
6070 ret
= read_ldt(ptr
, bytecount
);
6073 ret
= write_ldt(env
, ptr
, bytecount
, 1);
6076 ret
= write_ldt(env
, ptr
, bytecount
, 0);
6079 ret
= -TARGET_ENOSYS
;
6085 #if defined(TARGET_I386) && defined(TARGET_ABI32)
6086 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6088 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
6089 struct target_modify_ldt_ldt_s ldt_info
;
6090 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6091 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
6092 int seg_not_present
, useable
, lm
;
6093 uint32_t *lp
, entry_1
, entry_2
;
6096 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6097 if (!target_ldt_info
)
6098 return -TARGET_EFAULT
;
6099 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
6100 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
6101 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
6102 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
6103 if (ldt_info
.entry_number
== -1) {
6104 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
6105 if (gdt_table
[i
] == 0) {
6106 ldt_info
.entry_number
= i
;
6107 target_ldt_info
->entry_number
= tswap32(i
);
6112 unlock_user_struct(target_ldt_info
, ptr
, 1);
6114 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
6115 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
6116 return -TARGET_EINVAL
;
6117 seg_32bit
= ldt_info
.flags
& 1;
6118 contents
= (ldt_info
.flags
>> 1) & 3;
6119 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
6120 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
6121 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
6122 useable
= (ldt_info
.flags
>> 6) & 1;
6126 lm
= (ldt_info
.flags
>> 7) & 1;
6129 if (contents
== 3) {
6130 if (seg_not_present
== 0)
6131 return -TARGET_EINVAL
;
6134 /* NOTE: same code as Linux kernel */
6135 /* Allow LDTs to be cleared by the user. */
6136 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
6137 if ((contents
== 0 &&
6138 read_exec_only
== 1 &&
6140 limit_in_pages
== 0 &&
6141 seg_not_present
== 1 &&
6149 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
6150 (ldt_info
.limit
& 0x0ffff);
6151 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
6152 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
6153 (ldt_info
.limit
& 0xf0000) |
6154 ((read_exec_only
^ 1) << 9) |
6156 ((seg_not_present
^ 1) << 15) |
6158 (limit_in_pages
<< 23) |
6163 /* Install the new entry ... */
6165 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
6166 lp
[0] = tswap32(entry_1
);
6167 lp
[1] = tswap32(entry_2
);
6171 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6173 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6174 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
6175 uint32_t base_addr
, limit
, flags
;
6176 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
6177 int seg_not_present
, useable
, lm
;
6178 uint32_t *lp
, entry_1
, entry_2
;
6180 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6181 if (!target_ldt_info
)
6182 return -TARGET_EFAULT
;
6183 idx
= tswap32(target_ldt_info
->entry_number
);
6184 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
6185 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
6186 unlock_user_struct(target_ldt_info
, ptr
, 1);
6187 return -TARGET_EINVAL
;
6189 lp
= (uint32_t *)(gdt_table
+ idx
);
6190 entry_1
= tswap32(lp
[0]);
6191 entry_2
= tswap32(lp
[1]);
6193 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
6194 contents
= (entry_2
>> 10) & 3;
6195 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
6196 seg_32bit
= (entry_2
>> 22) & 1;
6197 limit_in_pages
= (entry_2
>> 23) & 1;
6198 useable
= (entry_2
>> 20) & 1;
6202 lm
= (entry_2
>> 21) & 1;
6204 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
6205 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
6206 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
6207 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
6208 base_addr
= (entry_1
>> 16) |
6209 (entry_2
& 0xff000000) |
6210 ((entry_2
& 0xff) << 16);
6211 target_ldt_info
->base_addr
= tswapal(base_addr
);
6212 target_ldt_info
->limit
= tswap32(limit
);
6213 target_ldt_info
->flags
= tswap32(flags
);
6214 unlock_user_struct(target_ldt_info
, ptr
, 1);
6217 #endif /* TARGET_I386 && TARGET_ABI32 */
6219 #ifndef TARGET_ABI32
6220 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
6227 case TARGET_ARCH_SET_GS
:
6228 case TARGET_ARCH_SET_FS
:
6229 if (code
== TARGET_ARCH_SET_GS
)
6233 cpu_x86_load_seg(env
, idx
, 0);
6234 env
->segs
[idx
].base
= addr
;
6236 case TARGET_ARCH_GET_GS
:
6237 case TARGET_ARCH_GET_FS
:
6238 if (code
== TARGET_ARCH_GET_GS
)
6242 val
= env
->segs
[idx
].base
;
6243 if (put_user(val
, addr
, abi_ulong
))
6244 ret
= -TARGET_EFAULT
;
6247 ret
= -TARGET_EINVAL
;
6254 #endif /* defined(TARGET_I386) */
6256 #define NEW_STACK_SIZE 0x40000
6259 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
6262 pthread_mutex_t mutex
;
6263 pthread_cond_t cond
;
6266 abi_ulong child_tidptr
;
6267 abi_ulong parent_tidptr
;
6271 static void *clone_func(void *arg
)
6273 new_thread_info
*info
= arg
;
6278 rcu_register_thread();
6279 tcg_register_thread();
6281 cpu
= ENV_GET_CPU(env
);
6283 ts
= (TaskState
*)cpu
->opaque
;
6284 info
->tid
= gettid();
6286 if (info
->child_tidptr
)
6287 put_user_u32(info
->tid
, info
->child_tidptr
);
6288 if (info
->parent_tidptr
)
6289 put_user_u32(info
->tid
, info
->parent_tidptr
);
6290 /* Enable signals. */
6291 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
6292 /* Signal to the parent that we're ready. */
6293 pthread_mutex_lock(&info
->mutex
);
6294 pthread_cond_broadcast(&info
->cond
);
6295 pthread_mutex_unlock(&info
->mutex
);
6296 /* Wait until the parent has finished initializing the tls state. */
6297 pthread_mutex_lock(&clone_lock
);
6298 pthread_mutex_unlock(&clone_lock
);
6304 /* do_fork() Must return host values and target errnos (unlike most
6305 do_*() functions). */
6306 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
6307 abi_ulong parent_tidptr
, target_ulong newtls
,
6308 abi_ulong child_tidptr
)
6310 CPUState
*cpu
= ENV_GET_CPU(env
);
6314 CPUArchState
*new_env
;
6317 flags
&= ~CLONE_IGNORED_FLAGS
;
6319 /* Emulate vfork() with fork() */
6320 if (flags
& CLONE_VFORK
)
6321 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
6323 if (flags
& CLONE_VM
) {
6324 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
6325 new_thread_info info
;
6326 pthread_attr_t attr
;
6328 if (((flags
& CLONE_THREAD_FLAGS
) != CLONE_THREAD_FLAGS
) ||
6329 (flags
& CLONE_INVALID_THREAD_FLAGS
)) {
6330 return -TARGET_EINVAL
;
6333 ts
= g_new0(TaskState
, 1);
6334 init_task_state(ts
);
6335 /* we create a new CPU instance. */
6336 new_env
= cpu_copy(env
);
6337 /* Init regs that differ from the parent. */
6338 cpu_clone_regs(new_env
, newsp
);
6339 new_cpu
= ENV_GET_CPU(new_env
);
6340 new_cpu
->opaque
= ts
;
6341 ts
->bprm
= parent_ts
->bprm
;
6342 ts
->info
= parent_ts
->info
;
6343 ts
->signal_mask
= parent_ts
->signal_mask
;
6345 if (flags
& CLONE_CHILD_CLEARTID
) {
6346 ts
->child_tidptr
= child_tidptr
;
6349 if (flags
& CLONE_SETTLS
) {
6350 cpu_set_tls (new_env
, newtls
);
6353 /* Grab a mutex so that thread setup appears atomic. */
6354 pthread_mutex_lock(&clone_lock
);
6356 memset(&info
, 0, sizeof(info
));
6357 pthread_mutex_init(&info
.mutex
, NULL
);
6358 pthread_mutex_lock(&info
.mutex
);
6359 pthread_cond_init(&info
.cond
, NULL
);
6361 if (flags
& CLONE_CHILD_SETTID
) {
6362 info
.child_tidptr
= child_tidptr
;
6364 if (flags
& CLONE_PARENT_SETTID
) {
6365 info
.parent_tidptr
= parent_tidptr
;
6368 ret
= pthread_attr_init(&attr
);
6369 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
6370 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
6371 /* It is not safe to deliver signals until the child has finished
6372 initializing, so temporarily block all signals. */
6373 sigfillset(&sigmask
);
6374 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
6376 /* If this is our first additional thread, we need to ensure we
6377 * generate code for parallel execution and flush old translations.
6379 if (!parallel_cpus
) {
6380 parallel_cpus
= true;
6384 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
6385 /* TODO: Free new CPU state if thread creation failed. */
6387 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
6388 pthread_attr_destroy(&attr
);
6390 /* Wait for the child to initialize. */
6391 pthread_cond_wait(&info
.cond
, &info
.mutex
);
6396 pthread_mutex_unlock(&info
.mutex
);
6397 pthread_cond_destroy(&info
.cond
);
6398 pthread_mutex_destroy(&info
.mutex
);
6399 pthread_mutex_unlock(&clone_lock
);
6401 /* if no CLONE_VM, we consider it is a fork */
6402 if (flags
& CLONE_INVALID_FORK_FLAGS
) {
6403 return -TARGET_EINVAL
;
6406 /* We can't support custom termination signals */
6407 if ((flags
& CSIGNAL
) != TARGET_SIGCHLD
) {
6408 return -TARGET_EINVAL
;
6411 if (block_signals()) {
6412 return -TARGET_ERESTARTSYS
;
6418 /* Child Process. */
6419 cpu_clone_regs(env
, newsp
);
6421 /* There is a race condition here. The parent process could
6422 theoretically read the TID in the child process before the child
6423 tid is set. This would require using either ptrace
6424 (not implemented) or having *_tidptr to point at a shared memory
6425 mapping. We can't repeat the spinlock hack used above because
6426 the child process gets its own copy of the lock. */
6427 if (flags
& CLONE_CHILD_SETTID
)
6428 put_user_u32(gettid(), child_tidptr
);
6429 if (flags
& CLONE_PARENT_SETTID
)
6430 put_user_u32(gettid(), parent_tidptr
);
6431 ts
= (TaskState
*)cpu
->opaque
;
6432 if (flags
& CLONE_SETTLS
)
6433 cpu_set_tls (env
, newtls
);
6434 if (flags
& CLONE_CHILD_CLEARTID
)
6435 ts
->child_tidptr
= child_tidptr
;
6443 /* warning : doesn't handle linux specific flags... */
6444 static int target_to_host_fcntl_cmd(int cmd
)
6447 case TARGET_F_DUPFD
:
6448 case TARGET_F_GETFD
:
6449 case TARGET_F_SETFD
:
6450 case TARGET_F_GETFL
:
6451 case TARGET_F_SETFL
:
6453 case TARGET_F_GETLK
:
6455 case TARGET_F_SETLK
:
6457 case TARGET_F_SETLKW
:
6459 case TARGET_F_GETOWN
:
6461 case TARGET_F_SETOWN
:
6463 case TARGET_F_GETSIG
:
6465 case TARGET_F_SETSIG
:
6467 #if TARGET_ABI_BITS == 32
6468 case TARGET_F_GETLK64
:
6470 case TARGET_F_SETLK64
:
6472 case TARGET_F_SETLKW64
:
6475 case TARGET_F_SETLEASE
:
6477 case TARGET_F_GETLEASE
:
6479 #ifdef F_DUPFD_CLOEXEC
6480 case TARGET_F_DUPFD_CLOEXEC
:
6481 return F_DUPFD_CLOEXEC
;
6483 case TARGET_F_NOTIFY
:
6486 case TARGET_F_GETOWN_EX
:
6490 case TARGET_F_SETOWN_EX
:
6494 case TARGET_F_SETPIPE_SZ
:
6495 return F_SETPIPE_SZ
;
6496 case TARGET_F_GETPIPE_SZ
:
6497 return F_GETPIPE_SZ
;
6500 return -TARGET_EINVAL
;
6502 return -TARGET_EINVAL
;
6505 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
6506 static const bitmask_transtbl flock_tbl
[] = {
6507 TRANSTBL_CONVERT(F_RDLCK
),
6508 TRANSTBL_CONVERT(F_WRLCK
),
6509 TRANSTBL_CONVERT(F_UNLCK
),
6510 TRANSTBL_CONVERT(F_EXLCK
),
6511 TRANSTBL_CONVERT(F_SHLCK
),
6515 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
6516 abi_ulong target_flock_addr
)
6518 struct target_flock
*target_fl
;
6521 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6522 return -TARGET_EFAULT
;
6525 __get_user(l_type
, &target_fl
->l_type
);
6526 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6527 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6528 __get_user(fl
->l_start
, &target_fl
->l_start
);
6529 __get_user(fl
->l_len
, &target_fl
->l_len
);
6530 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6531 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6535 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
6536 const struct flock64
*fl
)
6538 struct target_flock
*target_fl
;
6541 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6542 return -TARGET_EFAULT
;
6545 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6546 __put_user(l_type
, &target_fl
->l_type
);
6547 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6548 __put_user(fl
->l_start
, &target_fl
->l_start
);
6549 __put_user(fl
->l_len
, &target_fl
->l_len
);
6550 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6551 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6555 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
6556 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
6558 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6559 static inline abi_long
copy_from_user_eabi_flock64(struct flock64
*fl
,
6560 abi_ulong target_flock_addr
)
6562 struct target_eabi_flock64
*target_fl
;
6565 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6566 return -TARGET_EFAULT
;
6569 __get_user(l_type
, &target_fl
->l_type
);
6570 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6571 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6572 __get_user(fl
->l_start
, &target_fl
->l_start
);
6573 __get_user(fl
->l_len
, &target_fl
->l_len
);
6574 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6575 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6579 static inline abi_long
copy_to_user_eabi_flock64(abi_ulong target_flock_addr
,
6580 const struct flock64
*fl
)
6582 struct target_eabi_flock64
*target_fl
;
6585 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6586 return -TARGET_EFAULT
;
6589 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6590 __put_user(l_type
, &target_fl
->l_type
);
6591 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6592 __put_user(fl
->l_start
, &target_fl
->l_start
);
6593 __put_user(fl
->l_len
, &target_fl
->l_len
);
6594 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6595 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6600 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
6601 abi_ulong target_flock_addr
)
6603 struct target_flock64
*target_fl
;
6606 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6607 return -TARGET_EFAULT
;
6610 __get_user(l_type
, &target_fl
->l_type
);
6611 fl
->l_type
= target_to_host_bitmask(l_type
, flock_tbl
);
6612 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6613 __get_user(fl
->l_start
, &target_fl
->l_start
);
6614 __get_user(fl
->l_len
, &target_fl
->l_len
);
6615 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6616 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6620 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
6621 const struct flock64
*fl
)
6623 struct target_flock64
*target_fl
;
6626 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6627 return -TARGET_EFAULT
;
6630 l_type
= host_to_target_bitmask(fl
->l_type
, flock_tbl
);
6631 __put_user(l_type
, &target_fl
->l_type
);
6632 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6633 __put_user(fl
->l_start
, &target_fl
->l_start
);
6634 __put_user(fl
->l_len
, &target_fl
->l_len
);
6635 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6636 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6640 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
6642 struct flock64 fl64
;
6644 struct f_owner_ex fox
;
6645 struct target_f_owner_ex
*target_fox
;
6648 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
6650 if (host_cmd
== -TARGET_EINVAL
)
6654 case TARGET_F_GETLK
:
6655 ret
= copy_from_user_flock(&fl64
, arg
);
6659 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6661 ret
= copy_to_user_flock(arg
, &fl64
);
6665 case TARGET_F_SETLK
:
6666 case TARGET_F_SETLKW
:
6667 ret
= copy_from_user_flock(&fl64
, arg
);
6671 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6674 case TARGET_F_GETLK64
:
6675 ret
= copy_from_user_flock64(&fl64
, arg
);
6679 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6681 ret
= copy_to_user_flock64(arg
, &fl64
);
6684 case TARGET_F_SETLK64
:
6685 case TARGET_F_SETLKW64
:
6686 ret
= copy_from_user_flock64(&fl64
, arg
);
6690 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6693 case TARGET_F_GETFL
:
6694 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6696 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
6700 case TARGET_F_SETFL
:
6701 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
6702 target_to_host_bitmask(arg
,
6707 case TARGET_F_GETOWN_EX
:
6708 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6710 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
6711 return -TARGET_EFAULT
;
6712 target_fox
->type
= tswap32(fox
.type
);
6713 target_fox
->pid
= tswap32(fox
.pid
);
6714 unlock_user_struct(target_fox
, arg
, 1);
6720 case TARGET_F_SETOWN_EX
:
6721 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
6722 return -TARGET_EFAULT
;
6723 fox
.type
= tswap32(target_fox
->type
);
6724 fox
.pid
= tswap32(target_fox
->pid
);
6725 unlock_user_struct(target_fox
, arg
, 0);
6726 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6730 case TARGET_F_SETOWN
:
6731 case TARGET_F_GETOWN
:
6732 case TARGET_F_SETSIG
:
6733 case TARGET_F_GETSIG
:
6734 case TARGET_F_SETLEASE
:
6735 case TARGET_F_GETLEASE
:
6736 case TARGET_F_SETPIPE_SZ
:
6737 case TARGET_F_GETPIPE_SZ
:
6738 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6742 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
6750 static inline int high2lowuid(int uid
)
6758 static inline int high2lowgid(int gid
)
6766 static inline int low2highuid(int uid
)
6768 if ((int16_t)uid
== -1)
6774 static inline int low2highgid(int gid
)
6776 if ((int16_t)gid
== -1)
6781 static inline int tswapid(int id
)
6786 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6788 #else /* !USE_UID16 */
6789 static inline int high2lowuid(int uid
)
6793 static inline int high2lowgid(int gid
)
6797 static inline int low2highuid(int uid
)
6801 static inline int low2highgid(int gid
)
6805 static inline int tswapid(int id
)
6810 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6812 #endif /* USE_UID16 */
6814 /* We must do direct syscalls for setting UID/GID, because we want to
6815 * implement the Linux system call semantics of "change only for this thread",
6816 * not the libc/POSIX semantics of "change for all threads in process".
6817 * (See http://ewontfix.com/17/ for more details.)
6818 * We use the 32-bit version of the syscalls if present; if it is not
6819 * then either the host architecture supports 32-bit UIDs natively with
6820 * the standard syscall, or the 16-bit UID is the best we can do.
6822 #ifdef __NR_setuid32
6823 #define __NR_sys_setuid __NR_setuid32
6825 #define __NR_sys_setuid __NR_setuid
6827 #ifdef __NR_setgid32
6828 #define __NR_sys_setgid __NR_setgid32
6830 #define __NR_sys_setgid __NR_setgid
6832 #ifdef __NR_setresuid32
6833 #define __NR_sys_setresuid __NR_setresuid32
6835 #define __NR_sys_setresuid __NR_setresuid
6837 #ifdef __NR_setresgid32
6838 #define __NR_sys_setresgid __NR_setresgid32
6840 #define __NR_sys_setresgid __NR_setresgid
6843 _syscall1(int, sys_setuid
, uid_t
, uid
)
6844 _syscall1(int, sys_setgid
, gid_t
, gid
)
6845 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
6846 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
6848 void syscall_init(void)
6851 const argtype
*arg_type
;
6855 thunk_init(STRUCT_MAX
);
6857 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6858 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6859 #include "syscall_types.h"
6861 #undef STRUCT_SPECIAL
6863 /* Build target_to_host_errno_table[] table from
6864 * host_to_target_errno_table[]. */
6865 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
6866 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
6869 /* we patch the ioctl size if necessary. We rely on the fact that
6870 no ioctl has all the bits at '1' in the size field */
6872 while (ie
->target_cmd
!= 0) {
6873 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
6874 TARGET_IOC_SIZEMASK
) {
6875 arg_type
= ie
->arg_type
;
6876 if (arg_type
[0] != TYPE_PTR
) {
6877 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
6882 size
= thunk_type_size(arg_type
, 0);
6883 ie
->target_cmd
= (ie
->target_cmd
&
6884 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
6885 (size
<< TARGET_IOC_SIZESHIFT
);
6888 /* automatic consistency check if same arch */
6889 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6890 (defined(__x86_64__) && defined(TARGET_X86_64))
6891 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
6892 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6893 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
#if TARGET_ABI_BITS == 32
/* Combine the two registers of a split 64-bit syscall argument into one
 * 64-bit offset; which register holds the high half depends on the guest
 * endianness.
 */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABI: the offset already fits in one register; the second
 * argument is ignored.
 */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
6916 #ifdef TARGET_NR_truncate64
6917 static inline abi_long
target_truncate64(void *cpu_env
, const char *arg1
,
6922 if (regpairs_aligned(cpu_env
, TARGET_NR_truncate64
)) {
6926 return get_errno(truncate64(arg1
, target_offset64(arg2
, arg3
)));
6930 #ifdef TARGET_NR_ftruncate64
6931 static inline abi_long
target_ftruncate64(void *cpu_env
, abi_long arg1
,
6936 if (regpairs_aligned(cpu_env
, TARGET_NR_ftruncate64
)) {
6940 return get_errno(ftruncate64(arg1
, target_offset64(arg2
, arg3
)));
6944 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
6945 abi_ulong target_addr
)
6947 struct target_timespec
*target_ts
;
6949 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
6950 return -TARGET_EFAULT
;
6951 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6952 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6953 unlock_user_struct(target_ts
, target_addr
, 0);
6957 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
6958 struct timespec
*host_ts
)
6960 struct target_timespec
*target_ts
;
6962 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
6963 return -TARGET_EFAULT
;
6964 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
6965 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
6966 unlock_user_struct(target_ts
, target_addr
, 1);
6970 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
6971 abi_ulong target_addr
)
6973 struct target_itimerspec
*target_itspec
;
6975 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
6976 return -TARGET_EFAULT
;
6979 host_itspec
->it_interval
.tv_sec
=
6980 tswapal(target_itspec
->it_interval
.tv_sec
);
6981 host_itspec
->it_interval
.tv_nsec
=
6982 tswapal(target_itspec
->it_interval
.tv_nsec
);
6983 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
6984 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
6986 unlock_user_struct(target_itspec
, target_addr
, 1);
6990 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
6991 struct itimerspec
*host_its
)
6993 struct target_itimerspec
*target_itspec
;
6995 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
6996 return -TARGET_EFAULT
;
6999 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
7000 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
7002 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
7003 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
7005 unlock_user_struct(target_itspec
, target_addr
, 0);
7009 static inline abi_long
target_to_host_timex(struct timex
*host_tx
,
7010 abi_long target_addr
)
7012 struct target_timex
*target_tx
;
7014 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
7015 return -TARGET_EFAULT
;
7018 __get_user(host_tx
->modes
, &target_tx
->modes
);
7019 __get_user(host_tx
->offset
, &target_tx
->offset
);
7020 __get_user(host_tx
->freq
, &target_tx
->freq
);
7021 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7022 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
7023 __get_user(host_tx
->status
, &target_tx
->status
);
7024 __get_user(host_tx
->constant
, &target_tx
->constant
);
7025 __get_user(host_tx
->precision
, &target_tx
->precision
);
7026 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7027 __get_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
7028 __get_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
7029 __get_user(host_tx
->tick
, &target_tx
->tick
);
7030 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7031 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
7032 __get_user(host_tx
->shift
, &target_tx
->shift
);
7033 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
7034 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7035 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7036 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7037 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7038 __get_user(host_tx
->tai
, &target_tx
->tai
);
7040 unlock_user_struct(target_tx
, target_addr
, 0);
7044 static inline abi_long
host_to_target_timex(abi_long target_addr
,
7045 struct timex
*host_tx
)
7047 struct target_timex
*target_tx
;
7049 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
7050 return -TARGET_EFAULT
;
7053 __put_user(host_tx
->modes
, &target_tx
->modes
);
7054 __put_user(host_tx
->offset
, &target_tx
->offset
);
7055 __put_user(host_tx
->freq
, &target_tx
->freq
);
7056 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7057 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
7058 __put_user(host_tx
->status
, &target_tx
->status
);
7059 __put_user(host_tx
->constant
, &target_tx
->constant
);
7060 __put_user(host_tx
->precision
, &target_tx
->precision
);
7061 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7062 __put_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
7063 __put_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
7064 __put_user(host_tx
->tick
, &target_tx
->tick
);
7065 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7066 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
7067 __put_user(host_tx
->shift
, &target_tx
->shift
);
7068 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
7069 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7070 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7071 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7072 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7073 __put_user(host_tx
->tai
, &target_tx
->tai
);
7075 unlock_user_struct(target_tx
, target_addr
, 1);
7080 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
7081 abi_ulong target_addr
)
7083 struct target_sigevent
*target_sevp
;
7085 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
7086 return -TARGET_EFAULT
;
7089 /* This union is awkward on 64 bit systems because it has a 32 bit
7090 * integer and a pointer in it; we follow the conversion approach
7091 * used for handling sigval types in signal.c so the guest should get
7092 * the correct value back even if we did a 64 bit byteswap and it's
7093 * using the 32 bit integer.
7095 host_sevp
->sigev_value
.sival_ptr
=
7096 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
7097 host_sevp
->sigev_signo
=
7098 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
7099 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
7100 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
7102 unlock_user_struct(target_sevp
, target_addr
, 1);
7106 #if defined(TARGET_NR_mlockall)
7107 static inline int target_to_host_mlockall_arg(int arg
)
7111 if (arg
& TARGET_MLOCKALL_MCL_CURRENT
) {
7112 result
|= MCL_CURRENT
;
7114 if (arg
& TARGET_MLOCKALL_MCL_FUTURE
) {
7115 result
|= MCL_FUTURE
;
7121 static inline abi_long
host_to_target_stat64(void *cpu_env
,
7122 abi_ulong target_addr
,
7123 struct stat
*host_st
)
7125 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7126 if (((CPUARMState
*)cpu_env
)->eabi
) {
7127 struct target_eabi_stat64
*target_st
;
7129 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
7130 return -TARGET_EFAULT
;
7131 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
7132 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
7133 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
7134 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7135 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
7137 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
7138 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
7139 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
7140 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
7141 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
7142 __put_user(host_st
->st_size
, &target_st
->st_size
);
7143 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
7144 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
7145 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
7146 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
7147 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
7148 unlock_user_struct(target_st
, target_addr
, 1);
7152 #if defined(TARGET_HAS_STRUCT_STAT64)
7153 struct target_stat64
*target_st
;
7155 struct target_stat
*target_st
;
7158 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
7159 return -TARGET_EFAULT
;
7160 memset(target_st
, 0, sizeof(*target_st
));
7161 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
7162 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
7163 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7164 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
7166 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
7167 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
7168 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
7169 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
7170 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
7171 /* XXX: better use of kernel struct */
7172 __put_user(host_st
->st_size
, &target_st
->st_size
);
7173 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
7174 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
7175 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
7176 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
7177 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
7178 unlock_user_struct(target_st
, target_addr
, 1);
7184 /* ??? Using host futex calls even when target atomic operations
7185 are not really atomic probably breaks things. However implementing
7186 futexes locally would make futexes shared between multiple processes
7187 tricky. However they're probably useless because guest atomic
7188 operations won't work either. */
7189 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
7190 target_ulong uaddr2
, int val3
)
7192 struct timespec ts
, *pts
;
7195 /* ??? We assume FUTEX_* constants are the same on both host
7197 #ifdef FUTEX_CMD_MASK
7198 base_op
= op
& FUTEX_CMD_MASK
;
7204 case FUTEX_WAIT_BITSET
:
7207 target_to_host_timespec(pts
, timeout
);
7211 return get_errno(safe_futex(g2h(uaddr
), op
, tswap32(val
),
7214 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
7216 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
7218 case FUTEX_CMP_REQUEUE
:
7220 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7221 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7222 But the prototype takes a `struct timespec *'; insert casts
7223 to satisfy the compiler. We do not need to tswap TIMEOUT
7224 since it's not compared to guest memory. */
7225 pts
= (struct timespec
*)(uintptr_t) timeout
;
7226 return get_errno(safe_futex(g2h(uaddr
), op
, val
, pts
,
7228 (base_op
== FUTEX_CMP_REQUEUE
7232 return -TARGET_ENOSYS
;
7235 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7236 static abi_long
do_name_to_handle_at(abi_long dirfd
, abi_long pathname
,
7237 abi_long handle
, abi_long mount_id
,
7240 struct file_handle
*target_fh
;
7241 struct file_handle
*fh
;
7245 unsigned int size
, total_size
;
7247 if (get_user_s32(size
, handle
)) {
7248 return -TARGET_EFAULT
;
7251 name
= lock_user_string(pathname
);
7253 return -TARGET_EFAULT
;
7256 total_size
= sizeof(struct file_handle
) + size
;
7257 target_fh
= lock_user(VERIFY_WRITE
, handle
, total_size
, 0);
7259 unlock_user(name
, pathname
, 0);
7260 return -TARGET_EFAULT
;
7263 fh
= g_malloc0(total_size
);
7264 fh
->handle_bytes
= size
;
7266 ret
= get_errno(name_to_handle_at(dirfd
, path(name
), fh
, &mid
, flags
));
7267 unlock_user(name
, pathname
, 0);
7269 /* man name_to_handle_at(2):
7270 * Other than the use of the handle_bytes field, the caller should treat
7271 * the file_handle structure as an opaque data type
7274 memcpy(target_fh
, fh
, total_size
);
7275 target_fh
->handle_bytes
= tswap32(fh
->handle_bytes
);
7276 target_fh
->handle_type
= tswap32(fh
->handle_type
);
7278 unlock_user(target_fh
, handle
, total_size
);
7280 if (put_user_s32(mid
, mount_id
)) {
7281 return -TARGET_EFAULT
;
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate open_by_handle_at(2) for the guest.
 *
 * The guest-supplied struct file_handle is duplicated into host memory
 * with its header fields converted to host byte order (the payload is
 * opaque and copied verbatim), then handed to the host syscall.
 * Returns the new fd or a -TARGET_* errno.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* handle_bytes sizes the variable-length tail of the structure. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                                      target_to_host_bitmask(flags,
                                                             fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
7323 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
/* signalfd siginfo conversion */

/*
 * Convert a struct signalfd_siginfo from host to target byte order.
 *
 * NOTE(review): the only caller in this file passes the same pointer for
 * tinfo and info (in-place conversion) — the reads of tinfo->ssi_signo /
 * tinfo->ssi_code / tinfo->ssi_errno below therefore see host-order data
 * only because of that aliasing; statement order matters here.
 */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    /* Translate the signal number before ssi_signo is overwritten below. */
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    /* For machine-check SIGBUS, swap the extra ssi_addr_lsb field that
     * the kernel stores immediately after ssi_addr. */
    if (tinfo->ssi_signo == SIGBUS &&
        (tinfo->ssi_code == BUS_MCEERR_AR ||
         tinfo->ssi_code == BUS_MCEERR_AO)) {
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    tinfo->ssi_errno = tswap32(tinfo->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
/*
 * fd_trans data hook: convert a buffer of signalfd_siginfo records
 * (as read(2) from a signalfd) to target byte order, in place.
 * Returns len unchanged; assumes len is a multiple of the record size —
 * TODO confirm (a partial trailing record would be swapped out of bounds
 * of the valid data, though the kernel only returns whole records).
 */
static abi_long host_to_target_data_signalfd(void *buf, size_t len)
{
    int i;

    for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
        /* In-place conversion: source and destination alias. */
        host_to_target_signalfd_siginfo(buf + i, buf + i);
    }

    return len;
}
/* fd_trans hooks for signalfd file descriptors: data read from the fd
 * must be byte-swapped for the guest; writes need no translation. */
static TargetFdTrans target_signalfd_trans = {
    .host_to_target_data = host_to_target_data_signalfd,
};
/*
 * Emulate signalfd4(2): validate guest flags, convert the guest sigset
 * to host format, invoke the host signalfd(), and register the resulting
 * fd for read-side byte-swapping.  Returns the fd or a -TARGET_* errno.
 */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    /* Only NONBLOCK and CLOEXEC are valid signalfd4 flags. */
    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        /* Reads from this fd need signalfd_siginfo byte-swapping. */
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same. */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Low 7 bits hold the terminating signal; translate it and
         * preserve the remaining status bits (e.g. core-dump flag). */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }

    if (WIFSTOPPED(status)) {
        /* Stop signal lives in bits 8-15; low byte is the 0x7f marker. */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }

    /* Normal exit: status bits are identical on host and target. */
    return status;
}
/*
 * Fill-callback for the emulated /proc/self/cmdline: write the guest's
 * argv strings, each including its trailing NUL, to fd.
 * Returns 0 on success, -1 on a short or failed write.
 */
static int open_self_cmdline(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
    int i;

    for (i = 0; i < bprm->argc; i++) {
        /* +1 keeps the NUL terminator, as the real /proc file does. */
        size_t len = strlen(bprm->argv[i]) + 1;

        if (write(fd, bprm->argv[i], len) != len) {
            return -1;
        }
    }

    return 0;
}
/*
 * Fill-callback for the emulated /proc/self/maps: parse the host's own
 * maps file and re-emit only the regions that correspond to valid guest
 * address space, with host addresses translated to guest addresses via
 * h2g().  The guest stack region is labelled "[stack]".
 * Returns 0 on success, -1 if the host maps file cannot be opened.
 */
static int open_self_maps(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/self/maps", "r");
    if (fp == NULL) {
        return -1;
    }

    while ((read = getline(&line, &len, fp)) != -1) {
        int fields, dev_maj, dev_min, inode;
        uint64_t min, max, offset;
        char flag_r, flag_w, flag_x, flag_p;
        char path[512] = "";
        fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
                        " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
                        &flag_p, &offset, &dev_maj, &dev_min, &inode, path);

        /* 10 fields = anonymous mapping (no path), 11 = with path. */
        if ((fields < 10) || (fields > 11)) {
            continue;
        }
        if (h2g_valid(min)) {
            int flags = page_get_flags(h2g(min));
            /* Clamp regions that extend beyond the guest address space. */
            max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
            if (page_check_range(h2g(min), max - min, flags) == -1) {
                continue;
            }
            if (h2g(min) == ts->info->stack_limit) {
                pstrcpy(path, sizeof(path), "      [stack]");
            }
            dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
                    " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
                    h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
                    flag_x, flag_p, offset, dev_maj, dev_min, inode,
                    path[0] ? "         " : "", path);
        }
    }

    free(line);
    fclose(fp);

    return 0;
}
/*
 * Fill-callback for the emulated /proc/self/stat: emit the 44
 * space-separated fields of the stat line.  Only pid (field 0),
 * comm (field 1) and startstack (field 27) carry real values; every
 * other field is written as a literal 0.
 * Returns 0 on success, -1 on a failed write.
 */
static int open_self_stat(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong start_stack = ts->info->start_stack;
    int i;

    for (i = 0; i < 44; i++) {
        char buf[128];
        int len;
        uint64_t val = 0;

        if (i == 0) {
            /* pid */
            val = getpid();
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else if (i == 1) {
            /* app name */
            snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
        } else if (i == 27) {
            /* stack bottom */
            val = start_stack;
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else {
            /* for the rest, there is MasterCard */
            snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
        }

        len = strlen(buf);
        if (write(fd, buf, len) != len) {
            return -1;
        }
    }

    return 0;
}
/*
 * Fill-callback for the emulated /proc/self/auxv: copy the guest's
 * saved auxiliary vector (stored on the target stack at exec time)
 * into fd, then rewind fd so the caller reads from the start.
 * Always returns 0; a failed lock_user is silently treated as an
 * empty auxv.
 */
static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        /* Loop because write(2) may perform a partial write. */
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        /* NOTE(review): ptr/len have been advanced by the loop above, so
         * this unlocks with the adjusted values — confirm intentional. */
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
/*
 * Return non-zero when filename names the given entry of this process's
 * own /proc directory: "/proc/self/<entry>" or "/proc/<own-pid>/<entry>".
 * Anything else (other paths, other pids) yields 0.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    const size_t proc_len = strlen("/proc/");

    if (strncmp(filename, "/proc/", proc_len) != 0) {
        return 0;
    }
    filename += proc_len;

    if (strncmp(filename, "self/", strlen("self/")) == 0) {
        filename += strlen("self/");
    } else if (*filename >= '1' && *filename <= '9') {
        /* A numeric directory only matches if it is our own pid. */
        char myself[80];
        size_t mlen;

        snprintf(myself, sizeof(myself), "%d/", getpid());
        mlen = strlen(myself);
        if (strncmp(filename, myself, mlen) != 0) {
            return 0;
        }
        filename += mlen;
    } else {
        return 0;
    }

    return strcmp(filename, entry) == 0;
}
7580 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-match predicate used by the fake_open table for absolute
 * /proc paths (counterpart of is_proc_myself's relative matching). */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
/*
 * Fill-callback for the emulated /proc/net/route, used only when host
 * and target endianness differ: copy the host file through to fd,
 * byte-swapping the destination, gateway and netmask address columns.
 * Returns 0 on success, -1 if the host file cannot be opened.
 */
static int open_net_route(void *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */

    read = getline(&line, &len, fp);
    dprintf(fd, "%s", line);

    /* read routes */

    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
               iface, &dest, &gw, &flags, &refcnt, &use, &metric,
               &mask, &mtu, &window, &irtt);
        /* Only the IP addresses are stored in kernel (host) byte order. */
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
/*
 * openat(2) emulation front-end.  Most paths go straight to the host
 * via safe_openat(), but a small table of /proc entries is faked:
 * the matching fill callback writes synthesized contents into an
 * unlinked temporary file whose fd is returned to the guest.
 * "/proc/self/exe" is special-cased to the fd QEMU was exec'd from.
 */
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;                       /* entry (or full path) to match */
        int (*fill)(void *cpu_env, int fd);         /* writes fake contents into fd */
        int (*cmp)(const char *s1, const char *s2); /* how to match pathname */
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        /* Only faked when endianness differs and swapping is required. */
        { "/proc/net/route", open_net_route, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    if (is_proc_myself(pathname, "exe")) {
        /* Prefer the AT_EXECFD the loader gave us; fall back to path. */
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir) {
            tmpdir = "/tmp";
        }
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* Unlink immediately: the fd keeps the file alive, nothing else
         * should see it in the filesystem. */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        /* Rewind so the guest reads the synthesized data from offset 0. */
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
7684 #define TIMER_MAGIC 0x0caf0000
7685 #define TIMER_MAGIC_MASK 0xffff0000
/* Convert QEMU provided timer ID back to internal 16bit index format */
static target_timer_t get_timer_id(abi_long arg)
{
    target_timer_t timerid = arg;

    /* Reject ids that do not carry the QEMU magic in the high bits. */
    if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
        return -TARGET_EINVAL;
    }

    /* Strip the magic, leaving the 16-bit table index. */
    timerid &= 0xffff;

    if (timerid >= ARRAY_SIZE(g_posix_timers)) {
        return -TARGET_EINVAL;
    }

    return timerid;
}
/*
 * fd_trans data hook for eventfd: byte-swap each 64-bit counter value
 * in buf, in place.  Used for both read and write directions since
 * tswap64 is its own inverse.  Returns len, or -EINVAL when the buffer
 * is too small to hold even one counter.
 */
static abi_long swap_data_eventfd(void *buf, size_t len)
{
    uint64_t *counter = buf;
    int i;

    if (len < sizeof(uint64_t)) {
        return -EINVAL;
    }

    for (i = 0; i < len; i += sizeof(uint64_t)) {
        *counter = tswap64(*counter);
        counter++;
    }

    return len;
}
/* fd_trans hooks for eventfd: the 64-bit counter must be swapped in
 * both directions; the same symmetric helper serves for each. */
static TargetFdTrans target_eventfd_trans = {
    .host_to_target_data = swap_data_eventfd,
    .target_to_host_data = swap_data_eventfd,
};
7727 #if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
7728 (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
7729 defined(__NR_inotify_init1))
/*
 * fd_trans data hook for inotify fds: walk the variable-length stream
 * of struct inotify_event records in buf and byte-swap each fixed
 * header field in place.  The name payload (ev->len bytes after the
 * header) is plain chars and needs no conversion.  Returns len.
 */
static abi_long host_to_target_data_inotify(void *buf, size_t len)
{
    struct inotify_event *ev;
    int i;
    uint32_t name_len;

    for (i = 0; i < len; i += sizeof(struct inotify_event) + name_len) {
        ev = (struct inotify_event *)((char *)buf + i);
        /* Capture the host-order length before it is swapped: it is the
         * stride to the next record. */
        name_len = ev->len;

        ev->wd = tswap32(ev->wd);
        ev->mask = tswap32(ev->mask);
        ev->cookie = tswap32(ev->cookie);
        ev->len = tswap32(name_len);
    }

    return len;
}
/* fd_trans hooks for inotify fds: event records read from the fd are
 * byte-swapped for the guest; writes need no translation. */
static TargetFdTrans target_inotify_trans = {
    .host_to_target_data = host_to_target_data_inotify,
};
/*
 * Copy a guest CPU-affinity bitmap into a host cpu mask, converting
 * between the guest's abi_ulong word size/byte order and the host's
 * unsigned long words.  Bits are transferred one at a time so the two
 * layouts never need to match.  host_mask is zeroed first; any host
 * bits beyond target_size stay clear.
 * Returns 0 or -TARGET_EFAULT if the guest buffer is inaccessible.
 */
static int target_to_host_cpu_mask(unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    /* Caller must supply a host buffer at least as large as the guest's. */
    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }
    memset(host_mask, 0, host_size);

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val;

        __get_user(val, &target_mask[i]);
        for (j = 0; j < target_bits; j++, bit++) {
            if (val & (1UL << j)) {
                host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
            }
        }
    }

    unlock_user(target_mask, target_addr, 0);
    return 0;
}
/*
 * Inverse of target_to_host_cpu_mask: copy a host CPU-affinity bitmap
 * into the guest's buffer, re-packing bits from host unsigned long
 * words into guest abi_ulong words with guest byte order.
 * Returns 0 or -TARGET_EFAULT if the guest buffer is inaccessible.
 */
static int host_to_target_cpu_mask(const unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    /* Caller must supply a host buffer at least as large as the guest's. */
    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val = 0;

        for (j = 0; j < target_bits; j++, bit++) {
            if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
                val |= 1UL << j;
            }
        }
        __put_user(val, &target_mask[i]);
    }

    unlock_user(target_mask, target_addr, target_size);
    return 0;
}
7821 /* do_syscall() should always have a single exit point at the end so
7822 that actions, such as logging of syscall results, can be performed.
7823 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7824 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
7825 abi_long arg2
, abi_long arg3
, abi_long arg4
,
7826 abi_long arg5
, abi_long arg6
, abi_long arg7
,
7829 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
7835 #if defined(DEBUG_ERESTARTSYS)
7836 /* Debug-only code for exercising the syscall-restart code paths
7837 * in the per-architecture cpu main loops: restart every syscall
7838 * the guest makes once before letting it through.
7845 return -TARGET_ERESTARTSYS
;
7851 gemu_log("syscall %d", num
);
7853 trace_guest_user_syscall(cpu
, num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
7855 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7858 case TARGET_NR_exit
:
7859 /* In old applications this may be used to implement _exit(2).
7860 However in threaded applictions it is used for thread termination,
7861 and _exit_group is used for application termination.
7862 Do thread termination if we have more then one thread. */
7864 if (block_signals()) {
7865 ret
= -TARGET_ERESTARTSYS
;
7871 if (CPU_NEXT(first_cpu
)) {
7874 /* Remove the CPU from the list. */
7875 QTAILQ_REMOVE(&cpus
, cpu
, node
);
7880 if (ts
->child_tidptr
) {
7881 put_user_u32(0, ts
->child_tidptr
);
7882 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
7886 object_unref(OBJECT(cpu
));
7888 rcu_unregister_thread();
7896 gdb_exit(cpu_env
, arg1
);
7898 ret
= 0; /* avoid warning */
7900 case TARGET_NR_read
:
7904 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7906 ret
= get_errno(safe_read(arg1
, p
, arg3
));
7908 fd_trans_host_to_target_data(arg1
)) {
7909 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
7911 unlock_user(p
, arg2
, ret
);
7914 case TARGET_NR_write
:
7915 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
7917 if (fd_trans_target_to_host_data(arg1
)) {
7918 void *copy
= g_malloc(arg3
);
7919 memcpy(copy
, p
, arg3
);
7920 ret
= fd_trans_target_to_host_data(arg1
)(copy
, arg3
);
7922 ret
= get_errno(safe_write(arg1
, copy
, ret
));
7926 ret
= get_errno(safe_write(arg1
, p
, arg3
));
7928 unlock_user(p
, arg2
, 0);
7930 #ifdef TARGET_NR_open
7931 case TARGET_NR_open
:
7932 if (!(p
= lock_user_string(arg1
)))
7934 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
7935 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
7937 fd_trans_unregister(ret
);
7938 unlock_user(p
, arg1
, 0);
7941 case TARGET_NR_openat
:
7942 if (!(p
= lock_user_string(arg2
)))
7944 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
7945 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
7947 fd_trans_unregister(ret
);
7948 unlock_user(p
, arg2
, 0);
7950 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7951 case TARGET_NR_name_to_handle_at
:
7952 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
7955 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7956 case TARGET_NR_open_by_handle_at
:
7957 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
7958 fd_trans_unregister(ret
);
7961 case TARGET_NR_close
:
7962 fd_trans_unregister(arg1
);
7963 ret
= get_errno(close(arg1
));
7968 #ifdef TARGET_NR_fork
7969 case TARGET_NR_fork
:
7970 ret
= get_errno(do_fork(cpu_env
, TARGET_SIGCHLD
, 0, 0, 0, 0));
7973 #ifdef TARGET_NR_waitpid
7974 case TARGET_NR_waitpid
:
7977 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
7978 if (!is_error(ret
) && arg2
&& ret
7979 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
7984 #ifdef TARGET_NR_waitid
7985 case TARGET_NR_waitid
:
7989 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
7990 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
7991 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
7993 host_to_target_siginfo(p
, &info
);
7994 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
7999 #ifdef TARGET_NR_creat /* not on alpha */
8000 case TARGET_NR_creat
:
8001 if (!(p
= lock_user_string(arg1
)))
8003 ret
= get_errno(creat(p
, arg2
));
8004 fd_trans_unregister(ret
);
8005 unlock_user(p
, arg1
, 0);
8008 #ifdef TARGET_NR_link
8009 case TARGET_NR_link
:
8012 p
= lock_user_string(arg1
);
8013 p2
= lock_user_string(arg2
);
8015 ret
= -TARGET_EFAULT
;
8017 ret
= get_errno(link(p
, p2
));
8018 unlock_user(p2
, arg2
, 0);
8019 unlock_user(p
, arg1
, 0);
8023 #if defined(TARGET_NR_linkat)
8024 case TARGET_NR_linkat
:
8029 p
= lock_user_string(arg2
);
8030 p2
= lock_user_string(arg4
);
8032 ret
= -TARGET_EFAULT
;
8034 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
8035 unlock_user(p
, arg2
, 0);
8036 unlock_user(p2
, arg4
, 0);
8040 #ifdef TARGET_NR_unlink
8041 case TARGET_NR_unlink
:
8042 if (!(p
= lock_user_string(arg1
)))
8044 ret
= get_errno(unlink(p
));
8045 unlock_user(p
, arg1
, 0);
8048 #if defined(TARGET_NR_unlinkat)
8049 case TARGET_NR_unlinkat
:
8050 if (!(p
= lock_user_string(arg2
)))
8052 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
8053 unlock_user(p
, arg2
, 0);
8056 case TARGET_NR_execve
:
8058 char **argp
, **envp
;
8061 abi_ulong guest_argp
;
8062 abi_ulong guest_envp
;
8069 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
8070 if (get_user_ual(addr
, gp
))
8078 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
8079 if (get_user_ual(addr
, gp
))
8086 argp
= g_new0(char *, argc
+ 1);
8087 envp
= g_new0(char *, envc
+ 1);
8089 for (gp
= guest_argp
, q
= argp
; gp
;
8090 gp
+= sizeof(abi_ulong
), q
++) {
8091 if (get_user_ual(addr
, gp
))
8095 if (!(*q
= lock_user_string(addr
)))
8097 total_size
+= strlen(*q
) + 1;
8101 for (gp
= guest_envp
, q
= envp
; gp
;
8102 gp
+= sizeof(abi_ulong
), q
++) {
8103 if (get_user_ual(addr
, gp
))
8107 if (!(*q
= lock_user_string(addr
)))
8109 total_size
+= strlen(*q
) + 1;
8113 if (!(p
= lock_user_string(arg1
)))
8115 /* Although execve() is not an interruptible syscall it is
8116 * a special case where we must use the safe_syscall wrapper:
8117 * if we allow a signal to happen before we make the host
8118 * syscall then we will 'lose' it, because at the point of
8119 * execve the process leaves QEMU's control. So we use the
8120 * safe syscall wrapper to ensure that we either take the
8121 * signal as a guest signal, or else it does not happen
8122 * before the execve completes and makes it the other
8123 * program's problem.
8125 ret
= get_errno(safe_execve(p
, argp
, envp
));
8126 unlock_user(p
, arg1
, 0);
8131 ret
= -TARGET_EFAULT
;
8134 for (gp
= guest_argp
, q
= argp
; *q
;
8135 gp
+= sizeof(abi_ulong
), q
++) {
8136 if (get_user_ual(addr
, gp
)
8139 unlock_user(*q
, addr
, 0);
8141 for (gp
= guest_envp
, q
= envp
; *q
;
8142 gp
+= sizeof(abi_ulong
), q
++) {
8143 if (get_user_ual(addr
, gp
)
8146 unlock_user(*q
, addr
, 0);
8153 case TARGET_NR_chdir
:
8154 if (!(p
= lock_user_string(arg1
)))
8156 ret
= get_errno(chdir(p
));
8157 unlock_user(p
, arg1
, 0);
8159 #ifdef TARGET_NR_time
8160 case TARGET_NR_time
:
8163 ret
= get_errno(time(&host_time
));
8166 && put_user_sal(host_time
, arg1
))
8171 #ifdef TARGET_NR_mknod
8172 case TARGET_NR_mknod
:
8173 if (!(p
= lock_user_string(arg1
)))
8175 ret
= get_errno(mknod(p
, arg2
, arg3
));
8176 unlock_user(p
, arg1
, 0);
8179 #if defined(TARGET_NR_mknodat)
8180 case TARGET_NR_mknodat
:
8181 if (!(p
= lock_user_string(arg2
)))
8183 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
8184 unlock_user(p
, arg2
, 0);
8187 #ifdef TARGET_NR_chmod
8188 case TARGET_NR_chmod
:
8189 if (!(p
= lock_user_string(arg1
)))
8191 ret
= get_errno(chmod(p
, arg2
));
8192 unlock_user(p
, arg1
, 0);
8195 #ifdef TARGET_NR_break
8196 case TARGET_NR_break
:
8199 #ifdef TARGET_NR_oldstat
8200 case TARGET_NR_oldstat
:
8203 case TARGET_NR_lseek
:
8204 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
8206 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8207 /* Alpha specific */
8208 case TARGET_NR_getxpid
:
8209 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
8210 ret
= get_errno(getpid());
8213 #ifdef TARGET_NR_getpid
8214 case TARGET_NR_getpid
:
8215 ret
= get_errno(getpid());
8218 case TARGET_NR_mount
:
8220 /* need to look at the data field */
8224 p
= lock_user_string(arg1
);
8232 p2
= lock_user_string(arg2
);
8235 unlock_user(p
, arg1
, 0);
8241 p3
= lock_user_string(arg3
);
8244 unlock_user(p
, arg1
, 0);
8246 unlock_user(p2
, arg2
, 0);
8253 /* FIXME - arg5 should be locked, but it isn't clear how to
8254 * do that since it's not guaranteed to be a NULL-terminated
8258 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
8260 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
8262 ret
= get_errno(ret
);
8265 unlock_user(p
, arg1
, 0);
8267 unlock_user(p2
, arg2
, 0);
8269 unlock_user(p3
, arg3
, 0);
8273 #ifdef TARGET_NR_umount
8274 case TARGET_NR_umount
:
8275 if (!(p
= lock_user_string(arg1
)))
8277 ret
= get_errno(umount(p
));
8278 unlock_user(p
, arg1
, 0);
8281 #ifdef TARGET_NR_stime /* not on alpha */
8282 case TARGET_NR_stime
:
8285 if (get_user_sal(host_time
, arg1
))
8287 ret
= get_errno(stime(&host_time
));
8291 case TARGET_NR_ptrace
:
8293 #ifdef TARGET_NR_alarm /* not on alpha */
8294 case TARGET_NR_alarm
:
8298 #ifdef TARGET_NR_oldfstat
8299 case TARGET_NR_oldfstat
:
8302 #ifdef TARGET_NR_pause /* not on alpha */
8303 case TARGET_NR_pause
:
8304 if (!block_signals()) {
8305 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
8307 ret
= -TARGET_EINTR
;
8310 #ifdef TARGET_NR_utime
8311 case TARGET_NR_utime
:
8313 struct utimbuf tbuf
, *host_tbuf
;
8314 struct target_utimbuf
*target_tbuf
;
8316 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
8318 tbuf
.actime
= tswapal(target_tbuf
->actime
);
8319 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
8320 unlock_user_struct(target_tbuf
, arg2
, 0);
8325 if (!(p
= lock_user_string(arg1
)))
8327 ret
= get_errno(utime(p
, host_tbuf
));
8328 unlock_user(p
, arg1
, 0);
8332 #ifdef TARGET_NR_utimes
8333 case TARGET_NR_utimes
:
8335 struct timeval
*tvp
, tv
[2];
8337 if (copy_from_user_timeval(&tv
[0], arg2
)
8338 || copy_from_user_timeval(&tv
[1],
8339 arg2
+ sizeof(struct target_timeval
)))
8345 if (!(p
= lock_user_string(arg1
)))
8347 ret
= get_errno(utimes(p
, tvp
));
8348 unlock_user(p
, arg1
, 0);
8352 #if defined(TARGET_NR_futimesat)
8353 case TARGET_NR_futimesat
:
8355 struct timeval
*tvp
, tv
[2];
8357 if (copy_from_user_timeval(&tv
[0], arg3
)
8358 || copy_from_user_timeval(&tv
[1],
8359 arg3
+ sizeof(struct target_timeval
)))
8365 if (!(p
= lock_user_string(arg2
)))
8367 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
8368 unlock_user(p
, arg2
, 0);
8372 #ifdef TARGET_NR_stty
8373 case TARGET_NR_stty
:
8376 #ifdef TARGET_NR_gtty
8377 case TARGET_NR_gtty
:
8380 #ifdef TARGET_NR_access
8381 case TARGET_NR_access
:
8382 if (!(p
= lock_user_string(arg1
)))
8384 ret
= get_errno(access(path(p
), arg2
));
8385 unlock_user(p
, arg1
, 0);
8388 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8389 case TARGET_NR_faccessat
:
8390 if (!(p
= lock_user_string(arg2
)))
8392 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
8393 unlock_user(p
, arg2
, 0);
8396 #ifdef TARGET_NR_nice /* not on alpha */
8397 case TARGET_NR_nice
:
8398 ret
= get_errno(nice(arg1
));
8401 #ifdef TARGET_NR_ftime
8402 case TARGET_NR_ftime
:
8405 case TARGET_NR_sync
:
8409 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8410 case TARGET_NR_syncfs
:
8411 ret
= get_errno(syncfs(arg1
));
8414 case TARGET_NR_kill
:
8415 ret
= get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
8417 #ifdef TARGET_NR_rename
8418 case TARGET_NR_rename
:
8421 p
= lock_user_string(arg1
);
8422 p2
= lock_user_string(arg2
);
8424 ret
= -TARGET_EFAULT
;
8426 ret
= get_errno(rename(p
, p2
));
8427 unlock_user(p2
, arg2
, 0);
8428 unlock_user(p
, arg1
, 0);
8432 #if defined(TARGET_NR_renameat)
8433 case TARGET_NR_renameat
:
8436 p
= lock_user_string(arg2
);
8437 p2
= lock_user_string(arg4
);
8439 ret
= -TARGET_EFAULT
;
8441 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
8442 unlock_user(p2
, arg4
, 0);
8443 unlock_user(p
, arg2
, 0);
8447 #if defined(TARGET_NR_renameat2)
8448 case TARGET_NR_renameat2
:
8451 p
= lock_user_string(arg2
);
8452 p2
= lock_user_string(arg4
);
8454 ret
= -TARGET_EFAULT
;
8456 ret
= get_errno(sys_renameat2(arg1
, p
, arg3
, p2
, arg5
));
8458 unlock_user(p2
, arg4
, 0);
8459 unlock_user(p
, arg2
, 0);
8463 #ifdef TARGET_NR_mkdir
8464 case TARGET_NR_mkdir
:
8465 if (!(p
= lock_user_string(arg1
)))
8467 ret
= get_errno(mkdir(p
, arg2
));
8468 unlock_user(p
, arg1
, 0);
8471 #if defined(TARGET_NR_mkdirat)
8472 case TARGET_NR_mkdirat
:
8473 if (!(p
= lock_user_string(arg2
)))
8475 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
8476 unlock_user(p
, arg2
, 0);
8479 #ifdef TARGET_NR_rmdir
8480 case TARGET_NR_rmdir
:
8481 if (!(p
= lock_user_string(arg1
)))
8483 ret
= get_errno(rmdir(p
));
8484 unlock_user(p
, arg1
, 0);
8488 ret
= get_errno(dup(arg1
));
8490 fd_trans_dup(arg1
, ret
);
8493 #ifdef TARGET_NR_pipe
8494 case TARGET_NR_pipe
:
8495 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
8498 #ifdef TARGET_NR_pipe2
8499 case TARGET_NR_pipe2
:
8500 ret
= do_pipe(cpu_env
, arg1
,
8501 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
8504 case TARGET_NR_times
:
8506 struct target_tms
*tmsp
;
8508 ret
= get_errno(times(&tms
));
8510 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
8513 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
8514 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
8515 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
8516 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
8519 ret
= host_to_target_clock_t(ret
);
8522 #ifdef TARGET_NR_prof
8523 case TARGET_NR_prof
:
8526 #ifdef TARGET_NR_signal
8527 case TARGET_NR_signal
:
8530 case TARGET_NR_acct
:
8532 ret
= get_errno(acct(NULL
));
8534 if (!(p
= lock_user_string(arg1
)))
8536 ret
= get_errno(acct(path(p
)));
8537 unlock_user(p
, arg1
, 0);
8540 #ifdef TARGET_NR_umount2
8541 case TARGET_NR_umount2
:
8542 if (!(p
= lock_user_string(arg1
)))
8544 ret
= get_errno(umount2(p
, arg2
));
8545 unlock_user(p
, arg1
, 0);
8548 #ifdef TARGET_NR_lock
8549 case TARGET_NR_lock
:
8552 case TARGET_NR_ioctl
:
8553 ret
= do_ioctl(arg1
, arg2
, arg3
);
8555 case TARGET_NR_fcntl
:
8556 ret
= do_fcntl(arg1
, arg2
, arg3
);
8558 #ifdef TARGET_NR_mpx
8562 case TARGET_NR_setpgid
:
8563 ret
= get_errno(setpgid(arg1
, arg2
));
8565 #ifdef TARGET_NR_ulimit
8566 case TARGET_NR_ulimit
:
8569 #ifdef TARGET_NR_oldolduname
8570 case TARGET_NR_oldolduname
:
8573 case TARGET_NR_umask
:
8574 ret
= get_errno(umask(arg1
));
8576 case TARGET_NR_chroot
:
8577 if (!(p
= lock_user_string(arg1
)))
8579 ret
= get_errno(chroot(p
));
8580 unlock_user(p
, arg1
, 0);
8582 #ifdef TARGET_NR_ustat
8583 case TARGET_NR_ustat
:
8586 #ifdef TARGET_NR_dup2
8587 case TARGET_NR_dup2
:
8588 ret
= get_errno(dup2(arg1
, arg2
));
8590 fd_trans_dup(arg1
, arg2
);
8594 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8595 case TARGET_NR_dup3
:
8599 if ((arg3
& ~TARGET_O_CLOEXEC
) != 0) {
8602 host_flags
= target_to_host_bitmask(arg3
, fcntl_flags_tbl
);
8603 ret
= get_errno(dup3(arg1
, arg2
, host_flags
));
8605 fd_trans_dup(arg1
, arg2
);
8610 #ifdef TARGET_NR_getppid /* not on alpha */
8611 case TARGET_NR_getppid
:
8612 ret
= get_errno(getppid());
8615 #ifdef TARGET_NR_getpgrp
8616 case TARGET_NR_getpgrp
:
8617 ret
= get_errno(getpgrp());
8620 case TARGET_NR_setsid
:
8621 ret
= get_errno(setsid());
8623 #ifdef TARGET_NR_sigaction
8624 case TARGET_NR_sigaction
:
8626 #if defined(TARGET_ALPHA)
8627 struct target_sigaction act
, oact
, *pact
= 0;
8628 struct target_old_sigaction
*old_act
;
8630 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8632 act
._sa_handler
= old_act
->_sa_handler
;
8633 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8634 act
.sa_flags
= old_act
->sa_flags
;
8635 act
.sa_restorer
= 0;
8636 unlock_user_struct(old_act
, arg2
, 0);
8639 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8640 if (!is_error(ret
) && arg3
) {
8641 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8643 old_act
->_sa_handler
= oact
._sa_handler
;
8644 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8645 old_act
->sa_flags
= oact
.sa_flags
;
8646 unlock_user_struct(old_act
, arg3
, 1);
8648 #elif defined(TARGET_MIPS)
8649 struct target_sigaction act
, oact
, *pact
, *old_act
;
8652 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8654 act
._sa_handler
= old_act
->_sa_handler
;
8655 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
8656 act
.sa_flags
= old_act
->sa_flags
;
8657 unlock_user_struct(old_act
, arg2
, 0);
8663 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8665 if (!is_error(ret
) && arg3
) {
8666 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8668 old_act
->_sa_handler
= oact
._sa_handler
;
8669 old_act
->sa_flags
= oact
.sa_flags
;
8670 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
8671 old_act
->sa_mask
.sig
[1] = 0;
8672 old_act
->sa_mask
.sig
[2] = 0;
8673 old_act
->sa_mask
.sig
[3] = 0;
8674 unlock_user_struct(old_act
, arg3
, 1);
8677 struct target_old_sigaction
*old_act
;
8678 struct target_sigaction act
, oact
, *pact
;
8680 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8682 act
._sa_handler
= old_act
->_sa_handler
;
8683 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8684 act
.sa_flags
= old_act
->sa_flags
;
8685 act
.sa_restorer
= old_act
->sa_restorer
;
8686 unlock_user_struct(old_act
, arg2
, 0);
8691 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8692 if (!is_error(ret
) && arg3
) {
8693 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8695 old_act
->_sa_handler
= oact
._sa_handler
;
8696 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8697 old_act
->sa_flags
= oact
.sa_flags
;
8698 old_act
->sa_restorer
= oact
.sa_restorer
;
8699 unlock_user_struct(old_act
, arg3
, 1);
8705 case TARGET_NR_rt_sigaction
:
8707 #if defined(TARGET_ALPHA)
8708 /* For Alpha and SPARC this is a 5 argument syscall, with
8709 * a 'restorer' parameter which must be copied into the
8710 * sa_restorer field of the sigaction struct.
8711 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8712 * and arg5 is the sigsetsize.
8713 * Alpha also has a separate rt_sigaction struct that it uses
8714 * here; SPARC uses the usual sigaction struct.
8716 struct target_rt_sigaction
*rt_act
;
8717 struct target_sigaction act
, oact
, *pact
= 0;
8719 if (arg4
!= sizeof(target_sigset_t
)) {
8720 ret
= -TARGET_EINVAL
;
8724 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
8726 act
._sa_handler
= rt_act
->_sa_handler
;
8727 act
.sa_mask
= rt_act
->sa_mask
;
8728 act
.sa_flags
= rt_act
->sa_flags
;
8729 act
.sa_restorer
= arg5
;
8730 unlock_user_struct(rt_act
, arg2
, 0);
8733 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8734 if (!is_error(ret
) && arg3
) {
8735 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
8737 rt_act
->_sa_handler
= oact
._sa_handler
;
8738 rt_act
->sa_mask
= oact
.sa_mask
;
8739 rt_act
->sa_flags
= oact
.sa_flags
;
8740 unlock_user_struct(rt_act
, arg3
, 1);
8744 target_ulong restorer
= arg4
;
8745 target_ulong sigsetsize
= arg5
;
8747 target_ulong sigsetsize
= arg4
;
8749 struct target_sigaction
*act
;
8750 struct target_sigaction
*oact
;
8752 if (sigsetsize
!= sizeof(target_sigset_t
)) {
8753 ret
= -TARGET_EINVAL
;
8757 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1)) {
8761 act
->sa_restorer
= restorer
;
8767 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
8768 ret
= -TARGET_EFAULT
;
8769 goto rt_sigaction_fail
;
8773 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
8776 unlock_user_struct(act
, arg2
, 0);
8778 unlock_user_struct(oact
, arg3
, 1);
8782 #ifdef TARGET_NR_sgetmask /* not on alpha */
8783 case TARGET_NR_sgetmask
:
8786 abi_ulong target_set
;
8787 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8789 host_to_target_old_sigset(&target_set
, &cur_set
);
8795 #ifdef TARGET_NR_ssetmask /* not on alpha */
8796 case TARGET_NR_ssetmask
:
8799 abi_ulong target_set
= arg1
;
8800 target_to_host_old_sigset(&set
, &target_set
);
8801 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
8803 host_to_target_old_sigset(&target_set
, &oset
);
8809 #ifdef TARGET_NR_sigprocmask
8810 case TARGET_NR_sigprocmask
:
8812 #if defined(TARGET_ALPHA)
8813 sigset_t set
, oldset
;
8818 case TARGET_SIG_BLOCK
:
8821 case TARGET_SIG_UNBLOCK
:
8824 case TARGET_SIG_SETMASK
:
8828 ret
= -TARGET_EINVAL
;
8832 target_to_host_old_sigset(&set
, &mask
);
8834 ret
= do_sigprocmask(how
, &set
, &oldset
);
8835 if (!is_error(ret
)) {
8836 host_to_target_old_sigset(&mask
, &oldset
);
8838 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
8841 sigset_t set
, oldset
, *set_ptr
;
8846 case TARGET_SIG_BLOCK
:
8849 case TARGET_SIG_UNBLOCK
:
8852 case TARGET_SIG_SETMASK
:
8856 ret
= -TARGET_EINVAL
;
8859 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8861 target_to_host_old_sigset(&set
, p
);
8862 unlock_user(p
, arg2
, 0);
8868 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8869 if (!is_error(ret
) && arg3
) {
8870 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8872 host_to_target_old_sigset(p
, &oldset
);
8873 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8879 case TARGET_NR_rt_sigprocmask
:
8882 sigset_t set
, oldset
, *set_ptr
;
8884 if (arg4
!= sizeof(target_sigset_t
)) {
8885 ret
= -TARGET_EINVAL
;
8891 case TARGET_SIG_BLOCK
:
8894 case TARGET_SIG_UNBLOCK
:
8897 case TARGET_SIG_SETMASK
:
8901 ret
= -TARGET_EINVAL
;
8904 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8906 target_to_host_sigset(&set
, p
);
8907 unlock_user(p
, arg2
, 0);
8913 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8914 if (!is_error(ret
) && arg3
) {
8915 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8917 host_to_target_sigset(p
, &oldset
);
8918 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8922 #ifdef TARGET_NR_sigpending
8923 case TARGET_NR_sigpending
:
8926 ret
= get_errno(sigpending(&set
));
8927 if (!is_error(ret
)) {
8928 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8930 host_to_target_old_sigset(p
, &set
);
8931 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8936 case TARGET_NR_rt_sigpending
:
8940 /* Yes, this check is >, not != like most. We follow the kernel's
8941 * logic and it does it like this because it implements
8942 * NR_sigpending through the same code path, and in that case
8943 * the old_sigset_t is smaller in size.
8945 if (arg2
> sizeof(target_sigset_t
)) {
8946 ret
= -TARGET_EINVAL
;
8950 ret
= get_errno(sigpending(&set
));
8951 if (!is_error(ret
)) {
8952 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
8954 host_to_target_sigset(p
, &set
);
8955 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
8959 #ifdef TARGET_NR_sigsuspend
8960 case TARGET_NR_sigsuspend
:
8962 TaskState
*ts
= cpu
->opaque
;
8963 #if defined(TARGET_ALPHA)
8964 abi_ulong mask
= arg1
;
8965 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
8967 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8969 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
8970 unlock_user(p
, arg1
, 0);
8972 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8974 if (ret
!= -TARGET_ERESTARTSYS
) {
8975 ts
->in_sigsuspend
= 1;
8980 case TARGET_NR_rt_sigsuspend
:
8982 TaskState
*ts
= cpu
->opaque
;
8984 if (arg2
!= sizeof(target_sigset_t
)) {
8985 ret
= -TARGET_EINVAL
;
8988 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
8990 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
8991 unlock_user(p
, arg1
, 0);
8992 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
8994 if (ret
!= -TARGET_ERESTARTSYS
) {
8995 ts
->in_sigsuspend
= 1;
8999 case TARGET_NR_rt_sigtimedwait
:
9002 struct timespec uts
, *puts
;
9005 if (arg4
!= sizeof(target_sigset_t
)) {
9006 ret
= -TARGET_EINVAL
;
9010 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
9012 target_to_host_sigset(&set
, p
);
9013 unlock_user(p
, arg1
, 0);
9016 target_to_host_timespec(puts
, arg3
);
9020 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
9022 if (!is_error(ret
)) {
9024 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
9029 host_to_target_siginfo(p
, &uinfo
);
9030 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
9032 ret
= host_to_target_signal(ret
);
9036 case TARGET_NR_rt_sigqueueinfo
:
9040 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
9044 target_to_host_siginfo(&uinfo
, p
);
9045 unlock_user(p
, arg3
, 0);
9046 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
9049 case TARGET_NR_rt_tgsigqueueinfo
:
9053 p
= lock_user(VERIFY_READ
, arg4
, sizeof(target_siginfo_t
), 1);
9057 target_to_host_siginfo(&uinfo
, p
);
9058 unlock_user(p
, arg4
, 0);
9059 ret
= get_errno(sys_rt_tgsigqueueinfo(arg1
, arg2
, arg3
, &uinfo
));
9062 #ifdef TARGET_NR_sigreturn
9063 case TARGET_NR_sigreturn
:
9064 if (block_signals()) {
9065 ret
= -TARGET_ERESTARTSYS
;
9067 ret
= do_sigreturn(cpu_env
);
9071 case TARGET_NR_rt_sigreturn
:
9072 if (block_signals()) {
9073 ret
= -TARGET_ERESTARTSYS
;
9075 ret
= do_rt_sigreturn(cpu_env
);
9078 case TARGET_NR_sethostname
:
9079 if (!(p
= lock_user_string(arg1
)))
9081 ret
= get_errno(sethostname(p
, arg2
));
9082 unlock_user(p
, arg1
, 0);
9084 case TARGET_NR_setrlimit
:
9086 int resource
= target_to_host_resource(arg1
);
9087 struct target_rlimit
*target_rlim
;
9089 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
9091 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
9092 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
9093 unlock_user_struct(target_rlim
, arg2
, 0);
9094 ret
= get_errno(setrlimit(resource
, &rlim
));
9097 case TARGET_NR_getrlimit
:
9099 int resource
= target_to_host_resource(arg1
);
9100 struct target_rlimit
*target_rlim
;
9103 ret
= get_errno(getrlimit(resource
, &rlim
));
9104 if (!is_error(ret
)) {
9105 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
9107 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
9108 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
9109 unlock_user_struct(target_rlim
, arg2
, 1);
9113 case TARGET_NR_getrusage
:
9115 struct rusage rusage
;
9116 ret
= get_errno(getrusage(arg1
, &rusage
));
9117 if (!is_error(ret
)) {
9118 ret
= host_to_target_rusage(arg2
, &rusage
);
9122 case TARGET_NR_gettimeofday
:
9125 ret
= get_errno(gettimeofday(&tv
, NULL
));
9126 if (!is_error(ret
)) {
9127 if (copy_to_user_timeval(arg1
, &tv
))
9132 case TARGET_NR_settimeofday
:
9134 struct timeval tv
, *ptv
= NULL
;
9135 struct timezone tz
, *ptz
= NULL
;
9138 if (copy_from_user_timeval(&tv
, arg1
)) {
9145 if (copy_from_user_timezone(&tz
, arg2
)) {
9151 ret
= get_errno(settimeofday(ptv
, ptz
));
9154 #if defined(TARGET_NR_select)
9155 case TARGET_NR_select
:
9156 #if defined(TARGET_WANT_NI_OLD_SELECT)
9157 /* some architectures used to have old_select here
9158 * but now ENOSYS it.
9160 ret
= -TARGET_ENOSYS
;
9161 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9162 ret
= do_old_select(arg1
);
9164 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9168 #ifdef TARGET_NR_pselect6
9169 case TARGET_NR_pselect6
:
9171 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
9172 fd_set rfds
, wfds
, efds
;
9173 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
9174 struct timespec ts
, *ts_ptr
;
9177 * The 6th arg is actually two args smashed together,
9178 * so we cannot use the C library.
9186 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
9187 target_sigset_t
*target_sigset
;
9195 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
9199 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
9203 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
9209 * This takes a timespec, and not a timeval, so we cannot
9210 * use the do_select() helper ...
9213 if (target_to_host_timespec(&ts
, ts_addr
)) {
9221 /* Extract the two packed args for the sigset */
9224 sig
.size
= SIGSET_T_SIZE
;
9226 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
9230 arg_sigset
= tswapal(arg7
[0]);
9231 arg_sigsize
= tswapal(arg7
[1]);
9232 unlock_user(arg7
, arg6
, 0);
9236 if (arg_sigsize
!= sizeof(*target_sigset
)) {
9237 /* Like the kernel, we enforce correct size sigsets */
9238 ret
= -TARGET_EINVAL
;
9241 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
9242 sizeof(*target_sigset
), 1);
9243 if (!target_sigset
) {
9246 target_to_host_sigset(&set
, target_sigset
);
9247 unlock_user(target_sigset
, arg_sigset
, 0);
9255 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
9258 if (!is_error(ret
)) {
9259 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
9261 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
9263 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
9266 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
9272 #ifdef TARGET_NR_symlink
9273 case TARGET_NR_symlink
:
9276 p
= lock_user_string(arg1
);
9277 p2
= lock_user_string(arg2
);
9279 ret
= -TARGET_EFAULT
;
9281 ret
= get_errno(symlink(p
, p2
));
9282 unlock_user(p2
, arg2
, 0);
9283 unlock_user(p
, arg1
, 0);
9287 #if defined(TARGET_NR_symlinkat)
9288 case TARGET_NR_symlinkat
:
9291 p
= lock_user_string(arg1
);
9292 p2
= lock_user_string(arg3
);
9294 ret
= -TARGET_EFAULT
;
9296 ret
= get_errno(symlinkat(p
, arg2
, p2
));
9297 unlock_user(p2
, arg3
, 0);
9298 unlock_user(p
, arg1
, 0);
9302 #ifdef TARGET_NR_oldlstat
9303 case TARGET_NR_oldlstat
:
9306 #ifdef TARGET_NR_readlink
9307 case TARGET_NR_readlink
:
9310 p
= lock_user_string(arg1
);
9311 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9313 ret
= -TARGET_EFAULT
;
9315 /* Short circuit this for the magic exe check. */
9316 ret
= -TARGET_EINVAL
;
9317 } else if (is_proc_myself((const char *)p
, "exe")) {
9318 char real
[PATH_MAX
], *temp
;
9319 temp
= realpath(exec_path
, real
);
9320 /* Return value is # of bytes that we wrote to the buffer. */
9322 ret
= get_errno(-1);
9324 /* Don't worry about sign mismatch as earlier mapping
9325 * logic would have thrown a bad address error. */
9326 ret
= MIN(strlen(real
), arg3
);
9327 /* We cannot NUL terminate the string. */
9328 memcpy(p2
, real
, ret
);
9331 ret
= get_errno(readlink(path(p
), p2
, arg3
));
9333 unlock_user(p2
, arg2
, ret
);
9334 unlock_user(p
, arg1
, 0);
9338 #if defined(TARGET_NR_readlinkat)
9339 case TARGET_NR_readlinkat
:
9342 p
= lock_user_string(arg2
);
9343 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
9345 ret
= -TARGET_EFAULT
;
9346 } else if (is_proc_myself((const char *)p
, "exe")) {
9347 char real
[PATH_MAX
], *temp
;
9348 temp
= realpath(exec_path
, real
);
9349 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
9350 snprintf((char *)p2
, arg4
, "%s", real
);
9352 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
9354 unlock_user(p2
, arg3
, ret
);
9355 unlock_user(p
, arg2
, 0);
9359 #ifdef TARGET_NR_uselib
9360 case TARGET_NR_uselib
:
9363 #ifdef TARGET_NR_swapon
9364 case TARGET_NR_swapon
:
9365 if (!(p
= lock_user_string(arg1
)))
9367 ret
= get_errno(swapon(p
, arg2
));
9368 unlock_user(p
, arg1
, 0);
9371 case TARGET_NR_reboot
:
9372 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
9373 /* arg4 must be ignored in all other cases */
9374 p
= lock_user_string(arg4
);
9378 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
9379 unlock_user(p
, arg4
, 0);
9381 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
9384 #ifdef TARGET_NR_readdir
9385 case TARGET_NR_readdir
:
9388 #ifdef TARGET_NR_mmap
9389 case TARGET_NR_mmap
:
9390 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9391 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9392 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9393 || defined(TARGET_S390X)
9396 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
9397 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
9405 unlock_user(v
, arg1
, 0);
9406 ret
= get_errno(target_mmap(v1
, v2
, v3
,
9407 target_to_host_bitmask(v4
, mmap_flags_tbl
),
9411 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9412 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9418 #ifdef TARGET_NR_mmap2
9419 case TARGET_NR_mmap2
:
9421 #define MMAP_SHIFT 12
9423 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9424 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9426 arg6
<< MMAP_SHIFT
));
9429 case TARGET_NR_munmap
:
9430 ret
= get_errno(target_munmap(arg1
, arg2
));
9432 case TARGET_NR_mprotect
:
9434 TaskState
*ts
= cpu
->opaque
;
9435 /* Special hack to detect libc making the stack executable. */
9436 if ((arg3
& PROT_GROWSDOWN
)
9437 && arg1
>= ts
->info
->stack_limit
9438 && arg1
<= ts
->info
->start_stack
) {
9439 arg3
&= ~PROT_GROWSDOWN
;
9440 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
9441 arg1
= ts
->info
->stack_limit
;
9444 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
9446 #ifdef TARGET_NR_mremap
9447 case TARGET_NR_mremap
:
9448 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
9451 /* ??? msync/mlock/munlock are broken for softmmu. */
9452 #ifdef TARGET_NR_msync
9453 case TARGET_NR_msync
:
9454 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
9457 #ifdef TARGET_NR_mlock
9458 case TARGET_NR_mlock
:
9459 ret
= get_errno(mlock(g2h(arg1
), arg2
));
9462 #ifdef TARGET_NR_munlock
9463 case TARGET_NR_munlock
:
9464 ret
= get_errno(munlock(g2h(arg1
), arg2
));
9467 #ifdef TARGET_NR_mlockall
9468 case TARGET_NR_mlockall
:
9469 ret
= get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
9472 #ifdef TARGET_NR_munlockall
9473 case TARGET_NR_munlockall
:
9474 ret
= get_errno(munlockall());
9477 case TARGET_NR_truncate
:
9478 if (!(p
= lock_user_string(arg1
)))
9480 ret
= get_errno(truncate(p
, arg2
));
9481 unlock_user(p
, arg1
, 0);
9483 case TARGET_NR_ftruncate
:
9484 ret
= get_errno(ftruncate(arg1
, arg2
));
9486 case TARGET_NR_fchmod
:
9487 ret
= get_errno(fchmod(arg1
, arg2
));
9489 #if defined(TARGET_NR_fchmodat)
9490 case TARGET_NR_fchmodat
:
9491 if (!(p
= lock_user_string(arg2
)))
9493 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
9494 unlock_user(p
, arg2
, 0);
9497 case TARGET_NR_getpriority
:
9498 /* Note that negative values are valid for getpriority, so we must
9499 differentiate based on errno settings. */
9501 ret
= getpriority(arg1
, arg2
);
9502 if (ret
== -1 && errno
!= 0) {
9503 ret
= -host_to_target_errno(errno
);
9507 /* Return value is the unbiased priority. Signal no error. */
9508 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
9510 /* Return value is a biased priority to avoid negative numbers. */
9514 case TARGET_NR_setpriority
:
9515 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
9517 #ifdef TARGET_NR_profil
9518 case TARGET_NR_profil
:
9521 case TARGET_NR_statfs
:
9522 if (!(p
= lock_user_string(arg1
)))
9524 ret
= get_errno(statfs(path(p
), &stfs
));
9525 unlock_user(p
, arg1
, 0);
9527 if (!is_error(ret
)) {
9528 struct target_statfs
*target_stfs
;
9530 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
9532 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9533 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9534 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9535 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9536 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9537 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9538 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9539 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9540 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9541 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9542 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9543 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9544 unlock_user_struct(target_stfs
, arg2
, 1);
9547 case TARGET_NR_fstatfs
:
9548 ret
= get_errno(fstatfs(arg1
, &stfs
));
9549 goto convert_statfs
;
9550 #ifdef TARGET_NR_statfs64
9551 case TARGET_NR_statfs64
:
9552 if (!(p
= lock_user_string(arg1
)))
9554 ret
= get_errno(statfs(path(p
), &stfs
));
9555 unlock_user(p
, arg1
, 0);
9557 if (!is_error(ret
)) {
9558 struct target_statfs64
*target_stfs
;
9560 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
9562 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9563 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9564 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9565 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9566 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9567 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9568 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9569 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9570 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9571 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9572 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9573 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9574 unlock_user_struct(target_stfs
, arg3
, 1);
9577 case TARGET_NR_fstatfs64
:
9578 ret
= get_errno(fstatfs(arg1
, &stfs
));
9579 goto convert_statfs64
;
9581 #ifdef TARGET_NR_ioperm
9582 case TARGET_NR_ioperm
:
9585 #ifdef TARGET_NR_socketcall
9586 case TARGET_NR_socketcall
:
9587 ret
= do_socketcall(arg1
, arg2
);
9590 #ifdef TARGET_NR_accept
9591 case TARGET_NR_accept
:
9592 ret
= do_accept4(arg1
, arg2
, arg3
, 0);
9595 #ifdef TARGET_NR_accept4
9596 case TARGET_NR_accept4
:
9597 ret
= do_accept4(arg1
, arg2
, arg3
, arg4
);
9600 #ifdef TARGET_NR_bind
9601 case TARGET_NR_bind
:
9602 ret
= do_bind(arg1
, arg2
, arg3
);
9605 #ifdef TARGET_NR_connect
9606 case TARGET_NR_connect
:
9607 ret
= do_connect(arg1
, arg2
, arg3
);
9610 #ifdef TARGET_NR_getpeername
9611 case TARGET_NR_getpeername
:
9612 ret
= do_getpeername(arg1
, arg2
, arg3
);
9615 #ifdef TARGET_NR_getsockname
9616 case TARGET_NR_getsockname
:
9617 ret
= do_getsockname(arg1
, arg2
, arg3
);
9620 #ifdef TARGET_NR_getsockopt
9621 case TARGET_NR_getsockopt
:
9622 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
9625 #ifdef TARGET_NR_listen
9626 case TARGET_NR_listen
:
9627 ret
= get_errno(listen(arg1
, arg2
));
9630 #ifdef TARGET_NR_recv
9631 case TARGET_NR_recv
:
9632 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
9635 #ifdef TARGET_NR_recvfrom
9636 case TARGET_NR_recvfrom
:
9637 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9640 #ifdef TARGET_NR_recvmsg
9641 case TARGET_NR_recvmsg
:
9642 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
9645 #ifdef TARGET_NR_send
9646 case TARGET_NR_send
:
9647 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
9650 #ifdef TARGET_NR_sendmsg
9651 case TARGET_NR_sendmsg
:
9652 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
9655 #ifdef TARGET_NR_sendmmsg
9656 case TARGET_NR_sendmmsg
:
9657 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
9659 case TARGET_NR_recvmmsg
:
9660 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
9663 #ifdef TARGET_NR_sendto
9664 case TARGET_NR_sendto
:
9665 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9668 #ifdef TARGET_NR_shutdown
9669 case TARGET_NR_shutdown
:
9670 ret
= get_errno(shutdown(arg1
, arg2
));
9673 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9674 case TARGET_NR_getrandom
:
9675 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
9679 ret
= get_errno(getrandom(p
, arg2
, arg3
));
9680 unlock_user(p
, arg1
, ret
);
9683 #ifdef TARGET_NR_socket
9684 case TARGET_NR_socket
:
9685 ret
= do_socket(arg1
, arg2
, arg3
);
9688 #ifdef TARGET_NR_socketpair
9689 case TARGET_NR_socketpair
:
9690 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
9693 #ifdef TARGET_NR_setsockopt
9694 case TARGET_NR_setsockopt
:
9695 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
9698 #if defined(TARGET_NR_syslog)
9699 case TARGET_NR_syslog
:
9704 case TARGET_SYSLOG_ACTION_CLOSE
: /* Close log */
9705 case TARGET_SYSLOG_ACTION_OPEN
: /* Open log */
9706 case TARGET_SYSLOG_ACTION_CLEAR
: /* Clear ring buffer */
9707 case TARGET_SYSLOG_ACTION_CONSOLE_OFF
: /* Disable logging */
9708 case TARGET_SYSLOG_ACTION_CONSOLE_ON
: /* Enable logging */
9709 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL
: /* Set messages level */
9710 case TARGET_SYSLOG_ACTION_SIZE_UNREAD
: /* Number of chars */
9711 case TARGET_SYSLOG_ACTION_SIZE_BUFFER
: /* Size of the buffer */
9713 ret
= get_errno(sys_syslog((int)arg1
, NULL
, (int)arg3
));
9716 case TARGET_SYSLOG_ACTION_READ
: /* Read from log */
9717 case TARGET_SYSLOG_ACTION_READ_CLEAR
: /* Read/clear msgs */
9718 case TARGET_SYSLOG_ACTION_READ_ALL
: /* Read last messages */
9720 ret
= -TARGET_EINVAL
;
9728 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9730 ret
= -TARGET_EFAULT
;
9733 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
9734 unlock_user(p
, arg2
, arg3
);
9744 case TARGET_NR_setitimer
:
9746 struct itimerval value
, ovalue
, *pvalue
;
9750 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
9751 || copy_from_user_timeval(&pvalue
->it_value
,
9752 arg2
+ sizeof(struct target_timeval
)))
9757 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
9758 if (!is_error(ret
) && arg3
) {
9759 if (copy_to_user_timeval(arg3
,
9760 &ovalue
.it_interval
)
9761 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
9767 case TARGET_NR_getitimer
:
9769 struct itimerval value
;
9771 ret
= get_errno(getitimer(arg1
, &value
));
9772 if (!is_error(ret
) && arg2
) {
9773 if (copy_to_user_timeval(arg2
,
9775 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
9781 #ifdef TARGET_NR_stat
9782 case TARGET_NR_stat
:
9783 if (!(p
= lock_user_string(arg1
)))
9785 ret
= get_errno(stat(path(p
), &st
));
9786 unlock_user(p
, arg1
, 0);
9789 #ifdef TARGET_NR_lstat
9790 case TARGET_NR_lstat
:
9791 if (!(p
= lock_user_string(arg1
)))
9793 ret
= get_errno(lstat(path(p
), &st
));
9794 unlock_user(p
, arg1
, 0);
9797 case TARGET_NR_fstat
:
9799 ret
= get_errno(fstat(arg1
, &st
));
9800 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9803 if (!is_error(ret
)) {
9804 struct target_stat
*target_st
;
9806 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
9808 memset(target_st
, 0, sizeof(*target_st
));
9809 __put_user(st
.st_dev
, &target_st
->st_dev
);
9810 __put_user(st
.st_ino
, &target_st
->st_ino
);
9811 __put_user(st
.st_mode
, &target_st
->st_mode
);
9812 __put_user(st
.st_uid
, &target_st
->st_uid
);
9813 __put_user(st
.st_gid
, &target_st
->st_gid
);
9814 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
9815 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
9816 __put_user(st
.st_size
, &target_st
->st_size
);
9817 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
9818 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
9819 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
9820 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
9821 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
9822 unlock_user_struct(target_st
, arg2
, 1);
9826 #ifdef TARGET_NR_olduname
9827 case TARGET_NR_olduname
:
9830 #ifdef TARGET_NR_iopl
9831 case TARGET_NR_iopl
:
9834 case TARGET_NR_vhangup
:
9835 ret
= get_errno(vhangup());
9837 #ifdef TARGET_NR_idle
9838 case TARGET_NR_idle
:
9841 #ifdef TARGET_NR_syscall
9842 case TARGET_NR_syscall
:
9843 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
9844 arg6
, arg7
, arg8
, 0);
9847 case TARGET_NR_wait4
:
9850 abi_long status_ptr
= arg2
;
9851 struct rusage rusage
, *rusage_ptr
;
9852 abi_ulong target_rusage
= arg4
;
9853 abi_long rusage_err
;
9855 rusage_ptr
= &rusage
;
9858 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
9859 if (!is_error(ret
)) {
9860 if (status_ptr
&& ret
) {
9861 status
= host_to_target_waitstatus(status
);
9862 if (put_user_s32(status
, status_ptr
))
9865 if (target_rusage
) {
9866 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
9874 #ifdef TARGET_NR_swapoff
9875 case TARGET_NR_swapoff
:
9876 if (!(p
= lock_user_string(arg1
)))
9878 ret
= get_errno(swapoff(p
));
9879 unlock_user(p
, arg1
, 0);
9882 case TARGET_NR_sysinfo
:
9884 struct target_sysinfo
*target_value
;
9885 struct sysinfo value
;
9886 ret
= get_errno(sysinfo(&value
));
9887 if (!is_error(ret
) && arg1
)
9889 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
9891 __put_user(value
.uptime
, &target_value
->uptime
);
9892 __put_user(value
.loads
[0], &target_value
->loads
[0]);
9893 __put_user(value
.loads
[1], &target_value
->loads
[1]);
9894 __put_user(value
.loads
[2], &target_value
->loads
[2]);
9895 __put_user(value
.totalram
, &target_value
->totalram
);
9896 __put_user(value
.freeram
, &target_value
->freeram
);
9897 __put_user(value
.sharedram
, &target_value
->sharedram
);
9898 __put_user(value
.bufferram
, &target_value
->bufferram
);
9899 __put_user(value
.totalswap
, &target_value
->totalswap
);
9900 __put_user(value
.freeswap
, &target_value
->freeswap
);
9901 __put_user(value
.procs
, &target_value
->procs
);
9902 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
9903 __put_user(value
.freehigh
, &target_value
->freehigh
);
9904 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
9905 unlock_user_struct(target_value
, arg1
, 1);
9909 #ifdef TARGET_NR_ipc
9911 ret
= do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9914 #ifdef TARGET_NR_semget
9915 case TARGET_NR_semget
:
9916 ret
= get_errno(semget(arg1
, arg2
, arg3
));
9919 #ifdef TARGET_NR_semop
9920 case TARGET_NR_semop
:
9921 ret
= do_semop(arg1
, arg2
, arg3
);
9924 #ifdef TARGET_NR_semctl
9925 case TARGET_NR_semctl
:
9926 ret
= do_semctl(arg1
, arg2
, arg3
, arg4
);
9929 #ifdef TARGET_NR_msgctl
9930 case TARGET_NR_msgctl
:
9931 ret
= do_msgctl(arg1
, arg2
, arg3
);
9934 #ifdef TARGET_NR_msgget
9935 case TARGET_NR_msgget
:
9936 ret
= get_errno(msgget(arg1
, arg2
));
9939 #ifdef TARGET_NR_msgrcv
9940 case TARGET_NR_msgrcv
:
9941 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
9944 #ifdef TARGET_NR_msgsnd
9945 case TARGET_NR_msgsnd
:
9946 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
9949 #ifdef TARGET_NR_shmget
9950 case TARGET_NR_shmget
:
9951 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
9954 #ifdef TARGET_NR_shmctl
9955 case TARGET_NR_shmctl
:
9956 ret
= do_shmctl(arg1
, arg2
, arg3
);
9959 #ifdef TARGET_NR_shmat
9960 case TARGET_NR_shmat
:
9961 ret
= do_shmat(cpu_env
, arg1
, arg2
, arg3
);
9964 #ifdef TARGET_NR_shmdt
9965 case TARGET_NR_shmdt
:
9966 ret
= do_shmdt(arg1
);
9969 case TARGET_NR_fsync
:
9970 ret
= get_errno(fsync(arg1
));
9972 case TARGET_NR_clone
:
9973 /* Linux manages to have three different orderings for its
9974 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9975 * match the kernel's CONFIG_CLONE_* settings.
9976 * Microblaze is further special in that it uses a sixth
9977 * implicit argument to clone for the TLS pointer.
9979 #if defined(TARGET_MICROBLAZE)
9980 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
9981 #elif defined(TARGET_CLONE_BACKWARDS)
9982 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
9983 #elif defined(TARGET_CLONE_BACKWARDS2)
9984 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
9986 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
9989 #ifdef __NR_exit_group
9990 /* new thread calls */
9991 case TARGET_NR_exit_group
:
9995 gdb_exit(cpu_env
, arg1
);
9996 ret
= get_errno(exit_group(arg1
));
9999 case TARGET_NR_setdomainname
:
10000 if (!(p
= lock_user_string(arg1
)))
10002 ret
= get_errno(setdomainname(p
, arg2
));
10003 unlock_user(p
, arg1
, 0);
10005 case TARGET_NR_uname
:
10006 /* no need to transcode because we use the linux syscall */
10008 struct new_utsname
* buf
;
10010 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
10012 ret
= get_errno(sys_uname(buf
));
10013 if (!is_error(ret
)) {
10014 /* Overwrite the native machine name with whatever is being
10016 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
10017 /* Allow the user to override the reported release. */
10018 if (qemu_uname_release
&& *qemu_uname_release
) {
10019 g_strlcpy(buf
->release
, qemu_uname_release
,
10020 sizeof(buf
->release
));
10023 unlock_user_struct(buf
, arg1
, 1);
10027 case TARGET_NR_modify_ldt
:
10028 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
10030 #if !defined(TARGET_X86_64)
10031 case TARGET_NR_vm86old
:
10032 goto unimplemented
;
10033 case TARGET_NR_vm86
:
10034 ret
= do_vm86(cpu_env
, arg1
, arg2
);
10038 case TARGET_NR_adjtimex
:
10040 struct timex host_buf
;
10042 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
10045 ret
= get_errno(adjtimex(&host_buf
));
10046 if (!is_error(ret
)) {
10047 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
10053 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10054 case TARGET_NR_clock_adjtime
:
10056 struct timex htx
, *phtx
= &htx
;
10058 if (target_to_host_timex(phtx
, arg2
) != 0) {
10061 ret
= get_errno(clock_adjtime(arg1
, phtx
));
10062 if (!is_error(ret
) && phtx
) {
10063 if (host_to_target_timex(arg2
, phtx
) != 0) {
10070 #ifdef TARGET_NR_create_module
10071 case TARGET_NR_create_module
:
10073 case TARGET_NR_init_module
:
10074 case TARGET_NR_delete_module
:
10075 #ifdef TARGET_NR_get_kernel_syms
10076 case TARGET_NR_get_kernel_syms
:
10078 goto unimplemented
;
10079 case TARGET_NR_quotactl
:
10080 goto unimplemented
;
10081 case TARGET_NR_getpgid
:
10082 ret
= get_errno(getpgid(arg1
));
10084 case TARGET_NR_fchdir
:
10085 ret
= get_errno(fchdir(arg1
));
10087 #ifdef TARGET_NR_bdflush /* not on x86_64 */
10088 case TARGET_NR_bdflush
:
10089 goto unimplemented
;
10091 #ifdef TARGET_NR_sysfs
10092 case TARGET_NR_sysfs
:
10093 goto unimplemented
;
10095 case TARGET_NR_personality
:
10096 ret
= get_errno(personality(arg1
));
10098 #ifdef TARGET_NR_afs_syscall
10099 case TARGET_NR_afs_syscall
:
10100 goto unimplemented
;
10102 #ifdef TARGET_NR__llseek /* Not on alpha */
10103 case TARGET_NR__llseek
:
10106 #if !defined(__NR_llseek)
10107 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
10109 ret
= get_errno(res
);
10114 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
10116 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
10122 #ifdef TARGET_NR_getdents
10123 case TARGET_NR_getdents
:
10124 #ifdef __NR_getdents
10125 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10127 struct target_dirent
*target_dirp
;
10128 struct linux_dirent
*dirp
;
10129 abi_long count
= arg3
;
10131 dirp
= g_try_malloc(count
);
10133 ret
= -TARGET_ENOMEM
;
10137 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
10138 if (!is_error(ret
)) {
10139 struct linux_dirent
*de
;
10140 struct target_dirent
*tde
;
10142 int reclen
, treclen
;
10143 int count1
, tnamelen
;
10147 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10151 reclen
= de
->d_reclen
;
10152 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
10153 assert(tnamelen
>= 0);
10154 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
10155 assert(count1
+ treclen
<= count
);
10156 tde
->d_reclen
= tswap16(treclen
);
10157 tde
->d_ino
= tswapal(de
->d_ino
);
10158 tde
->d_off
= tswapal(de
->d_off
);
10159 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
10160 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
10162 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
10166 unlock_user(target_dirp
, arg2
, ret
);
10172 struct linux_dirent
*dirp
;
10173 abi_long count
= arg3
;
10175 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10177 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
10178 if (!is_error(ret
)) {
10179 struct linux_dirent
*de
;
10184 reclen
= de
->d_reclen
;
10187 de
->d_reclen
= tswap16(reclen
);
10188 tswapls(&de
->d_ino
);
10189 tswapls(&de
->d_off
);
10190 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
10194 unlock_user(dirp
, arg2
, ret
);
10198 /* Implement getdents in terms of getdents64 */
10200 struct linux_dirent64
*dirp
;
10201 abi_long count
= arg3
;
10203 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
10207 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
10208 if (!is_error(ret
)) {
10209 /* Convert the dirent64 structs to target dirent. We do this
10210 * in-place, since we can guarantee that a target_dirent is no
10211 * larger than a dirent64; however this means we have to be
10212 * careful to read everything before writing in the new format.
10214 struct linux_dirent64
*de
;
10215 struct target_dirent
*tde
;
10220 tde
= (struct target_dirent
*)dirp
;
10222 int namelen
, treclen
;
10223 int reclen
= de
->d_reclen
;
10224 uint64_t ino
= de
->d_ino
;
10225 int64_t off
= de
->d_off
;
10226 uint8_t type
= de
->d_type
;
10228 namelen
= strlen(de
->d_name
);
10229 treclen
= offsetof(struct target_dirent
, d_name
)
10231 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
10233 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
10234 tde
->d_ino
= tswapal(ino
);
10235 tde
->d_off
= tswapal(off
);
10236 tde
->d_reclen
= tswap16(treclen
);
10237 /* The target_dirent type is in what was formerly a padding
10238 * byte at the end of the structure:
10240 *(((char *)tde
) + treclen
- 1) = type
;
10242 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
10243 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
10249 unlock_user(dirp
, arg2
, ret
);
10253 #endif /* TARGET_NR_getdents */
10254 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10255 case TARGET_NR_getdents64
:
10257 struct linux_dirent64
*dirp
;
10258 abi_long count
= arg3
;
10259 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10261 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
10262 if (!is_error(ret
)) {
10263 struct linux_dirent64
*de
;
10268 reclen
= de
->d_reclen
;
10271 de
->d_reclen
= tswap16(reclen
);
10272 tswap64s((uint64_t *)&de
->d_ino
);
10273 tswap64s((uint64_t *)&de
->d_off
);
10274 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
10278 unlock_user(dirp
, arg2
, ret
);
10281 #endif /* TARGET_NR_getdents64 */
10282 #if defined(TARGET_NR__newselect)
10283 case TARGET_NR__newselect
:
10284 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
10287 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10288 # ifdef TARGET_NR_poll
10289 case TARGET_NR_poll
:
10291 # ifdef TARGET_NR_ppoll
10292 case TARGET_NR_ppoll
:
10295 struct target_pollfd
*target_pfd
;
10296 unsigned int nfds
= arg2
;
10297 struct pollfd
*pfd
;
10303 if (nfds
> (INT_MAX
/ sizeof(struct target_pollfd
))) {
10304 ret
= -TARGET_EINVAL
;
10308 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
10309 sizeof(struct target_pollfd
) * nfds
, 1);
10314 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
10315 for (i
= 0; i
< nfds
; i
++) {
10316 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
10317 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
10322 # ifdef TARGET_NR_ppoll
10323 case TARGET_NR_ppoll
:
10325 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
10326 target_sigset_t
*target_set
;
10327 sigset_t _set
, *set
= &_set
;
10330 if (target_to_host_timespec(timeout_ts
, arg3
)) {
10331 unlock_user(target_pfd
, arg1
, 0);
10339 if (arg5
!= sizeof(target_sigset_t
)) {
10340 unlock_user(target_pfd
, arg1
, 0);
10341 ret
= -TARGET_EINVAL
;
10345 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
10347 unlock_user(target_pfd
, arg1
, 0);
10350 target_to_host_sigset(set
, target_set
);
10355 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
10356 set
, SIGSET_T_SIZE
));
10358 if (!is_error(ret
) && arg3
) {
10359 host_to_target_timespec(arg3
, timeout_ts
);
10362 unlock_user(target_set
, arg4
, 0);
10367 # ifdef TARGET_NR_poll
10368 case TARGET_NR_poll
:
10370 struct timespec ts
, *pts
;
10373 /* Convert ms to secs, ns */
10374 ts
.tv_sec
= arg3
/ 1000;
10375 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
10378 /* -ve poll() timeout means "infinite" */
10381 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
10386 g_assert_not_reached();
10389 if (!is_error(ret
)) {
10390 for(i
= 0; i
< nfds
; i
++) {
10391 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
10394 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
10398 case TARGET_NR_flock
:
10399 /* NOTE: the flock constant seems to be the same for every
10401 ret
= get_errno(safe_flock(arg1
, arg2
));
10403 case TARGET_NR_readv
:
10405 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10407 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
10408 unlock_iovec(vec
, arg2
, arg3
, 1);
10410 ret
= -host_to_target_errno(errno
);
10414 case TARGET_NR_writev
:
10416 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10418 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
10419 unlock_iovec(vec
, arg2
, arg3
, 0);
10421 ret
= -host_to_target_errno(errno
);
10425 #if defined(TARGET_NR_preadv)
10426 case TARGET_NR_preadv
:
10428 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10430 ret
= get_errno(safe_preadv(arg1
, vec
, arg3
, arg4
, arg5
));
10431 unlock_iovec(vec
, arg2
, arg3
, 1);
10433 ret
= -host_to_target_errno(errno
);
10438 #if defined(TARGET_NR_pwritev)
10439 case TARGET_NR_pwritev
:
10441 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10443 ret
= get_errno(safe_pwritev(arg1
, vec
, arg3
, arg4
, arg5
));
10444 unlock_iovec(vec
, arg2
, arg3
, 0);
10446 ret
= -host_to_target_errno(errno
);
10451 case TARGET_NR_getsid
:
10452 ret
= get_errno(getsid(arg1
));
10454 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10455 case TARGET_NR_fdatasync
:
10456 ret
= get_errno(fdatasync(arg1
));
10459 #ifdef TARGET_NR__sysctl
10460 case TARGET_NR__sysctl
:
10461 /* We don't implement this, but ENOTDIR is always a safe
10463 ret
= -TARGET_ENOTDIR
;
10466 case TARGET_NR_sched_getaffinity
:
10468 unsigned int mask_size
;
10469 unsigned long *mask
;
10472 * sched_getaffinity needs multiples of ulong, so need to take
10473 * care of mismatches between target ulong and host ulong sizes.
10475 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10476 ret
= -TARGET_EINVAL
;
10479 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10481 mask
= alloca(mask_size
);
10482 memset(mask
, 0, mask_size
);
10483 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
10485 if (!is_error(ret
)) {
10487 /* More data returned than the caller's buffer will fit.
10488 * This only happens if sizeof(abi_long) < sizeof(long)
10489 * and the caller passed us a buffer holding an odd number
10490 * of abi_longs. If the host kernel is actually using the
10491 * extra 4 bytes then fail EINVAL; otherwise we can just
10492 * ignore them and only copy the interesting part.
10494 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
10495 if (numcpus
> arg2
* 8) {
10496 ret
= -TARGET_EINVAL
;
10502 ret
= host_to_target_cpu_mask(mask
, mask_size
, arg3
, arg2
);
10506 case TARGET_NR_sched_setaffinity
:
10508 unsigned int mask_size
;
10509 unsigned long *mask
;
10512 * sched_setaffinity needs multiples of ulong, so need to take
10513 * care of mismatches between target ulong and host ulong sizes.
10515 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10516 ret
= -TARGET_EINVAL
;
10519 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10520 mask
= alloca(mask_size
);
10522 ret
= target_to_host_cpu_mask(mask
, mask_size
, arg3
, arg2
);
10527 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
10530 case TARGET_NR_getcpu
:
10532 unsigned cpu
, node
;
10533 ret
= get_errno(sys_getcpu(arg1
? &cpu
: NULL
,
10534 arg2
? &node
: NULL
,
10536 if (is_error(ret
)) {
10539 if (arg1
&& put_user_u32(cpu
, arg1
)) {
10542 if (arg2
&& put_user_u32(node
, arg2
)) {
10547 case TARGET_NR_sched_setparam
:
10549 struct sched_param
*target_schp
;
10550 struct sched_param schp
;
10553 return -TARGET_EINVAL
;
10555 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
10557 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10558 unlock_user_struct(target_schp
, arg2
, 0);
10559 ret
= get_errno(sched_setparam(arg1
, &schp
));
10562 case TARGET_NR_sched_getparam
:
10564 struct sched_param
*target_schp
;
10565 struct sched_param schp
;
10568 return -TARGET_EINVAL
;
10570 ret
= get_errno(sched_getparam(arg1
, &schp
));
10571 if (!is_error(ret
)) {
10572 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
10574 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
10575 unlock_user_struct(target_schp
, arg2
, 1);
10579 case TARGET_NR_sched_setscheduler
:
10581 struct sched_param
*target_schp
;
10582 struct sched_param schp
;
10584 return -TARGET_EINVAL
;
10586 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
10588 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10589 unlock_user_struct(target_schp
, arg3
, 0);
10590 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
10593 case TARGET_NR_sched_getscheduler
:
10594 ret
= get_errno(sched_getscheduler(arg1
));
10596 case TARGET_NR_sched_yield
:
10597 ret
= get_errno(sched_yield());
10599 case TARGET_NR_sched_get_priority_max
:
10600 ret
= get_errno(sched_get_priority_max(arg1
));
10602 case TARGET_NR_sched_get_priority_min
:
10603 ret
= get_errno(sched_get_priority_min(arg1
));
10605 case TARGET_NR_sched_rr_get_interval
:
10607 struct timespec ts
;
10608 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10609 if (!is_error(ret
)) {
10610 ret
= host_to_target_timespec(arg2
, &ts
);
10614 case TARGET_NR_nanosleep
:
10616 struct timespec req
, rem
;
10617 target_to_host_timespec(&req
, arg1
);
10618 ret
= get_errno(safe_nanosleep(&req
, &rem
));
10619 if (is_error(ret
) && arg2
) {
10620 host_to_target_timespec(arg2
, &rem
);
10624 #ifdef TARGET_NR_query_module
10625 case TARGET_NR_query_module
:
10626 goto unimplemented
;
10628 #ifdef TARGET_NR_nfsservctl
10629 case TARGET_NR_nfsservctl
:
10630 goto unimplemented
;
10632 case TARGET_NR_prctl
:
10634 case PR_GET_PDEATHSIG
:
10637 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
10638 if (!is_error(ret
) && arg2
10639 && put_user_ual(deathsig
, arg2
)) {
10647 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
10651 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10652 arg3
, arg4
, arg5
));
10653 unlock_user(name
, arg2
, 16);
10658 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
10662 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10663 arg3
, arg4
, arg5
));
10664 unlock_user(name
, arg2
, 0);
10668 case PR_GET_SECCOMP
:
10669 case PR_SET_SECCOMP
:
10670 /* Disable seccomp to prevent the target disabling syscalls we
10672 ret
= -TARGET_EINVAL
;
10675 /* Most prctl options have no pointer arguments */
10676 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
10680 #ifdef TARGET_NR_arch_prctl
10681 case TARGET_NR_arch_prctl
:
10682 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10683 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
10686 goto unimplemented
;
10689 #ifdef TARGET_NR_pread64
10690 case TARGET_NR_pread64
:
10691 if (regpairs_aligned(cpu_env
, num
)) {
10695 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
10697 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10698 unlock_user(p
, arg2
, ret
);
10700 case TARGET_NR_pwrite64
:
10701 if (regpairs_aligned(cpu_env
, num
)) {
10705 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
10707 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10708 unlock_user(p
, arg2
, 0);
10711 case TARGET_NR_getcwd
:
10712 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
10714 ret
= get_errno(sys_getcwd1(p
, arg2
));
10715 unlock_user(p
, arg1
, ret
);
10717 case TARGET_NR_capget
:
10718 case TARGET_NR_capset
:
10720 struct target_user_cap_header
*target_header
;
10721 struct target_user_cap_data
*target_data
= NULL
;
10722 struct __user_cap_header_struct header
;
10723 struct __user_cap_data_struct data
[2];
10724 struct __user_cap_data_struct
*dataptr
= NULL
;
10725 int i
, target_datalen
;
10726 int data_items
= 1;
10728 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
10731 header
.version
= tswap32(target_header
->version
);
10732 header
.pid
= tswap32(target_header
->pid
);
10734 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
10735 /* Version 2 and up takes pointer to two user_data structs */
10739 target_datalen
= sizeof(*target_data
) * data_items
;
10742 if (num
== TARGET_NR_capget
) {
10743 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
10745 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
10747 if (!target_data
) {
10748 unlock_user_struct(target_header
, arg1
, 0);
10752 if (num
== TARGET_NR_capset
) {
10753 for (i
= 0; i
< data_items
; i
++) {
10754 data
[i
].effective
= tswap32(target_data
[i
].effective
);
10755 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
10756 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
10763 if (num
== TARGET_NR_capget
) {
10764 ret
= get_errno(capget(&header
, dataptr
));
10766 ret
= get_errno(capset(&header
, dataptr
));
10769 /* The kernel always updates version for both capget and capset */
10770 target_header
->version
= tswap32(header
.version
);
10771 unlock_user_struct(target_header
, arg1
, 1);
10774 if (num
== TARGET_NR_capget
) {
10775 for (i
= 0; i
< data_items
; i
++) {
10776 target_data
[i
].effective
= tswap32(data
[i
].effective
);
10777 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
10778 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
10780 unlock_user(target_data
, arg2
, target_datalen
);
10782 unlock_user(target_data
, arg2
, 0);
10787 case TARGET_NR_sigaltstack
:
10788 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
10791 #ifdef CONFIG_SENDFILE
10792 case TARGET_NR_sendfile
:
10794 off_t
*offp
= NULL
;
10797 ret
= get_user_sal(off
, arg3
);
10798 if (is_error(ret
)) {
10803 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10804 if (!is_error(ret
) && arg3
) {
10805 abi_long ret2
= put_user_sal(off
, arg3
);
10806 if (is_error(ret2
)) {
10812 #ifdef TARGET_NR_sendfile64
10813 case TARGET_NR_sendfile64
:
10815 off_t
*offp
= NULL
;
10818 ret
= get_user_s64(off
, arg3
);
10819 if (is_error(ret
)) {
10824 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10825 if (!is_error(ret
) && arg3
) {
10826 abi_long ret2
= put_user_s64(off
, arg3
);
10827 if (is_error(ret2
)) {
10835 case TARGET_NR_sendfile
:
10836 #ifdef TARGET_NR_sendfile64
10837 case TARGET_NR_sendfile64
:
10839 goto unimplemented
;
10842 #ifdef TARGET_NR_getpmsg
10843 case TARGET_NR_getpmsg
:
10844 goto unimplemented
;
10846 #ifdef TARGET_NR_putpmsg
10847 case TARGET_NR_putpmsg
:
10848 goto unimplemented
;
10850 #ifdef TARGET_NR_vfork
10851 case TARGET_NR_vfork
:
10852 ret
= get_errno(do_fork(cpu_env
,
10853 CLONE_VFORK
| CLONE_VM
| TARGET_SIGCHLD
,
10857 #ifdef TARGET_NR_ugetrlimit
10858 case TARGET_NR_ugetrlimit
:
10860 struct rlimit rlim
;
10861 int resource
= target_to_host_resource(arg1
);
10862 ret
= get_errno(getrlimit(resource
, &rlim
));
10863 if (!is_error(ret
)) {
10864 struct target_rlimit
*target_rlim
;
10865 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
10867 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
10868 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
10869 unlock_user_struct(target_rlim
, arg2
, 1);
10874 #ifdef TARGET_NR_truncate64
10875 case TARGET_NR_truncate64
:
10876 if (!(p
= lock_user_string(arg1
)))
10878 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
10879 unlock_user(p
, arg1
, 0);
10882 #ifdef TARGET_NR_ftruncate64
10883 case TARGET_NR_ftruncate64
:
10884 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
10887 #ifdef TARGET_NR_stat64
10888 case TARGET_NR_stat64
:
10889 if (!(p
= lock_user_string(arg1
)))
10891 ret
= get_errno(stat(path(p
), &st
));
10892 unlock_user(p
, arg1
, 0);
10893 if (!is_error(ret
))
10894 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10897 #ifdef TARGET_NR_lstat64
10898 case TARGET_NR_lstat64
:
10899 if (!(p
= lock_user_string(arg1
)))
10901 ret
= get_errno(lstat(path(p
), &st
));
10902 unlock_user(p
, arg1
, 0);
10903 if (!is_error(ret
))
10904 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10907 #ifdef TARGET_NR_fstat64
10908 case TARGET_NR_fstat64
:
10909 ret
= get_errno(fstat(arg1
, &st
));
10910 if (!is_error(ret
))
10911 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
10914 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10915 #ifdef TARGET_NR_fstatat64
10916 case TARGET_NR_fstatat64
:
10918 #ifdef TARGET_NR_newfstatat
10919 case TARGET_NR_newfstatat
:
10921 if (!(p
= lock_user_string(arg2
)))
10923 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
10924 if (!is_error(ret
))
10925 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
10928 #ifdef TARGET_NR_lchown
10929 case TARGET_NR_lchown
:
10930 if (!(p
= lock_user_string(arg1
)))
10932 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
10933 unlock_user(p
, arg1
, 0);
10936 #ifdef TARGET_NR_getuid
10937 case TARGET_NR_getuid
:
10938 ret
= get_errno(high2lowuid(getuid()));
10941 #ifdef TARGET_NR_getgid
10942 case TARGET_NR_getgid
:
10943 ret
= get_errno(high2lowgid(getgid()));
10946 #ifdef TARGET_NR_geteuid
10947 case TARGET_NR_geteuid
:
10948 ret
= get_errno(high2lowuid(geteuid()));
10951 #ifdef TARGET_NR_getegid
10952 case TARGET_NR_getegid
:
10953 ret
= get_errno(high2lowgid(getegid()));
10956 case TARGET_NR_setreuid
:
10957 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
10959 case TARGET_NR_setregid
:
10960 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
10962 case TARGET_NR_getgroups
:
10964 int gidsetsize
= arg1
;
10965 target_id
*target_grouplist
;
10969 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10970 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
10971 if (gidsetsize
== 0)
10973 if (!is_error(ret
)) {
10974 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
10975 if (!target_grouplist
)
10977 for(i
= 0;i
< ret
; i
++)
10978 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
10979 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
10983 case TARGET_NR_setgroups
:
10985 int gidsetsize
= arg1
;
10986 target_id
*target_grouplist
;
10987 gid_t
*grouplist
= NULL
;
10990 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
10991 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
10992 if (!target_grouplist
) {
10993 ret
= -TARGET_EFAULT
;
10996 for (i
= 0; i
< gidsetsize
; i
++) {
10997 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
10999 unlock_user(target_grouplist
, arg2
, 0);
11001 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
11004 case TARGET_NR_fchown
:
11005 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
11007 #if defined(TARGET_NR_fchownat)
11008 case TARGET_NR_fchownat
:
11009 if (!(p
= lock_user_string(arg2
)))
11011 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
11012 low2highgid(arg4
), arg5
));
11013 unlock_user(p
, arg2
, 0);
11016 #ifdef TARGET_NR_setresuid
11017 case TARGET_NR_setresuid
:
11018 ret
= get_errno(sys_setresuid(low2highuid(arg1
),
11020 low2highuid(arg3
)));
11023 #ifdef TARGET_NR_getresuid
11024 case TARGET_NR_getresuid
:
11026 uid_t ruid
, euid
, suid
;
11027 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11028 if (!is_error(ret
)) {
11029 if (put_user_id(high2lowuid(ruid
), arg1
)
11030 || put_user_id(high2lowuid(euid
), arg2
)
11031 || put_user_id(high2lowuid(suid
), arg3
))
11037 #ifdef TARGET_NR_getresgid
11038 case TARGET_NR_setresgid
:
11039 ret
= get_errno(sys_setresgid(low2highgid(arg1
),
11041 low2highgid(arg3
)));
11044 #ifdef TARGET_NR_getresgid
11045 case TARGET_NR_getresgid
:
11047 gid_t rgid
, egid
, sgid
;
11048 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11049 if (!is_error(ret
)) {
11050 if (put_user_id(high2lowgid(rgid
), arg1
)
11051 || put_user_id(high2lowgid(egid
), arg2
)
11052 || put_user_id(high2lowgid(sgid
), arg3
))
11058 #ifdef TARGET_NR_chown
11059 case TARGET_NR_chown
:
11060 if (!(p
= lock_user_string(arg1
)))
11062 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
11063 unlock_user(p
, arg1
, 0);
11066 case TARGET_NR_setuid
:
11067 ret
= get_errno(sys_setuid(low2highuid(arg1
)));
11069 case TARGET_NR_setgid
:
11070 ret
= get_errno(sys_setgid(low2highgid(arg1
)));
11072 case TARGET_NR_setfsuid
:
11073 ret
= get_errno(setfsuid(arg1
));
11075 case TARGET_NR_setfsgid
:
11076 ret
= get_errno(setfsgid(arg1
));
11079 #ifdef TARGET_NR_lchown32
11080 case TARGET_NR_lchown32
:
11081 if (!(p
= lock_user_string(arg1
)))
11083 ret
= get_errno(lchown(p
, arg2
, arg3
));
11084 unlock_user(p
, arg1
, 0);
11087 #ifdef TARGET_NR_getuid32
11088 case TARGET_NR_getuid32
:
11089 ret
= get_errno(getuid());
11093 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11094 /* Alpha specific */
11095 case TARGET_NR_getxuid
:
11099 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
11101 ret
= get_errno(getuid());
11104 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11105 /* Alpha specific */
11106 case TARGET_NR_getxgid
:
11110 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
11112 ret
= get_errno(getgid());
11115 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11116 /* Alpha specific */
11117 case TARGET_NR_osf_getsysinfo
:
11118 ret
= -TARGET_EOPNOTSUPP
;
11120 case TARGET_GSI_IEEE_FP_CONTROL
:
11122 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
11124 /* Copied from linux ieee_fpcr_to_swcr. */
11125 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
11126 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
11127 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
11128 | SWCR_TRAP_ENABLE_DZE
11129 | SWCR_TRAP_ENABLE_OVF
);
11130 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
11131 | SWCR_TRAP_ENABLE_INE
);
11132 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
11133 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
11135 if (put_user_u64 (swcr
, arg2
))
11141 /* case GSI_IEEE_STATE_AT_SIGNAL:
11142 -- Not implemented in linux kernel.
11144 -- Retrieves current unaligned access state; not much used.
11145 case GSI_PROC_TYPE:
11146 -- Retrieves implver information; surely not used.
11147 case GSI_GET_HWRPB:
11148 -- Grabs a copy of the HWRPB; surely not used.
11153 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11154 /* Alpha specific */
11155 case TARGET_NR_osf_setsysinfo
:
11156 ret
= -TARGET_EOPNOTSUPP
;
11158 case TARGET_SSI_IEEE_FP_CONTROL
:
11160 uint64_t swcr
, fpcr
, orig_fpcr
;
11162 if (get_user_u64 (swcr
, arg2
)) {
11165 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11166 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
11168 /* Copied from linux ieee_swcr_to_fpcr. */
11169 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
11170 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
11171 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
11172 | SWCR_TRAP_ENABLE_DZE
11173 | SWCR_TRAP_ENABLE_OVF
)) << 48;
11174 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
11175 | SWCR_TRAP_ENABLE_INE
)) << 57;
11176 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
11177 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
11179 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11184 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
11186 uint64_t exc
, fpcr
, orig_fpcr
;
11189 if (get_user_u64(exc
, arg2
)) {
11193 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11195 /* We only add to the exception status here. */
11196 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
11198 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11201 /* Old exceptions are not signaled. */
11202 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
11204 /* If any exceptions set by this call,
11205 and are unmasked, send a signal. */
11207 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
11208 si_code
= TARGET_FPE_FLTRES
;
11210 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
11211 si_code
= TARGET_FPE_FLTUND
;
11213 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
11214 si_code
= TARGET_FPE_FLTOVF
;
11216 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
11217 si_code
= TARGET_FPE_FLTDIV
;
11219 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
11220 si_code
= TARGET_FPE_FLTINV
;
11222 if (si_code
!= 0) {
11223 target_siginfo_t info
;
11224 info
.si_signo
= SIGFPE
;
11226 info
.si_code
= si_code
;
11227 info
._sifields
._sigfault
._addr
11228 = ((CPUArchState
*)cpu_env
)->pc
;
11229 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
11230 QEMU_SI_FAULT
, &info
);
11235 /* case SSI_NVPAIRS:
11236 -- Used with SSIN_UACPROC to enable unaligned accesses.
11237 case SSI_IEEE_STATE_AT_SIGNAL:
11238 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11239 -- Not implemented in linux kernel
11244 #ifdef TARGET_NR_osf_sigprocmask
11245 /* Alpha specific. */
11246 case TARGET_NR_osf_sigprocmask
:
11250 sigset_t set
, oldset
;
11253 case TARGET_SIG_BLOCK
:
11256 case TARGET_SIG_UNBLOCK
:
11259 case TARGET_SIG_SETMASK
:
11263 ret
= -TARGET_EINVAL
;
11267 target_to_host_old_sigset(&set
, &mask
);
11268 ret
= do_sigprocmask(how
, &set
, &oldset
);
11270 host_to_target_old_sigset(&mask
, &oldset
);
11277 #ifdef TARGET_NR_getgid32
11278 case TARGET_NR_getgid32
:
11279 ret
= get_errno(getgid());
11282 #ifdef TARGET_NR_geteuid32
11283 case TARGET_NR_geteuid32
:
11284 ret
= get_errno(geteuid());
11287 #ifdef TARGET_NR_getegid32
11288 case TARGET_NR_getegid32
:
11289 ret
= get_errno(getegid());
11292 #ifdef TARGET_NR_setreuid32
11293 case TARGET_NR_setreuid32
:
11294 ret
= get_errno(setreuid(arg1
, arg2
));
11297 #ifdef TARGET_NR_setregid32
11298 case TARGET_NR_setregid32
:
11299 ret
= get_errno(setregid(arg1
, arg2
));
11302 #ifdef TARGET_NR_getgroups32
11303 case TARGET_NR_getgroups32
:
11305 int gidsetsize
= arg1
;
11306 uint32_t *target_grouplist
;
11310 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11311 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11312 if (gidsetsize
== 0)
11314 if (!is_error(ret
)) {
11315 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
11316 if (!target_grouplist
) {
11317 ret
= -TARGET_EFAULT
;
11320 for(i
= 0;i
< ret
; i
++)
11321 target_grouplist
[i
] = tswap32(grouplist
[i
]);
11322 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
11327 #ifdef TARGET_NR_setgroups32
11328 case TARGET_NR_setgroups32
:
11330 int gidsetsize
= arg1
;
11331 uint32_t *target_grouplist
;
11335 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11336 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
11337 if (!target_grouplist
) {
11338 ret
= -TARGET_EFAULT
;
11341 for(i
= 0;i
< gidsetsize
; i
++)
11342 grouplist
[i
] = tswap32(target_grouplist
[i
]);
11343 unlock_user(target_grouplist
, arg2
, 0);
11344 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
11348 #ifdef TARGET_NR_fchown32
11349 case TARGET_NR_fchown32
:
11350 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
11353 #ifdef TARGET_NR_setresuid32
11354 case TARGET_NR_setresuid32
:
11355 ret
= get_errno(sys_setresuid(arg1
, arg2
, arg3
));
11358 #ifdef TARGET_NR_getresuid32
11359 case TARGET_NR_getresuid32
:
11361 uid_t ruid
, euid
, suid
;
11362 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11363 if (!is_error(ret
)) {
11364 if (put_user_u32(ruid
, arg1
)
11365 || put_user_u32(euid
, arg2
)
11366 || put_user_u32(suid
, arg3
))
11372 #ifdef TARGET_NR_setresgid32
11373 case TARGET_NR_setresgid32
:
11374 ret
= get_errno(sys_setresgid(arg1
, arg2
, arg3
));
11377 #ifdef TARGET_NR_getresgid32
11378 case TARGET_NR_getresgid32
:
11380 gid_t rgid
, egid
, sgid
;
11381 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11382 if (!is_error(ret
)) {
11383 if (put_user_u32(rgid
, arg1
)
11384 || put_user_u32(egid
, arg2
)
11385 || put_user_u32(sgid
, arg3
))
11391 #ifdef TARGET_NR_chown32
11392 case TARGET_NR_chown32
:
11393 if (!(p
= lock_user_string(arg1
)))
11395 ret
= get_errno(chown(p
, arg2
, arg3
));
11396 unlock_user(p
, arg1
, 0);
11399 #ifdef TARGET_NR_setuid32
11400 case TARGET_NR_setuid32
:
11401 ret
= get_errno(sys_setuid(arg1
));
11404 #ifdef TARGET_NR_setgid32
11405 case TARGET_NR_setgid32
:
11406 ret
= get_errno(sys_setgid(arg1
));
11409 #ifdef TARGET_NR_setfsuid32
11410 case TARGET_NR_setfsuid32
:
11411 ret
= get_errno(setfsuid(arg1
));
11414 #ifdef TARGET_NR_setfsgid32
11415 case TARGET_NR_setfsgid32
:
11416 ret
= get_errno(setfsgid(arg1
));
11420 case TARGET_NR_pivot_root
:
11421 goto unimplemented
;
11422 #ifdef TARGET_NR_mincore
11423 case TARGET_NR_mincore
:
11426 ret
= -TARGET_ENOMEM
;
11427 a
= lock_user(VERIFY_READ
, arg1
, arg2
, 0);
11431 ret
= -TARGET_EFAULT
;
11432 p
= lock_user_string(arg3
);
11436 ret
= get_errno(mincore(a
, arg2
, p
));
11437 unlock_user(p
, arg3
, ret
);
11439 unlock_user(a
, arg1
, 0);
11443 #ifdef TARGET_NR_arm_fadvise64_64
11444 case TARGET_NR_arm_fadvise64_64
:
11445 /* arm_fadvise64_64 looks like fadvise64_64 but
11446 * with different argument order: fd, advice, offset, len
11447 * rather than the usual fd, offset, len, advice.
11448 * Note that offset and len are both 64-bit so appear as
11449 * pairs of 32-bit registers.
11451 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
11452 target_offset64(arg5
, arg6
), arg2
);
11453 ret
= -host_to_target_errno(ret
);
11457 #if TARGET_ABI_BITS == 32
11459 #ifdef TARGET_NR_fadvise64_64
11460 case TARGET_NR_fadvise64_64
:
11461 #if defined(TARGET_PPC)
11462 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11470 /* 6 args: fd, offset (high, low), len (high, low), advice */
11471 if (regpairs_aligned(cpu_env
, num
)) {
11472 /* offset is in (3,4), len in (5,6) and advice in 7 */
11480 ret
= -host_to_target_errno(posix_fadvise(arg1
,
11481 target_offset64(arg2
, arg3
),
11482 target_offset64(arg4
, arg5
),
11487 #ifdef TARGET_NR_fadvise64
11488 case TARGET_NR_fadvise64
:
11489 /* 5 args: fd, offset (high, low), len, advice */
11490 if (regpairs_aligned(cpu_env
, num
)) {
11491 /* offset is in (3,4), len in 5 and advice in 6 */
11497 ret
= -host_to_target_errno(posix_fadvise(arg1
,
11498 target_offset64(arg2
, arg3
),
11503 #else /* not a 32-bit ABI */
11504 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11505 #ifdef TARGET_NR_fadvise64_64
11506 case TARGET_NR_fadvise64_64
:
11508 #ifdef TARGET_NR_fadvise64
11509 case TARGET_NR_fadvise64
:
11511 #ifdef TARGET_S390X
11513 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
11514 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
11515 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
11516 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
11520 ret
= -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
11523 #endif /* end of 64-bit ABI fadvise handling */
11525 #ifdef TARGET_NR_madvise
11526 case TARGET_NR_madvise
:
11527 /* A straight passthrough may not be safe because qemu sometimes
11528 turns private file-backed mappings into anonymous mappings.
11529 This will break MADV_DONTNEED.
11530 This is a hint, so ignoring and returning success is ok. */
11531 ret
= get_errno(0);
11534 #if TARGET_ABI_BITS == 32
11535 case TARGET_NR_fcntl64
:
11539 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
11540 to_flock64_fn
*copyto
= copy_to_user_flock64
;
11543 if (((CPUARMState
*)cpu_env
)->eabi
) {
11544 copyfrom
= copy_from_user_eabi_flock64
;
11545 copyto
= copy_to_user_eabi_flock64
;
11549 cmd
= target_to_host_fcntl_cmd(arg2
);
11550 if (cmd
== -TARGET_EINVAL
) {
11556 case TARGET_F_GETLK64
:
11557 ret
= copyfrom(&fl
, arg3
);
11561 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
11563 ret
= copyto(arg3
, &fl
);
11567 case TARGET_F_SETLK64
:
11568 case TARGET_F_SETLKW64
:
11569 ret
= copyfrom(&fl
, arg3
);
11573 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11576 ret
= do_fcntl(arg1
, arg2
, arg3
);
11582 #ifdef TARGET_NR_cacheflush
11583 case TARGET_NR_cacheflush
:
11584 /* self-modifying code is handled automatically, so nothing needed */
11588 #ifdef TARGET_NR_security
11589 case TARGET_NR_security
:
11590 goto unimplemented
;
11592 #ifdef TARGET_NR_getpagesize
11593 case TARGET_NR_getpagesize
:
11594 ret
= TARGET_PAGE_SIZE
;
11597 case TARGET_NR_gettid
:
11598 ret
= get_errno(gettid());
11600 #ifdef TARGET_NR_readahead
11601 case TARGET_NR_readahead
:
11602 #if TARGET_ABI_BITS == 32
11603 if (regpairs_aligned(cpu_env
, num
)) {
11608 ret
= get_errno(readahead(arg1
, target_offset64(arg2
, arg3
) , arg4
));
11610 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
11615 #ifdef TARGET_NR_setxattr
11616 case TARGET_NR_listxattr
:
11617 case TARGET_NR_llistxattr
:
11621 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11623 ret
= -TARGET_EFAULT
;
11627 p
= lock_user_string(arg1
);
11629 if (num
== TARGET_NR_listxattr
) {
11630 ret
= get_errno(listxattr(p
, b
, arg3
));
11632 ret
= get_errno(llistxattr(p
, b
, arg3
));
11635 ret
= -TARGET_EFAULT
;
11637 unlock_user(p
, arg1
, 0);
11638 unlock_user(b
, arg2
, arg3
);
11641 case TARGET_NR_flistxattr
:
11645 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11647 ret
= -TARGET_EFAULT
;
11651 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
11652 unlock_user(b
, arg2
, arg3
);
11655 case TARGET_NR_setxattr
:
11656 case TARGET_NR_lsetxattr
:
11658 void *p
, *n
, *v
= 0;
11660 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11662 ret
= -TARGET_EFAULT
;
11666 p
= lock_user_string(arg1
);
11667 n
= lock_user_string(arg2
);
11669 if (num
== TARGET_NR_setxattr
) {
11670 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
11672 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
11675 ret
= -TARGET_EFAULT
;
11677 unlock_user(p
, arg1
, 0);
11678 unlock_user(n
, arg2
, 0);
11679 unlock_user(v
, arg3
, 0);
11682 case TARGET_NR_fsetxattr
:
11686 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11688 ret
= -TARGET_EFAULT
;
11692 n
= lock_user_string(arg2
);
11694 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
11696 ret
= -TARGET_EFAULT
;
11698 unlock_user(n
, arg2
, 0);
11699 unlock_user(v
, arg3
, 0);
11702 case TARGET_NR_getxattr
:
11703 case TARGET_NR_lgetxattr
:
11705 void *p
, *n
, *v
= 0;
11707 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11709 ret
= -TARGET_EFAULT
;
11713 p
= lock_user_string(arg1
);
11714 n
= lock_user_string(arg2
);
11716 if (num
== TARGET_NR_getxattr
) {
11717 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
11719 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
11722 ret
= -TARGET_EFAULT
;
11724 unlock_user(p
, arg1
, 0);
11725 unlock_user(n
, arg2
, 0);
11726 unlock_user(v
, arg3
, arg4
);
11729 case TARGET_NR_fgetxattr
:
11733 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11735 ret
= -TARGET_EFAULT
;
11739 n
= lock_user_string(arg2
);
11741 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
11743 ret
= -TARGET_EFAULT
;
11745 unlock_user(n
, arg2
, 0);
11746 unlock_user(v
, arg3
, arg4
);
11749 case TARGET_NR_removexattr
:
11750 case TARGET_NR_lremovexattr
:
11753 p
= lock_user_string(arg1
);
11754 n
= lock_user_string(arg2
);
11756 if (num
== TARGET_NR_removexattr
) {
11757 ret
= get_errno(removexattr(p
, n
));
11759 ret
= get_errno(lremovexattr(p
, n
));
11762 ret
= -TARGET_EFAULT
;
11764 unlock_user(p
, arg1
, 0);
11765 unlock_user(n
, arg2
, 0);
11768 case TARGET_NR_fremovexattr
:
11771 n
= lock_user_string(arg2
);
11773 ret
= get_errno(fremovexattr(arg1
, n
));
11775 ret
= -TARGET_EFAULT
;
11777 unlock_user(n
, arg2
, 0);
11781 #endif /* CONFIG_ATTR */
11782 #ifdef TARGET_NR_set_thread_area
11783 case TARGET_NR_set_thread_area
:
11784 #if defined(TARGET_MIPS)
11785 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
11788 #elif defined(TARGET_CRIS)
11790 ret
= -TARGET_EINVAL
;
11792 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
11796 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11797 ret
= do_set_thread_area(cpu_env
, arg1
);
11799 #elif defined(TARGET_M68K)
11801 TaskState
*ts
= cpu
->opaque
;
11802 ts
->tp_value
= arg1
;
11807 goto unimplemented_nowarn
;
11810 #ifdef TARGET_NR_get_thread_area
11811 case TARGET_NR_get_thread_area
:
11812 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11813 ret
= do_get_thread_area(cpu_env
, arg1
);
11815 #elif defined(TARGET_M68K)
11817 TaskState
*ts
= cpu
->opaque
;
11818 ret
= ts
->tp_value
;
11822 goto unimplemented_nowarn
;
11825 #ifdef TARGET_NR_getdomainname
11826 case TARGET_NR_getdomainname
:
11827 goto unimplemented_nowarn
;
11830 #ifdef TARGET_NR_clock_gettime
11831 case TARGET_NR_clock_gettime
:
11833 struct timespec ts
;
11834 ret
= get_errno(clock_gettime(arg1
, &ts
));
11835 if (!is_error(ret
)) {
11836 host_to_target_timespec(arg2
, &ts
);
11841 #ifdef TARGET_NR_clock_getres
11842 case TARGET_NR_clock_getres
:
11844 struct timespec ts
;
11845 ret
= get_errno(clock_getres(arg1
, &ts
));
11846 if (!is_error(ret
)) {
11847 host_to_target_timespec(arg2
, &ts
);
11852 #ifdef TARGET_NR_clock_nanosleep
11853 case TARGET_NR_clock_nanosleep
:
11855 struct timespec ts
;
11856 target_to_host_timespec(&ts
, arg3
);
11857 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
11858 &ts
, arg4
? &ts
: NULL
));
11860 host_to_target_timespec(arg4
, &ts
);
11862 #if defined(TARGET_PPC)
11863 /* clock_nanosleep is odd in that it returns positive errno values.
11864 * On PPC, CR0 bit 3 should be set in such a situation. */
11865 if (ret
&& ret
!= -TARGET_ERESTARTSYS
) {
11866 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
11873 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11874 case TARGET_NR_set_tid_address
:
11875 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
11879 case TARGET_NR_tkill
:
11880 ret
= get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
11883 case TARGET_NR_tgkill
:
11884 ret
= get_errno(safe_tgkill((int)arg1
, (int)arg2
,
11885 target_to_host_signal(arg3
)));
11888 #ifdef TARGET_NR_set_robust_list
11889 case TARGET_NR_set_robust_list
:
11890 case TARGET_NR_get_robust_list
:
11891 /* The ABI for supporting robust futexes has userspace pass
11892 * the kernel a pointer to a linked list which is updated by
11893 * userspace after the syscall; the list is walked by the kernel
11894 * when the thread exits. Since the linked list in QEMU guest
11895 * memory isn't a valid linked list for the host and we have
11896 * no way to reliably intercept the thread-death event, we can't
11897 * support these. Silently return ENOSYS so that guest userspace
11898 * falls back to a non-robust futex implementation (which should
11899 * be OK except in the corner case of the guest crashing while
11900 * holding a mutex that is shared with another process via
11903 goto unimplemented_nowarn
;
11906 #if defined(TARGET_NR_utimensat)
11907 case TARGET_NR_utimensat
:
11909 struct timespec
*tsp
, ts
[2];
11913 target_to_host_timespec(ts
, arg3
);
11914 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
11918 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
11920 if (!(p
= lock_user_string(arg2
))) {
11921 ret
= -TARGET_EFAULT
;
11924 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
11925 unlock_user(p
, arg2
, 0);
11930 case TARGET_NR_futex
:
11931 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
11933 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11934 case TARGET_NR_inotify_init
:
11935 ret
= get_errno(sys_inotify_init());
11937 fd_trans_register(ret
, &target_inotify_trans
);
11941 #ifdef CONFIG_INOTIFY1
11942 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11943 case TARGET_NR_inotify_init1
:
11944 ret
= get_errno(sys_inotify_init1(target_to_host_bitmask(arg1
,
11945 fcntl_flags_tbl
)));
11947 fd_trans_register(ret
, &target_inotify_trans
);
11952 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11953 case TARGET_NR_inotify_add_watch
:
11954 p
= lock_user_string(arg2
);
11955 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
11956 unlock_user(p
, arg2
, 0);
11959 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11960 case TARGET_NR_inotify_rm_watch
:
11961 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
11965 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11966 case TARGET_NR_mq_open
:
11968 struct mq_attr posix_mq_attr
;
11969 struct mq_attr
*pposix_mq_attr
;
11972 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
11973 pposix_mq_attr
= NULL
;
11975 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
11978 pposix_mq_attr
= &posix_mq_attr
;
11980 p
= lock_user_string(arg1
- 1);
11984 ret
= get_errno(mq_open(p
, host_flags
, arg3
, pposix_mq_attr
));
11985 unlock_user (p
, arg1
, 0);
11989 case TARGET_NR_mq_unlink
:
11990 p
= lock_user_string(arg1
- 1);
11992 ret
= -TARGET_EFAULT
;
11995 ret
= get_errno(mq_unlink(p
));
11996 unlock_user (p
, arg1
, 0);
11999 case TARGET_NR_mq_timedsend
:
12001 struct timespec ts
;
12003 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
12005 target_to_host_timespec(&ts
, arg5
);
12006 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
12007 host_to_target_timespec(arg5
, &ts
);
12009 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
12011 unlock_user (p
, arg2
, arg3
);
12015 case TARGET_NR_mq_timedreceive
:
12017 struct timespec ts
;
12020 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
12022 target_to_host_timespec(&ts
, arg5
);
12023 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12025 host_to_target_timespec(arg5
, &ts
);
12027 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12030 unlock_user (p
, arg2
, arg3
);
12032 put_user_u32(prio
, arg4
);
12036 /* Not implemented for now... */
12037 /* case TARGET_NR_mq_notify: */
12040 case TARGET_NR_mq_getsetattr
:
12042 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
12045 ret
= mq_getattr(arg1
, &posix_mq_attr_out
);
12046 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
12049 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
12050 ret
|= mq_setattr(arg1
, &posix_mq_attr_in
, &posix_mq_attr_out
);
12057 #ifdef CONFIG_SPLICE
12058 #ifdef TARGET_NR_tee
12059 case TARGET_NR_tee
:
12061 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
12065 #ifdef TARGET_NR_splice
12066 case TARGET_NR_splice
:
12068 loff_t loff_in
, loff_out
;
12069 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
12071 if (get_user_u64(loff_in
, arg2
)) {
12074 ploff_in
= &loff_in
;
12077 if (get_user_u64(loff_out
, arg4
)) {
12080 ploff_out
= &loff_out
;
12082 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
12084 if (put_user_u64(loff_in
, arg2
)) {
12089 if (put_user_u64(loff_out
, arg4
)) {
12096 #ifdef TARGET_NR_vmsplice
12097 case TARGET_NR_vmsplice
:
12099 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
12101 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
12102 unlock_iovec(vec
, arg2
, arg3
, 0);
12104 ret
= -host_to_target_errno(errno
);
12109 #endif /* CONFIG_SPLICE */
12110 #ifdef CONFIG_EVENTFD
12111 #if defined(TARGET_NR_eventfd)
12112 case TARGET_NR_eventfd
:
12113 ret
= get_errno(eventfd(arg1
, 0));
12115 fd_trans_register(ret
, &target_eventfd_trans
);
12119 #if defined(TARGET_NR_eventfd2)
12120 case TARGET_NR_eventfd2
:
12122 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
12123 if (arg2
& TARGET_O_NONBLOCK
) {
12124 host_flags
|= O_NONBLOCK
;
12126 if (arg2
& TARGET_O_CLOEXEC
) {
12127 host_flags
|= O_CLOEXEC
;
12129 ret
= get_errno(eventfd(arg1
, host_flags
));
12131 fd_trans_register(ret
, &target_eventfd_trans
);
12136 #endif /* CONFIG_EVENTFD */
12137 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12138 case TARGET_NR_fallocate
:
12139 #if TARGET_ABI_BITS == 32
12140 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
12141 target_offset64(arg5
, arg6
)));
12143 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
12147 #if defined(CONFIG_SYNC_FILE_RANGE)
12148 #if defined(TARGET_NR_sync_file_range)
12149 case TARGET_NR_sync_file_range
:
12150 #if TARGET_ABI_BITS == 32
12151 #if defined(TARGET_MIPS)
12152 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12153 target_offset64(arg5
, arg6
), arg7
));
12155 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
12156 target_offset64(arg4
, arg5
), arg6
));
12157 #endif /* !TARGET_MIPS */
12159 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
12163 #if defined(TARGET_NR_sync_file_range2)
12164 case TARGET_NR_sync_file_range2
:
12165 /* This is like sync_file_range but the arguments are reordered */
12166 #if TARGET_ABI_BITS == 32
12167 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12168 target_offset64(arg5
, arg6
), arg2
));
12170 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
12175 #if defined(TARGET_NR_signalfd4)
12176 case TARGET_NR_signalfd4
:
12177 ret
= do_signalfd4(arg1
, arg2
, arg4
);
12180 #if defined(TARGET_NR_signalfd)
12181 case TARGET_NR_signalfd
:
12182 ret
= do_signalfd4(arg1
, arg2
, 0);
12185 #if defined(CONFIG_EPOLL)
12186 #if defined(TARGET_NR_epoll_create)
12187 case TARGET_NR_epoll_create
:
12188 ret
= get_errno(epoll_create(arg1
));
12191 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12192 case TARGET_NR_epoll_create1
:
12193 ret
= get_errno(epoll_create1(arg1
));
12196 #if defined(TARGET_NR_epoll_ctl)
12197 case TARGET_NR_epoll_ctl
:
12199 struct epoll_event ep
;
12200 struct epoll_event
*epp
= 0;
12202 struct target_epoll_event
*target_ep
;
12203 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
12206 ep
.events
= tswap32(target_ep
->events
);
12207 /* The epoll_data_t union is just opaque data to the kernel,
12208 * so we transfer all 64 bits across and need not worry what
12209 * actual data type it is.
12211 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
12212 unlock_user_struct(target_ep
, arg4
, 0);
12215 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
12220 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12221 #if defined(TARGET_NR_epoll_wait)
12222 case TARGET_NR_epoll_wait
:
12224 #if defined(TARGET_NR_epoll_pwait)
12225 case TARGET_NR_epoll_pwait
:
12228 struct target_epoll_event
*target_ep
;
12229 struct epoll_event
*ep
;
12231 int maxevents
= arg3
;
12232 int timeout
= arg4
;
12234 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
12235 ret
= -TARGET_EINVAL
;
12239 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
12240 maxevents
* sizeof(struct target_epoll_event
), 1);
12245 ep
= g_try_new(struct epoll_event
, maxevents
);
12247 unlock_user(target_ep
, arg2
, 0);
12248 ret
= -TARGET_ENOMEM
;
12253 #if defined(TARGET_NR_epoll_pwait)
12254 case TARGET_NR_epoll_pwait
:
12256 target_sigset_t
*target_set
;
12257 sigset_t _set
, *set
= &_set
;
12260 if (arg6
!= sizeof(target_sigset_t
)) {
12261 ret
= -TARGET_EINVAL
;
12265 target_set
= lock_user(VERIFY_READ
, arg5
,
12266 sizeof(target_sigset_t
), 1);
12268 ret
= -TARGET_EFAULT
;
12271 target_to_host_sigset(set
, target_set
);
12272 unlock_user(target_set
, arg5
, 0);
12277 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12278 set
, SIGSET_T_SIZE
));
12282 #if defined(TARGET_NR_epoll_wait)
12283 case TARGET_NR_epoll_wait
:
12284 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12289 ret
= -TARGET_ENOSYS
;
12291 if (!is_error(ret
)) {
12293 for (i
= 0; i
< ret
; i
++) {
12294 target_ep
[i
].events
= tswap32(ep
[i
].events
);
12295 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
12297 unlock_user(target_ep
, arg2
,
12298 ret
* sizeof(struct target_epoll_event
));
12300 unlock_user(target_ep
, arg2
, 0);
12307 #ifdef TARGET_NR_prlimit64
12308 case TARGET_NR_prlimit64
:
12310 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12311 struct target_rlimit64
*target_rnew
, *target_rold
;
12312 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
12313 int resource
= target_to_host_resource(arg2
);
12315 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
12318 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
12319 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
12320 unlock_user_struct(target_rnew
, arg3
, 0);
12324 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
12325 if (!is_error(ret
) && arg4
) {
12326 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
12329 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
12330 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
12331 unlock_user_struct(target_rold
, arg4
, 1);
12336 #ifdef TARGET_NR_gethostname
12337 case TARGET_NR_gethostname
:
12339 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
12341 ret
= get_errno(gethostname(name
, arg2
));
12342 unlock_user(name
, arg1
, arg2
);
12344 ret
= -TARGET_EFAULT
;
12349 #ifdef TARGET_NR_atomic_cmpxchg_32
12350 case TARGET_NR_atomic_cmpxchg_32
:
12352 /* should use start_exclusive from main.c */
12353 abi_ulong mem_value
;
12354 if (get_user_u32(mem_value
, arg6
)) {
12355 target_siginfo_t info
;
12356 info
.si_signo
= SIGSEGV
;
12358 info
.si_code
= TARGET_SEGV_MAPERR
;
12359 info
._sifields
._sigfault
._addr
= arg6
;
12360 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
12361 QEMU_SI_FAULT
, &info
);
12365 if (mem_value
== arg2
)
12366 put_user_u32(arg1
, arg6
);
12371 #ifdef TARGET_NR_atomic_barrier
12372 case TARGET_NR_atomic_barrier
:
12374 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
12380 #ifdef TARGET_NR_timer_create
12381 case TARGET_NR_timer_create
:
12383 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12385 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
12388 int timer_index
= next_free_host_timer();
12390 if (timer_index
< 0) {
12391 ret
= -TARGET_EAGAIN
;
12393 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
12396 phost_sevp
= &host_sevp
;
12397 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
12403 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
12407 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
12416 #ifdef TARGET_NR_timer_settime
12417 case TARGET_NR_timer_settime
:
12419 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12420 * struct itimerspec * old_value */
12421 target_timer_t timerid
= get_timer_id(arg1
);
12425 } else if (arg3
== 0) {
12426 ret
= -TARGET_EINVAL
;
12428 timer_t htimer
= g_posix_timers
[timerid
];
12429 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12431 if (target_to_host_itimerspec(&hspec_new
, arg3
)) {
12435 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12436 if (arg4
&& host_to_target_itimerspec(arg4
, &hspec_old
)) {
12444 #ifdef TARGET_NR_timer_gettime
12445 case TARGET_NR_timer_gettime
:
12447 /* args: timer_t timerid, struct itimerspec *curr_value */
12448 target_timer_t timerid
= get_timer_id(arg1
);
12452 } else if (!arg2
) {
12453 ret
= -TARGET_EFAULT
;
12455 timer_t htimer
= g_posix_timers
[timerid
];
12456 struct itimerspec hspec
;
12457 ret
= get_errno(timer_gettime(htimer
, &hspec
));
12459 if (host_to_target_itimerspec(arg2
, &hspec
)) {
12460 ret
= -TARGET_EFAULT
;
12467 #ifdef TARGET_NR_timer_getoverrun
12468 case TARGET_NR_timer_getoverrun
:
12470 /* args: timer_t timerid */
12471 target_timer_t timerid
= get_timer_id(arg1
);
12476 timer_t htimer
= g_posix_timers
[timerid
];
12477 ret
= get_errno(timer_getoverrun(htimer
));
12479 fd_trans_unregister(ret
);
12484 #ifdef TARGET_NR_timer_delete
12485 case TARGET_NR_timer_delete
:
12487 /* args: timer_t timerid */
12488 target_timer_t timerid
= get_timer_id(arg1
);
12493 timer_t htimer
= g_posix_timers
[timerid
];
12494 ret
= get_errno(timer_delete(htimer
));
12495 g_posix_timers
[timerid
] = 0;
12501 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12502 case TARGET_NR_timerfd_create
:
12503 ret
= get_errno(timerfd_create(arg1
,
12504 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
12508 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12509 case TARGET_NR_timerfd_gettime
:
12511 struct itimerspec its_curr
;
12513 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
12515 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
12522 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12523 case TARGET_NR_timerfd_settime
:
12525 struct itimerspec its_new
, its_old
, *p_new
;
12528 if (target_to_host_itimerspec(&its_new
, arg3
)) {
12536 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
12538 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
12545 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12546 case TARGET_NR_ioprio_get
:
12547 ret
= get_errno(ioprio_get(arg1
, arg2
));
12551 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12552 case TARGET_NR_ioprio_set
:
12553 ret
= get_errno(ioprio_set(arg1
, arg2
, arg3
));
12557 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12558 case TARGET_NR_setns
:
12559 ret
= get_errno(setns(arg1
, arg2
));
12562 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12563 case TARGET_NR_unshare
:
12564 ret
= get_errno(unshare(arg1
));
12567 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12568 case TARGET_NR_kcmp
:
12569 ret
= get_errno(kcmp(arg1
, arg2
, arg3
, arg4
, arg5
));
12575 gemu_log("qemu: Unsupported syscall: %d\n", num
);
12576 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
12577 unimplemented_nowarn
:
12579 ret
= -TARGET_ENOSYS
;
12584 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
12587 print_syscall_ret(num
, ret
);
12588 trace_guest_user_syscall_ret(cpu
, num
, ret
);
12591 ret
= -TARGET_EFAULT
;