/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
29 #include <sys/mount.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
36 #include <linux/capability.h>
38 #include <sys/timex.h>
39 #include <sys/socket.h>
43 #include <sys/times.h>
46 #include <sys/statfs.h>
48 #include <sys/sysinfo.h>
49 #include <sys/signalfd.h>
50 //#include <sys/user.h>
51 #include <netinet/ip.h>
52 #include <netinet/tcp.h>
53 #include <linux/wireless.h>
54 #include <linux/icmp.h>
55 #include <linux/icmpv6.h>
56 #include <linux/errqueue.h>
57 #include <linux/random.h>
58 #include "qemu-common.h"
60 #include <sys/timerfd.h>
66 #include <sys/eventfd.h>
69 #include <sys/epoll.h>
72 #include "qemu/xattr.h"
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
91 #include <linux/mtio.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
107 #include <linux/if_bridge.h>
109 #include <linux/audit.h>
110 #include "linux_loop.h"
/* Not always exported by host kernel headers. */
#define CLONE_IO 0x80000000 /* Clone io context */

/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */
/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS               \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

/* Flag combinations that are invalid for a plain fork */
#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

/* Flag combinations that are invalid for thread creation */
#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))
/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */

/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

/* Defined locally because including linux/msdos_fs.h conflicts with
 * other headers we need.
 */
//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
/* Direct raw-syscall wrappers: each _syscall<N> macro emits a static
 * function "name" that invokes syscall(__NR_name, ...) with N arguments.
 * Used for syscalls the host libc does not wrap (or wraps differently).
 */
#define _syscall0(type,name)            \
static type name (void)                 \
{                                       \
    return syscall(__NR_##name);        \
}

#define _syscall1(type,name,type1,arg1)         \
static type name (type1 arg1)                   \
{                                               \
    return syscall(__NR_##name, arg1);          \
}

#define _syscall2(type,name,type1,arg1,type2,arg2)      \
static type name (type1 arg1,type2 arg2)                \
{                                                       \
    return syscall(__NR_##name, arg1, arg2);            \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)   \
static type name (type1 arg1,type2 arg2,type3 arg3)             \
{                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3);              \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)  \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)            \
{                                                                         \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4);                  \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,  \
                  type5,arg5)                                             \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{                                                                         \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);            \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,  \
                  type5,arg5,type6,arg6)                                  \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6)                                             \
{                                                                         \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);      \
}
/* Local sys_* aliases so the _syscall<N> wrappers below don't collide
 * with the libc declarations of the same names.
 */
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch

#if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
/* These 64-bit hosts have no _llseek; plain lseek takes a 64-bit offset. */
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif
254 _syscall0(int, gettid
)
256 /* This is a replacement for the host gettid() and must return a host
258 static int gettid(void) {
263 /* For the 64-bit guest on 32-bit host case we must emulate
264 * getdents using getdents64, because otherwise the host
265 * might hand us back more dirent records than we can fit
266 * into the guest buffer after structure format conversion.
267 * Otherwise we emulate getdents with getdents if the host has it.
269 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
270 #define EMULATE_GETDENTS_WITH_GETDENTS
273 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
274 _syscall3(int, sys_getdents
, uint
, fd
, struct linux_dirent
*, dirp
, uint
, count
);
276 #if (defined(TARGET_NR_getdents) && \
277 !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
278 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
279 _syscall3(int, sys_getdents64
, uint
, fd
, struct linux_dirent64
*, dirp
, uint
, count
);
281 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
282 _syscall5(int, _llseek
, uint
, fd
, ulong
, hi
, ulong
, lo
,
283 loff_t
*, res
, uint
, wh
);
285 _syscall3(int, sys_rt_sigqueueinfo
, pid_t
, pid
, int, sig
, siginfo_t
*, uinfo
)
286 _syscall4(int, sys_rt_tgsigqueueinfo
, pid_t
, pid
, pid_t
, tid
, int, sig
,
288 _syscall3(int,sys_syslog
,int,type
,char*,bufp
,int,len
)
289 #ifdef __NR_exit_group
290 _syscall1(int,exit_group
,int,error_code
)
292 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
293 _syscall1(int,set_tid_address
,int *,tidptr
)
295 #if defined(TARGET_NR_futex) && defined(__NR_futex)
296 _syscall6(int,sys_futex
,int *,uaddr
,int,op
,int,val
,
297 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
299 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
300 _syscall3(int, sys_sched_getaffinity
, pid_t
, pid
, unsigned int, len
,
301 unsigned long *, user_mask_ptr
);
302 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
303 _syscall3(int, sys_sched_setaffinity
, pid_t
, pid
, unsigned int, len
,
304 unsigned long *, user_mask_ptr
);
305 #define __NR_sys_getcpu __NR_getcpu
306 _syscall3(int, sys_getcpu
, unsigned *, cpu
, unsigned *, node
, void *, tcache
);
307 _syscall4(int, reboot
, int, magic1
, int, magic2
, unsigned int, cmd
,
309 _syscall2(int, capget
, struct __user_cap_header_struct
*, header
,
310 struct __user_cap_data_struct
*, data
);
311 _syscall2(int, capset
, struct __user_cap_header_struct
*, header
,
312 struct __user_cap_data_struct
*, data
);
313 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
314 _syscall2(int, ioprio_get
, int, which
, int, who
)
316 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
317 _syscall3(int, ioprio_set
, int, which
, int, who
, int, ioprio
)
319 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
320 _syscall3(int, getrandom
, void *, buf
, size_t, buflen
, unsigned int, flags
)
323 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
324 _syscall5(int, kcmp
, pid_t
, pid1
, pid_t
, pid2
, int, type
,
325 unsigned long, idx1
, unsigned long, idx2
)
328 static bitmask_transtbl fcntl_flags_tbl
[] = {
329 { TARGET_O_ACCMODE
, TARGET_O_WRONLY
, O_ACCMODE
, O_WRONLY
, },
330 { TARGET_O_ACCMODE
, TARGET_O_RDWR
, O_ACCMODE
, O_RDWR
, },
331 { TARGET_O_CREAT
, TARGET_O_CREAT
, O_CREAT
, O_CREAT
, },
332 { TARGET_O_EXCL
, TARGET_O_EXCL
, O_EXCL
, O_EXCL
, },
333 { TARGET_O_NOCTTY
, TARGET_O_NOCTTY
, O_NOCTTY
, O_NOCTTY
, },
334 { TARGET_O_TRUNC
, TARGET_O_TRUNC
, O_TRUNC
, O_TRUNC
, },
335 { TARGET_O_APPEND
, TARGET_O_APPEND
, O_APPEND
, O_APPEND
, },
336 { TARGET_O_NONBLOCK
, TARGET_O_NONBLOCK
, O_NONBLOCK
, O_NONBLOCK
, },
337 { TARGET_O_SYNC
, TARGET_O_DSYNC
, O_SYNC
, O_DSYNC
, },
338 { TARGET_O_SYNC
, TARGET_O_SYNC
, O_SYNC
, O_SYNC
, },
339 { TARGET_FASYNC
, TARGET_FASYNC
, FASYNC
, FASYNC
, },
340 { TARGET_O_DIRECTORY
, TARGET_O_DIRECTORY
, O_DIRECTORY
, O_DIRECTORY
, },
341 { TARGET_O_NOFOLLOW
, TARGET_O_NOFOLLOW
, O_NOFOLLOW
, O_NOFOLLOW
, },
342 #if defined(O_DIRECT)
343 { TARGET_O_DIRECT
, TARGET_O_DIRECT
, O_DIRECT
, O_DIRECT
, },
345 #if defined(O_NOATIME)
346 { TARGET_O_NOATIME
, TARGET_O_NOATIME
, O_NOATIME
, O_NOATIME
},
348 #if defined(O_CLOEXEC)
349 { TARGET_O_CLOEXEC
, TARGET_O_CLOEXEC
, O_CLOEXEC
, O_CLOEXEC
},
352 { TARGET_O_PATH
, TARGET_O_PATH
, O_PATH
, O_PATH
},
354 #if defined(O_TMPFILE)
355 { TARGET_O_TMPFILE
, TARGET_O_TMPFILE
, O_TMPFILE
, O_TMPFILE
},
357 /* Don't terminate the list prematurely on 64-bit host+guest. */
358 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
359 { TARGET_O_LARGEFILE
, TARGET_O_LARGEFILE
, O_LARGEFILE
, O_LARGEFILE
, },
366 QEMU_IFLA_BR_FORWARD_DELAY
,
367 QEMU_IFLA_BR_HELLO_TIME
,
368 QEMU_IFLA_BR_MAX_AGE
,
369 QEMU_IFLA_BR_AGEING_TIME
,
370 QEMU_IFLA_BR_STP_STATE
,
371 QEMU_IFLA_BR_PRIORITY
,
372 QEMU_IFLA_BR_VLAN_FILTERING
,
373 QEMU_IFLA_BR_VLAN_PROTOCOL
,
374 QEMU_IFLA_BR_GROUP_FWD_MASK
,
375 QEMU_IFLA_BR_ROOT_ID
,
376 QEMU_IFLA_BR_BRIDGE_ID
,
377 QEMU_IFLA_BR_ROOT_PORT
,
378 QEMU_IFLA_BR_ROOT_PATH_COST
,
379 QEMU_IFLA_BR_TOPOLOGY_CHANGE
,
380 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED
,
381 QEMU_IFLA_BR_HELLO_TIMER
,
382 QEMU_IFLA_BR_TCN_TIMER
,
383 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER
,
384 QEMU_IFLA_BR_GC_TIMER
,
385 QEMU_IFLA_BR_GROUP_ADDR
,
386 QEMU_IFLA_BR_FDB_FLUSH
,
387 QEMU_IFLA_BR_MCAST_ROUTER
,
388 QEMU_IFLA_BR_MCAST_SNOOPING
,
389 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR
,
390 QEMU_IFLA_BR_MCAST_QUERIER
,
391 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY
,
392 QEMU_IFLA_BR_MCAST_HASH_MAX
,
393 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT
,
394 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT
,
395 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL
,
396 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL
,
397 QEMU_IFLA_BR_MCAST_QUERIER_INTVL
,
398 QEMU_IFLA_BR_MCAST_QUERY_INTVL
,
399 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
,
400 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL
,
401 QEMU_IFLA_BR_NF_CALL_IPTABLES
,
402 QEMU_IFLA_BR_NF_CALL_IP6TABLES
,
403 QEMU_IFLA_BR_NF_CALL_ARPTABLES
,
404 QEMU_IFLA_BR_VLAN_DEFAULT_PVID
,
406 QEMU_IFLA_BR_VLAN_STATS_ENABLED
,
407 QEMU_IFLA_BR_MCAST_STATS_ENABLED
,
431 QEMU_IFLA_NET_NS_PID
,
434 QEMU_IFLA_VFINFO_LIST
,
442 QEMU_IFLA_PROMISCUITY
,
443 QEMU_IFLA_NUM_TX_QUEUES
,
444 QEMU_IFLA_NUM_RX_QUEUES
,
446 QEMU_IFLA_PHYS_PORT_ID
,
447 QEMU_IFLA_CARRIER_CHANGES
,
448 QEMU_IFLA_PHYS_SWITCH_ID
,
449 QEMU_IFLA_LINK_NETNSID
,
450 QEMU_IFLA_PHYS_PORT_NAME
,
451 QEMU_IFLA_PROTO_DOWN
,
452 QEMU_IFLA_GSO_MAX_SEGS
,
453 QEMU_IFLA_GSO_MAX_SIZE
,
460 QEMU_IFLA_BRPORT_UNSPEC
,
461 QEMU_IFLA_BRPORT_STATE
,
462 QEMU_IFLA_BRPORT_PRIORITY
,
463 QEMU_IFLA_BRPORT_COST
,
464 QEMU_IFLA_BRPORT_MODE
,
465 QEMU_IFLA_BRPORT_GUARD
,
466 QEMU_IFLA_BRPORT_PROTECT
,
467 QEMU_IFLA_BRPORT_FAST_LEAVE
,
468 QEMU_IFLA_BRPORT_LEARNING
,
469 QEMU_IFLA_BRPORT_UNICAST_FLOOD
,
470 QEMU_IFLA_BRPORT_PROXYARP
,
471 QEMU_IFLA_BRPORT_LEARNING_SYNC
,
472 QEMU_IFLA_BRPORT_PROXYARP_WIFI
,
473 QEMU_IFLA_BRPORT_ROOT_ID
,
474 QEMU_IFLA_BRPORT_BRIDGE_ID
,
475 QEMU_IFLA_BRPORT_DESIGNATED_PORT
,
476 QEMU_IFLA_BRPORT_DESIGNATED_COST
,
479 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
,
480 QEMU_IFLA_BRPORT_CONFIG_PENDING
,
481 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER
,
482 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER
,
483 QEMU_IFLA_BRPORT_HOLD_TIMER
,
484 QEMU_IFLA_BRPORT_FLUSH
,
485 QEMU_IFLA_BRPORT_MULTICAST_ROUTER
,
486 QEMU_IFLA_BRPORT_PAD
,
487 QEMU___IFLA_BRPORT_MAX
491 QEMU_IFLA_INFO_UNSPEC
,
494 QEMU_IFLA_INFO_XSTATS
,
495 QEMU_IFLA_INFO_SLAVE_KIND
,
496 QEMU_IFLA_INFO_SLAVE_DATA
,
497 QEMU___IFLA_INFO_MAX
,
501 QEMU_IFLA_INET_UNSPEC
,
503 QEMU___IFLA_INET_MAX
,
507 QEMU_IFLA_INET6_UNSPEC
,
508 QEMU_IFLA_INET6_FLAGS
,
509 QEMU_IFLA_INET6_CONF
,
510 QEMU_IFLA_INET6_STATS
,
511 QEMU_IFLA_INET6_MCAST
,
512 QEMU_IFLA_INET6_CACHEINFO
,
513 QEMU_IFLA_INET6_ICMP6STATS
,
514 QEMU_IFLA_INET6_TOKEN
,
515 QEMU_IFLA_INET6_ADDR_GEN_MODE
,
516 QEMU___IFLA_INET6_MAX
519 typedef abi_long (*TargetFdDataFunc
)(void *, size_t);
520 typedef abi_long (*TargetFdAddrFunc
)(void *, abi_ulong
, socklen_t
);
521 typedef struct TargetFdTrans
{
522 TargetFdDataFunc host_to_target_data
;
523 TargetFdDataFunc target_to_host_data
;
524 TargetFdAddrFunc target_to_host_addr
;
527 static TargetFdTrans
**target_fd_trans
;
529 static unsigned int target_fd_max
;
531 static TargetFdDataFunc
fd_trans_target_to_host_data(int fd
)
533 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
534 return target_fd_trans
[fd
]->target_to_host_data
;
539 static TargetFdDataFunc
fd_trans_host_to_target_data(int fd
)
541 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
542 return target_fd_trans
[fd
]->host_to_target_data
;
547 static TargetFdAddrFunc
fd_trans_target_to_host_addr(int fd
)
549 if (fd
>= 0 && fd
< target_fd_max
&& target_fd_trans
[fd
]) {
550 return target_fd_trans
[fd
]->target_to_host_addr
;
555 static void fd_trans_register(int fd
, TargetFdTrans
*trans
)
559 if (fd
>= target_fd_max
) {
560 oldmax
= target_fd_max
;
561 target_fd_max
= ((fd
>> 6) + 1) << 6; /* by slice of 64 entries */
562 target_fd_trans
= g_renew(TargetFdTrans
*,
563 target_fd_trans
, target_fd_max
);
564 memset((void *)(target_fd_trans
+ oldmax
), 0,
565 (target_fd_max
- oldmax
) * sizeof(TargetFdTrans
*));
567 target_fd_trans
[fd
] = trans
;
570 static void fd_trans_unregister(int fd
)
572 if (fd
>= 0 && fd
< target_fd_max
) {
573 target_fd_trans
[fd
] = NULL
;
577 static void fd_trans_dup(int oldfd
, int newfd
)
579 fd_trans_unregister(newfd
);
580 if (oldfd
< target_fd_max
&& target_fd_trans
[oldfd
]) {
581 fd_trans_register(newfd
, target_fd_trans
[oldfd
]);
/* getcwd() with the kernel syscall's return convention: on success the
 * length of the path including the terminating NUL, on failure -1 with
 * errno set by getcwd().
 */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return (-1);
    }
    return strlen(buf) + 1;
}
#ifdef TARGET_NR_utimensat
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Host kernel has no utimensat: report it as unimplemented. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
#ifdef TARGET_NR_renameat2
#if defined(__NR_renameat2)
#define __NR_sys_renameat2 __NR_renameat2
_syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
          const char *, new, unsigned int, flags)
#else
/* No host renameat2: fall back to renameat() when no flags are
 * requested, otherwise report the syscall as unimplemented.
 */
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags == 0) {
        return renameat(oldfd, old, newfd, new);
    }
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_renameat2 */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

/* Thin wrappers so the syscall layer has uniform sys_* entry points. */
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
    return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
    return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};

_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

/* Claim the first free slot in g_posix_timers[] and return its index,
 * or -1 if all 32 slots are in use.
 */
static inline int next_free_host_timer(void)
{
    int k;
    /* FIXME: Does finding the next free slot require a lock? */
    /* Mark the slot busy with a placeholder; the real timer_t is
     * stored once timer_create() succeeds. */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
698 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
700 static inline int regpairs_aligned(void *cpu_env
, int num
)
702 return ((((CPUARMState
*)cpu_env
)->eabi
) == 1) ;
704 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
705 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
706 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
707 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
708 * of registers which translates to the same as ARM/MIPS, because we start with
710 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
711 #elif defined(TARGET_SH4)
712 /* SH4 doesn't align register pairs, except for p{read,write}64 */
713 static inline int regpairs_aligned(void *cpu_env
, int num
)
716 case TARGET_NR_pread64
:
717 case TARGET_NR_pwrite64
:
724 #elif defined(TARGET_XTENSA)
725 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 1; }
727 static inline int regpairs_aligned(void *cpu_env
, int num
) { return 0; }
/* Upper bound on errno values translated through the tables below. */
#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = { 0, };
738 * This list is the union of errno values overridden in asm-<arch>/errno.h
739 * minus the errnos that are not actually generic to all archs.
741 static uint16_t host_to_target_errno_table
[ERRNO_TABLE_SIZE
] = {
742 [EAGAIN
] = TARGET_EAGAIN
,
743 [EIDRM
] = TARGET_EIDRM
,
744 [ECHRNG
] = TARGET_ECHRNG
,
745 [EL2NSYNC
] = TARGET_EL2NSYNC
,
746 [EL3HLT
] = TARGET_EL3HLT
,
747 [EL3RST
] = TARGET_EL3RST
,
748 [ELNRNG
] = TARGET_ELNRNG
,
749 [EUNATCH
] = TARGET_EUNATCH
,
750 [ENOCSI
] = TARGET_ENOCSI
,
751 [EL2HLT
] = TARGET_EL2HLT
,
752 [EDEADLK
] = TARGET_EDEADLK
,
753 [ENOLCK
] = TARGET_ENOLCK
,
754 [EBADE
] = TARGET_EBADE
,
755 [EBADR
] = TARGET_EBADR
,
756 [EXFULL
] = TARGET_EXFULL
,
757 [ENOANO
] = TARGET_ENOANO
,
758 [EBADRQC
] = TARGET_EBADRQC
,
759 [EBADSLT
] = TARGET_EBADSLT
,
760 [EBFONT
] = TARGET_EBFONT
,
761 [ENOSTR
] = TARGET_ENOSTR
,
762 [ENODATA
] = TARGET_ENODATA
,
763 [ETIME
] = TARGET_ETIME
,
764 [ENOSR
] = TARGET_ENOSR
,
765 [ENONET
] = TARGET_ENONET
,
766 [ENOPKG
] = TARGET_ENOPKG
,
767 [EREMOTE
] = TARGET_EREMOTE
,
768 [ENOLINK
] = TARGET_ENOLINK
,
769 [EADV
] = TARGET_EADV
,
770 [ESRMNT
] = TARGET_ESRMNT
,
771 [ECOMM
] = TARGET_ECOMM
,
772 [EPROTO
] = TARGET_EPROTO
,
773 [EDOTDOT
] = TARGET_EDOTDOT
,
774 [EMULTIHOP
] = TARGET_EMULTIHOP
,
775 [EBADMSG
] = TARGET_EBADMSG
,
776 [ENAMETOOLONG
] = TARGET_ENAMETOOLONG
,
777 [EOVERFLOW
] = TARGET_EOVERFLOW
,
778 [ENOTUNIQ
] = TARGET_ENOTUNIQ
,
779 [EBADFD
] = TARGET_EBADFD
,
780 [EREMCHG
] = TARGET_EREMCHG
,
781 [ELIBACC
] = TARGET_ELIBACC
,
782 [ELIBBAD
] = TARGET_ELIBBAD
,
783 [ELIBSCN
] = TARGET_ELIBSCN
,
784 [ELIBMAX
] = TARGET_ELIBMAX
,
785 [ELIBEXEC
] = TARGET_ELIBEXEC
,
786 [EILSEQ
] = TARGET_EILSEQ
,
787 [ENOSYS
] = TARGET_ENOSYS
,
788 [ELOOP
] = TARGET_ELOOP
,
789 [ERESTART
] = TARGET_ERESTART
,
790 [ESTRPIPE
] = TARGET_ESTRPIPE
,
791 [ENOTEMPTY
] = TARGET_ENOTEMPTY
,
792 [EUSERS
] = TARGET_EUSERS
,
793 [ENOTSOCK
] = TARGET_ENOTSOCK
,
794 [EDESTADDRREQ
] = TARGET_EDESTADDRREQ
,
795 [EMSGSIZE
] = TARGET_EMSGSIZE
,
796 [EPROTOTYPE
] = TARGET_EPROTOTYPE
,
797 [ENOPROTOOPT
] = TARGET_ENOPROTOOPT
,
798 [EPROTONOSUPPORT
] = TARGET_EPROTONOSUPPORT
,
799 [ESOCKTNOSUPPORT
] = TARGET_ESOCKTNOSUPPORT
,
800 [EOPNOTSUPP
] = TARGET_EOPNOTSUPP
,
801 [EPFNOSUPPORT
] = TARGET_EPFNOSUPPORT
,
802 [EAFNOSUPPORT
] = TARGET_EAFNOSUPPORT
,
803 [EADDRINUSE
] = TARGET_EADDRINUSE
,
804 [EADDRNOTAVAIL
] = TARGET_EADDRNOTAVAIL
,
805 [ENETDOWN
] = TARGET_ENETDOWN
,
806 [ENETUNREACH
] = TARGET_ENETUNREACH
,
807 [ENETRESET
] = TARGET_ENETRESET
,
808 [ECONNABORTED
] = TARGET_ECONNABORTED
,
809 [ECONNRESET
] = TARGET_ECONNRESET
,
810 [ENOBUFS
] = TARGET_ENOBUFS
,
811 [EISCONN
] = TARGET_EISCONN
,
812 [ENOTCONN
] = TARGET_ENOTCONN
,
813 [EUCLEAN
] = TARGET_EUCLEAN
,
814 [ENOTNAM
] = TARGET_ENOTNAM
,
815 [ENAVAIL
] = TARGET_ENAVAIL
,
816 [EISNAM
] = TARGET_EISNAM
,
817 [EREMOTEIO
] = TARGET_EREMOTEIO
,
818 [EDQUOT
] = TARGET_EDQUOT
,
819 [ESHUTDOWN
] = TARGET_ESHUTDOWN
,
820 [ETOOMANYREFS
] = TARGET_ETOOMANYREFS
,
821 [ETIMEDOUT
] = TARGET_ETIMEDOUT
,
822 [ECONNREFUSED
] = TARGET_ECONNREFUSED
,
823 [EHOSTDOWN
] = TARGET_EHOSTDOWN
,
824 [EHOSTUNREACH
] = TARGET_EHOSTUNREACH
,
825 [EALREADY
] = TARGET_EALREADY
,
826 [EINPROGRESS
] = TARGET_EINPROGRESS
,
827 [ESTALE
] = TARGET_ESTALE
,
828 [ECANCELED
] = TARGET_ECANCELED
,
829 [ENOMEDIUM
] = TARGET_ENOMEDIUM
,
830 [EMEDIUMTYPE
] = TARGET_EMEDIUMTYPE
,
832 [ENOKEY
] = TARGET_ENOKEY
,
835 [EKEYEXPIRED
] = TARGET_EKEYEXPIRED
,
838 [EKEYREVOKED
] = TARGET_EKEYREVOKED
,
841 [EKEYREJECTED
] = TARGET_EKEYREJECTED
,
844 [EOWNERDEAD
] = TARGET_EOWNERDEAD
,
846 #ifdef ENOTRECOVERABLE
847 [ENOTRECOVERABLE
] = TARGET_ENOTRECOVERABLE
,
850 [ENOMSG
] = TARGET_ENOMSG
,
853 [ERFKILL
] = TARGET_ERFKILL
,
856 [EHWPOISON
] = TARGET_EHWPOISON
,
860 static inline int host_to_target_errno(int err
)
862 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
863 host_to_target_errno_table
[err
]) {
864 return host_to_target_errno_table
[err
];
869 static inline int target_to_host_errno(int err
)
871 if (err
>= 0 && err
< ERRNO_TABLE_SIZE
&&
872 target_to_host_errno_table
[err
]) {
873 return target_to_host_errno_table
[err
];
878 static inline abi_long
get_errno(abi_long ret
)
881 return -host_to_target_errno(errno
);
886 static inline int is_error(abi_long ret
)
888 return (abi_ulong
)ret
>= (abi_ulong
)(-4096);
891 const char *target_strerror(int err
)
893 if (err
== TARGET_ERESTARTSYS
) {
894 return "To be restarted";
896 if (err
== TARGET_QEMU_ESIGRETURN
) {
897 return "Successful exit from sigreturn";
900 if ((err
>= ERRNO_TABLE_SIZE
) || (err
< 0)) {
903 return strerror(target_to_host_errno(err
));
/* safe_##name wrappers: like the _syscall<N> macros but entered via
 * safe_syscall(), so a guest signal arriving during the syscall is
 * handled with correct restart/interrupt semantics.
 */
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
                      type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
                      type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
                        type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
                      type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
                        type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
953 safe_syscall3(ssize_t
, read
, int, fd
, void *, buff
, size_t, count
)
954 safe_syscall3(ssize_t
, write
, int, fd
, const void *, buff
, size_t, count
)
955 safe_syscall4(int, openat
, int, dirfd
, const char *, pathname
, \
956 int, flags
, mode_t
, mode
)
957 safe_syscall4(pid_t
, wait4
, pid_t
, pid
, int *, status
, int, options
, \
958 struct rusage
*, rusage
)
959 safe_syscall5(int, waitid
, idtype_t
, idtype
, id_t
, id
, siginfo_t
*, infop
, \
960 int, options
, struct rusage
*, rusage
)
961 safe_syscall3(int, execve
, const char *, filename
, char **, argv
, char **, envp
)
962 safe_syscall6(int, pselect6
, int, nfds
, fd_set
*, readfds
, fd_set
*, writefds
, \
963 fd_set
*, exceptfds
, struct timespec
*, timeout
, void *, sig
)
964 safe_syscall5(int, ppoll
, struct pollfd
*, ufds
, unsigned int, nfds
,
965 struct timespec
*, tsp
, const sigset_t
*, sigmask
,
967 safe_syscall6(int, epoll_pwait
, int, epfd
, struct epoll_event
*, events
,
968 int, maxevents
, int, timeout
, const sigset_t
*, sigmask
,
970 safe_syscall6(int,futex
,int *,uaddr
,int,op
,int,val
, \
971 const struct timespec
*,timeout
,int *,uaddr2
,int,val3
)
972 safe_syscall2(int, rt_sigsuspend
, sigset_t
*, newset
, size_t, sigsetsize
)
973 safe_syscall2(int, kill
, pid_t
, pid
, int, sig
)
974 safe_syscall2(int, tkill
, int, tid
, int, sig
)
975 safe_syscall3(int, tgkill
, int, tgid
, int, pid
, int, sig
)
976 safe_syscall3(ssize_t
, readv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
977 safe_syscall3(ssize_t
, writev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
)
978 safe_syscall5(ssize_t
, preadv
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
979 unsigned long, pos_l
, unsigned long, pos_h
)
980 safe_syscall5(ssize_t
, pwritev
, int, fd
, const struct iovec
*, iov
, int, iovcnt
,
981 unsigned long, pos_l
, unsigned long, pos_h
)
982 safe_syscall3(int, connect
, int, fd
, const struct sockaddr
*, addr
,
984 safe_syscall6(ssize_t
, sendto
, int, fd
, const void *, buf
, size_t, len
,
985 int, flags
, const struct sockaddr
*, addr
, socklen_t
, addrlen
)
986 safe_syscall6(ssize_t
, recvfrom
, int, fd
, void *, buf
, size_t, len
,
987 int, flags
, struct sockaddr
*, addr
, socklen_t
*, addrlen
)
988 safe_syscall3(ssize_t
, sendmsg
, int, fd
, const struct msghdr
*, msg
, int, flags
)
989 safe_syscall3(ssize_t
, recvmsg
, int, fd
, struct msghdr
*, msg
, int, flags
)
990 safe_syscall2(int, flock
, int, fd
, int, operation
)
991 safe_syscall4(int, rt_sigtimedwait
, const sigset_t
*, these
, siginfo_t
*, uinfo
,
992 const struct timespec
*, uts
, size_t, sigsetsize
)
993 safe_syscall4(int, accept4
, int, fd
, struct sockaddr
*, addr
, socklen_t
*, len
,
995 safe_syscall2(int, nanosleep
, const struct timespec
*, req
,
996 struct timespec
*, rem
)
997 #ifdef TARGET_NR_clock_nanosleep
998 safe_syscall4(int, clock_nanosleep
, const clockid_t
, clock
, int, flags
,
999 const struct timespec
*, req
, struct timespec
*, rem
)
1002 safe_syscall4(int, msgsnd
, int, msgid
, const void *, msgp
, size_t, sz
,
1004 safe_syscall5(int, msgrcv
, int, msgid
, void *, msgp
, size_t, sz
,
1005 long, msgtype
, int, flags
)
1006 safe_syscall4(int, semtimedop
, int, semid
, struct sembuf
*, tsops
,
1007 unsigned, nsops
, const struct timespec
*, timeout
)
/* This host kernel architecture uses a single ipc syscall; fake up
 * wrappers for the sub-operations to hide this implementation detail.
 * Annoyingly we can't include linux/ipc.h to get the constant definitions
 * for the call parameter because some structs in there conflict with the
 * sys/ipc.h ones. So we just define them here, and rely on them being
 * the same for all host architectures.
 */
#define Q_SEMTIMEDOP 4
#define Q_MSGSND 11
#define Q_MSGRCV 12
#define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
1021 safe_syscall6(int, ipc
, int, call
, long, first
, long, second
, long, third
,
1022 void *, ptr
, long, fifth
)
1023 static int safe_msgsnd(int msgid
, const void *msgp
, size_t sz
, int flags
)
1025 return safe_ipc(Q_IPCCALL(0, Q_MSGSND
), msgid
, sz
, flags
, (void *)msgp
, 0);
1027 static int safe_msgrcv(int msgid
, void *msgp
, size_t sz
, long type
, int flags
)
1029 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV
), msgid
, sz
, flags
, msgp
, type
);
1031 static int safe_semtimedop(int semid
, struct sembuf
*tsops
, unsigned nsops
,
1032 const struct timespec
*timeout
)
1034 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP
), semid
, nsops
, 0, tsops
,
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
/* POSIX message-queue send/receive with timeout, as raw safe syscalls. */
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
1060 static inline int host_to_target_sock_type(int host_type
)
1064 switch (host_type
& 0xf /* SOCK_TYPE_MASK */) {
1066 target_type
= TARGET_SOCK_DGRAM
;
1069 target_type
= TARGET_SOCK_STREAM
;
1072 target_type
= host_type
& 0xf /* SOCK_TYPE_MASK */;
1076 #if defined(SOCK_CLOEXEC)
1077 if (host_type
& SOCK_CLOEXEC
) {
1078 target_type
|= TARGET_SOCK_CLOEXEC
;
1082 #if defined(SOCK_NONBLOCK)
1083 if (host_type
& SOCK_NONBLOCK
) {
1084 target_type
|= TARGET_SOCK_NONBLOCK
;
1091 static abi_ulong target_brk
;
1092 static abi_ulong target_original_brk
;
1093 static abi_ulong brk_page
;
1095 void target_set_brk(abi_ulong new_brk
)
1097 target_original_brk
= target_brk
= HOST_PAGE_ALIGN(new_brk
);
1098 brk_page
= HOST_PAGE_ALIGN(target_brk
);
/* brk tracing: swap in the fprintf variant below to debug heap growth. */
//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)
1104 /* do_brk() must return target values and target errnos. */
1105 abi_long
do_brk(abi_ulong new_brk
)
1107 abi_long mapped_addr
;
1108 abi_ulong new_alloc_size
;
1110 DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx
") -> ", new_brk
);
1113 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (!new_brk)\n", target_brk
);
1116 if (new_brk
< target_original_brk
) {
1117 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk < target_original_brk)\n",
1122 /* If the new brk is less than the highest page reserved to the
1123 * target heap allocation, set it and we're almost done... */
1124 if (new_brk
<= brk_page
) {
1125 /* Heap contents are initialized to zero, as for anonymous
1127 if (new_brk
> target_brk
) {
1128 memset(g2h(target_brk
), 0, new_brk
- target_brk
);
1130 target_brk
= new_brk
;
1131 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (new_brk <= brk_page)\n", target_brk
);
1135 /* We need to allocate more memory after the brk... Note that
1136 * we don't use MAP_FIXED because that will map over the top of
1137 * any existing mapping (like the one with the host libc or qemu
1138 * itself); instead we treat "mapped but at wrong address" as
1139 * a failure and unmap again.
1141 new_alloc_size
= HOST_PAGE_ALIGN(new_brk
- brk_page
);
1142 mapped_addr
= get_errno(target_mmap(brk_page
, new_alloc_size
,
1143 PROT_READ
|PROT_WRITE
,
1144 MAP_ANON
|MAP_PRIVATE
, 0, 0));
1146 if (mapped_addr
== brk_page
) {
1147 /* Heap contents are initialized to zero, as for anonymous
1148 * mapped pages. Technically the new pages are already
1149 * initialized to zero since they *are* anonymous mapped
1150 * pages, however we have to take care with the contents that
1151 * come from the remaining part of the previous page: it may
1152 * contains garbage data due to a previous heap usage (grown
1153 * then shrunken). */
1154 memset(g2h(target_brk
), 0, brk_page
- target_brk
);
1156 target_brk
= new_brk
;
1157 brk_page
= HOST_PAGE_ALIGN(target_brk
);
1158 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr == brk_page)\n",
1161 } else if (mapped_addr
!= -1) {
1162 /* Mapped but at wrong address, meaning there wasn't actually
1163 * enough space for this brk.
1165 target_munmap(mapped_addr
, new_alloc_size
);
1167 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (mapped_addr != -1)\n", target_brk
);
1170 DEBUGF_BRK(TARGET_ABI_FMT_lx
" (otherwise)\n", target_brk
);
1173 #if defined(TARGET_ALPHA)
1174 /* We (partially) emulate OSF/1 on Alpha, which requires we
1175 return a proper errno, not an unchanged brk value. */
1176 return -TARGET_ENOMEM
;
1178 /* For everything else, return the previous break. */
1182 static inline abi_long
copy_from_user_fdset(fd_set
*fds
,
1183 abi_ulong target_fds_addr
,
1187 abi_ulong b
, *target_fds
;
1189 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1190 if (!(target_fds
= lock_user(VERIFY_READ
,
1192 sizeof(abi_ulong
) * nw
,
1194 return -TARGET_EFAULT
;
1198 for (i
= 0; i
< nw
; i
++) {
1199 /* grab the abi_ulong */
1200 __get_user(b
, &target_fds
[i
]);
1201 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1202 /* check the bit inside the abi_ulong */
1209 unlock_user(target_fds
, target_fds_addr
, 0);
1214 static inline abi_ulong
copy_from_user_fdset_ptr(fd_set
*fds
, fd_set
**fds_ptr
,
1215 abi_ulong target_fds_addr
,
1218 if (target_fds_addr
) {
1219 if (copy_from_user_fdset(fds
, target_fds_addr
, n
))
1220 return -TARGET_EFAULT
;
1228 static inline abi_long
copy_to_user_fdset(abi_ulong target_fds_addr
,
1234 abi_ulong
*target_fds
;
1236 nw
= DIV_ROUND_UP(n
, TARGET_ABI_BITS
);
1237 if (!(target_fds
= lock_user(VERIFY_WRITE
,
1239 sizeof(abi_ulong
) * nw
,
1241 return -TARGET_EFAULT
;
1244 for (i
= 0; i
< nw
; i
++) {
1246 for (j
= 0; j
< TARGET_ABI_BITS
; j
++) {
1247 v
|= ((abi_ulong
)(FD_ISSET(k
, fds
) != 0) << j
);
1250 __put_user(v
, &target_fds
[i
]);
1253 unlock_user(target_fds
, target_fds_addr
, sizeof(abi_ulong
) * nw
);
/* Host clock tick rate used by host_to_target_clock_t(). */
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif
1264 static inline abi_long
host_to_target_clock_t(long ticks
)
1266 #if HOST_HZ == TARGET_HZ
1269 return ((int64_t)ticks
* TARGET_HZ
) / HOST_HZ
;
1273 static inline abi_long
host_to_target_rusage(abi_ulong target_addr
,
1274 const struct rusage
*rusage
)
1276 struct target_rusage
*target_rusage
;
1278 if (!lock_user_struct(VERIFY_WRITE
, target_rusage
, target_addr
, 0))
1279 return -TARGET_EFAULT
;
1280 target_rusage
->ru_utime
.tv_sec
= tswapal(rusage
->ru_utime
.tv_sec
);
1281 target_rusage
->ru_utime
.tv_usec
= tswapal(rusage
->ru_utime
.tv_usec
);
1282 target_rusage
->ru_stime
.tv_sec
= tswapal(rusage
->ru_stime
.tv_sec
);
1283 target_rusage
->ru_stime
.tv_usec
= tswapal(rusage
->ru_stime
.tv_usec
);
1284 target_rusage
->ru_maxrss
= tswapal(rusage
->ru_maxrss
);
1285 target_rusage
->ru_ixrss
= tswapal(rusage
->ru_ixrss
);
1286 target_rusage
->ru_idrss
= tswapal(rusage
->ru_idrss
);
1287 target_rusage
->ru_isrss
= tswapal(rusage
->ru_isrss
);
1288 target_rusage
->ru_minflt
= tswapal(rusage
->ru_minflt
);
1289 target_rusage
->ru_majflt
= tswapal(rusage
->ru_majflt
);
1290 target_rusage
->ru_nswap
= tswapal(rusage
->ru_nswap
);
1291 target_rusage
->ru_inblock
= tswapal(rusage
->ru_inblock
);
1292 target_rusage
->ru_oublock
= tswapal(rusage
->ru_oublock
);
1293 target_rusage
->ru_msgsnd
= tswapal(rusage
->ru_msgsnd
);
1294 target_rusage
->ru_msgrcv
= tswapal(rusage
->ru_msgrcv
);
1295 target_rusage
->ru_nsignals
= tswapal(rusage
->ru_nsignals
);
1296 target_rusage
->ru_nvcsw
= tswapal(rusage
->ru_nvcsw
);
1297 target_rusage
->ru_nivcsw
= tswapal(rusage
->ru_nivcsw
);
1298 unlock_user_struct(target_rusage
, target_addr
, 1);
1303 static inline rlim_t
target_to_host_rlim(abi_ulong target_rlim
)
1305 abi_ulong target_rlim_swap
;
1308 target_rlim_swap
= tswapal(target_rlim
);
1309 if (target_rlim_swap
== TARGET_RLIM_INFINITY
)
1310 return RLIM_INFINITY
;
1312 result
= target_rlim_swap
;
1313 if (target_rlim_swap
!= (rlim_t
)result
)
1314 return RLIM_INFINITY
;
1319 static inline abi_ulong
host_to_target_rlim(rlim_t rlim
)
1321 abi_ulong target_rlim_swap
;
1324 if (rlim
== RLIM_INFINITY
|| rlim
!= (abi_long
)rlim
)
1325 target_rlim_swap
= TARGET_RLIM_INFINITY
;
1327 target_rlim_swap
= rlim
;
1328 result
= tswapal(target_rlim_swap
);
1333 static inline int target_to_host_resource(int code
)
1336 case TARGET_RLIMIT_AS
:
1338 case TARGET_RLIMIT_CORE
:
1340 case TARGET_RLIMIT_CPU
:
1342 case TARGET_RLIMIT_DATA
:
1344 case TARGET_RLIMIT_FSIZE
:
1345 return RLIMIT_FSIZE
;
1346 case TARGET_RLIMIT_LOCKS
:
1347 return RLIMIT_LOCKS
;
1348 case TARGET_RLIMIT_MEMLOCK
:
1349 return RLIMIT_MEMLOCK
;
1350 case TARGET_RLIMIT_MSGQUEUE
:
1351 return RLIMIT_MSGQUEUE
;
1352 case TARGET_RLIMIT_NICE
:
1354 case TARGET_RLIMIT_NOFILE
:
1355 return RLIMIT_NOFILE
;
1356 case TARGET_RLIMIT_NPROC
:
1357 return RLIMIT_NPROC
;
1358 case TARGET_RLIMIT_RSS
:
1360 case TARGET_RLIMIT_RTPRIO
:
1361 return RLIMIT_RTPRIO
;
1362 case TARGET_RLIMIT_SIGPENDING
:
1363 return RLIMIT_SIGPENDING
;
1364 case TARGET_RLIMIT_STACK
:
1365 return RLIMIT_STACK
;
1371 static inline abi_long
copy_from_user_timeval(struct timeval
*tv
,
1372 abi_ulong target_tv_addr
)
1374 struct target_timeval
*target_tv
;
1376 if (!lock_user_struct(VERIFY_READ
, target_tv
, target_tv_addr
, 1))
1377 return -TARGET_EFAULT
;
1379 __get_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1380 __get_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1382 unlock_user_struct(target_tv
, target_tv_addr
, 0);
1387 static inline abi_long
copy_to_user_timeval(abi_ulong target_tv_addr
,
1388 const struct timeval
*tv
)
1390 struct target_timeval
*target_tv
;
1392 if (!lock_user_struct(VERIFY_WRITE
, target_tv
, target_tv_addr
, 0))
1393 return -TARGET_EFAULT
;
1395 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1396 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1398 unlock_user_struct(target_tv
, target_tv_addr
, 1);
1403 static inline abi_long
copy_from_user_timezone(struct timezone
*tz
,
1404 abi_ulong target_tz_addr
)
1406 struct target_timezone
*target_tz
;
1408 if (!lock_user_struct(VERIFY_READ
, target_tz
, target_tz_addr
, 1)) {
1409 return -TARGET_EFAULT
;
1412 __get_user(tz
->tz_minuteswest
, &target_tz
->tz_minuteswest
);
1413 __get_user(tz
->tz_dsttime
, &target_tz
->tz_dsttime
);
1415 unlock_user_struct(target_tz
, target_tz_addr
, 0);
1420 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1423 static inline abi_long
copy_from_user_mq_attr(struct mq_attr
*attr
,
1424 abi_ulong target_mq_attr_addr
)
1426 struct target_mq_attr
*target_mq_attr
;
1428 if (!lock_user_struct(VERIFY_READ
, target_mq_attr
,
1429 target_mq_attr_addr
, 1))
1430 return -TARGET_EFAULT
;
1432 __get_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1433 __get_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1434 __get_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1435 __get_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1437 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 0);
1442 static inline abi_long
copy_to_user_mq_attr(abi_ulong target_mq_attr_addr
,
1443 const struct mq_attr
*attr
)
1445 struct target_mq_attr
*target_mq_attr
;
1447 if (!lock_user_struct(VERIFY_WRITE
, target_mq_attr
,
1448 target_mq_attr_addr
, 0))
1449 return -TARGET_EFAULT
;
1451 __put_user(attr
->mq_flags
, &target_mq_attr
->mq_flags
);
1452 __put_user(attr
->mq_maxmsg
, &target_mq_attr
->mq_maxmsg
);
1453 __put_user(attr
->mq_msgsize
, &target_mq_attr
->mq_msgsize
);
1454 __put_user(attr
->mq_curmsgs
, &target_mq_attr
->mq_curmsgs
);
1456 unlock_user_struct(target_mq_attr
, target_mq_attr_addr
, 1);
1462 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1463 /* do_select() must return target values and target errnos. */
1464 static abi_long
do_select(int n
,
1465 abi_ulong rfd_addr
, abi_ulong wfd_addr
,
1466 abi_ulong efd_addr
, abi_ulong target_tv_addr
)
1468 fd_set rfds
, wfds
, efds
;
1469 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
1471 struct timespec ts
, *ts_ptr
;
1474 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
1478 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
1482 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
1487 if (target_tv_addr
) {
1488 if (copy_from_user_timeval(&tv
, target_tv_addr
))
1489 return -TARGET_EFAULT
;
1490 ts
.tv_sec
= tv
.tv_sec
;
1491 ts
.tv_nsec
= tv
.tv_usec
* 1000;
1497 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
1500 if (!is_error(ret
)) {
1501 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
1502 return -TARGET_EFAULT
;
1503 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
1504 return -TARGET_EFAULT
;
1505 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
1506 return -TARGET_EFAULT
;
1508 if (target_tv_addr
) {
1509 tv
.tv_sec
= ts
.tv_sec
;
1510 tv
.tv_usec
= ts
.tv_nsec
/ 1000;
1511 if (copy_to_user_timeval(target_tv_addr
, &tv
)) {
1512 return -TARGET_EFAULT
;
1520 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1521 static abi_long
do_old_select(abi_ulong arg1
)
1523 struct target_sel_arg_struct
*sel
;
1524 abi_ulong inp
, outp
, exp
, tvp
;
1527 if (!lock_user_struct(VERIFY_READ
, sel
, arg1
, 1)) {
1528 return -TARGET_EFAULT
;
1531 nsel
= tswapal(sel
->n
);
1532 inp
= tswapal(sel
->inp
);
1533 outp
= tswapal(sel
->outp
);
1534 exp
= tswapal(sel
->exp
);
1535 tvp
= tswapal(sel
->tvp
);
1537 unlock_user_struct(sel
, arg1
, 0);
1539 return do_select(nsel
, inp
, outp
, exp
, tvp
);
1544 static abi_long
do_pipe2(int host_pipe
[], int flags
)
1547 return pipe2(host_pipe
, flags
);
1553 static abi_long
do_pipe(void *cpu_env
, abi_ulong pipedes
,
1554 int flags
, int is_pipe2
)
1558 ret
= flags
? do_pipe2(host_pipe
, flags
) : pipe(host_pipe
);
1561 return get_errno(ret
);
1563 /* Several targets have special calling conventions for the original
1564 pipe syscall, but didn't replicate this into the pipe2 syscall. */
1566 #if defined(TARGET_ALPHA)
1567 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = host_pipe
[1];
1568 return host_pipe
[0];
1569 #elif defined(TARGET_MIPS)
1570 ((CPUMIPSState
*)cpu_env
)->active_tc
.gpr
[3] = host_pipe
[1];
1571 return host_pipe
[0];
1572 #elif defined(TARGET_SH4)
1573 ((CPUSH4State
*)cpu_env
)->gregs
[1] = host_pipe
[1];
1574 return host_pipe
[0];
1575 #elif defined(TARGET_SPARC)
1576 ((CPUSPARCState
*)cpu_env
)->regwptr
[1] = host_pipe
[1];
1577 return host_pipe
[0];
1581 if (put_user_s32(host_pipe
[0], pipedes
)
1582 || put_user_s32(host_pipe
[1], pipedes
+ sizeof(host_pipe
[0])))
1583 return -TARGET_EFAULT
;
1584 return get_errno(ret
);
1587 static inline abi_long
target_to_host_ip_mreq(struct ip_mreqn
*mreqn
,
1588 abi_ulong target_addr
,
1591 struct target_ip_mreqn
*target_smreqn
;
1593 target_smreqn
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1595 return -TARGET_EFAULT
;
1596 mreqn
->imr_multiaddr
.s_addr
= target_smreqn
->imr_multiaddr
.s_addr
;
1597 mreqn
->imr_address
.s_addr
= target_smreqn
->imr_address
.s_addr
;
1598 if (len
== sizeof(struct target_ip_mreqn
))
1599 mreqn
->imr_ifindex
= tswapal(target_smreqn
->imr_ifindex
);
1600 unlock_user(target_smreqn
, target_addr
, 0);
1605 static inline abi_long
target_to_host_sockaddr(int fd
, struct sockaddr
*addr
,
1606 abi_ulong target_addr
,
1609 const socklen_t unix_maxlen
= sizeof (struct sockaddr_un
);
1610 sa_family_t sa_family
;
1611 struct target_sockaddr
*target_saddr
;
1613 if (fd_trans_target_to_host_addr(fd
)) {
1614 return fd_trans_target_to_host_addr(fd
)(addr
, target_addr
, len
);
1617 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
1619 return -TARGET_EFAULT
;
1621 sa_family
= tswap16(target_saddr
->sa_family
);
1623 /* Oops. The caller might send a incomplete sun_path; sun_path
1624 * must be terminated by \0 (see the manual page), but
1625 * unfortunately it is quite common to specify sockaddr_un
1626 * length as "strlen(x->sun_path)" while it should be
1627 * "strlen(...) + 1". We'll fix that here if needed.
1628 * Linux kernel has a similar feature.
1631 if (sa_family
== AF_UNIX
) {
1632 if (len
< unix_maxlen
&& len
> 0) {
1633 char *cp
= (char*)target_saddr
;
1635 if ( cp
[len
-1] && !cp
[len
] )
1638 if (len
> unix_maxlen
)
1642 memcpy(addr
, target_saddr
, len
);
1643 addr
->sa_family
= sa_family
;
1644 if (sa_family
== AF_NETLINK
) {
1645 struct sockaddr_nl
*nladdr
;
1647 nladdr
= (struct sockaddr_nl
*)addr
;
1648 nladdr
->nl_pid
= tswap32(nladdr
->nl_pid
);
1649 nladdr
->nl_groups
= tswap32(nladdr
->nl_groups
);
1650 } else if (sa_family
== AF_PACKET
) {
1651 struct target_sockaddr_ll
*lladdr
;
1653 lladdr
= (struct target_sockaddr_ll
*)addr
;
1654 lladdr
->sll_ifindex
= tswap32(lladdr
->sll_ifindex
);
1655 lladdr
->sll_hatype
= tswap16(lladdr
->sll_hatype
);
1657 unlock_user(target_saddr
, target_addr
, 0);
1662 static inline abi_long
host_to_target_sockaddr(abi_ulong target_addr
,
1663 struct sockaddr
*addr
,
1666 struct target_sockaddr
*target_saddr
;
1673 target_saddr
= lock_user(VERIFY_WRITE
, target_addr
, len
, 0);
1675 return -TARGET_EFAULT
;
1676 memcpy(target_saddr
, addr
, len
);
1677 if (len
>= offsetof(struct target_sockaddr
, sa_family
) +
1678 sizeof(target_saddr
->sa_family
)) {
1679 target_saddr
->sa_family
= tswap16(addr
->sa_family
);
1681 if (addr
->sa_family
== AF_NETLINK
&& len
>= sizeof(struct sockaddr_nl
)) {
1682 struct sockaddr_nl
*target_nl
= (struct sockaddr_nl
*)target_saddr
;
1683 target_nl
->nl_pid
= tswap32(target_nl
->nl_pid
);
1684 target_nl
->nl_groups
= tswap32(target_nl
->nl_groups
);
1685 } else if (addr
->sa_family
== AF_PACKET
) {
1686 struct sockaddr_ll
*target_ll
= (struct sockaddr_ll
*)target_saddr
;
1687 target_ll
->sll_ifindex
= tswap32(target_ll
->sll_ifindex
);
1688 target_ll
->sll_hatype
= tswap16(target_ll
->sll_hatype
);
1689 } else if (addr
->sa_family
== AF_INET6
&&
1690 len
>= sizeof(struct target_sockaddr_in6
)) {
1691 struct target_sockaddr_in6
*target_in6
=
1692 (struct target_sockaddr_in6
*)target_saddr
;
1693 target_in6
->sin6_scope_id
= tswap16(target_in6
->sin6_scope_id
);
1695 unlock_user(target_saddr
, target_addr
, len
);
1700 static inline abi_long
target_to_host_cmsg(struct msghdr
*msgh
,
1701 struct target_msghdr
*target_msgh
)
1703 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1704 abi_long msg_controllen
;
1705 abi_ulong target_cmsg_addr
;
1706 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1707 socklen_t space
= 0;
1709 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1710 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1712 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1713 target_cmsg
= lock_user(VERIFY_READ
, target_cmsg_addr
, msg_controllen
, 1);
1714 target_cmsg_start
= target_cmsg
;
1716 return -TARGET_EFAULT
;
1718 while (cmsg
&& target_cmsg
) {
1719 void *data
= CMSG_DATA(cmsg
);
1720 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1722 int len
= tswapal(target_cmsg
->cmsg_len
)
1723 - sizeof(struct target_cmsghdr
);
1725 space
+= CMSG_SPACE(len
);
1726 if (space
> msgh
->msg_controllen
) {
1727 space
-= CMSG_SPACE(len
);
1728 /* This is a QEMU bug, since we allocated the payload
1729 * area ourselves (unlike overflow in host-to-target
1730 * conversion, which is just the guest giving us a buffer
1731 * that's too small). It can't happen for the payload types
1732 * we currently support; if it becomes an issue in future
1733 * we would need to improve our allocation strategy to
1734 * something more intelligent than "twice the size of the
1735 * target buffer we're reading from".
1737 gemu_log("Host cmsg overflow\n");
1741 if (tswap32(target_cmsg
->cmsg_level
) == TARGET_SOL_SOCKET
) {
1742 cmsg
->cmsg_level
= SOL_SOCKET
;
1744 cmsg
->cmsg_level
= tswap32(target_cmsg
->cmsg_level
);
1746 cmsg
->cmsg_type
= tswap32(target_cmsg
->cmsg_type
);
1747 cmsg
->cmsg_len
= CMSG_LEN(len
);
1749 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
1750 int *fd
= (int *)data
;
1751 int *target_fd
= (int *)target_data
;
1752 int i
, numfds
= len
/ sizeof(int);
1754 for (i
= 0; i
< numfds
; i
++) {
1755 __get_user(fd
[i
], target_fd
+ i
);
1757 } else if (cmsg
->cmsg_level
== SOL_SOCKET
1758 && cmsg
->cmsg_type
== SCM_CREDENTIALS
) {
1759 struct ucred
*cred
= (struct ucred
*)data
;
1760 struct target_ucred
*target_cred
=
1761 (struct target_ucred
*)target_data
;
1763 __get_user(cred
->pid
, &target_cred
->pid
);
1764 __get_user(cred
->uid
, &target_cred
->uid
);
1765 __get_user(cred
->gid
, &target_cred
->gid
);
1767 gemu_log("Unsupported ancillary data: %d/%d\n",
1768 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1769 memcpy(data
, target_data
, len
);
1772 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
1773 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
1776 unlock_user(target_cmsg
, target_cmsg_addr
, 0);
1778 msgh
->msg_controllen
= space
;
1782 static inline abi_long
host_to_target_cmsg(struct target_msghdr
*target_msgh
,
1783 struct msghdr
*msgh
)
1785 struct cmsghdr
*cmsg
= CMSG_FIRSTHDR(msgh
);
1786 abi_long msg_controllen
;
1787 abi_ulong target_cmsg_addr
;
1788 struct target_cmsghdr
*target_cmsg
, *target_cmsg_start
;
1789 socklen_t space
= 0;
1791 msg_controllen
= tswapal(target_msgh
->msg_controllen
);
1792 if (msg_controllen
< sizeof (struct target_cmsghdr
))
1794 target_cmsg_addr
= tswapal(target_msgh
->msg_control
);
1795 target_cmsg
= lock_user(VERIFY_WRITE
, target_cmsg_addr
, msg_controllen
, 0);
1796 target_cmsg_start
= target_cmsg
;
1798 return -TARGET_EFAULT
;
1800 while (cmsg
&& target_cmsg
) {
1801 void *data
= CMSG_DATA(cmsg
);
1802 void *target_data
= TARGET_CMSG_DATA(target_cmsg
);
1804 int len
= cmsg
->cmsg_len
- sizeof(struct cmsghdr
);
1805 int tgt_len
, tgt_space
;
1807 /* We never copy a half-header but may copy half-data;
1808 * this is Linux's behaviour in put_cmsg(). Note that
1809 * truncation here is a guest problem (which we report
1810 * to the guest via the CTRUNC bit), unlike truncation
1811 * in target_to_host_cmsg, which is a QEMU bug.
1813 if (msg_controllen
< sizeof(struct target_cmsghdr
)) {
1814 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1818 if (cmsg
->cmsg_level
== SOL_SOCKET
) {
1819 target_cmsg
->cmsg_level
= tswap32(TARGET_SOL_SOCKET
);
1821 target_cmsg
->cmsg_level
= tswap32(cmsg
->cmsg_level
);
1823 target_cmsg
->cmsg_type
= tswap32(cmsg
->cmsg_type
);
1825 /* Payload types which need a different size of payload on
1826 * the target must adjust tgt_len here.
1828 switch (cmsg
->cmsg_level
) {
1830 switch (cmsg
->cmsg_type
) {
1832 tgt_len
= sizeof(struct target_timeval
);
1842 if (msg_controllen
< TARGET_CMSG_LEN(tgt_len
)) {
1843 target_msgh
->msg_flags
|= tswap32(MSG_CTRUNC
);
1844 tgt_len
= msg_controllen
- sizeof(struct target_cmsghdr
);
1847 /* We must now copy-and-convert len bytes of payload
1848 * into tgt_len bytes of destination space. Bear in mind
1849 * that in both source and destination we may be dealing
1850 * with a truncated value!
1852 switch (cmsg
->cmsg_level
) {
1854 switch (cmsg
->cmsg_type
) {
1857 int *fd
= (int *)data
;
1858 int *target_fd
= (int *)target_data
;
1859 int i
, numfds
= tgt_len
/ sizeof(int);
1861 for (i
= 0; i
< numfds
; i
++) {
1862 __put_user(fd
[i
], target_fd
+ i
);
1868 struct timeval
*tv
= (struct timeval
*)data
;
1869 struct target_timeval
*target_tv
=
1870 (struct target_timeval
*)target_data
;
1872 if (len
!= sizeof(struct timeval
) ||
1873 tgt_len
!= sizeof(struct target_timeval
)) {
1877 /* copy struct timeval to target */
1878 __put_user(tv
->tv_sec
, &target_tv
->tv_sec
);
1879 __put_user(tv
->tv_usec
, &target_tv
->tv_usec
);
1882 case SCM_CREDENTIALS
:
1884 struct ucred
*cred
= (struct ucred
*)data
;
1885 struct target_ucred
*target_cred
=
1886 (struct target_ucred
*)target_data
;
1888 __put_user(cred
->pid
, &target_cred
->pid
);
1889 __put_user(cred
->uid
, &target_cred
->uid
);
1890 __put_user(cred
->gid
, &target_cred
->gid
);
1899 switch (cmsg
->cmsg_type
) {
1902 uint32_t *v
= (uint32_t *)data
;
1903 uint32_t *t_int
= (uint32_t *)target_data
;
1905 if (len
!= sizeof(uint32_t) ||
1906 tgt_len
!= sizeof(uint32_t)) {
1909 __put_user(*v
, t_int
);
1915 struct sock_extended_err ee
;
1916 struct sockaddr_in offender
;
1918 struct errhdr_t
*errh
= (struct errhdr_t
*)data
;
1919 struct errhdr_t
*target_errh
=
1920 (struct errhdr_t
*)target_data
;
1922 if (len
!= sizeof(struct errhdr_t
) ||
1923 tgt_len
!= sizeof(struct errhdr_t
)) {
1926 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1927 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1928 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1929 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1930 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1931 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1932 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1933 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1934 (void *) &errh
->offender
, sizeof(errh
->offender
));
1943 switch (cmsg
->cmsg_type
) {
1946 uint32_t *v
= (uint32_t *)data
;
1947 uint32_t *t_int
= (uint32_t *)target_data
;
1949 if (len
!= sizeof(uint32_t) ||
1950 tgt_len
!= sizeof(uint32_t)) {
1953 __put_user(*v
, t_int
);
1959 struct sock_extended_err ee
;
1960 struct sockaddr_in6 offender
;
1962 struct errhdr6_t
*errh
= (struct errhdr6_t
*)data
;
1963 struct errhdr6_t
*target_errh
=
1964 (struct errhdr6_t
*)target_data
;
1966 if (len
!= sizeof(struct errhdr6_t
) ||
1967 tgt_len
!= sizeof(struct errhdr6_t
)) {
1970 __put_user(errh
->ee
.ee_errno
, &target_errh
->ee
.ee_errno
);
1971 __put_user(errh
->ee
.ee_origin
, &target_errh
->ee
.ee_origin
);
1972 __put_user(errh
->ee
.ee_type
, &target_errh
->ee
.ee_type
);
1973 __put_user(errh
->ee
.ee_code
, &target_errh
->ee
.ee_code
);
1974 __put_user(errh
->ee
.ee_pad
, &target_errh
->ee
.ee_pad
);
1975 __put_user(errh
->ee
.ee_info
, &target_errh
->ee
.ee_info
);
1976 __put_user(errh
->ee
.ee_data
, &target_errh
->ee
.ee_data
);
1977 host_to_target_sockaddr((unsigned long) &target_errh
->offender
,
1978 (void *) &errh
->offender
, sizeof(errh
->offender
));
1988 gemu_log("Unsupported ancillary data: %d/%d\n",
1989 cmsg
->cmsg_level
, cmsg
->cmsg_type
);
1990 memcpy(target_data
, data
, MIN(len
, tgt_len
));
1991 if (tgt_len
> len
) {
1992 memset(target_data
+ len
, 0, tgt_len
- len
);
1996 target_cmsg
->cmsg_len
= tswapal(TARGET_CMSG_LEN(tgt_len
));
1997 tgt_space
= TARGET_CMSG_SPACE(tgt_len
);
1998 if (msg_controllen
< tgt_space
) {
1999 tgt_space
= msg_controllen
;
2001 msg_controllen
-= tgt_space
;
2003 cmsg
= CMSG_NXTHDR(msgh
, cmsg
);
2004 target_cmsg
= TARGET_CMSG_NXTHDR(target_msgh
, target_cmsg
,
2007 unlock_user(target_cmsg
, target_cmsg_addr
, space
);
2009 target_msgh
->msg_controllen
= tswapal(space
);
/* Byte-swap every field of a netlink message header in place. */
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
    nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
    nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
}
2022 static abi_long
host_to_target_for_each_nlmsg(struct nlmsghdr
*nlh
,
2024 abi_long (*host_to_target_nlmsg
)
2025 (struct nlmsghdr
*))
2030 while (len
> sizeof(struct nlmsghdr
)) {
2032 nlmsg_len
= nlh
->nlmsg_len
;
2033 if (nlmsg_len
< sizeof(struct nlmsghdr
) ||
2038 switch (nlh
->nlmsg_type
) {
2040 tswap_nlmsghdr(nlh
);
2046 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
2047 e
->error
= tswap32(e
->error
);
2048 tswap_nlmsghdr(&e
->msg
);
2049 tswap_nlmsghdr(nlh
);
2053 ret
= host_to_target_nlmsg(nlh
);
2055 tswap_nlmsghdr(nlh
);
2060 tswap_nlmsghdr(nlh
);
2061 len
-= NLMSG_ALIGN(nlmsg_len
);
2062 nlh
= (struct nlmsghdr
*)(((char*)nlh
) + NLMSG_ALIGN(nlmsg_len
));
2067 static abi_long
target_to_host_for_each_nlmsg(struct nlmsghdr
*nlh
,
2069 abi_long (*target_to_host_nlmsg
)
2070 (struct nlmsghdr
*))
2074 while (len
> sizeof(struct nlmsghdr
)) {
2075 if (tswap32(nlh
->nlmsg_len
) < sizeof(struct nlmsghdr
) ||
2076 tswap32(nlh
->nlmsg_len
) > len
) {
2079 tswap_nlmsghdr(nlh
);
2080 switch (nlh
->nlmsg_type
) {
2087 struct nlmsgerr
*e
= NLMSG_DATA(nlh
);
2088 e
->error
= tswap32(e
->error
);
2089 tswap_nlmsghdr(&e
->msg
);
2093 ret
= target_to_host_nlmsg(nlh
);
2098 len
-= NLMSG_ALIGN(nlh
->nlmsg_len
);
2099 nlh
= (struct nlmsghdr
*)(((char *)nlh
) + NLMSG_ALIGN(nlh
->nlmsg_len
));
2104 #ifdef CONFIG_RTNETLINK
2105 static abi_long
host_to_target_for_each_nlattr(struct nlattr
*nlattr
,
2106 size_t len
, void *context
,
2107 abi_long (*host_to_target_nlattr
)
2111 unsigned short nla_len
;
2114 while (len
> sizeof(struct nlattr
)) {
2115 nla_len
= nlattr
->nla_len
;
2116 if (nla_len
< sizeof(struct nlattr
) ||
2120 ret
= host_to_target_nlattr(nlattr
, context
);
2121 nlattr
->nla_len
= tswap16(nlattr
->nla_len
);
2122 nlattr
->nla_type
= tswap16(nlattr
->nla_type
);
2126 len
-= NLA_ALIGN(nla_len
);
2127 nlattr
= (struct nlattr
*)(((char *)nlattr
) + NLA_ALIGN(nla_len
));
2132 static abi_long
host_to_target_for_each_rtattr(struct rtattr
*rtattr
,
2134 abi_long (*host_to_target_rtattr
)
2137 unsigned short rta_len
;
2140 while (len
> sizeof(struct rtattr
)) {
2141 rta_len
= rtattr
->rta_len
;
2142 if (rta_len
< sizeof(struct rtattr
) ||
2146 ret
= host_to_target_rtattr(rtattr
);
2147 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
2148 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
2152 len
-= RTA_ALIGN(rta_len
);
2153 rtattr
= (struct rtattr
*)(((char *)rtattr
) + RTA_ALIGN(rta_len
));
/* Payload pointer of a netlink attribute (header skipped). */
#define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
2160 static abi_long
host_to_target_data_bridge_nlattr(struct nlattr
*nlattr
,
2167 switch (nlattr
->nla_type
) {
2169 case QEMU_IFLA_BR_FDB_FLUSH
:
2172 case QEMU_IFLA_BR_GROUP_ADDR
:
2175 case QEMU_IFLA_BR_VLAN_FILTERING
:
2176 case QEMU_IFLA_BR_TOPOLOGY_CHANGE
:
2177 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED
:
2178 case QEMU_IFLA_BR_MCAST_ROUTER
:
2179 case QEMU_IFLA_BR_MCAST_SNOOPING
:
2180 case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR
:
2181 case QEMU_IFLA_BR_MCAST_QUERIER
:
2182 case QEMU_IFLA_BR_NF_CALL_IPTABLES
:
2183 case QEMU_IFLA_BR_NF_CALL_IP6TABLES
:
2184 case QEMU_IFLA_BR_NF_CALL_ARPTABLES
:
2187 case QEMU_IFLA_BR_PRIORITY
:
2188 case QEMU_IFLA_BR_VLAN_PROTOCOL
:
2189 case QEMU_IFLA_BR_GROUP_FWD_MASK
:
2190 case QEMU_IFLA_BR_ROOT_PORT
:
2191 case QEMU_IFLA_BR_VLAN_DEFAULT_PVID
:
2192 u16
= NLA_DATA(nlattr
);
2193 *u16
= tswap16(*u16
);
2196 case QEMU_IFLA_BR_FORWARD_DELAY
:
2197 case QEMU_IFLA_BR_HELLO_TIME
:
2198 case QEMU_IFLA_BR_MAX_AGE
:
2199 case QEMU_IFLA_BR_AGEING_TIME
:
2200 case QEMU_IFLA_BR_STP_STATE
:
2201 case QEMU_IFLA_BR_ROOT_PATH_COST
:
2202 case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY
:
2203 case QEMU_IFLA_BR_MCAST_HASH_MAX
:
2204 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT
:
2205 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT
:
2206 u32
= NLA_DATA(nlattr
);
2207 *u32
= tswap32(*u32
);
2210 case QEMU_IFLA_BR_HELLO_TIMER
:
2211 case QEMU_IFLA_BR_TCN_TIMER
:
2212 case QEMU_IFLA_BR_GC_TIMER
:
2213 case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER
:
2214 case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL
:
2215 case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL
:
2216 case QEMU_IFLA_BR_MCAST_QUERIER_INTVL
:
2217 case QEMU_IFLA_BR_MCAST_QUERY_INTVL
:
2218 case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
:
2219 case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL
:
2220 u64
= NLA_DATA(nlattr
);
2221 *u64
= tswap64(*u64
);
2223 /* ifla_bridge_id: uin8_t[] */
2224 case QEMU_IFLA_BR_ROOT_ID
:
2225 case QEMU_IFLA_BR_BRIDGE_ID
:
2228 gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr
->nla_type
);
2234 static abi_long
host_to_target_slave_data_bridge_nlattr(struct nlattr
*nlattr
,
2241 switch (nlattr
->nla_type
) {
2243 case QEMU_IFLA_BRPORT_STATE
:
2244 case QEMU_IFLA_BRPORT_MODE
:
2245 case QEMU_IFLA_BRPORT_GUARD
:
2246 case QEMU_IFLA_BRPORT_PROTECT
:
2247 case QEMU_IFLA_BRPORT_FAST_LEAVE
:
2248 case QEMU_IFLA_BRPORT_LEARNING
:
2249 case QEMU_IFLA_BRPORT_UNICAST_FLOOD
:
2250 case QEMU_IFLA_BRPORT_PROXYARP
:
2251 case QEMU_IFLA_BRPORT_LEARNING_SYNC
:
2252 case QEMU_IFLA_BRPORT_PROXYARP_WIFI
:
2253 case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK
:
2254 case QEMU_IFLA_BRPORT_CONFIG_PENDING
:
2255 case QEMU_IFLA_BRPORT_MULTICAST_ROUTER
:
2258 case QEMU_IFLA_BRPORT_PRIORITY
:
2259 case QEMU_IFLA_BRPORT_DESIGNATED_PORT
:
2260 case QEMU_IFLA_BRPORT_DESIGNATED_COST
:
2261 case QEMU_IFLA_BRPORT_ID
:
2262 case QEMU_IFLA_BRPORT_NO
:
2263 u16
= NLA_DATA(nlattr
);
2264 *u16
= tswap16(*u16
);
2267 case QEMU_IFLA_BRPORT_COST
:
2268 u32
= NLA_DATA(nlattr
);
2269 *u32
= tswap32(*u32
);
2272 case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER
:
2273 case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER
:
2274 case QEMU_IFLA_BRPORT_HOLD_TIMER
:
2275 u64
= NLA_DATA(nlattr
);
2276 *u64
= tswap64(*u64
);
2278 /* ifla_bridge_id: uint8_t[] */
2279 case QEMU_IFLA_BRPORT_ROOT_ID
:
2280 case QEMU_IFLA_BRPORT_BRIDGE_ID
:
2283 gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr
->nla_type
);
2289 struct linkinfo_context
{
2296 static abi_long
host_to_target_data_linkinfo_nlattr(struct nlattr
*nlattr
,
2299 struct linkinfo_context
*li_context
= context
;
2301 switch (nlattr
->nla_type
) {
2303 case QEMU_IFLA_INFO_KIND
:
2304 li_context
->name
= NLA_DATA(nlattr
);
2305 li_context
->len
= nlattr
->nla_len
- NLA_HDRLEN
;
2307 case QEMU_IFLA_INFO_SLAVE_KIND
:
2308 li_context
->slave_name
= NLA_DATA(nlattr
);
2309 li_context
->slave_len
= nlattr
->nla_len
- NLA_HDRLEN
;
2312 case QEMU_IFLA_INFO_XSTATS
:
2313 /* FIXME: only used by CAN */
2316 case QEMU_IFLA_INFO_DATA
:
2317 if (strncmp(li_context
->name
, "bridge",
2318 li_context
->len
) == 0) {
2319 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
2322 host_to_target_data_bridge_nlattr
);
2324 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context
->name
);
2327 case QEMU_IFLA_INFO_SLAVE_DATA
:
2328 if (strncmp(li_context
->slave_name
, "bridge",
2329 li_context
->slave_len
) == 0) {
2330 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
),
2333 host_to_target_slave_data_bridge_nlattr
);
2335 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2336 li_context
->slave_name
);
2340 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr
->nla_type
);
2347 static abi_long
host_to_target_data_inet_nlattr(struct nlattr
*nlattr
,
2353 switch (nlattr
->nla_type
) {
2354 case QEMU_IFLA_INET_CONF
:
2355 u32
= NLA_DATA(nlattr
);
2356 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u32
);
2358 u32
[i
] = tswap32(u32
[i
]);
2362 gemu_log("Unknown host AF_INET type: %d\n", nlattr
->nla_type
);
2367 static abi_long
host_to_target_data_inet6_nlattr(struct nlattr
*nlattr
,
2372 struct ifla_cacheinfo
*ci
;
2375 switch (nlattr
->nla_type
) {
2377 case QEMU_IFLA_INET6_TOKEN
:
2380 case QEMU_IFLA_INET6_ADDR_GEN_MODE
:
2383 case QEMU_IFLA_INET6_FLAGS
:
2384 u32
= NLA_DATA(nlattr
);
2385 *u32
= tswap32(*u32
);
2388 case QEMU_IFLA_INET6_CONF
:
2389 u32
= NLA_DATA(nlattr
);
2390 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u32
);
2392 u32
[i
] = tswap32(u32
[i
]);
2395 /* ifla_cacheinfo */
2396 case QEMU_IFLA_INET6_CACHEINFO
:
2397 ci
= NLA_DATA(nlattr
);
2398 ci
->max_reasm_len
= tswap32(ci
->max_reasm_len
);
2399 ci
->tstamp
= tswap32(ci
->tstamp
);
2400 ci
->reachable_time
= tswap32(ci
->reachable_time
);
2401 ci
->retrans_time
= tswap32(ci
->retrans_time
);
2404 case QEMU_IFLA_INET6_STATS
:
2405 case QEMU_IFLA_INET6_ICMP6STATS
:
2406 u64
= NLA_DATA(nlattr
);
2407 for (i
= 0; i
< (nlattr
->nla_len
- NLA_HDRLEN
) / sizeof(*u64
);
2409 u64
[i
] = tswap64(u64
[i
]);
2413 gemu_log("Unknown host AF_INET6 type: %d\n", nlattr
->nla_type
);
2418 static abi_long
host_to_target_data_spec_nlattr(struct nlattr
*nlattr
,
2421 switch (nlattr
->nla_type
) {
2423 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2425 host_to_target_data_inet_nlattr
);
2427 return host_to_target_for_each_nlattr(NLA_DATA(nlattr
), nlattr
->nla_len
,
2429 host_to_target_data_inet6_nlattr
);
2431 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr
->nla_type
);
2437 static abi_long
host_to_target_data_link_rtattr(struct rtattr
*rtattr
)
2440 struct rtnl_link_stats
*st
;
2441 struct rtnl_link_stats64
*st64
;
2442 struct rtnl_link_ifmap
*map
;
2443 struct linkinfo_context li_context
;
2445 switch (rtattr
->rta_type
) {
2447 case QEMU_IFLA_ADDRESS
:
2448 case QEMU_IFLA_BROADCAST
:
2450 case QEMU_IFLA_IFNAME
:
2451 case QEMU_IFLA_QDISC
:
2454 case QEMU_IFLA_OPERSTATE
:
2455 case QEMU_IFLA_LINKMODE
:
2456 case QEMU_IFLA_CARRIER
:
2457 case QEMU_IFLA_PROTO_DOWN
:
2461 case QEMU_IFLA_LINK
:
2462 case QEMU_IFLA_WEIGHT
:
2463 case QEMU_IFLA_TXQLEN
:
2464 case QEMU_IFLA_CARRIER_CHANGES
:
2465 case QEMU_IFLA_NUM_RX_QUEUES
:
2466 case QEMU_IFLA_NUM_TX_QUEUES
:
2467 case QEMU_IFLA_PROMISCUITY
:
2468 case QEMU_IFLA_EXT_MASK
:
2469 case QEMU_IFLA_LINK_NETNSID
:
2470 case QEMU_IFLA_GROUP
:
2471 case QEMU_IFLA_MASTER
:
2472 case QEMU_IFLA_NUM_VF
:
2473 case QEMU_IFLA_GSO_MAX_SEGS
:
2474 case QEMU_IFLA_GSO_MAX_SIZE
:
2475 u32
= RTA_DATA(rtattr
);
2476 *u32
= tswap32(*u32
);
2478 /* struct rtnl_link_stats */
2479 case QEMU_IFLA_STATS
:
2480 st
= RTA_DATA(rtattr
);
2481 st
->rx_packets
= tswap32(st
->rx_packets
);
2482 st
->tx_packets
= tswap32(st
->tx_packets
);
2483 st
->rx_bytes
= tswap32(st
->rx_bytes
);
2484 st
->tx_bytes
= tswap32(st
->tx_bytes
);
2485 st
->rx_errors
= tswap32(st
->rx_errors
);
2486 st
->tx_errors
= tswap32(st
->tx_errors
);
2487 st
->rx_dropped
= tswap32(st
->rx_dropped
);
2488 st
->tx_dropped
= tswap32(st
->tx_dropped
);
2489 st
->multicast
= tswap32(st
->multicast
);
2490 st
->collisions
= tswap32(st
->collisions
);
2492 /* detailed rx_errors: */
2493 st
->rx_length_errors
= tswap32(st
->rx_length_errors
);
2494 st
->rx_over_errors
= tswap32(st
->rx_over_errors
);
2495 st
->rx_crc_errors
= tswap32(st
->rx_crc_errors
);
2496 st
->rx_frame_errors
= tswap32(st
->rx_frame_errors
);
2497 st
->rx_fifo_errors
= tswap32(st
->rx_fifo_errors
);
2498 st
->rx_missed_errors
= tswap32(st
->rx_missed_errors
);
2500 /* detailed tx_errors */
2501 st
->tx_aborted_errors
= tswap32(st
->tx_aborted_errors
);
2502 st
->tx_carrier_errors
= tswap32(st
->tx_carrier_errors
);
2503 st
->tx_fifo_errors
= tswap32(st
->tx_fifo_errors
);
2504 st
->tx_heartbeat_errors
= tswap32(st
->tx_heartbeat_errors
);
2505 st
->tx_window_errors
= tswap32(st
->tx_window_errors
);
2508 st
->rx_compressed
= tswap32(st
->rx_compressed
);
2509 st
->tx_compressed
= tswap32(st
->tx_compressed
);
2511 /* struct rtnl_link_stats64 */
2512 case QEMU_IFLA_STATS64
:
2513 st64
= RTA_DATA(rtattr
);
2514 st64
->rx_packets
= tswap64(st64
->rx_packets
);
2515 st64
->tx_packets
= tswap64(st64
->tx_packets
);
2516 st64
->rx_bytes
= tswap64(st64
->rx_bytes
);
2517 st64
->tx_bytes
= tswap64(st64
->tx_bytes
);
2518 st64
->rx_errors
= tswap64(st64
->rx_errors
);
2519 st64
->tx_errors
= tswap64(st64
->tx_errors
);
2520 st64
->rx_dropped
= tswap64(st64
->rx_dropped
);
2521 st64
->tx_dropped
= tswap64(st64
->tx_dropped
);
2522 st64
->multicast
= tswap64(st64
->multicast
);
2523 st64
->collisions
= tswap64(st64
->collisions
);
2525 /* detailed rx_errors: */
2526 st64
->rx_length_errors
= tswap64(st64
->rx_length_errors
);
2527 st64
->rx_over_errors
= tswap64(st64
->rx_over_errors
);
2528 st64
->rx_crc_errors
= tswap64(st64
->rx_crc_errors
);
2529 st64
->rx_frame_errors
= tswap64(st64
->rx_frame_errors
);
2530 st64
->rx_fifo_errors
= tswap64(st64
->rx_fifo_errors
);
2531 st64
->rx_missed_errors
= tswap64(st64
->rx_missed_errors
);
2533 /* detailed tx_errors */
2534 st64
->tx_aborted_errors
= tswap64(st64
->tx_aborted_errors
);
2535 st64
->tx_carrier_errors
= tswap64(st64
->tx_carrier_errors
);
2536 st64
->tx_fifo_errors
= tswap64(st64
->tx_fifo_errors
);
2537 st64
->tx_heartbeat_errors
= tswap64(st64
->tx_heartbeat_errors
);
2538 st64
->tx_window_errors
= tswap64(st64
->tx_window_errors
);
2541 st64
->rx_compressed
= tswap64(st64
->rx_compressed
);
2542 st64
->tx_compressed
= tswap64(st64
->tx_compressed
);
2544 /* struct rtnl_link_ifmap */
2546 map
= RTA_DATA(rtattr
);
2547 map
->mem_start
= tswap64(map
->mem_start
);
2548 map
->mem_end
= tswap64(map
->mem_end
);
2549 map
->base_addr
= tswap64(map
->base_addr
);
2550 map
->irq
= tswap16(map
->irq
);
2553 case QEMU_IFLA_LINKINFO
:
2554 memset(&li_context
, 0, sizeof(li_context
));
2555 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2557 host_to_target_data_linkinfo_nlattr
);
2558 case QEMU_IFLA_AF_SPEC
:
2559 return host_to_target_for_each_nlattr(RTA_DATA(rtattr
), rtattr
->rta_len
,
2561 host_to_target_data_spec_nlattr
);
2563 gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr
->rta_type
);
2569 static abi_long
host_to_target_data_addr_rtattr(struct rtattr
*rtattr
)
2572 struct ifa_cacheinfo
*ci
;
2574 switch (rtattr
->rta_type
) {
2575 /* binary: depends on family type */
2585 u32
= RTA_DATA(rtattr
);
2586 *u32
= tswap32(*u32
);
2588 /* struct ifa_cacheinfo */
2590 ci
= RTA_DATA(rtattr
);
2591 ci
->ifa_prefered
= tswap32(ci
->ifa_prefered
);
2592 ci
->ifa_valid
= tswap32(ci
->ifa_valid
);
2593 ci
->cstamp
= tswap32(ci
->cstamp
);
2594 ci
->tstamp
= tswap32(ci
->tstamp
);
2597 gemu_log("Unknown host IFA type: %d\n", rtattr
->rta_type
);
2603 static abi_long
host_to_target_data_route_rtattr(struct rtattr
*rtattr
)
2606 switch (rtattr
->rta_type
) {
2607 /* binary: depends on family type */
2616 u32
= RTA_DATA(rtattr
);
2617 *u32
= tswap32(*u32
);
2620 gemu_log("Unknown host RTA type: %d\n", rtattr
->rta_type
);
2626 static abi_long
host_to_target_link_rtattr(struct rtattr
*rtattr
,
2627 uint32_t rtattr_len
)
2629 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2630 host_to_target_data_link_rtattr
);
2633 static abi_long
host_to_target_addr_rtattr(struct rtattr
*rtattr
,
2634 uint32_t rtattr_len
)
2636 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2637 host_to_target_data_addr_rtattr
);
2640 static abi_long
host_to_target_route_rtattr(struct rtattr
*rtattr
,
2641 uint32_t rtattr_len
)
2643 return host_to_target_for_each_rtattr(rtattr
, rtattr_len
,
2644 host_to_target_data_route_rtattr
);
2647 static abi_long
host_to_target_data_route(struct nlmsghdr
*nlh
)
2650 struct ifinfomsg
*ifi
;
2651 struct ifaddrmsg
*ifa
;
2654 nlmsg_len
= nlh
->nlmsg_len
;
2655 switch (nlh
->nlmsg_type
) {
2659 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2660 ifi
= NLMSG_DATA(nlh
);
2661 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2662 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2663 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2664 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2665 host_to_target_link_rtattr(IFLA_RTA(ifi
),
2666 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifi
)));
2672 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2673 ifa
= NLMSG_DATA(nlh
);
2674 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2675 host_to_target_addr_rtattr(IFA_RTA(ifa
),
2676 nlmsg_len
- NLMSG_LENGTH(sizeof(*ifa
)));
2682 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2683 rtm
= NLMSG_DATA(nlh
);
2684 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2685 host_to_target_route_rtattr(RTM_RTA(rtm
),
2686 nlmsg_len
- NLMSG_LENGTH(sizeof(*rtm
)));
2690 return -TARGET_EINVAL
;
2695 static inline abi_long
host_to_target_nlmsg_route(struct nlmsghdr
*nlh
,
2698 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_route
);
2701 static abi_long
target_to_host_for_each_rtattr(struct rtattr
*rtattr
,
2703 abi_long (*target_to_host_rtattr
)
2708 while (len
>= sizeof(struct rtattr
)) {
2709 if (tswap16(rtattr
->rta_len
) < sizeof(struct rtattr
) ||
2710 tswap16(rtattr
->rta_len
) > len
) {
2713 rtattr
->rta_len
= tswap16(rtattr
->rta_len
);
2714 rtattr
->rta_type
= tswap16(rtattr
->rta_type
);
2715 ret
= target_to_host_rtattr(rtattr
);
2719 len
-= RTA_ALIGN(rtattr
->rta_len
);
2720 rtattr
= (struct rtattr
*)(((char *)rtattr
) +
2721 RTA_ALIGN(rtattr
->rta_len
));
2726 static abi_long
target_to_host_data_link_rtattr(struct rtattr
*rtattr
)
2728 switch (rtattr
->rta_type
) {
2730 gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr
->rta_type
);
2736 static abi_long
target_to_host_data_addr_rtattr(struct rtattr
*rtattr
)
2738 switch (rtattr
->rta_type
) {
2739 /* binary: depends on family type */
2744 gemu_log("Unknown target IFA type: %d\n", rtattr
->rta_type
);
2750 static abi_long
target_to_host_data_route_rtattr(struct rtattr
*rtattr
)
2753 switch (rtattr
->rta_type
) {
2754 /* binary: depends on family type */
2762 u32
= RTA_DATA(rtattr
);
2763 *u32
= tswap32(*u32
);
2766 gemu_log("Unknown target RTA type: %d\n", rtattr
->rta_type
);
2772 static void target_to_host_link_rtattr(struct rtattr
*rtattr
,
2773 uint32_t rtattr_len
)
2775 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2776 target_to_host_data_link_rtattr
);
2779 static void target_to_host_addr_rtattr(struct rtattr
*rtattr
,
2780 uint32_t rtattr_len
)
2782 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2783 target_to_host_data_addr_rtattr
);
2786 static void target_to_host_route_rtattr(struct rtattr
*rtattr
,
2787 uint32_t rtattr_len
)
2789 target_to_host_for_each_rtattr(rtattr
, rtattr_len
,
2790 target_to_host_data_route_rtattr
);
2793 static abi_long
target_to_host_data_route(struct nlmsghdr
*nlh
)
2795 struct ifinfomsg
*ifi
;
2796 struct ifaddrmsg
*ifa
;
2799 switch (nlh
->nlmsg_type
) {
2804 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifi
))) {
2805 ifi
= NLMSG_DATA(nlh
);
2806 ifi
->ifi_type
= tswap16(ifi
->ifi_type
);
2807 ifi
->ifi_index
= tswap32(ifi
->ifi_index
);
2808 ifi
->ifi_flags
= tswap32(ifi
->ifi_flags
);
2809 ifi
->ifi_change
= tswap32(ifi
->ifi_change
);
2810 target_to_host_link_rtattr(IFLA_RTA(ifi
), nlh
->nlmsg_len
-
2811 NLMSG_LENGTH(sizeof(*ifi
)));
2817 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*ifa
))) {
2818 ifa
= NLMSG_DATA(nlh
);
2819 ifa
->ifa_index
= tswap32(ifa
->ifa_index
);
2820 target_to_host_addr_rtattr(IFA_RTA(ifa
), nlh
->nlmsg_len
-
2821 NLMSG_LENGTH(sizeof(*ifa
)));
2828 if (nlh
->nlmsg_len
>= NLMSG_LENGTH(sizeof(*rtm
))) {
2829 rtm
= NLMSG_DATA(nlh
);
2830 rtm
->rtm_flags
= tswap32(rtm
->rtm_flags
);
2831 target_to_host_route_rtattr(RTM_RTA(rtm
), nlh
->nlmsg_len
-
2832 NLMSG_LENGTH(sizeof(*rtm
)));
2836 return -TARGET_EOPNOTSUPP
;
2841 static abi_long
target_to_host_nlmsg_route(struct nlmsghdr
*nlh
, size_t len
)
2843 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_route
);
2845 #endif /* CONFIG_RTNETLINK */
2847 static abi_long
host_to_target_data_audit(struct nlmsghdr
*nlh
)
2849 switch (nlh
->nlmsg_type
) {
2851 gemu_log("Unknown host audit message type %d\n",
2853 return -TARGET_EINVAL
;
2858 static inline abi_long
host_to_target_nlmsg_audit(struct nlmsghdr
*nlh
,
2861 return host_to_target_for_each_nlmsg(nlh
, len
, host_to_target_data_audit
);
2864 static abi_long
target_to_host_data_audit(struct nlmsghdr
*nlh
)
2866 switch (nlh
->nlmsg_type
) {
2868 case AUDIT_FIRST_USER_MSG
... AUDIT_LAST_USER_MSG
:
2869 case AUDIT_FIRST_USER_MSG2
... AUDIT_LAST_USER_MSG2
:
2872 gemu_log("Unknown target audit message type %d\n",
2874 return -TARGET_EINVAL
;
2880 static abi_long
target_to_host_nlmsg_audit(struct nlmsghdr
*nlh
, size_t len
)
2882 return target_to_host_for_each_nlmsg(nlh
, len
, target_to_host_data_audit
);
2885 /* do_setsockopt() Must return target values and target errnos. */
2886 static abi_long
do_setsockopt(int sockfd
, int level
, int optname
,
2887 abi_ulong optval_addr
, socklen_t optlen
)
2891 struct ip_mreqn
*ip_mreq
;
2892 struct ip_mreq_source
*ip_mreq_source
;
2896 /* TCP options all take an 'int' value. */
2897 if (optlen
< sizeof(uint32_t))
2898 return -TARGET_EINVAL
;
2900 if (get_user_u32(val
, optval_addr
))
2901 return -TARGET_EFAULT
;
2902 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2909 case IP_ROUTER_ALERT
:
2913 case IP_MTU_DISCOVER
:
2920 case IP_MULTICAST_TTL
:
2921 case IP_MULTICAST_LOOP
:
2923 if (optlen
>= sizeof(uint32_t)) {
2924 if (get_user_u32(val
, optval_addr
))
2925 return -TARGET_EFAULT
;
2926 } else if (optlen
>= 1) {
2927 if (get_user_u8(val
, optval_addr
))
2928 return -TARGET_EFAULT
;
2930 ret
= get_errno(setsockopt(sockfd
, level
, optname
, &val
, sizeof(val
)));
2932 case IP_ADD_MEMBERSHIP
:
2933 case IP_DROP_MEMBERSHIP
:
2934 if (optlen
< sizeof (struct target_ip_mreq
) ||
2935 optlen
> sizeof (struct target_ip_mreqn
))
2936 return -TARGET_EINVAL
;
2938 ip_mreq
= (struct ip_mreqn
*) alloca(optlen
);
2939 target_to_host_ip_mreq(ip_mreq
, optval_addr
, optlen
);
2940 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq
, optlen
));
2943 case IP_BLOCK_SOURCE
:
2944 case IP_UNBLOCK_SOURCE
:
2945 case IP_ADD_SOURCE_MEMBERSHIP
:
2946 case IP_DROP_SOURCE_MEMBERSHIP
:
2947 if (optlen
!= sizeof (struct target_ip_mreq_source
))
2948 return -TARGET_EINVAL
;
2950 ip_mreq_source
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
2951 ret
= get_errno(setsockopt(sockfd
, level
, optname
, ip_mreq_source
, optlen
));
2952 unlock_user (ip_mreq_source
, optval_addr
, 0);
2961 case IPV6_MTU_DISCOVER
:
2964 case IPV6_RECVPKTINFO
:
2965 case IPV6_UNICAST_HOPS
:
2967 case IPV6_RECVHOPLIMIT
:
2968 case IPV6_2292HOPLIMIT
:
2971 if (optlen
< sizeof(uint32_t)) {
2972 return -TARGET_EINVAL
;
2974 if (get_user_u32(val
, optval_addr
)) {
2975 return -TARGET_EFAULT
;
2977 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2978 &val
, sizeof(val
)));
2982 struct in6_pktinfo pki
;
2984 if (optlen
< sizeof(pki
)) {
2985 return -TARGET_EINVAL
;
2988 if (copy_from_user(&pki
, optval_addr
, sizeof(pki
))) {
2989 return -TARGET_EFAULT
;
2992 pki
.ipi6_ifindex
= tswap32(pki
.ipi6_ifindex
);
2994 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
2995 &pki
, sizeof(pki
)));
3006 struct icmp6_filter icmp6f
;
3008 if (optlen
> sizeof(icmp6f
)) {
3009 optlen
= sizeof(icmp6f
);
3012 if (copy_from_user(&icmp6f
, optval_addr
, optlen
)) {
3013 return -TARGET_EFAULT
;
3016 for (val
= 0; val
< 8; val
++) {
3017 icmp6f
.data
[val
] = tswap32(icmp6f
.data
[val
]);
3020 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
3032 /* those take an u32 value */
3033 if (optlen
< sizeof(uint32_t)) {
3034 return -TARGET_EINVAL
;
3037 if (get_user_u32(val
, optval_addr
)) {
3038 return -TARGET_EFAULT
;
3040 ret
= get_errno(setsockopt(sockfd
, level
, optname
,
3041 &val
, sizeof(val
)));
3048 case TARGET_SOL_SOCKET
:
3050 case TARGET_SO_RCVTIMEO
:
3054 optname
= SO_RCVTIMEO
;
3057 if (optlen
!= sizeof(struct target_timeval
)) {
3058 return -TARGET_EINVAL
;
3061 if (copy_from_user_timeval(&tv
, optval_addr
)) {
3062 return -TARGET_EFAULT
;
3065 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
3069 case TARGET_SO_SNDTIMEO
:
3070 optname
= SO_SNDTIMEO
;
3072 case TARGET_SO_ATTACH_FILTER
:
3074 struct target_sock_fprog
*tfprog
;
3075 struct target_sock_filter
*tfilter
;
3076 struct sock_fprog fprog
;
3077 struct sock_filter
*filter
;
3080 if (optlen
!= sizeof(*tfprog
)) {
3081 return -TARGET_EINVAL
;
3083 if (!lock_user_struct(VERIFY_READ
, tfprog
, optval_addr
, 0)) {
3084 return -TARGET_EFAULT
;
3086 if (!lock_user_struct(VERIFY_READ
, tfilter
,
3087 tswapal(tfprog
->filter
), 0)) {
3088 unlock_user_struct(tfprog
, optval_addr
, 1);
3089 return -TARGET_EFAULT
;
3092 fprog
.len
= tswap16(tfprog
->len
);
3093 filter
= g_try_new(struct sock_filter
, fprog
.len
);
3094 if (filter
== NULL
) {
3095 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
3096 unlock_user_struct(tfprog
, optval_addr
, 1);
3097 return -TARGET_ENOMEM
;
3099 for (i
= 0; i
< fprog
.len
; i
++) {
3100 filter
[i
].code
= tswap16(tfilter
[i
].code
);
3101 filter
[i
].jt
= tfilter
[i
].jt
;
3102 filter
[i
].jf
= tfilter
[i
].jf
;
3103 filter
[i
].k
= tswap32(tfilter
[i
].k
);
3105 fprog
.filter
= filter
;
3107 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
,
3108 SO_ATTACH_FILTER
, &fprog
, sizeof(fprog
)));
3111 unlock_user_struct(tfilter
, tfprog
->filter
, 1);
3112 unlock_user_struct(tfprog
, optval_addr
, 1);
3115 case TARGET_SO_BINDTODEVICE
:
3117 char *dev_ifname
, *addr_ifname
;
3119 if (optlen
> IFNAMSIZ
- 1) {
3120 optlen
= IFNAMSIZ
- 1;
3122 dev_ifname
= lock_user(VERIFY_READ
, optval_addr
, optlen
, 1);
3124 return -TARGET_EFAULT
;
3126 optname
= SO_BINDTODEVICE
;
3127 addr_ifname
= alloca(IFNAMSIZ
);
3128 memcpy(addr_ifname
, dev_ifname
, optlen
);
3129 addr_ifname
[optlen
] = 0;
3130 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
,
3131 addr_ifname
, optlen
));
3132 unlock_user (dev_ifname
, optval_addr
, 0);
3135 /* Options with 'int' argument. */
3136 case TARGET_SO_DEBUG
:
3139 case TARGET_SO_REUSEADDR
:
3140 optname
= SO_REUSEADDR
;
3142 case TARGET_SO_TYPE
:
3145 case TARGET_SO_ERROR
:
3148 case TARGET_SO_DONTROUTE
:
3149 optname
= SO_DONTROUTE
;
3151 case TARGET_SO_BROADCAST
:
3152 optname
= SO_BROADCAST
;
3154 case TARGET_SO_SNDBUF
:
3155 optname
= SO_SNDBUF
;
3157 case TARGET_SO_SNDBUFFORCE
:
3158 optname
= SO_SNDBUFFORCE
;
3160 case TARGET_SO_RCVBUF
:
3161 optname
= SO_RCVBUF
;
3163 case TARGET_SO_RCVBUFFORCE
:
3164 optname
= SO_RCVBUFFORCE
;
3166 case TARGET_SO_KEEPALIVE
:
3167 optname
= SO_KEEPALIVE
;
3169 case TARGET_SO_OOBINLINE
:
3170 optname
= SO_OOBINLINE
;
3172 case TARGET_SO_NO_CHECK
:
3173 optname
= SO_NO_CHECK
;
3175 case TARGET_SO_PRIORITY
:
3176 optname
= SO_PRIORITY
;
3179 case TARGET_SO_BSDCOMPAT
:
3180 optname
= SO_BSDCOMPAT
;
3183 case TARGET_SO_PASSCRED
:
3184 optname
= SO_PASSCRED
;
3186 case TARGET_SO_PASSSEC
:
3187 optname
= SO_PASSSEC
;
3189 case TARGET_SO_TIMESTAMP
:
3190 optname
= SO_TIMESTAMP
;
3192 case TARGET_SO_RCVLOWAT
:
3193 optname
= SO_RCVLOWAT
;
3198 if (optlen
< sizeof(uint32_t))
3199 return -TARGET_EINVAL
;
3201 if (get_user_u32(val
, optval_addr
))
3202 return -TARGET_EFAULT
;
3203 ret
= get_errno(setsockopt(sockfd
, SOL_SOCKET
, optname
, &val
, sizeof(val
)));
3207 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level
, optname
);
3208 ret
= -TARGET_ENOPROTOOPT
;
3213 /* do_getsockopt() Must return target values and target errnos. */
3214 static abi_long
do_getsockopt(int sockfd
, int level
, int optname
,
3215 abi_ulong optval_addr
, abi_ulong optlen
)
3222 case TARGET_SOL_SOCKET
:
3225 /* These don't just return a single integer */
3226 case TARGET_SO_LINGER
:
3227 case TARGET_SO_RCVTIMEO
:
3228 case TARGET_SO_SNDTIMEO
:
3229 case TARGET_SO_PEERNAME
:
3231 case TARGET_SO_PEERCRED
: {
3234 struct target_ucred
*tcr
;
3236 if (get_user_u32(len
, optlen
)) {
3237 return -TARGET_EFAULT
;
3240 return -TARGET_EINVAL
;
3244 ret
= get_errno(getsockopt(sockfd
, level
, SO_PEERCRED
,
3252 if (!lock_user_struct(VERIFY_WRITE
, tcr
, optval_addr
, 0)) {
3253 return -TARGET_EFAULT
;
3255 __put_user(cr
.pid
, &tcr
->pid
);
3256 __put_user(cr
.uid
, &tcr
->uid
);
3257 __put_user(cr
.gid
, &tcr
->gid
);
3258 unlock_user_struct(tcr
, optval_addr
, 1);
3259 if (put_user_u32(len
, optlen
)) {
3260 return -TARGET_EFAULT
;
3264 /* Options with 'int' argument. */
3265 case TARGET_SO_DEBUG
:
3268 case TARGET_SO_REUSEADDR
:
3269 optname
= SO_REUSEADDR
;
3271 case TARGET_SO_TYPE
:
3274 case TARGET_SO_ERROR
:
3277 case TARGET_SO_DONTROUTE
:
3278 optname
= SO_DONTROUTE
;
3280 case TARGET_SO_BROADCAST
:
3281 optname
= SO_BROADCAST
;
3283 case TARGET_SO_SNDBUF
:
3284 optname
= SO_SNDBUF
;
3286 case TARGET_SO_RCVBUF
:
3287 optname
= SO_RCVBUF
;
3289 case TARGET_SO_KEEPALIVE
:
3290 optname
= SO_KEEPALIVE
;
3292 case TARGET_SO_OOBINLINE
:
3293 optname
= SO_OOBINLINE
;
3295 case TARGET_SO_NO_CHECK
:
3296 optname
= SO_NO_CHECK
;
3298 case TARGET_SO_PRIORITY
:
3299 optname
= SO_PRIORITY
;
3302 case TARGET_SO_BSDCOMPAT
:
3303 optname
= SO_BSDCOMPAT
;
3306 case TARGET_SO_PASSCRED
:
3307 optname
= SO_PASSCRED
;
3309 case TARGET_SO_TIMESTAMP
:
3310 optname
= SO_TIMESTAMP
;
3312 case TARGET_SO_RCVLOWAT
:
3313 optname
= SO_RCVLOWAT
;
3315 case TARGET_SO_ACCEPTCONN
:
3316 optname
= SO_ACCEPTCONN
;
3323 /* TCP options all take an 'int' value. */
3325 if (get_user_u32(len
, optlen
))
3326 return -TARGET_EFAULT
;
3328 return -TARGET_EINVAL
;
3330 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3333 if (optname
== SO_TYPE
) {
3334 val
= host_to_target_sock_type(val
);
3339 if (put_user_u32(val
, optval_addr
))
3340 return -TARGET_EFAULT
;
3342 if (put_user_u8(val
, optval_addr
))
3343 return -TARGET_EFAULT
;
3345 if (put_user_u32(len
, optlen
))
3346 return -TARGET_EFAULT
;
3353 case IP_ROUTER_ALERT
:
3357 case IP_MTU_DISCOVER
:
3363 case IP_MULTICAST_TTL
:
3364 case IP_MULTICAST_LOOP
:
3365 if (get_user_u32(len
, optlen
))
3366 return -TARGET_EFAULT
;
3368 return -TARGET_EINVAL
;
3370 ret
= get_errno(getsockopt(sockfd
, level
, optname
, &val
, &lv
));
3373 if (len
< sizeof(int) && len
> 0 && val
>= 0 && val
< 255) {
3375 if (put_user_u32(len
, optlen
)
3376 || put_user_u8(val
, optval_addr
))
3377 return -TARGET_EFAULT
;
3379 if (len
> sizeof(int))
3381 if (put_user_u32(len
, optlen
)
3382 || put_user_u32(val
, optval_addr
))
3383 return -TARGET_EFAULT
;
3387 ret
= -TARGET_ENOPROTOOPT
;
3393 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
3395 ret
= -TARGET_EOPNOTSUPP
;
3401 /* Convert target low/high pair representing file offset into the host
3402 * low/high pair. This function doesn't handle offsets bigger than 64 bits
3403 * as the kernel doesn't handle them either.
3405 static void target_to_host_low_high(abi_ulong tlow
,
3407 unsigned long *hlow
,
3408 unsigned long *hhigh
)
3410 uint64_t off
= tlow
|
3411 ((unsigned long long)thigh
<< TARGET_LONG_BITS
/ 2) <<
3412 TARGET_LONG_BITS
/ 2;
3415 *hhigh
= (off
>> HOST_LONG_BITS
/ 2) >> HOST_LONG_BITS
/ 2;
3418 static struct iovec
*lock_iovec(int type
, abi_ulong target_addr
,
3419 abi_ulong count
, int copy
)
3421 struct target_iovec
*target_vec
;
3423 abi_ulong total_len
, max_len
;
3426 bool bad_address
= false;
3432 if (count
> IOV_MAX
) {
3437 vec
= g_try_new0(struct iovec
, count
);
3443 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3444 count
* sizeof(struct target_iovec
), 1);
3445 if (target_vec
== NULL
) {
3450 /* ??? If host page size > target page size, this will result in a
3451 value larger than what we can actually support. */
3452 max_len
= 0x7fffffff & TARGET_PAGE_MASK
;
3455 for (i
= 0; i
< count
; i
++) {
3456 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3457 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3462 } else if (len
== 0) {
3463 /* Zero length pointer is ignored. */
3464 vec
[i
].iov_base
= 0;
3466 vec
[i
].iov_base
= lock_user(type
, base
, len
, copy
);
3467 /* If the first buffer pointer is bad, this is a fault. But
3468 * subsequent bad buffers will result in a partial write; this
3469 * is realized by filling the vector with null pointers and
3471 if (!vec
[i
].iov_base
) {
3482 if (len
> max_len
- total_len
) {
3483 len
= max_len
- total_len
;
3486 vec
[i
].iov_len
= len
;
3490 unlock_user(target_vec
, target_addr
, 0);
3495 if (tswapal(target_vec
[i
].iov_len
) > 0) {
3496 unlock_user(vec
[i
].iov_base
, tswapal(target_vec
[i
].iov_base
), 0);
3499 unlock_user(target_vec
, target_addr
, 0);
3506 static void unlock_iovec(struct iovec
*vec
, abi_ulong target_addr
,
3507 abi_ulong count
, int copy
)
3509 struct target_iovec
*target_vec
;
3512 target_vec
= lock_user(VERIFY_READ
, target_addr
,
3513 count
* sizeof(struct target_iovec
), 1);
3515 for (i
= 0; i
< count
; i
++) {
3516 abi_ulong base
= tswapal(target_vec
[i
].iov_base
);
3517 abi_long len
= tswapal(target_vec
[i
].iov_len
);
3521 unlock_user(vec
[i
].iov_base
, base
, copy
? vec
[i
].iov_len
: 0);
3523 unlock_user(target_vec
, target_addr
, 0);
3529 static inline int target_to_host_sock_type(int *type
)
3532 int target_type
= *type
;
3534 switch (target_type
& TARGET_SOCK_TYPE_MASK
) {
3535 case TARGET_SOCK_DGRAM
:
3536 host_type
= SOCK_DGRAM
;
3538 case TARGET_SOCK_STREAM
:
3539 host_type
= SOCK_STREAM
;
3542 host_type
= target_type
& TARGET_SOCK_TYPE_MASK
;
3545 if (target_type
& TARGET_SOCK_CLOEXEC
) {
3546 #if defined(SOCK_CLOEXEC)
3547 host_type
|= SOCK_CLOEXEC
;
3549 return -TARGET_EINVAL
;
3552 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3553 #if defined(SOCK_NONBLOCK)
3554 host_type
|= SOCK_NONBLOCK
;
3555 #elif !defined(O_NONBLOCK)
3556 return -TARGET_EINVAL
;
3563 /* Try to emulate socket type flags after socket creation. */
3564 static int sock_flags_fixup(int fd
, int target_type
)
3566 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3567 if (target_type
& TARGET_SOCK_NONBLOCK
) {
3568 int flags
= fcntl(fd
, F_GETFL
);
3569 if (fcntl(fd
, F_SETFL
, O_NONBLOCK
| flags
) == -1) {
3571 return -TARGET_EINVAL
;
3578 static abi_long
packet_target_to_host_sockaddr(void *host_addr
,
3579 abi_ulong target_addr
,
3582 struct sockaddr
*addr
= host_addr
;
3583 struct target_sockaddr
*target_saddr
;
3585 target_saddr
= lock_user(VERIFY_READ
, target_addr
, len
, 1);
3586 if (!target_saddr
) {
3587 return -TARGET_EFAULT
;
3590 memcpy(addr
, target_saddr
, len
);
3591 addr
->sa_family
= tswap16(target_saddr
->sa_family
);
3592 /* spkt_protocol is big-endian */
3594 unlock_user(target_saddr
, target_addr
, 0);
3598 static TargetFdTrans target_packet_trans
= {
3599 .target_to_host_addr
= packet_target_to_host_sockaddr
,
3602 #ifdef CONFIG_RTNETLINK
3603 static abi_long
netlink_route_target_to_host(void *buf
, size_t len
)
3607 ret
= target_to_host_nlmsg_route(buf
, len
);
3615 static abi_long
netlink_route_host_to_target(void *buf
, size_t len
)
3619 ret
= host_to_target_nlmsg_route(buf
, len
);
3627 static TargetFdTrans target_netlink_route_trans
= {
3628 .target_to_host_data
= netlink_route_target_to_host
,
3629 .host_to_target_data
= netlink_route_host_to_target
,
3631 #endif /* CONFIG_RTNETLINK */
3633 static abi_long
netlink_audit_target_to_host(void *buf
, size_t len
)
3637 ret
= target_to_host_nlmsg_audit(buf
, len
);
3645 static abi_long
netlink_audit_host_to_target(void *buf
, size_t len
)
3649 ret
= host_to_target_nlmsg_audit(buf
, len
);
3657 static TargetFdTrans target_netlink_audit_trans
= {
3658 .target_to_host_data
= netlink_audit_target_to_host
,
3659 .host_to_target_data
= netlink_audit_host_to_target
,
3662 /* do_socket() Must return target values and target errnos. */
3663 static abi_long
do_socket(int domain
, int type
, int protocol
)
3665 int target_type
= type
;
3668 ret
= target_to_host_sock_type(&type
);
3673 if (domain
== PF_NETLINK
&& !(
3674 #ifdef CONFIG_RTNETLINK
3675 protocol
== NETLINK_ROUTE
||
3677 protocol
== NETLINK_KOBJECT_UEVENT
||
3678 protocol
== NETLINK_AUDIT
)) {
3679 return -EPFNOSUPPORT
;
3682 if (domain
== AF_PACKET
||
3683 (domain
== AF_INET
&& type
== SOCK_PACKET
)) {
3684 protocol
= tswap16(protocol
);
3687 ret
= get_errno(socket(domain
, type
, protocol
));
3689 ret
= sock_flags_fixup(ret
, target_type
);
3690 if (type
== SOCK_PACKET
) {
3691 /* Manage an obsolete case :
3692 * if socket type is SOCK_PACKET, bind by name
3694 fd_trans_register(ret
, &target_packet_trans
);
3695 } else if (domain
== PF_NETLINK
) {
3697 #ifdef CONFIG_RTNETLINK
3699 fd_trans_register(ret
, &target_netlink_route_trans
);
3702 case NETLINK_KOBJECT_UEVENT
:
3703 /* nothing to do: messages are strings */
3706 fd_trans_register(ret
, &target_netlink_audit_trans
);
3709 g_assert_not_reached();
3716 /* do_bind() Must return target values and target errnos. */
3717 static abi_long
do_bind(int sockfd
, abi_ulong target_addr
,
3723 if ((int)addrlen
< 0) {
3724 return -TARGET_EINVAL
;
3727 addr
= alloca(addrlen
+1);
3729 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3733 return get_errno(bind(sockfd
, addr
, addrlen
));
3736 /* do_connect() Must return target values and target errnos. */
3737 static abi_long
do_connect(int sockfd
, abi_ulong target_addr
,
3743 if ((int)addrlen
< 0) {
3744 return -TARGET_EINVAL
;
3747 addr
= alloca(addrlen
+1);
3749 ret
= target_to_host_sockaddr(sockfd
, addr
, target_addr
, addrlen
);
3753 return get_errno(safe_connect(sockfd
, addr
, addrlen
));
3756 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3757 static abi_long
do_sendrecvmsg_locked(int fd
, struct target_msghdr
*msgp
,
3758 int flags
, int send
)
3764 abi_ulong target_vec
;
3766 if (msgp
->msg_name
) {
3767 msg
.msg_namelen
= tswap32(msgp
->msg_namelen
);
3768 msg
.msg_name
= alloca(msg
.msg_namelen
+1);
3769 ret
= target_to_host_sockaddr(fd
, msg
.msg_name
,
3770 tswapal(msgp
->msg_name
),
3772 if (ret
== -TARGET_EFAULT
) {
3773 /* For connected sockets msg_name and msg_namelen must
3774 * be ignored, so returning EFAULT immediately is wrong.
3775 * Instead, pass a bad msg_name to the host kernel, and
3776 * let it decide whether to return EFAULT or not.
3778 msg
.msg_name
= (void *)-1;
3783 msg
.msg_name
= NULL
;
3784 msg
.msg_namelen
= 0;
3786 msg
.msg_controllen
= 2 * tswapal(msgp
->msg_controllen
);
3787 msg
.msg_control
= alloca(msg
.msg_controllen
);
3788 msg
.msg_flags
= tswap32(msgp
->msg_flags
);
3790 count
= tswapal(msgp
->msg_iovlen
);
3791 target_vec
= tswapal(msgp
->msg_iov
);
3793 if (count
> IOV_MAX
) {
3794 /* sendrcvmsg returns a different errno for this condition than
3795 * readv/writev, so we must catch it here before lock_iovec() does.
3797 ret
= -TARGET_EMSGSIZE
;
3801 vec
= lock_iovec(send
? VERIFY_READ
: VERIFY_WRITE
,
3802 target_vec
, count
, send
);
3804 ret
= -host_to_target_errno(errno
);
3807 msg
.msg_iovlen
= count
;
3811 if (fd_trans_target_to_host_data(fd
)) {
3814 host_msg
= g_malloc(msg
.msg_iov
->iov_len
);
3815 memcpy(host_msg
, msg
.msg_iov
->iov_base
, msg
.msg_iov
->iov_len
);
3816 ret
= fd_trans_target_to_host_data(fd
)(host_msg
,
3817 msg
.msg_iov
->iov_len
);
3819 msg
.msg_iov
->iov_base
= host_msg
;
3820 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3824 ret
= target_to_host_cmsg(&msg
, msgp
);
3826 ret
= get_errno(safe_sendmsg(fd
, &msg
, flags
));
3830 ret
= get_errno(safe_recvmsg(fd
, &msg
, flags
));
3831 if (!is_error(ret
)) {
3833 if (fd_trans_host_to_target_data(fd
)) {
3834 ret
= fd_trans_host_to_target_data(fd
)(msg
.msg_iov
->iov_base
,
3837 ret
= host_to_target_cmsg(msgp
, &msg
);
3839 if (!is_error(ret
)) {
3840 msgp
->msg_namelen
= tswap32(msg
.msg_namelen
);
3841 if (msg
.msg_name
!= NULL
&& msg
.msg_name
!= (void *)-1) {
3842 ret
= host_to_target_sockaddr(tswapal(msgp
->msg_name
),
3843 msg
.msg_name
, msg
.msg_namelen
);
3855 unlock_iovec(vec
, target_vec
, count
, !send
);
3860 static abi_long
do_sendrecvmsg(int fd
, abi_ulong target_msg
,
3861 int flags
, int send
)
3864 struct target_msghdr
*msgp
;
3866 if (!lock_user_struct(send
? VERIFY_READ
: VERIFY_WRITE
,
3870 return -TARGET_EFAULT
;
3872 ret
= do_sendrecvmsg_locked(fd
, msgp
, flags
, send
);
3873 unlock_user_struct(msgp
, target_msg
, send
? 0 : 1);
3877 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3878 * so it might not have this *mmsg-specific flag either.
3880 #ifndef MSG_WAITFORONE
3881 #define MSG_WAITFORONE 0x10000
3884 static abi_long
do_sendrecvmmsg(int fd
, abi_ulong target_msgvec
,
3885 unsigned int vlen
, unsigned int flags
,
3888 struct target_mmsghdr
*mmsgp
;
3892 if (vlen
> UIO_MAXIOV
) {
3896 mmsgp
= lock_user(VERIFY_WRITE
, target_msgvec
, sizeof(*mmsgp
) * vlen
, 1);
3898 return -TARGET_EFAULT
;
3901 for (i
= 0; i
< vlen
; i
++) {
3902 ret
= do_sendrecvmsg_locked(fd
, &mmsgp
[i
].msg_hdr
, flags
, send
);
3903 if (is_error(ret
)) {
3906 mmsgp
[i
].msg_len
= tswap32(ret
);
3907 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3908 if (flags
& MSG_WAITFORONE
) {
3909 flags
|= MSG_DONTWAIT
;
3913 unlock_user(mmsgp
, target_msgvec
, sizeof(*mmsgp
) * i
);
3915 /* Return number of datagrams sent if we sent any at all;
3916 * otherwise return the error.
3924 /* do_accept4() Must return target values and target errnos. */
3925 static abi_long
do_accept4(int fd
, abi_ulong target_addr
,
3926 abi_ulong target_addrlen_addr
, int flags
)
3933 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
3935 if (target_addr
== 0) {
3936 return get_errno(safe_accept4(fd
, NULL
, NULL
, host_flags
));
3939 /* linux returns EINVAL if addrlen pointer is invalid */
3940 if (get_user_u32(addrlen
, target_addrlen_addr
))
3941 return -TARGET_EINVAL
;
3943 if ((int)addrlen
< 0) {
3944 return -TARGET_EINVAL
;
3947 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3948 return -TARGET_EINVAL
;
3950 addr
= alloca(addrlen
);
3952 ret
= get_errno(safe_accept4(fd
, addr
, &addrlen
, host_flags
));
3953 if (!is_error(ret
)) {
3954 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3955 if (put_user_u32(addrlen
, target_addrlen_addr
))
3956 ret
= -TARGET_EFAULT
;
3961 /* do_getpeername() Must return target values and target errnos. */
3962 static abi_long
do_getpeername(int fd
, abi_ulong target_addr
,
3963 abi_ulong target_addrlen_addr
)
3969 if (get_user_u32(addrlen
, target_addrlen_addr
))
3970 return -TARGET_EFAULT
;
3972 if ((int)addrlen
< 0) {
3973 return -TARGET_EINVAL
;
3976 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
3977 return -TARGET_EFAULT
;
3979 addr
= alloca(addrlen
);
3981 ret
= get_errno(getpeername(fd
, addr
, &addrlen
));
3982 if (!is_error(ret
)) {
3983 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
3984 if (put_user_u32(addrlen
, target_addrlen_addr
))
3985 ret
= -TARGET_EFAULT
;
3990 /* do_getsockname() Must return target values and target errnos. */
3991 static abi_long
do_getsockname(int fd
, abi_ulong target_addr
,
3992 abi_ulong target_addrlen_addr
)
3998 if (get_user_u32(addrlen
, target_addrlen_addr
))
3999 return -TARGET_EFAULT
;
4001 if ((int)addrlen
< 0) {
4002 return -TARGET_EINVAL
;
4005 if (!access_ok(VERIFY_WRITE
, target_addr
, addrlen
))
4006 return -TARGET_EFAULT
;
4008 addr
= alloca(addrlen
);
4010 ret
= get_errno(getsockname(fd
, addr
, &addrlen
));
4011 if (!is_error(ret
)) {
4012 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
4013 if (put_user_u32(addrlen
, target_addrlen_addr
))
4014 ret
= -TARGET_EFAULT
;
4019 /* do_socketpair() Must return target values and target errnos. */
4020 static abi_long
do_socketpair(int domain
, int type
, int protocol
,
4021 abi_ulong target_tab_addr
)
4026 target_to_host_sock_type(&type
);
4028 ret
= get_errno(socketpair(domain
, type
, protocol
, tab
));
4029 if (!is_error(ret
)) {
4030 if (put_user_s32(tab
[0], target_tab_addr
)
4031 || put_user_s32(tab
[1], target_tab_addr
+ sizeof(tab
[0])))
4032 ret
= -TARGET_EFAULT
;
4037 /* do_sendto() Must return target values and target errnos. */
4038 static abi_long
do_sendto(int fd
, abi_ulong msg
, size_t len
, int flags
,
4039 abi_ulong target_addr
, socklen_t addrlen
)
4043 void *copy_msg
= NULL
;
4046 if ((int)addrlen
< 0) {
4047 return -TARGET_EINVAL
;
4050 host_msg
= lock_user(VERIFY_READ
, msg
, len
, 1);
4052 return -TARGET_EFAULT
;
4053 if (fd_trans_target_to_host_data(fd
)) {
4054 copy_msg
= host_msg
;
4055 host_msg
= g_malloc(len
);
4056 memcpy(host_msg
, copy_msg
, len
);
4057 ret
= fd_trans_target_to_host_data(fd
)(host_msg
, len
);
4063 addr
= alloca(addrlen
+1);
4064 ret
= target_to_host_sockaddr(fd
, addr
, target_addr
, addrlen
);
4068 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, addr
, addrlen
));
4070 ret
= get_errno(safe_sendto(fd
, host_msg
, len
, flags
, NULL
, 0));
4075 host_msg
= copy_msg
;
4077 unlock_user(host_msg
, msg
, 0);
4081 /* do_recvfrom() Must return target values and target errnos. */
4082 static abi_long
do_recvfrom(int fd
, abi_ulong msg
, size_t len
, int flags
,
4083 abi_ulong target_addr
,
4084 abi_ulong target_addrlen
)
4091 host_msg
= lock_user(VERIFY_WRITE
, msg
, len
, 0);
4093 return -TARGET_EFAULT
;
4095 if (get_user_u32(addrlen
, target_addrlen
)) {
4096 ret
= -TARGET_EFAULT
;
4099 if ((int)addrlen
< 0) {
4100 ret
= -TARGET_EINVAL
;
4103 addr
= alloca(addrlen
);
4104 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
,
4107 addr
= NULL
; /* To keep compiler quiet. */
4108 ret
= get_errno(safe_recvfrom(fd
, host_msg
, len
, flags
, NULL
, 0));
4110 if (!is_error(ret
)) {
4111 if (fd_trans_host_to_target_data(fd
)) {
4112 ret
= fd_trans_host_to_target_data(fd
)(host_msg
, ret
);
4115 host_to_target_sockaddr(target_addr
, addr
, addrlen
);
4116 if (put_user_u32(addrlen
, target_addrlen
)) {
4117 ret
= -TARGET_EFAULT
;
4121 unlock_user(host_msg
, msg
, len
);
4124 unlock_user(host_msg
, msg
, 0);
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
4222 #define N_SHM_REGIONS 32
4224 static struct shm_region
{
4228 } shm_regions
[N_SHM_REGIONS
];
4230 #ifndef TARGET_SEMID64_DS
4231 /* asm-generic version of this struct */
4232 struct target_semid64_ds
4234 struct target_ipc_perm sem_perm
;
4235 abi_ulong sem_otime
;
4236 #if TARGET_ABI_BITS == 32
4237 abi_ulong __unused1
;
4239 abi_ulong sem_ctime
;
4240 #if TARGET_ABI_BITS == 32
4241 abi_ulong __unused2
;
4243 abi_ulong sem_nsems
;
4244 abi_ulong __unused3
;
4245 abi_ulong __unused4
;
4249 static inline abi_long
target_to_host_ipc_perm(struct ipc_perm
*host_ip
,
4250 abi_ulong target_addr
)
4252 struct target_ipc_perm
*target_ip
;
4253 struct target_semid64_ds
*target_sd
;
4255 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4256 return -TARGET_EFAULT
;
4257 target_ip
= &(target_sd
->sem_perm
);
4258 host_ip
->__key
= tswap32(target_ip
->__key
);
4259 host_ip
->uid
= tswap32(target_ip
->uid
);
4260 host_ip
->gid
= tswap32(target_ip
->gid
);
4261 host_ip
->cuid
= tswap32(target_ip
->cuid
);
4262 host_ip
->cgid
= tswap32(target_ip
->cgid
);
4263 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4264 host_ip
->mode
= tswap32(target_ip
->mode
);
4266 host_ip
->mode
= tswap16(target_ip
->mode
);
4268 #if defined(TARGET_PPC)
4269 host_ip
->__seq
= tswap32(target_ip
->__seq
);
4271 host_ip
->__seq
= tswap16(target_ip
->__seq
);
4273 unlock_user_struct(target_sd
, target_addr
, 0);
4277 static inline abi_long
host_to_target_ipc_perm(abi_ulong target_addr
,
4278 struct ipc_perm
*host_ip
)
4280 struct target_ipc_perm
*target_ip
;
4281 struct target_semid64_ds
*target_sd
;
4283 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4284 return -TARGET_EFAULT
;
4285 target_ip
= &(target_sd
->sem_perm
);
4286 target_ip
->__key
= tswap32(host_ip
->__key
);
4287 target_ip
->uid
= tswap32(host_ip
->uid
);
4288 target_ip
->gid
= tswap32(host_ip
->gid
);
4289 target_ip
->cuid
= tswap32(host_ip
->cuid
);
4290 target_ip
->cgid
= tswap32(host_ip
->cgid
);
4291 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4292 target_ip
->mode
= tswap32(host_ip
->mode
);
4294 target_ip
->mode
= tswap16(host_ip
->mode
);
4296 #if defined(TARGET_PPC)
4297 target_ip
->__seq
= tswap32(host_ip
->__seq
);
4299 target_ip
->__seq
= tswap16(host_ip
->__seq
);
4301 unlock_user_struct(target_sd
, target_addr
, 1);
4305 static inline abi_long
target_to_host_semid_ds(struct semid_ds
*host_sd
,
4306 abi_ulong target_addr
)
4308 struct target_semid64_ds
*target_sd
;
4310 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4311 return -TARGET_EFAULT
;
4312 if (target_to_host_ipc_perm(&(host_sd
->sem_perm
),target_addr
))
4313 return -TARGET_EFAULT
;
4314 host_sd
->sem_nsems
= tswapal(target_sd
->sem_nsems
);
4315 host_sd
->sem_otime
= tswapal(target_sd
->sem_otime
);
4316 host_sd
->sem_ctime
= tswapal(target_sd
->sem_ctime
);
4317 unlock_user_struct(target_sd
, target_addr
, 0);
4321 static inline abi_long
host_to_target_semid_ds(abi_ulong target_addr
,
4322 struct semid_ds
*host_sd
)
4324 struct target_semid64_ds
*target_sd
;
4326 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4327 return -TARGET_EFAULT
;
4328 if (host_to_target_ipc_perm(target_addr
,&(host_sd
->sem_perm
)))
4329 return -TARGET_EFAULT
;
4330 target_sd
->sem_nsems
= tswapal(host_sd
->sem_nsems
);
4331 target_sd
->sem_otime
= tswapal(host_sd
->sem_otime
);
4332 target_sd
->sem_ctime
= tswapal(host_sd
->sem_ctime
);
4333 unlock_user_struct(target_sd
, target_addr
, 1);
4337 struct target_seminfo
{
4350 static inline abi_long
host_to_target_seminfo(abi_ulong target_addr
,
4351 struct seminfo
*host_seminfo
)
4353 struct target_seminfo
*target_seminfo
;
4354 if (!lock_user_struct(VERIFY_WRITE
, target_seminfo
, target_addr
, 0))
4355 return -TARGET_EFAULT
;
4356 __put_user(host_seminfo
->semmap
, &target_seminfo
->semmap
);
4357 __put_user(host_seminfo
->semmni
, &target_seminfo
->semmni
);
4358 __put_user(host_seminfo
->semmns
, &target_seminfo
->semmns
);
4359 __put_user(host_seminfo
->semmnu
, &target_seminfo
->semmnu
);
4360 __put_user(host_seminfo
->semmsl
, &target_seminfo
->semmsl
);
4361 __put_user(host_seminfo
->semopm
, &target_seminfo
->semopm
);
4362 __put_user(host_seminfo
->semume
, &target_seminfo
->semume
);
4363 __put_user(host_seminfo
->semusz
, &target_seminfo
->semusz
);
4364 __put_user(host_seminfo
->semvmx
, &target_seminfo
->semvmx
);
4365 __put_user(host_seminfo
->semaem
, &target_seminfo
->semaem
);
4366 unlock_user_struct(target_seminfo
, target_addr
, 1);
4372 struct semid_ds
*buf
;
4373 unsigned short *array
;
4374 struct seminfo
*__buf
;
4377 union target_semun
{
4384 static inline abi_long
target_to_host_semarray(int semid
, unsigned short **host_array
,
4385 abi_ulong target_addr
)
4388 unsigned short *array
;
4390 struct semid_ds semid_ds
;
4393 semun
.buf
= &semid_ds
;
4395 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4397 return get_errno(ret
);
4399 nsems
= semid_ds
.sem_nsems
;
4401 *host_array
= g_try_new(unsigned short, nsems
);
4403 return -TARGET_ENOMEM
;
4405 array
= lock_user(VERIFY_READ
, target_addr
,
4406 nsems
*sizeof(unsigned short), 1);
4408 g_free(*host_array
);
4409 return -TARGET_EFAULT
;
4412 for(i
=0; i
<nsems
; i
++) {
4413 __get_user((*host_array
)[i
], &array
[i
]);
4415 unlock_user(array
, target_addr
, 0);
4420 static inline abi_long
host_to_target_semarray(int semid
, abi_ulong target_addr
,
4421 unsigned short **host_array
)
4424 unsigned short *array
;
4426 struct semid_ds semid_ds
;
4429 semun
.buf
= &semid_ds
;
4431 ret
= semctl(semid
, 0, IPC_STAT
, semun
);
4433 return get_errno(ret
);
4435 nsems
= semid_ds
.sem_nsems
;
4437 array
= lock_user(VERIFY_WRITE
, target_addr
,
4438 nsems
*sizeof(unsigned short), 0);
4440 return -TARGET_EFAULT
;
4442 for(i
=0; i
<nsems
; i
++) {
4443 __put_user((*host_array
)[i
], &array
[i
]);
4445 g_free(*host_array
);
4446 unlock_user(array
, target_addr
, 1);
4451 static inline abi_long
do_semctl(int semid
, int semnum
, int cmd
,
4452 abi_ulong target_arg
)
4454 union target_semun target_su
= { .buf
= target_arg
};
4456 struct semid_ds dsarg
;
4457 unsigned short *array
= NULL
;
4458 struct seminfo seminfo
;
4459 abi_long ret
= -TARGET_EINVAL
;
4466 /* In 64 bit cross-endian situations, we will erroneously pick up
4467 * the wrong half of the union for the "val" element. To rectify
4468 * this, the entire 8-byte structure is byteswapped, followed by
4469 * a swap of the 4 byte val field. In other cases, the data is
4470 * already in proper host byte order. */
4471 if (sizeof(target_su
.val
) != (sizeof(target_su
.buf
))) {
4472 target_su
.buf
= tswapal(target_su
.buf
);
4473 arg
.val
= tswap32(target_su
.val
);
4475 arg
.val
= target_su
.val
;
4477 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4481 err
= target_to_host_semarray(semid
, &array
, target_su
.array
);
4485 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4486 err
= host_to_target_semarray(semid
, target_su
.array
, &array
);
4493 err
= target_to_host_semid_ds(&dsarg
, target_su
.buf
);
4497 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4498 err
= host_to_target_semid_ds(target_su
.buf
, &dsarg
);
4504 arg
.__buf
= &seminfo
;
4505 ret
= get_errno(semctl(semid
, semnum
, cmd
, arg
));
4506 err
= host_to_target_seminfo(target_su
.__buf
, &seminfo
);
4514 ret
= get_errno(semctl(semid
, semnum
, cmd
, NULL
));
4521 struct target_sembuf
{
4522 unsigned short sem_num
;
4527 static inline abi_long
target_to_host_sembuf(struct sembuf
*host_sembuf
,
4528 abi_ulong target_addr
,
4531 struct target_sembuf
*target_sembuf
;
4534 target_sembuf
= lock_user(VERIFY_READ
, target_addr
,
4535 nsops
*sizeof(struct target_sembuf
), 1);
4537 return -TARGET_EFAULT
;
4539 for(i
=0; i
<nsops
; i
++) {
4540 __get_user(host_sembuf
[i
].sem_num
, &target_sembuf
[i
].sem_num
);
4541 __get_user(host_sembuf
[i
].sem_op
, &target_sembuf
[i
].sem_op
);
4542 __get_user(host_sembuf
[i
].sem_flg
, &target_sembuf
[i
].sem_flg
);
4545 unlock_user(target_sembuf
, target_addr
, 0);
4550 static inline abi_long
do_semop(int semid
, abi_long ptr
, unsigned nsops
)
4552 struct sembuf sops
[nsops
];
4554 if (target_to_host_sembuf(sops
, ptr
, nsops
))
4555 return -TARGET_EFAULT
;
4557 return get_errno(safe_semtimedop(semid
, sops
, nsops
, NULL
));
4560 struct target_msqid_ds
4562 struct target_ipc_perm msg_perm
;
4563 abi_ulong msg_stime
;
4564 #if TARGET_ABI_BITS == 32
4565 abi_ulong __unused1
;
4567 abi_ulong msg_rtime
;
4568 #if TARGET_ABI_BITS == 32
4569 abi_ulong __unused2
;
4571 abi_ulong msg_ctime
;
4572 #if TARGET_ABI_BITS == 32
4573 abi_ulong __unused3
;
4575 abi_ulong __msg_cbytes
;
4577 abi_ulong msg_qbytes
;
4578 abi_ulong msg_lspid
;
4579 abi_ulong msg_lrpid
;
4580 abi_ulong __unused4
;
4581 abi_ulong __unused5
;
4584 static inline abi_long
target_to_host_msqid_ds(struct msqid_ds
*host_md
,
4585 abi_ulong target_addr
)
4587 struct target_msqid_ds
*target_md
;
4589 if (!lock_user_struct(VERIFY_READ
, target_md
, target_addr
, 1))
4590 return -TARGET_EFAULT
;
4591 if (target_to_host_ipc_perm(&(host_md
->msg_perm
),target_addr
))
4592 return -TARGET_EFAULT
;
4593 host_md
->msg_stime
= tswapal(target_md
->msg_stime
);
4594 host_md
->msg_rtime
= tswapal(target_md
->msg_rtime
);
4595 host_md
->msg_ctime
= tswapal(target_md
->msg_ctime
);
4596 host_md
->__msg_cbytes
= tswapal(target_md
->__msg_cbytes
);
4597 host_md
->msg_qnum
= tswapal(target_md
->msg_qnum
);
4598 host_md
->msg_qbytes
= tswapal(target_md
->msg_qbytes
);
4599 host_md
->msg_lspid
= tswapal(target_md
->msg_lspid
);
4600 host_md
->msg_lrpid
= tswapal(target_md
->msg_lrpid
);
4601 unlock_user_struct(target_md
, target_addr
, 0);
4605 static inline abi_long
host_to_target_msqid_ds(abi_ulong target_addr
,
4606 struct msqid_ds
*host_md
)
4608 struct target_msqid_ds
*target_md
;
4610 if (!lock_user_struct(VERIFY_WRITE
, target_md
, target_addr
, 0))
4611 return -TARGET_EFAULT
;
4612 if (host_to_target_ipc_perm(target_addr
,&(host_md
->msg_perm
)))
4613 return -TARGET_EFAULT
;
4614 target_md
->msg_stime
= tswapal(host_md
->msg_stime
);
4615 target_md
->msg_rtime
= tswapal(host_md
->msg_rtime
);
4616 target_md
->msg_ctime
= tswapal(host_md
->msg_ctime
);
4617 target_md
->__msg_cbytes
= tswapal(host_md
->__msg_cbytes
);
4618 target_md
->msg_qnum
= tswapal(host_md
->msg_qnum
);
4619 target_md
->msg_qbytes
= tswapal(host_md
->msg_qbytes
);
4620 target_md
->msg_lspid
= tswapal(host_md
->msg_lspid
);
4621 target_md
->msg_lrpid
= tswapal(host_md
->msg_lrpid
);
4622 unlock_user_struct(target_md
, target_addr
, 1);
4626 struct target_msginfo
{
4634 unsigned short int msgseg
;
4637 static inline abi_long
host_to_target_msginfo(abi_ulong target_addr
,
4638 struct msginfo
*host_msginfo
)
4640 struct target_msginfo
*target_msginfo
;
4641 if (!lock_user_struct(VERIFY_WRITE
, target_msginfo
, target_addr
, 0))
4642 return -TARGET_EFAULT
;
4643 __put_user(host_msginfo
->msgpool
, &target_msginfo
->msgpool
);
4644 __put_user(host_msginfo
->msgmap
, &target_msginfo
->msgmap
);
4645 __put_user(host_msginfo
->msgmax
, &target_msginfo
->msgmax
);
4646 __put_user(host_msginfo
->msgmnb
, &target_msginfo
->msgmnb
);
4647 __put_user(host_msginfo
->msgmni
, &target_msginfo
->msgmni
);
4648 __put_user(host_msginfo
->msgssz
, &target_msginfo
->msgssz
);
4649 __put_user(host_msginfo
->msgtql
, &target_msginfo
->msgtql
);
4650 __put_user(host_msginfo
->msgseg
, &target_msginfo
->msgseg
);
4651 unlock_user_struct(target_msginfo
, target_addr
, 1);
4655 static inline abi_long
do_msgctl(int msgid
, int cmd
, abi_long ptr
)
4657 struct msqid_ds dsarg
;
4658 struct msginfo msginfo
;
4659 abi_long ret
= -TARGET_EINVAL
;
4667 if (target_to_host_msqid_ds(&dsarg
,ptr
))
4668 return -TARGET_EFAULT
;
4669 ret
= get_errno(msgctl(msgid
, cmd
, &dsarg
));
4670 if (host_to_target_msqid_ds(ptr
,&dsarg
))
4671 return -TARGET_EFAULT
;
4674 ret
= get_errno(msgctl(msgid
, cmd
, NULL
));
4678 ret
= get_errno(msgctl(msgid
, cmd
, (struct msqid_ds
*)&msginfo
));
4679 if (host_to_target_msginfo(ptr
, &msginfo
))
4680 return -TARGET_EFAULT
;
4687 struct target_msgbuf
{
4692 static inline abi_long
do_msgsnd(int msqid
, abi_long msgp
,
4693 ssize_t msgsz
, int msgflg
)
4695 struct target_msgbuf
*target_mb
;
4696 struct msgbuf
*host_mb
;
4700 return -TARGET_EINVAL
;
4703 if (!lock_user_struct(VERIFY_READ
, target_mb
, msgp
, 0))
4704 return -TARGET_EFAULT
;
4705 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4707 unlock_user_struct(target_mb
, msgp
, 0);
4708 return -TARGET_ENOMEM
;
4710 host_mb
->mtype
= (abi_long
) tswapal(target_mb
->mtype
);
4711 memcpy(host_mb
->mtext
, target_mb
->mtext
, msgsz
);
4712 ret
= get_errno(safe_msgsnd(msqid
, host_mb
, msgsz
, msgflg
));
4714 unlock_user_struct(target_mb
, msgp
, 0);
4719 static inline abi_long
do_msgrcv(int msqid
, abi_long msgp
,
4720 ssize_t msgsz
, abi_long msgtyp
,
4723 struct target_msgbuf
*target_mb
;
4725 struct msgbuf
*host_mb
;
4729 return -TARGET_EINVAL
;
4732 if (!lock_user_struct(VERIFY_WRITE
, target_mb
, msgp
, 0))
4733 return -TARGET_EFAULT
;
4735 host_mb
= g_try_malloc(msgsz
+ sizeof(long));
4737 ret
= -TARGET_ENOMEM
;
4740 ret
= get_errno(safe_msgrcv(msqid
, host_mb
, msgsz
, msgtyp
, msgflg
));
4743 abi_ulong target_mtext_addr
= msgp
+ sizeof(abi_ulong
);
4744 target_mtext
= lock_user(VERIFY_WRITE
, target_mtext_addr
, ret
, 0);
4745 if (!target_mtext
) {
4746 ret
= -TARGET_EFAULT
;
4749 memcpy(target_mb
->mtext
, host_mb
->mtext
, ret
);
4750 unlock_user(target_mtext
, target_mtext_addr
, ret
);
4753 target_mb
->mtype
= tswapal(host_mb
->mtype
);
4757 unlock_user_struct(target_mb
, msgp
, 1);
4762 static inline abi_long
target_to_host_shmid_ds(struct shmid_ds
*host_sd
,
4763 abi_ulong target_addr
)
4765 struct target_shmid_ds
*target_sd
;
4767 if (!lock_user_struct(VERIFY_READ
, target_sd
, target_addr
, 1))
4768 return -TARGET_EFAULT
;
4769 if (target_to_host_ipc_perm(&(host_sd
->shm_perm
), target_addr
))
4770 return -TARGET_EFAULT
;
4771 __get_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4772 __get_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4773 __get_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4774 __get_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4775 __get_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4776 __get_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4777 __get_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4778 unlock_user_struct(target_sd
, target_addr
, 0);
4782 static inline abi_long
host_to_target_shmid_ds(abi_ulong target_addr
,
4783 struct shmid_ds
*host_sd
)
4785 struct target_shmid_ds
*target_sd
;
4787 if (!lock_user_struct(VERIFY_WRITE
, target_sd
, target_addr
, 0))
4788 return -TARGET_EFAULT
;
4789 if (host_to_target_ipc_perm(target_addr
, &(host_sd
->shm_perm
)))
4790 return -TARGET_EFAULT
;
4791 __put_user(host_sd
->shm_segsz
, &target_sd
->shm_segsz
);
4792 __put_user(host_sd
->shm_atime
, &target_sd
->shm_atime
);
4793 __put_user(host_sd
->shm_dtime
, &target_sd
->shm_dtime
);
4794 __put_user(host_sd
->shm_ctime
, &target_sd
->shm_ctime
);
4795 __put_user(host_sd
->shm_cpid
, &target_sd
->shm_cpid
);
4796 __put_user(host_sd
->shm_lpid
, &target_sd
->shm_lpid
);
4797 __put_user(host_sd
->shm_nattch
, &target_sd
->shm_nattch
);
4798 unlock_user_struct(target_sd
, target_addr
, 1);
4802 struct target_shminfo
{
4810 static inline abi_long
host_to_target_shminfo(abi_ulong target_addr
,
4811 struct shminfo
*host_shminfo
)
4813 struct target_shminfo
*target_shminfo
;
4814 if (!lock_user_struct(VERIFY_WRITE
, target_shminfo
, target_addr
, 0))
4815 return -TARGET_EFAULT
;
4816 __put_user(host_shminfo
->shmmax
, &target_shminfo
->shmmax
);
4817 __put_user(host_shminfo
->shmmin
, &target_shminfo
->shmmin
);
4818 __put_user(host_shminfo
->shmmni
, &target_shminfo
->shmmni
);
4819 __put_user(host_shminfo
->shmseg
, &target_shminfo
->shmseg
);
4820 __put_user(host_shminfo
->shmall
, &target_shminfo
->shmall
);
4821 unlock_user_struct(target_shminfo
, target_addr
, 1);
4825 struct target_shm_info
{
4830 abi_ulong swap_attempts
;
4831 abi_ulong swap_successes
;
4834 static inline abi_long
host_to_target_shm_info(abi_ulong target_addr
,
4835 struct shm_info
*host_shm_info
)
4837 struct target_shm_info
*target_shm_info
;
4838 if (!lock_user_struct(VERIFY_WRITE
, target_shm_info
, target_addr
, 0))
4839 return -TARGET_EFAULT
;
4840 __put_user(host_shm_info
->used_ids
, &target_shm_info
->used_ids
);
4841 __put_user(host_shm_info
->shm_tot
, &target_shm_info
->shm_tot
);
4842 __put_user(host_shm_info
->shm_rss
, &target_shm_info
->shm_rss
);
4843 __put_user(host_shm_info
->shm_swp
, &target_shm_info
->shm_swp
);
4844 __put_user(host_shm_info
->swap_attempts
, &target_shm_info
->swap_attempts
);
4845 __put_user(host_shm_info
->swap_successes
, &target_shm_info
->swap_successes
);
4846 unlock_user_struct(target_shm_info
, target_addr
, 1);
4850 static inline abi_long
do_shmctl(int shmid
, int cmd
, abi_long buf
)
4852 struct shmid_ds dsarg
;
4853 struct shminfo shminfo
;
4854 struct shm_info shm_info
;
4855 abi_long ret
= -TARGET_EINVAL
;
4863 if (target_to_host_shmid_ds(&dsarg
, buf
))
4864 return -TARGET_EFAULT
;
4865 ret
= get_errno(shmctl(shmid
, cmd
, &dsarg
));
4866 if (host_to_target_shmid_ds(buf
, &dsarg
))
4867 return -TARGET_EFAULT
;
4870 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shminfo
));
4871 if (host_to_target_shminfo(buf
, &shminfo
))
4872 return -TARGET_EFAULT
;
4875 ret
= get_errno(shmctl(shmid
, cmd
, (struct shmid_ds
*)&shm_info
));
4876 if (host_to_target_shm_info(buf
, &shm_info
))
4877 return -TARGET_EFAULT
;
4882 ret
= get_errno(shmctl(shmid
, cmd
, NULL
));
4889 #ifndef TARGET_FORCE_SHMLBA
4890 /* For most architectures, SHMLBA is the same as the page size;
4891 * some architectures have larger values, in which case they should
4892 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4893 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4894 * and defining its own value for SHMLBA.
4896 * The kernel also permits SHMLBA to be set by the architecture to a
4897 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4898 * this means that addresses are rounded to the large size if
4899 * SHM_RND is set but addresses not aligned to that size are not rejected
4900 * as long as they are at least page-aligned. Since the only architecture
4901 * which uses this is ia64 this code doesn't provide for that oddity.
4903 static inline abi_ulong
target_shmlba(CPUArchState
*cpu_env
)
4905 return TARGET_PAGE_SIZE
;
4909 static inline abi_ulong
do_shmat(CPUArchState
*cpu_env
,
4910 int shmid
, abi_ulong shmaddr
, int shmflg
)
4914 struct shmid_ds shm_info
;
4918 /* find out the length of the shared memory segment */
4919 ret
= get_errno(shmctl(shmid
, IPC_STAT
, &shm_info
));
4920 if (is_error(ret
)) {
4921 /* can't get length, bail out */
4925 shmlba
= target_shmlba(cpu_env
);
4927 if (shmaddr
& (shmlba
- 1)) {
4928 if (shmflg
& SHM_RND
) {
4929 shmaddr
&= ~(shmlba
- 1);
4931 return -TARGET_EINVAL
;
4934 if (!guest_range_valid(shmaddr
, shm_info
.shm_segsz
)) {
4935 return -TARGET_EINVAL
;
4941 host_raddr
= shmat(shmid
, (void *)g2h(shmaddr
), shmflg
);
4943 abi_ulong mmap_start
;
4945 mmap_start
= mmap_find_vma(0, shm_info
.shm_segsz
);
4947 if (mmap_start
== -1) {
4949 host_raddr
= (void *)-1;
4951 host_raddr
= shmat(shmid
, g2h(mmap_start
), shmflg
| SHM_REMAP
);
4954 if (host_raddr
== (void *)-1) {
4956 return get_errno((long)host_raddr
);
4958 raddr
=h2g((unsigned long)host_raddr
);
4960 page_set_flags(raddr
, raddr
+ shm_info
.shm_segsz
,
4961 PAGE_VALID
| PAGE_READ
|
4962 ((shmflg
& SHM_RDONLY
)? 0 : PAGE_WRITE
));
4964 for (i
= 0; i
< N_SHM_REGIONS
; i
++) {
4965 if (!shm_regions
[i
].in_use
) {
4966 shm_regions
[i
].in_use
= true;
4967 shm_regions
[i
].start
= raddr
;
4968 shm_regions
[i
].size
= shm_info
.shm_segsz
;
4978 static inline abi_long
do_shmdt(abi_ulong shmaddr
)
4985 for (i
= 0; i
< N_SHM_REGIONS
; ++i
) {
4986 if (shm_regions
[i
].in_use
&& shm_regions
[i
].start
== shmaddr
) {
4987 shm_regions
[i
].in_use
= false;
4988 page_set_flags(shmaddr
, shmaddr
+ shm_regions
[i
].size
, 0);
4992 rv
= get_errno(shmdt(g2h(shmaddr
)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old-ABI callers pass msgp/msgtyp indirectly. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr)) {
                return get_errno(raddr);
            }
            if (put_user_ual(raddr, third)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
5106 /* kernel structure types definitions */
5108 #define STRUCT(name, ...) STRUCT_ ## name,
5109 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
5111 #include "syscall_types.h"
5115 #undef STRUCT_SPECIAL
5117 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
5118 #define STRUCT_SPECIAL(name)
5119 #include "syscall_types.h"
5121 #undef STRUCT_SPECIAL
5123 typedef struct IOCTLEntry IOCTLEntry
;
5125 typedef abi_long
do_ioctl_fn(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5126 int fd
, int cmd
, abi_long arg
);
5130 unsigned int host_cmd
;
5133 do_ioctl_fn
*do_ioctl
;
5134 const argtype arg_type
[5];
5137 #define IOC_R 0x0001
5138 #define IOC_W 0x0002
5139 #define IOC_RW (IOC_R | IOC_W)
5141 #define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
                            / sizeof(struct fiemap_extent))

/*
 * FS_IOC_FIEMAP handler.  The parameter for this ioctl is a struct fiemap
 * followed by an array of struct fiemap_extent whose size is set in
 * fiemap->fm_extent_count.  The array is filled in by the ioctl, so we
 * convert the header in, size an output buffer large enough for the
 * extents, and convert header + extents back out to the guest.
 */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof(*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
5232 static abi_long
do_ioctl_ifconf(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5233 int fd
, int cmd
, abi_long arg
)
5235 const argtype
*arg_type
= ie
->arg_type
;
5239 struct ifconf
*host_ifconf
;
5241 const argtype ifreq_arg_type
[] = { MK_STRUCT(STRUCT_sockaddr_ifreq
) };
5242 int target_ifreq_size
;
5247 abi_long target_ifc_buf
;
5251 assert(arg_type
[0] == TYPE_PTR
);
5252 assert(ie
->access
== IOC_RW
);
5255 target_size
= thunk_type_size(arg_type
, 0);
5257 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5259 return -TARGET_EFAULT
;
5260 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5261 unlock_user(argptr
, arg
, 0);
5263 host_ifconf
= (struct ifconf
*)(unsigned long)buf_temp
;
5264 target_ifc_len
= host_ifconf
->ifc_len
;
5265 target_ifc_buf
= (abi_long
)(unsigned long)host_ifconf
->ifc_buf
;
5267 target_ifreq_size
= thunk_type_size(ifreq_arg_type
, 0);
5268 nb_ifreq
= target_ifc_len
/ target_ifreq_size
;
5269 host_ifc_len
= nb_ifreq
* sizeof(struct ifreq
);
5271 outbufsz
= sizeof(*host_ifconf
) + host_ifc_len
;
5272 if (outbufsz
> MAX_STRUCT_SIZE
) {
5273 /* We can't fit all the extents into the fixed size buffer.
5274 * Allocate one that is large enough and use it instead.
5276 host_ifconf
= malloc(outbufsz
);
5278 return -TARGET_ENOMEM
;
5280 memcpy(host_ifconf
, buf_temp
, sizeof(*host_ifconf
));
5283 host_ifc_buf
= (char*)host_ifconf
+ sizeof(*host_ifconf
);
5285 host_ifconf
->ifc_len
= host_ifc_len
;
5286 host_ifconf
->ifc_buf
= host_ifc_buf
;
5288 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_ifconf
));
5289 if (!is_error(ret
)) {
5290 /* convert host ifc_len to target ifc_len */
5292 nb_ifreq
= host_ifconf
->ifc_len
/ sizeof(struct ifreq
);
5293 target_ifc_len
= nb_ifreq
* target_ifreq_size
;
5294 host_ifconf
->ifc_len
= target_ifc_len
;
5296 /* restore target ifc_buf */
5298 host_ifconf
->ifc_buf
= (char *)(unsigned long)target_ifc_buf
;
5300 /* copy struct ifconf to target user */
5302 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5304 return -TARGET_EFAULT
;
5305 thunk_convert(argptr
, host_ifconf
, arg_type
, THUNK_TARGET
);
5306 unlock_user(argptr
, arg
, target_size
);
5308 /* copy ifreq[] to target user */
5310 argptr
= lock_user(VERIFY_WRITE
, target_ifc_buf
, target_ifc_len
, 0);
5311 for (i
= 0; i
< nb_ifreq
; i
++) {
5312 thunk_convert(argptr
+ i
* target_ifreq_size
,
5313 host_ifc_buf
+ i
* sizeof(struct ifreq
),
5314 ifreq_arg_type
, THUNK_TARGET
);
5316 unlock_user(argptr
, target_ifc_buf
, target_ifc_len
);
5326 static abi_long
do_ioctl_dm(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5327 int cmd
, abi_long arg
)
5330 struct dm_ioctl
*host_dm
;
5331 abi_long guest_data
;
5332 uint32_t guest_data_size
;
5334 const argtype
*arg_type
= ie
->arg_type
;
5336 void *big_buf
= NULL
;
5340 target_size
= thunk_type_size(arg_type
, 0);
5341 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5343 ret
= -TARGET_EFAULT
;
5346 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5347 unlock_user(argptr
, arg
, 0);
5349 /* buf_temp is too small, so fetch things into a bigger buffer */
5350 big_buf
= g_malloc0(((struct dm_ioctl
*)buf_temp
)->data_size
* 2);
5351 memcpy(big_buf
, buf_temp
, target_size
);
5355 guest_data
= arg
+ host_dm
->data_start
;
5356 if ((guest_data
- arg
) < 0) {
5357 ret
= -TARGET_EINVAL
;
5360 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5361 host_data
= (char*)host_dm
+ host_dm
->data_start
;
5363 argptr
= lock_user(VERIFY_READ
, guest_data
, guest_data_size
, 1);
5365 ret
= -TARGET_EFAULT
;
5369 switch (ie
->host_cmd
) {
5371 case DM_LIST_DEVICES
:
5374 case DM_DEV_SUSPEND
:
5377 case DM_TABLE_STATUS
:
5378 case DM_TABLE_CLEAR
:
5380 case DM_LIST_VERSIONS
:
5384 case DM_DEV_SET_GEOMETRY
:
5385 /* data contains only strings */
5386 memcpy(host_data
, argptr
, guest_data_size
);
5389 memcpy(host_data
, argptr
, guest_data_size
);
5390 *(uint64_t*)host_data
= tswap64(*(uint64_t*)argptr
);
5394 void *gspec
= argptr
;
5395 void *cur_data
= host_data
;
5396 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5397 int spec_size
= thunk_type_size(arg_type
, 0);
5400 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5401 struct dm_target_spec
*spec
= cur_data
;
5405 thunk_convert(spec
, gspec
, arg_type
, THUNK_HOST
);
5406 slen
= strlen((char*)gspec
+ spec_size
) + 1;
5408 spec
->next
= sizeof(*spec
) + slen
;
5409 strcpy((char*)&spec
[1], gspec
+ spec_size
);
5411 cur_data
+= spec
->next
;
5416 ret
= -TARGET_EINVAL
;
5417 unlock_user(argptr
, guest_data
, 0);
5420 unlock_user(argptr
, guest_data
, 0);
5422 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5423 if (!is_error(ret
)) {
5424 guest_data
= arg
+ host_dm
->data_start
;
5425 guest_data_size
= host_dm
->data_size
- host_dm
->data_start
;
5426 argptr
= lock_user(VERIFY_WRITE
, guest_data
, guest_data_size
, 0);
5427 switch (ie
->host_cmd
) {
5432 case DM_DEV_SUSPEND
:
5435 case DM_TABLE_CLEAR
:
5437 case DM_DEV_SET_GEOMETRY
:
5438 /* no return data */
5440 case DM_LIST_DEVICES
:
5442 struct dm_name_list
*nl
= (void*)host_dm
+ host_dm
->data_start
;
5443 uint32_t remaining_data
= guest_data_size
;
5444 void *cur_data
= argptr
;
5445 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_name_list
) };
5446 int nl_size
= 12; /* can't use thunk_size due to alignment */
5449 uint32_t next
= nl
->next
;
5451 nl
->next
= nl_size
+ (strlen(nl
->name
) + 1);
5453 if (remaining_data
< nl
->next
) {
5454 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5457 thunk_convert(cur_data
, nl
, arg_type
, THUNK_TARGET
);
5458 strcpy(cur_data
+ nl_size
, nl
->name
);
5459 cur_data
+= nl
->next
;
5460 remaining_data
-= nl
->next
;
5464 nl
= (void*)nl
+ next
;
5469 case DM_TABLE_STATUS
:
5471 struct dm_target_spec
*spec
= (void*)host_dm
+ host_dm
->data_start
;
5472 void *cur_data
= argptr
;
5473 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_spec
) };
5474 int spec_size
= thunk_type_size(arg_type
, 0);
5477 for (i
= 0; i
< host_dm
->target_count
; i
++) {
5478 uint32_t next
= spec
->next
;
5479 int slen
= strlen((char*)&spec
[1]) + 1;
5480 spec
->next
= (cur_data
- argptr
) + spec_size
+ slen
;
5481 if (guest_data_size
< spec
->next
) {
5482 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5485 thunk_convert(cur_data
, spec
, arg_type
, THUNK_TARGET
);
5486 strcpy(cur_data
+ spec_size
, (char*)&spec
[1]);
5487 cur_data
= argptr
+ spec
->next
;
5488 spec
= (void*)host_dm
+ host_dm
->data_start
+ next
;
5494 void *hdata
= (void*)host_dm
+ host_dm
->data_start
;
5495 int count
= *(uint32_t*)hdata
;
5496 uint64_t *hdev
= hdata
+ 8;
5497 uint64_t *gdev
= argptr
+ 8;
5500 *(uint32_t*)argptr
= tswap32(count
);
5501 for (i
= 0; i
< count
; i
++) {
5502 *gdev
= tswap64(*hdev
);
5508 case DM_LIST_VERSIONS
:
5510 struct dm_target_versions
*vers
= (void*)host_dm
+ host_dm
->data_start
;
5511 uint32_t remaining_data
= guest_data_size
;
5512 void *cur_data
= argptr
;
5513 const argtype arg_type
[] = { MK_STRUCT(STRUCT_dm_target_versions
) };
5514 int vers_size
= thunk_type_size(arg_type
, 0);
5517 uint32_t next
= vers
->next
;
5519 vers
->next
= vers_size
+ (strlen(vers
->name
) + 1);
5521 if (remaining_data
< vers
->next
) {
5522 host_dm
->flags
|= DM_BUFFER_FULL_FLAG
;
5525 thunk_convert(cur_data
, vers
, arg_type
, THUNK_TARGET
);
5526 strcpy(cur_data
+ vers_size
, vers
->name
);
5527 cur_data
+= vers
->next
;
5528 remaining_data
-= vers
->next
;
5532 vers
= (void*)vers
+ next
;
5537 unlock_user(argptr
, guest_data
, 0);
5538 ret
= -TARGET_EINVAL
;
5541 unlock_user(argptr
, guest_data
, guest_data_size
);
5543 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5545 ret
= -TARGET_EFAULT
;
5548 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5549 unlock_user(argptr
, arg
, target_size
);
5556 static abi_long
do_ioctl_blkpg(const IOCTLEntry
*ie
, uint8_t *buf_temp
, int fd
,
5557 int cmd
, abi_long arg
)
5561 const argtype
*arg_type
= ie
->arg_type
;
5562 const argtype part_arg_type
[] = { MK_STRUCT(STRUCT_blkpg_partition
) };
5565 struct blkpg_ioctl_arg
*host_blkpg
= (void*)buf_temp
;
5566 struct blkpg_partition host_part
;
5568 /* Read and convert blkpg */
5570 target_size
= thunk_type_size(arg_type
, 0);
5571 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5573 ret
= -TARGET_EFAULT
;
5576 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5577 unlock_user(argptr
, arg
, 0);
5579 switch (host_blkpg
->op
) {
5580 case BLKPG_ADD_PARTITION
:
5581 case BLKPG_DEL_PARTITION
:
5582 /* payload is struct blkpg_partition */
5585 /* Unknown opcode */
5586 ret
= -TARGET_EINVAL
;
5590 /* Read and convert blkpg->data */
5591 arg
= (abi_long
)(uintptr_t)host_blkpg
->data
;
5592 target_size
= thunk_type_size(part_arg_type
, 0);
5593 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5595 ret
= -TARGET_EFAULT
;
5598 thunk_convert(&host_part
, argptr
, part_arg_type
, THUNK_HOST
);
5599 unlock_user(argptr
, arg
, 0);
5601 /* Swizzle the data pointer to our local copy and call! */
5602 host_blkpg
->data
= &host_part
;
5603 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, host_blkpg
));
5609 static abi_long
do_ioctl_rt(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5610 int fd
, int cmd
, abi_long arg
)
5612 const argtype
*arg_type
= ie
->arg_type
;
5613 const StructEntry
*se
;
5614 const argtype
*field_types
;
5615 const int *dst_offsets
, *src_offsets
;
5618 abi_ulong
*target_rt_dev_ptr
;
5619 unsigned long *host_rt_dev_ptr
;
5623 assert(ie
->access
== IOC_W
);
5624 assert(*arg_type
== TYPE_PTR
);
5626 assert(*arg_type
== TYPE_STRUCT
);
5627 target_size
= thunk_type_size(arg_type
, 0);
5628 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5630 return -TARGET_EFAULT
;
5633 assert(*arg_type
== (int)STRUCT_rtentry
);
5634 se
= struct_entries
+ *arg_type
++;
5635 assert(se
->convert
[0] == NULL
);
5636 /* convert struct here to be able to catch rt_dev string */
5637 field_types
= se
->field_types
;
5638 dst_offsets
= se
->field_offsets
[THUNK_HOST
];
5639 src_offsets
= se
->field_offsets
[THUNK_TARGET
];
5640 for (i
= 0; i
< se
->nb_fields
; i
++) {
5641 if (dst_offsets
[i
] == offsetof(struct rtentry
, rt_dev
)) {
5642 assert(*field_types
== TYPE_PTRVOID
);
5643 target_rt_dev_ptr
= (abi_ulong
*)(argptr
+ src_offsets
[i
]);
5644 host_rt_dev_ptr
= (unsigned long *)(buf_temp
+ dst_offsets
[i
]);
5645 if (*target_rt_dev_ptr
!= 0) {
5646 *host_rt_dev_ptr
= (unsigned long)lock_user_string(
5647 tswapal(*target_rt_dev_ptr
));
5648 if (!*host_rt_dev_ptr
) {
5649 unlock_user(argptr
, arg
, 0);
5650 return -TARGET_EFAULT
;
5653 *host_rt_dev_ptr
= 0;
5658 field_types
= thunk_convert(buf_temp
+ dst_offsets
[i
],
5659 argptr
+ src_offsets
[i
],
5660 field_types
, THUNK_HOST
);
5662 unlock_user(argptr
, arg
, 0);
5664 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5665 if (*host_rt_dev_ptr
!= 0) {
5666 unlock_user((void *)*host_rt_dev_ptr
,
5667 *target_rt_dev_ptr
, 0);
5672 static abi_long
do_ioctl_kdsigaccept(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5673 int fd
, int cmd
, abi_long arg
)
5675 int sig
= target_to_host_signal(arg
);
5676 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, sig
));
5680 static abi_long
do_ioctl_tiocgptpeer(const IOCTLEntry
*ie
, uint8_t *buf_temp
,
5681 int fd
, int cmd
, abi_long arg
)
5683 int flags
= target_to_host_bitmask(arg
, fcntl_flags_tbl
);
5684 return get_errno(safe_ioctl(fd
, ie
->host_cmd
, flags
));
5688 static IOCTLEntry ioctl_entries
[] = {
5689 #define IOCTL(cmd, access, ...) \
5690 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5691 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5692 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5693 #define IOCTL_IGNORE(cmd) \
5694 { TARGET_ ## cmd, 0, #cmd },
5699 /* ??? Implement proper locking for ioctls. */
5700 /* do_ioctl() Must return target values and target errnos. */
5701 static abi_long
do_ioctl(int fd
, int cmd
, abi_long arg
)
5703 const IOCTLEntry
*ie
;
5704 const argtype
*arg_type
;
5706 uint8_t buf_temp
[MAX_STRUCT_SIZE
];
5712 if (ie
->target_cmd
== 0) {
5713 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd
);
5714 return -TARGET_ENOSYS
;
5716 if (ie
->target_cmd
== cmd
)
5720 arg_type
= ie
->arg_type
;
5722 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd
, ie
->name
);
5725 return ie
->do_ioctl(ie
, buf_temp
, fd
, cmd
, arg
);
5726 } else if (!ie
->host_cmd
) {
5727 /* Some architectures define BSD ioctls in their headers
5728 that are not implemented in Linux. */
5729 return -TARGET_ENOSYS
;
5732 switch(arg_type
[0]) {
5735 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
));
5739 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, arg
));
5743 target_size
= thunk_type_size(arg_type
, 0);
5744 switch(ie
->access
) {
5746 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5747 if (!is_error(ret
)) {
5748 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5750 return -TARGET_EFAULT
;
5751 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5752 unlock_user(argptr
, arg
, target_size
);
5756 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5758 return -TARGET_EFAULT
;
5759 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5760 unlock_user(argptr
, arg
, 0);
5761 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5765 argptr
= lock_user(VERIFY_READ
, arg
, target_size
, 1);
5767 return -TARGET_EFAULT
;
5768 thunk_convert(buf_temp
, argptr
, arg_type
, THUNK_HOST
);
5769 unlock_user(argptr
, arg
, 0);
5770 ret
= get_errno(safe_ioctl(fd
, ie
->host_cmd
, buf_temp
));
5771 if (!is_error(ret
)) {
5772 argptr
= lock_user(VERIFY_WRITE
, arg
, target_size
, 0);
5774 return -TARGET_EFAULT
;
5775 thunk_convert(argptr
, buf_temp
, arg_type
, THUNK_TARGET
);
5776 unlock_user(argptr
, arg
, target_size
);
5782 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5783 (long)cmd
, arg_type
[0]);
5784 ret
= -TARGET_ENOSYS
;
5790 static const bitmask_transtbl iflag_tbl
[] = {
5791 { TARGET_IGNBRK
, TARGET_IGNBRK
, IGNBRK
, IGNBRK
},
5792 { TARGET_BRKINT
, TARGET_BRKINT
, BRKINT
, BRKINT
},
5793 { TARGET_IGNPAR
, TARGET_IGNPAR
, IGNPAR
, IGNPAR
},
5794 { TARGET_PARMRK
, TARGET_PARMRK
, PARMRK
, PARMRK
},
5795 { TARGET_INPCK
, TARGET_INPCK
, INPCK
, INPCK
},
5796 { TARGET_ISTRIP
, TARGET_ISTRIP
, ISTRIP
, ISTRIP
},
5797 { TARGET_INLCR
, TARGET_INLCR
, INLCR
, INLCR
},
5798 { TARGET_IGNCR
, TARGET_IGNCR
, IGNCR
, IGNCR
},
5799 { TARGET_ICRNL
, TARGET_ICRNL
, ICRNL
, ICRNL
},
5800 { TARGET_IUCLC
, TARGET_IUCLC
, IUCLC
, IUCLC
},
5801 { TARGET_IXON
, TARGET_IXON
, IXON
, IXON
},
5802 { TARGET_IXANY
, TARGET_IXANY
, IXANY
, IXANY
},
5803 { TARGET_IXOFF
, TARGET_IXOFF
, IXOFF
, IXOFF
},
5804 { TARGET_IMAXBEL
, TARGET_IMAXBEL
, IMAXBEL
, IMAXBEL
},
5808 static const bitmask_transtbl oflag_tbl
[] = {
5809 { TARGET_OPOST
, TARGET_OPOST
, OPOST
, OPOST
},
5810 { TARGET_OLCUC
, TARGET_OLCUC
, OLCUC
, OLCUC
},
5811 { TARGET_ONLCR
, TARGET_ONLCR
, ONLCR
, ONLCR
},
5812 { TARGET_OCRNL
, TARGET_OCRNL
, OCRNL
, OCRNL
},
5813 { TARGET_ONOCR
, TARGET_ONOCR
, ONOCR
, ONOCR
},
5814 { TARGET_ONLRET
, TARGET_ONLRET
, ONLRET
, ONLRET
},
5815 { TARGET_OFILL
, TARGET_OFILL
, OFILL
, OFILL
},
5816 { TARGET_OFDEL
, TARGET_OFDEL
, OFDEL
, OFDEL
},
5817 { TARGET_NLDLY
, TARGET_NL0
, NLDLY
, NL0
},
5818 { TARGET_NLDLY
, TARGET_NL1
, NLDLY
, NL1
},
5819 { TARGET_CRDLY
, TARGET_CR0
, CRDLY
, CR0
},
5820 { TARGET_CRDLY
, TARGET_CR1
, CRDLY
, CR1
},
5821 { TARGET_CRDLY
, TARGET_CR2
, CRDLY
, CR2
},
5822 { TARGET_CRDLY
, TARGET_CR3
, CRDLY
, CR3
},
5823 { TARGET_TABDLY
, TARGET_TAB0
, TABDLY
, TAB0
},
5824 { TARGET_TABDLY
, TARGET_TAB1
, TABDLY
, TAB1
},
5825 { TARGET_TABDLY
, TARGET_TAB2
, TABDLY
, TAB2
},
5826 { TARGET_TABDLY
, TARGET_TAB3
, TABDLY
, TAB3
},
5827 { TARGET_BSDLY
, TARGET_BS0
, BSDLY
, BS0
},
5828 { TARGET_BSDLY
, TARGET_BS1
, BSDLY
, BS1
},
5829 { TARGET_VTDLY
, TARGET_VT0
, VTDLY
, VT0
},
5830 { TARGET_VTDLY
, TARGET_VT1
, VTDLY
, VT1
},
5831 { TARGET_FFDLY
, TARGET_FF0
, FFDLY
, FF0
},
5832 { TARGET_FFDLY
, TARGET_FF1
, FFDLY
, FF1
},
5836 static const bitmask_transtbl cflag_tbl
[] = {
5837 { TARGET_CBAUD
, TARGET_B0
, CBAUD
, B0
},
5838 { TARGET_CBAUD
, TARGET_B50
, CBAUD
, B50
},
5839 { TARGET_CBAUD
, TARGET_B75
, CBAUD
, B75
},
5840 { TARGET_CBAUD
, TARGET_B110
, CBAUD
, B110
},
5841 { TARGET_CBAUD
, TARGET_B134
, CBAUD
, B134
},
5842 { TARGET_CBAUD
, TARGET_B150
, CBAUD
, B150
},
5843 { TARGET_CBAUD
, TARGET_B200
, CBAUD
, B200
},
5844 { TARGET_CBAUD
, TARGET_B300
, CBAUD
, B300
},
5845 { TARGET_CBAUD
, TARGET_B600
, CBAUD
, B600
},
5846 { TARGET_CBAUD
, TARGET_B1200
, CBAUD
, B1200
},
5847 { TARGET_CBAUD
, TARGET_B1800
, CBAUD
, B1800
},
5848 { TARGET_CBAUD
, TARGET_B2400
, CBAUD
, B2400
},
5849 { TARGET_CBAUD
, TARGET_B4800
, CBAUD
, B4800
},
5850 { TARGET_CBAUD
, TARGET_B9600
, CBAUD
, B9600
},
5851 { TARGET_CBAUD
, TARGET_B19200
, CBAUD
, B19200
},
5852 { TARGET_CBAUD
, TARGET_B38400
, CBAUD
, B38400
},
5853 { TARGET_CBAUD
, TARGET_B57600
, CBAUD
, B57600
},
5854 { TARGET_CBAUD
, TARGET_B115200
, CBAUD
, B115200
},
5855 { TARGET_CBAUD
, TARGET_B230400
, CBAUD
, B230400
},
5856 { TARGET_CBAUD
, TARGET_B460800
, CBAUD
, B460800
},
5857 { TARGET_CSIZE
, TARGET_CS5
, CSIZE
, CS5
},
5858 { TARGET_CSIZE
, TARGET_CS6
, CSIZE
, CS6
},
5859 { TARGET_CSIZE
, TARGET_CS7
, CSIZE
, CS7
},
5860 { TARGET_CSIZE
, TARGET_CS8
, CSIZE
, CS8
},
5861 { TARGET_CSTOPB
, TARGET_CSTOPB
, CSTOPB
, CSTOPB
},
5862 { TARGET_CREAD
, TARGET_CREAD
, CREAD
, CREAD
},
5863 { TARGET_PARENB
, TARGET_PARENB
, PARENB
, PARENB
},
5864 { TARGET_PARODD
, TARGET_PARODD
, PARODD
, PARODD
},
5865 { TARGET_HUPCL
, TARGET_HUPCL
, HUPCL
, HUPCL
},
5866 { TARGET_CLOCAL
, TARGET_CLOCAL
, CLOCAL
, CLOCAL
},
5867 { TARGET_CRTSCTS
, TARGET_CRTSCTS
, CRTSCTS
, CRTSCTS
},
5871 static const bitmask_transtbl lflag_tbl
[] = {
5872 { TARGET_ISIG
, TARGET_ISIG
, ISIG
, ISIG
},
5873 { TARGET_ICANON
, TARGET_ICANON
, ICANON
, ICANON
},
5874 { TARGET_XCASE
, TARGET_XCASE
, XCASE
, XCASE
},
5875 { TARGET_ECHO
, TARGET_ECHO
, ECHO
, ECHO
},
5876 { TARGET_ECHOE
, TARGET_ECHOE
, ECHOE
, ECHOE
},
5877 { TARGET_ECHOK
, TARGET_ECHOK
, ECHOK
, ECHOK
},
5878 { TARGET_ECHONL
, TARGET_ECHONL
, ECHONL
, ECHONL
},
5879 { TARGET_NOFLSH
, TARGET_NOFLSH
, NOFLSH
, NOFLSH
},
5880 { TARGET_TOSTOP
, TARGET_TOSTOP
, TOSTOP
, TOSTOP
},
5881 { TARGET_ECHOCTL
, TARGET_ECHOCTL
, ECHOCTL
, ECHOCTL
},
5882 { TARGET_ECHOPRT
, TARGET_ECHOPRT
, ECHOPRT
, ECHOPRT
},
5883 { TARGET_ECHOKE
, TARGET_ECHOKE
, ECHOKE
, ECHOKE
},
5884 { TARGET_FLUSHO
, TARGET_FLUSHO
, FLUSHO
, FLUSHO
},
5885 { TARGET_PENDIN
, TARGET_PENDIN
, PENDIN
, PENDIN
},
5886 { TARGET_IEXTEN
, TARGET_IEXTEN
, IEXTEN
, IEXTEN
},
5890 static void target_to_host_termios (void *dst
, const void *src
)
5892 struct host_termios
*host
= dst
;
5893 const struct target_termios
*target
= src
;
5896 target_to_host_bitmask(tswap32(target
->c_iflag
), iflag_tbl
);
5898 target_to_host_bitmask(tswap32(target
->c_oflag
), oflag_tbl
);
5900 target_to_host_bitmask(tswap32(target
->c_cflag
), cflag_tbl
);
5902 target_to_host_bitmask(tswap32(target
->c_lflag
), lflag_tbl
);
5903 host
->c_line
= target
->c_line
;
5905 memset(host
->c_cc
, 0, sizeof(host
->c_cc
));
5906 host
->c_cc
[VINTR
] = target
->c_cc
[TARGET_VINTR
];
5907 host
->c_cc
[VQUIT
] = target
->c_cc
[TARGET_VQUIT
];
5908 host
->c_cc
[VERASE
] = target
->c_cc
[TARGET_VERASE
];
5909 host
->c_cc
[VKILL
] = target
->c_cc
[TARGET_VKILL
];
5910 host
->c_cc
[VEOF
] = target
->c_cc
[TARGET_VEOF
];
5911 host
->c_cc
[VTIME
] = target
->c_cc
[TARGET_VTIME
];
5912 host
->c_cc
[VMIN
] = target
->c_cc
[TARGET_VMIN
];
5913 host
->c_cc
[VSWTC
] = target
->c_cc
[TARGET_VSWTC
];
5914 host
->c_cc
[VSTART
] = target
->c_cc
[TARGET_VSTART
];
5915 host
->c_cc
[VSTOP
] = target
->c_cc
[TARGET_VSTOP
];
5916 host
->c_cc
[VSUSP
] = target
->c_cc
[TARGET_VSUSP
];
5917 host
->c_cc
[VEOL
] = target
->c_cc
[TARGET_VEOL
];
5918 host
->c_cc
[VREPRINT
] = target
->c_cc
[TARGET_VREPRINT
];
5919 host
->c_cc
[VDISCARD
] = target
->c_cc
[TARGET_VDISCARD
];
5920 host
->c_cc
[VWERASE
] = target
->c_cc
[TARGET_VWERASE
];
5921 host
->c_cc
[VLNEXT
] = target
->c_cc
[TARGET_VLNEXT
];
5922 host
->c_cc
[VEOL2
] = target
->c_cc
[TARGET_VEOL2
];
5925 static void host_to_target_termios (void *dst
, const void *src
)
5927 struct target_termios
*target
= dst
;
5928 const struct host_termios
*host
= src
;
5931 tswap32(host_to_target_bitmask(host
->c_iflag
, iflag_tbl
));
5933 tswap32(host_to_target_bitmask(host
->c_oflag
, oflag_tbl
));
5935 tswap32(host_to_target_bitmask(host
->c_cflag
, cflag_tbl
));
5937 tswap32(host_to_target_bitmask(host
->c_lflag
, lflag_tbl
));
5938 target
->c_line
= host
->c_line
;
5940 memset(target
->c_cc
, 0, sizeof(target
->c_cc
));
5941 target
->c_cc
[TARGET_VINTR
] = host
->c_cc
[VINTR
];
5942 target
->c_cc
[TARGET_VQUIT
] = host
->c_cc
[VQUIT
];
5943 target
->c_cc
[TARGET_VERASE
] = host
->c_cc
[VERASE
];
5944 target
->c_cc
[TARGET_VKILL
] = host
->c_cc
[VKILL
];
5945 target
->c_cc
[TARGET_VEOF
] = host
->c_cc
[VEOF
];
5946 target
->c_cc
[TARGET_VTIME
] = host
->c_cc
[VTIME
];
5947 target
->c_cc
[TARGET_VMIN
] = host
->c_cc
[VMIN
];
5948 target
->c_cc
[TARGET_VSWTC
] = host
->c_cc
[VSWTC
];
5949 target
->c_cc
[TARGET_VSTART
] = host
->c_cc
[VSTART
];
5950 target
->c_cc
[TARGET_VSTOP
] = host
->c_cc
[VSTOP
];
5951 target
->c_cc
[TARGET_VSUSP
] = host
->c_cc
[VSUSP
];
5952 target
->c_cc
[TARGET_VEOL
] = host
->c_cc
[VEOL
];
5953 target
->c_cc
[TARGET_VREPRINT
] = host
->c_cc
[VREPRINT
];
5954 target
->c_cc
[TARGET_VDISCARD
] = host
->c_cc
[VDISCARD
];
5955 target
->c_cc
[TARGET_VWERASE
] = host
->c_cc
[VWERASE
];
5956 target
->c_cc
[TARGET_VLNEXT
] = host
->c_cc
[VLNEXT
];
5957 target
->c_cc
[TARGET_VEOL2
] = host
->c_cc
[VEOL2
];
5960 static const StructEntry struct_termios_def
= {
5961 .convert
= { host_to_target_termios
, target_to_host_termios
},
5962 .size
= { sizeof(struct target_termios
), sizeof(struct host_termios
) },
5963 .align
= { __alignof__(struct target_termios
), __alignof__(struct host_termios
) },
5966 static bitmask_transtbl mmap_flags_tbl
[] = {
5967 { TARGET_MAP_SHARED
, TARGET_MAP_SHARED
, MAP_SHARED
, MAP_SHARED
},
5968 { TARGET_MAP_PRIVATE
, TARGET_MAP_PRIVATE
, MAP_PRIVATE
, MAP_PRIVATE
},
5969 { TARGET_MAP_FIXED
, TARGET_MAP_FIXED
, MAP_FIXED
, MAP_FIXED
},
5970 { TARGET_MAP_ANONYMOUS
, TARGET_MAP_ANONYMOUS
,
5971 MAP_ANONYMOUS
, MAP_ANONYMOUS
},
5972 { TARGET_MAP_GROWSDOWN
, TARGET_MAP_GROWSDOWN
,
5973 MAP_GROWSDOWN
, MAP_GROWSDOWN
},
5974 { TARGET_MAP_DENYWRITE
, TARGET_MAP_DENYWRITE
,
5975 MAP_DENYWRITE
, MAP_DENYWRITE
},
5976 { TARGET_MAP_EXECUTABLE
, TARGET_MAP_EXECUTABLE
,
5977 MAP_EXECUTABLE
, MAP_EXECUTABLE
},
5978 { TARGET_MAP_LOCKED
, TARGET_MAP_LOCKED
, MAP_LOCKED
, MAP_LOCKED
},
5979 { TARGET_MAP_NORESERVE
, TARGET_MAP_NORESERVE
,
5980 MAP_NORESERVE
, MAP_NORESERVE
},
5981 { TARGET_MAP_HUGETLB
, TARGET_MAP_HUGETLB
, MAP_HUGETLB
, MAP_HUGETLB
},
5982 /* MAP_STACK had been ignored by the kernel for quite some time.
5983 Recognize it for the target insofar as we do not want to pass
5984 it through to the host. */
5985 { TARGET_MAP_STACK
, TARGET_MAP_STACK
, 0, 0 },
5989 #if defined(TARGET_I386)
5991 /* NOTE: there is really one LDT for all the threads */
5992 static uint8_t *ldt_table
;
5994 static abi_long
read_ldt(abi_ulong ptr
, unsigned long bytecount
)
6001 size
= TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
;
6002 if (size
> bytecount
)
6004 p
= lock_user(VERIFY_WRITE
, ptr
, size
, 0);
6006 return -TARGET_EFAULT
;
6007 /* ??? Should this by byteswapped? */
6008 memcpy(p
, ldt_table
, size
);
6009 unlock_user(p
, ptr
, size
);
6013 /* XXX: add locking support */
6014 static abi_long
write_ldt(CPUX86State
*env
,
6015 abi_ulong ptr
, unsigned long bytecount
, int oldmode
)
6017 struct target_modify_ldt_ldt_s ldt_info
;
6018 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6019 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
6020 int seg_not_present
, useable
, lm
;
6021 uint32_t *lp
, entry_1
, entry_2
;
6023 if (bytecount
!= sizeof(ldt_info
))
6024 return -TARGET_EINVAL
;
6025 if (!lock_user_struct(VERIFY_READ
, target_ldt_info
, ptr
, 1))
6026 return -TARGET_EFAULT
;
6027 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
6028 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
6029 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
6030 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
6031 unlock_user_struct(target_ldt_info
, ptr
, 0);
6033 if (ldt_info
.entry_number
>= TARGET_LDT_ENTRIES
)
6034 return -TARGET_EINVAL
;
6035 seg_32bit
= ldt_info
.flags
& 1;
6036 contents
= (ldt_info
.flags
>> 1) & 3;
6037 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
6038 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
6039 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
6040 useable
= (ldt_info
.flags
>> 6) & 1;
6044 lm
= (ldt_info
.flags
>> 7) & 1;
6046 if (contents
== 3) {
6048 return -TARGET_EINVAL
;
6049 if (seg_not_present
== 0)
6050 return -TARGET_EINVAL
;
6052 /* allocate the LDT */
6054 env
->ldt
.base
= target_mmap(0,
6055 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
,
6056 PROT_READ
|PROT_WRITE
,
6057 MAP_ANONYMOUS
|MAP_PRIVATE
, -1, 0);
6058 if (env
->ldt
.base
== -1)
6059 return -TARGET_ENOMEM
;
6060 memset(g2h(env
->ldt
.base
), 0,
6061 TARGET_LDT_ENTRIES
* TARGET_LDT_ENTRY_SIZE
);
6062 env
->ldt
.limit
= 0xffff;
6063 ldt_table
= g2h(env
->ldt
.base
);
6066 /* NOTE: same code as Linux kernel */
6067 /* Allow LDTs to be cleared by the user. */
6068 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
6071 read_exec_only
== 1 &&
6073 limit_in_pages
== 0 &&
6074 seg_not_present
== 1 &&
6082 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
6083 (ldt_info
.limit
& 0x0ffff);
6084 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
6085 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
6086 (ldt_info
.limit
& 0xf0000) |
6087 ((read_exec_only
^ 1) << 9) |
6089 ((seg_not_present
^ 1) << 15) |
6091 (limit_in_pages
<< 23) |
6095 entry_2
|= (useable
<< 20);
6097 /* Install the new entry ... */
6099 lp
= (uint32_t *)(ldt_table
+ (ldt_info
.entry_number
<< 3));
6100 lp
[0] = tswap32(entry_1
);
6101 lp
[1] = tswap32(entry_2
);
6105 /* specific and weird i386 syscalls */
6106 static abi_long
do_modify_ldt(CPUX86State
*env
, int func
, abi_ulong ptr
,
6107 unsigned long bytecount
)
6113 ret
= read_ldt(ptr
, bytecount
);
6116 ret
= write_ldt(env
, ptr
, bytecount
, 1);
6119 ret
= write_ldt(env
, ptr
, bytecount
, 0);
6122 ret
= -TARGET_ENOSYS
;
6128 #if defined(TARGET_I386) && defined(TARGET_ABI32)
6129 abi_long
do_set_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6131 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
6132 struct target_modify_ldt_ldt_s ldt_info
;
6133 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6134 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
;
6135 int seg_not_present
, useable
, lm
;
6136 uint32_t *lp
, entry_1
, entry_2
;
6139 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6140 if (!target_ldt_info
)
6141 return -TARGET_EFAULT
;
6142 ldt_info
.entry_number
= tswap32(target_ldt_info
->entry_number
);
6143 ldt_info
.base_addr
= tswapal(target_ldt_info
->base_addr
);
6144 ldt_info
.limit
= tswap32(target_ldt_info
->limit
);
6145 ldt_info
.flags
= tswap32(target_ldt_info
->flags
);
6146 if (ldt_info
.entry_number
== -1) {
6147 for (i
=TARGET_GDT_ENTRY_TLS_MIN
; i
<=TARGET_GDT_ENTRY_TLS_MAX
; i
++) {
6148 if (gdt_table
[i
] == 0) {
6149 ldt_info
.entry_number
= i
;
6150 target_ldt_info
->entry_number
= tswap32(i
);
6155 unlock_user_struct(target_ldt_info
, ptr
, 1);
6157 if (ldt_info
.entry_number
< TARGET_GDT_ENTRY_TLS_MIN
||
6158 ldt_info
.entry_number
> TARGET_GDT_ENTRY_TLS_MAX
)
6159 return -TARGET_EINVAL
;
6160 seg_32bit
= ldt_info
.flags
& 1;
6161 contents
= (ldt_info
.flags
>> 1) & 3;
6162 read_exec_only
= (ldt_info
.flags
>> 3) & 1;
6163 limit_in_pages
= (ldt_info
.flags
>> 4) & 1;
6164 seg_not_present
= (ldt_info
.flags
>> 5) & 1;
6165 useable
= (ldt_info
.flags
>> 6) & 1;
6169 lm
= (ldt_info
.flags
>> 7) & 1;
6172 if (contents
== 3) {
6173 if (seg_not_present
== 0)
6174 return -TARGET_EINVAL
;
6177 /* NOTE: same code as Linux kernel */
6178 /* Allow LDTs to be cleared by the user. */
6179 if (ldt_info
.base_addr
== 0 && ldt_info
.limit
== 0) {
6180 if ((contents
== 0 &&
6181 read_exec_only
== 1 &&
6183 limit_in_pages
== 0 &&
6184 seg_not_present
== 1 &&
6192 entry_1
= ((ldt_info
.base_addr
& 0x0000ffff) << 16) |
6193 (ldt_info
.limit
& 0x0ffff);
6194 entry_2
= (ldt_info
.base_addr
& 0xff000000) |
6195 ((ldt_info
.base_addr
& 0x00ff0000) >> 16) |
6196 (ldt_info
.limit
& 0xf0000) |
6197 ((read_exec_only
^ 1) << 9) |
6199 ((seg_not_present
^ 1) << 15) |
6201 (limit_in_pages
<< 23) |
6206 /* Install the new entry ... */
6208 lp
= (uint32_t *)(gdt_table
+ ldt_info
.entry_number
);
6209 lp
[0] = tswap32(entry_1
);
6210 lp
[1] = tswap32(entry_2
);
6214 static abi_long
do_get_thread_area(CPUX86State
*env
, abi_ulong ptr
)
6216 struct target_modify_ldt_ldt_s
*target_ldt_info
;
6217 uint64_t *gdt_table
= g2h(env
->gdt
.base
);
6218 uint32_t base_addr
, limit
, flags
;
6219 int seg_32bit
, contents
, read_exec_only
, limit_in_pages
, idx
;
6220 int seg_not_present
, useable
, lm
;
6221 uint32_t *lp
, entry_1
, entry_2
;
6223 lock_user_struct(VERIFY_WRITE
, target_ldt_info
, ptr
, 1);
6224 if (!target_ldt_info
)
6225 return -TARGET_EFAULT
;
6226 idx
= tswap32(target_ldt_info
->entry_number
);
6227 if (idx
< TARGET_GDT_ENTRY_TLS_MIN
||
6228 idx
> TARGET_GDT_ENTRY_TLS_MAX
) {
6229 unlock_user_struct(target_ldt_info
, ptr
, 1);
6230 return -TARGET_EINVAL
;
6232 lp
= (uint32_t *)(gdt_table
+ idx
);
6233 entry_1
= tswap32(lp
[0]);
6234 entry_2
= tswap32(lp
[1]);
6236 read_exec_only
= ((entry_2
>> 9) & 1) ^ 1;
6237 contents
= (entry_2
>> 10) & 3;
6238 seg_not_present
= ((entry_2
>> 15) & 1) ^ 1;
6239 seg_32bit
= (entry_2
>> 22) & 1;
6240 limit_in_pages
= (entry_2
>> 23) & 1;
6241 useable
= (entry_2
>> 20) & 1;
6245 lm
= (entry_2
>> 21) & 1;
6247 flags
= (seg_32bit
<< 0) | (contents
<< 1) |
6248 (read_exec_only
<< 3) | (limit_in_pages
<< 4) |
6249 (seg_not_present
<< 5) | (useable
<< 6) | (lm
<< 7);
6250 limit
= (entry_1
& 0xffff) | (entry_2
& 0xf0000);
6251 base_addr
= (entry_1
>> 16) |
6252 (entry_2
& 0xff000000) |
6253 ((entry_2
& 0xff) << 16);
6254 target_ldt_info
->base_addr
= tswapal(base_addr
);
6255 target_ldt_info
->limit
= tswap32(limit
);
6256 target_ldt_info
->flags
= tswap32(flags
);
6257 unlock_user_struct(target_ldt_info
, ptr
, 1);
6260 #endif /* TARGET_I386 && TARGET_ABI32 */
6262 #ifndef TARGET_ABI32
6263 abi_long
do_arch_prctl(CPUX86State
*env
, int code
, abi_ulong addr
)
6270 case TARGET_ARCH_SET_GS
:
6271 case TARGET_ARCH_SET_FS
:
6272 if (code
== TARGET_ARCH_SET_GS
)
6276 cpu_x86_load_seg(env
, idx
, 0);
6277 env
->segs
[idx
].base
= addr
;
6279 case TARGET_ARCH_GET_GS
:
6280 case TARGET_ARCH_GET_FS
:
6281 if (code
== TARGET_ARCH_GET_GS
)
6285 val
= env
->segs
[idx
].base
;
6286 if (put_user(val
, addr
, abi_ulong
))
6287 ret
= -TARGET_EFAULT
;
6290 ret
= -TARGET_EINVAL
;
6297 #endif /* defined(TARGET_I386) */
6299 #define NEW_STACK_SIZE 0x40000
6302 static pthread_mutex_t clone_lock
= PTHREAD_MUTEX_INITIALIZER
;
6305 pthread_mutex_t mutex
;
6306 pthread_cond_t cond
;
6309 abi_ulong child_tidptr
;
6310 abi_ulong parent_tidptr
;
6314 static void *clone_func(void *arg
)
6316 new_thread_info
*info
= arg
;
6321 rcu_register_thread();
6322 tcg_register_thread();
6324 cpu
= ENV_GET_CPU(env
);
6326 ts
= (TaskState
*)cpu
->opaque
;
6327 info
->tid
= gettid();
6329 if (info
->child_tidptr
)
6330 put_user_u32(info
->tid
, info
->child_tidptr
);
6331 if (info
->parent_tidptr
)
6332 put_user_u32(info
->tid
, info
->parent_tidptr
);
6333 /* Enable signals. */
6334 sigprocmask(SIG_SETMASK
, &info
->sigmask
, NULL
);
6335 /* Signal to the parent that we're ready. */
6336 pthread_mutex_lock(&info
->mutex
);
6337 pthread_cond_broadcast(&info
->cond
);
6338 pthread_mutex_unlock(&info
->mutex
);
6339 /* Wait until the parent has finished initializing the tls state. */
6340 pthread_mutex_lock(&clone_lock
);
6341 pthread_mutex_unlock(&clone_lock
);
6347 /* do_fork() Must return host values and target errnos (unlike most
6348 do_*() functions). */
6349 static int do_fork(CPUArchState
*env
, unsigned int flags
, abi_ulong newsp
,
6350 abi_ulong parent_tidptr
, target_ulong newtls
,
6351 abi_ulong child_tidptr
)
6353 CPUState
*cpu
= ENV_GET_CPU(env
);
6357 CPUArchState
*new_env
;
6360 flags
&= ~CLONE_IGNORED_FLAGS
;
6362 /* Emulate vfork() with fork() */
6363 if (flags
& CLONE_VFORK
)
6364 flags
&= ~(CLONE_VFORK
| CLONE_VM
);
6366 if (flags
& CLONE_VM
) {
6367 TaskState
*parent_ts
= (TaskState
*)cpu
->opaque
;
6368 new_thread_info info
;
6369 pthread_attr_t attr
;
6371 if (((flags
& CLONE_THREAD_FLAGS
) != CLONE_THREAD_FLAGS
) ||
6372 (flags
& CLONE_INVALID_THREAD_FLAGS
)) {
6373 return -TARGET_EINVAL
;
6376 ts
= g_new0(TaskState
, 1);
6377 init_task_state(ts
);
6379 /* Grab a mutex so that thread setup appears atomic. */
6380 pthread_mutex_lock(&clone_lock
);
6382 /* we create a new CPU instance. */
6383 new_env
= cpu_copy(env
);
6384 /* Init regs that differ from the parent. */
6385 cpu_clone_regs(new_env
, newsp
);
6386 new_cpu
= ENV_GET_CPU(new_env
);
6387 new_cpu
->opaque
= ts
;
6388 ts
->bprm
= parent_ts
->bprm
;
6389 ts
->info
= parent_ts
->info
;
6390 ts
->signal_mask
= parent_ts
->signal_mask
;
6392 if (flags
& CLONE_CHILD_CLEARTID
) {
6393 ts
->child_tidptr
= child_tidptr
;
6396 if (flags
& CLONE_SETTLS
) {
6397 cpu_set_tls (new_env
, newtls
);
6400 memset(&info
, 0, sizeof(info
));
6401 pthread_mutex_init(&info
.mutex
, NULL
);
6402 pthread_mutex_lock(&info
.mutex
);
6403 pthread_cond_init(&info
.cond
, NULL
);
6405 if (flags
& CLONE_CHILD_SETTID
) {
6406 info
.child_tidptr
= child_tidptr
;
6408 if (flags
& CLONE_PARENT_SETTID
) {
6409 info
.parent_tidptr
= parent_tidptr
;
6412 ret
= pthread_attr_init(&attr
);
6413 ret
= pthread_attr_setstacksize(&attr
, NEW_STACK_SIZE
);
6414 ret
= pthread_attr_setdetachstate(&attr
, PTHREAD_CREATE_DETACHED
);
6415 /* It is not safe to deliver signals until the child has finished
6416 initializing, so temporarily block all signals. */
6417 sigfillset(&sigmask
);
6418 sigprocmask(SIG_BLOCK
, &sigmask
, &info
.sigmask
);
6420 /* If this is our first additional thread, we need to ensure we
6421 * generate code for parallel execution and flush old translations.
6423 if (!parallel_cpus
) {
6424 parallel_cpus
= true;
6428 ret
= pthread_create(&info
.thread
, &attr
, clone_func
, &info
);
6429 /* TODO: Free new CPU state if thread creation failed. */
6431 sigprocmask(SIG_SETMASK
, &info
.sigmask
, NULL
);
6432 pthread_attr_destroy(&attr
);
6434 /* Wait for the child to initialize. */
6435 pthread_cond_wait(&info
.cond
, &info
.mutex
);
6440 pthread_mutex_unlock(&info
.mutex
);
6441 pthread_cond_destroy(&info
.cond
);
6442 pthread_mutex_destroy(&info
.mutex
);
6443 pthread_mutex_unlock(&clone_lock
);
6445 /* if no CLONE_VM, we consider it is a fork */
6446 if (flags
& CLONE_INVALID_FORK_FLAGS
) {
6447 return -TARGET_EINVAL
;
6450 /* We can't support custom termination signals */
6451 if ((flags
& CSIGNAL
) != TARGET_SIGCHLD
) {
6452 return -TARGET_EINVAL
;
6455 if (block_signals()) {
6456 return -TARGET_ERESTARTSYS
;
6462 /* Child Process. */
6463 cpu_clone_regs(env
, newsp
);
6465 /* There is a race condition here. The parent process could
6466 theoretically read the TID in the child process before the child
6467 tid is set. This would require using either ptrace
6468 (not implemented) or having *_tidptr to point at a shared memory
6469 mapping. We can't repeat the spinlock hack used above because
6470 the child process gets its own copy of the lock. */
6471 if (flags
& CLONE_CHILD_SETTID
)
6472 put_user_u32(gettid(), child_tidptr
);
6473 if (flags
& CLONE_PARENT_SETTID
)
6474 put_user_u32(gettid(), parent_tidptr
);
6475 ts
= (TaskState
*)cpu
->opaque
;
6476 if (flags
& CLONE_SETTLS
)
6477 cpu_set_tls (env
, newtls
);
6478 if (flags
& CLONE_CHILD_CLEARTID
)
6479 ts
->child_tidptr
= child_tidptr
;
6487 /* warning : doesn't handle linux specific flags... */
6488 static int target_to_host_fcntl_cmd(int cmd
)
6491 case TARGET_F_DUPFD
:
6492 case TARGET_F_GETFD
:
6493 case TARGET_F_SETFD
:
6494 case TARGET_F_GETFL
:
6495 case TARGET_F_SETFL
:
6497 case TARGET_F_GETLK
:
6499 case TARGET_F_SETLK
:
6501 case TARGET_F_SETLKW
:
6503 case TARGET_F_GETOWN
:
6505 case TARGET_F_SETOWN
:
6507 case TARGET_F_GETSIG
:
6509 case TARGET_F_SETSIG
:
6511 #if TARGET_ABI_BITS == 32
6512 case TARGET_F_GETLK64
:
6514 case TARGET_F_SETLK64
:
6516 case TARGET_F_SETLKW64
:
6519 case TARGET_F_SETLEASE
:
6521 case TARGET_F_GETLEASE
:
6523 #ifdef F_DUPFD_CLOEXEC
6524 case TARGET_F_DUPFD_CLOEXEC
:
6525 return F_DUPFD_CLOEXEC
;
6527 case TARGET_F_NOTIFY
:
6530 case TARGET_F_GETOWN_EX
:
6534 case TARGET_F_SETOWN_EX
:
6538 case TARGET_F_SETPIPE_SZ
:
6539 return F_SETPIPE_SZ
;
6540 case TARGET_F_GETPIPE_SZ
:
6541 return F_GETPIPE_SZ
;
6544 return -TARGET_EINVAL
;
6546 return -TARGET_EINVAL
;
6549 #define FLOCK_TRANSTBL \
6551 TRANSTBL_CONVERT(F_RDLCK); \
6552 TRANSTBL_CONVERT(F_WRLCK); \
6553 TRANSTBL_CONVERT(F_UNLCK); \
6554 TRANSTBL_CONVERT(F_EXLCK); \
6555 TRANSTBL_CONVERT(F_SHLCK); \
6558 static int target_to_host_flock(int type
)
6560 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6562 #undef TRANSTBL_CONVERT
6563 return -TARGET_EINVAL
;
6566 static int host_to_target_flock(int type
)
6568 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6570 #undef TRANSTBL_CONVERT
6571 /* if we don't know how to convert the value coming
6572 * from the host we copy to the target field as-is
6577 static inline abi_long
copy_from_user_flock(struct flock64
*fl
,
6578 abi_ulong target_flock_addr
)
6580 struct target_flock
*target_fl
;
6583 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6584 return -TARGET_EFAULT
;
6587 __get_user(l_type
, &target_fl
->l_type
);
6588 l_type
= target_to_host_flock(l_type
);
6592 fl
->l_type
= l_type
;
6593 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6594 __get_user(fl
->l_start
, &target_fl
->l_start
);
6595 __get_user(fl
->l_len
, &target_fl
->l_len
);
6596 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6597 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6601 static inline abi_long
copy_to_user_flock(abi_ulong target_flock_addr
,
6602 const struct flock64
*fl
)
6604 struct target_flock
*target_fl
;
6607 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6608 return -TARGET_EFAULT
;
6611 l_type
= host_to_target_flock(fl
->l_type
);
6612 __put_user(l_type
, &target_fl
->l_type
);
6613 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6614 __put_user(fl
->l_start
, &target_fl
->l_start
);
6615 __put_user(fl
->l_len
, &target_fl
->l_len
);
6616 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6617 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6621 typedef abi_long
from_flock64_fn(struct flock64
*fl
, abi_ulong target_addr
);
6622 typedef abi_long
to_flock64_fn(abi_ulong target_addr
, const struct flock64
*fl
);
6624 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6625 static inline abi_long
copy_from_user_oabi_flock64(struct flock64
*fl
,
6626 abi_ulong target_flock_addr
)
6628 struct target_oabi_flock64
*target_fl
;
6631 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6632 return -TARGET_EFAULT
;
6635 __get_user(l_type
, &target_fl
->l_type
);
6636 l_type
= target_to_host_flock(l_type
);
6640 fl
->l_type
= l_type
;
6641 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6642 __get_user(fl
->l_start
, &target_fl
->l_start
);
6643 __get_user(fl
->l_len
, &target_fl
->l_len
);
6644 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6645 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6649 static inline abi_long
copy_to_user_oabi_flock64(abi_ulong target_flock_addr
,
6650 const struct flock64
*fl
)
6652 struct target_oabi_flock64
*target_fl
;
6655 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6656 return -TARGET_EFAULT
;
6659 l_type
= host_to_target_flock(fl
->l_type
);
6660 __put_user(l_type
, &target_fl
->l_type
);
6661 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6662 __put_user(fl
->l_start
, &target_fl
->l_start
);
6663 __put_user(fl
->l_len
, &target_fl
->l_len
);
6664 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6665 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6670 static inline abi_long
copy_from_user_flock64(struct flock64
*fl
,
6671 abi_ulong target_flock_addr
)
6673 struct target_flock64
*target_fl
;
6676 if (!lock_user_struct(VERIFY_READ
, target_fl
, target_flock_addr
, 1)) {
6677 return -TARGET_EFAULT
;
6680 __get_user(l_type
, &target_fl
->l_type
);
6681 l_type
= target_to_host_flock(l_type
);
6685 fl
->l_type
= l_type
;
6686 __get_user(fl
->l_whence
, &target_fl
->l_whence
);
6687 __get_user(fl
->l_start
, &target_fl
->l_start
);
6688 __get_user(fl
->l_len
, &target_fl
->l_len
);
6689 __get_user(fl
->l_pid
, &target_fl
->l_pid
);
6690 unlock_user_struct(target_fl
, target_flock_addr
, 0);
6694 static inline abi_long
copy_to_user_flock64(abi_ulong target_flock_addr
,
6695 const struct flock64
*fl
)
6697 struct target_flock64
*target_fl
;
6700 if (!lock_user_struct(VERIFY_WRITE
, target_fl
, target_flock_addr
, 0)) {
6701 return -TARGET_EFAULT
;
6704 l_type
= host_to_target_flock(fl
->l_type
);
6705 __put_user(l_type
, &target_fl
->l_type
);
6706 __put_user(fl
->l_whence
, &target_fl
->l_whence
);
6707 __put_user(fl
->l_start
, &target_fl
->l_start
);
6708 __put_user(fl
->l_len
, &target_fl
->l_len
);
6709 __put_user(fl
->l_pid
, &target_fl
->l_pid
);
6710 unlock_user_struct(target_fl
, target_flock_addr
, 1);
6714 static abi_long
do_fcntl(int fd
, int cmd
, abi_ulong arg
)
6716 struct flock64 fl64
;
6718 struct f_owner_ex fox
;
6719 struct target_f_owner_ex
*target_fox
;
6722 int host_cmd
= target_to_host_fcntl_cmd(cmd
);
6724 if (host_cmd
== -TARGET_EINVAL
)
6728 case TARGET_F_GETLK
:
6729 ret
= copy_from_user_flock(&fl64
, arg
);
6733 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6735 ret
= copy_to_user_flock(arg
, &fl64
);
6739 case TARGET_F_SETLK
:
6740 case TARGET_F_SETLKW
:
6741 ret
= copy_from_user_flock(&fl64
, arg
);
6745 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6748 case TARGET_F_GETLK64
:
6749 ret
= copy_from_user_flock64(&fl64
, arg
);
6753 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6755 ret
= copy_to_user_flock64(arg
, &fl64
);
6758 case TARGET_F_SETLK64
:
6759 case TARGET_F_SETLKW64
:
6760 ret
= copy_from_user_flock64(&fl64
, arg
);
6764 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fl64
));
6767 case TARGET_F_GETFL
:
6768 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6770 ret
= host_to_target_bitmask(ret
, fcntl_flags_tbl
);
6774 case TARGET_F_SETFL
:
6775 ret
= get_errno(safe_fcntl(fd
, host_cmd
,
6776 target_to_host_bitmask(arg
,
6781 case TARGET_F_GETOWN_EX
:
6782 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6784 if (!lock_user_struct(VERIFY_WRITE
, target_fox
, arg
, 0))
6785 return -TARGET_EFAULT
;
6786 target_fox
->type
= tswap32(fox
.type
);
6787 target_fox
->pid
= tswap32(fox
.pid
);
6788 unlock_user_struct(target_fox
, arg
, 1);
6794 case TARGET_F_SETOWN_EX
:
6795 if (!lock_user_struct(VERIFY_READ
, target_fox
, arg
, 1))
6796 return -TARGET_EFAULT
;
6797 fox
.type
= tswap32(target_fox
->type
);
6798 fox
.pid
= tswap32(target_fox
->pid
);
6799 unlock_user_struct(target_fox
, arg
, 0);
6800 ret
= get_errno(safe_fcntl(fd
, host_cmd
, &fox
));
6804 case TARGET_F_SETOWN
:
6805 case TARGET_F_GETOWN
:
6806 case TARGET_F_SETSIG
:
6807 case TARGET_F_GETSIG
:
6808 case TARGET_F_SETLEASE
:
6809 case TARGET_F_GETLEASE
:
6810 case TARGET_F_SETPIPE_SZ
:
6811 case TARGET_F_GETPIPE_SZ
:
6812 ret
= get_errno(safe_fcntl(fd
, host_cmd
, arg
));
6816 ret
= get_errno(safe_fcntl(fd
, cmd
, arg
));
6824 static inline int high2lowuid(int uid
)
6832 static inline int high2lowgid(int gid
)
6840 static inline int low2highuid(int uid
)
6842 if ((int16_t)uid
== -1)
6848 static inline int low2highgid(int gid
)
6850 if ((int16_t)gid
== -1)
6855 static inline int tswapid(int id
)
6860 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6862 #else /* !USE_UID16 */
6863 static inline int high2lowuid(int uid
)
6867 static inline int high2lowgid(int gid
)
6871 static inline int low2highuid(int uid
)
6875 static inline int low2highgid(int gid
)
6879 static inline int tswapid(int id
)
6884 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6886 #endif /* USE_UID16 */
6888 /* We must do direct syscalls for setting UID/GID, because we want to
6889 * implement the Linux system call semantics of "change only for this thread",
6890 * not the libc/POSIX semantics of "change for all threads in process".
6891 * (See http://ewontfix.com/17/ for more details.)
6892 * We use the 32-bit version of the syscalls if present; if it is not
6893 * then either the host architecture supports 32-bit UIDs natively with
6894 * the standard syscall, or the 16-bit UID is the best we can do.
6896 #ifdef __NR_setuid32
6897 #define __NR_sys_setuid __NR_setuid32
6899 #define __NR_sys_setuid __NR_setuid
6901 #ifdef __NR_setgid32
6902 #define __NR_sys_setgid __NR_setgid32
6904 #define __NR_sys_setgid __NR_setgid
6906 #ifdef __NR_setresuid32
6907 #define __NR_sys_setresuid __NR_setresuid32
6909 #define __NR_sys_setresuid __NR_setresuid
6911 #ifdef __NR_setresgid32
6912 #define __NR_sys_setresgid __NR_setresgid32
6914 #define __NR_sys_setresgid __NR_setresgid
6917 _syscall1(int, sys_setuid
, uid_t
, uid
)
6918 _syscall1(int, sys_setgid
, gid_t
, gid
)
6919 _syscall3(int, sys_setresuid
, uid_t
, ruid
, uid_t
, euid
, uid_t
, suid
)
6920 _syscall3(int, sys_setresgid
, gid_t
, rgid
, gid_t
, egid
, gid_t
, sgid
)
6922 void syscall_init(void)
6925 const argtype
*arg_type
;
6929 thunk_init(STRUCT_MAX
);
6931 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6932 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6933 #include "syscall_types.h"
6935 #undef STRUCT_SPECIAL
6937 /* Build target_to_host_errno_table[] table from
6938 * host_to_target_errno_table[]. */
6939 for (i
= 0; i
< ERRNO_TABLE_SIZE
; i
++) {
6940 target_to_host_errno_table
[host_to_target_errno_table
[i
]] = i
;
6943 /* we patch the ioctl size if necessary. We rely on the fact that
6944 no ioctl has all the bits at '1' in the size field */
6946 while (ie
->target_cmd
!= 0) {
6947 if (((ie
->target_cmd
>> TARGET_IOC_SIZESHIFT
) & TARGET_IOC_SIZEMASK
) ==
6948 TARGET_IOC_SIZEMASK
) {
6949 arg_type
= ie
->arg_type
;
6950 if (arg_type
[0] != TYPE_PTR
) {
6951 fprintf(stderr
, "cannot patch size for ioctl 0x%x\n",
6956 size
= thunk_type_size(arg_type
, 0);
6957 ie
->target_cmd
= (ie
->target_cmd
&
6958 ~(TARGET_IOC_SIZEMASK
<< TARGET_IOC_SIZESHIFT
)) |
6959 (size
<< TARGET_IOC_SIZESHIFT
);
6962 /* automatic consistency check if same arch */
6963 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
6964 (defined(__x86_64__) && defined(TARGET_X86_64))
6965 if (unlikely(ie
->target_cmd
!= ie
->host_cmd
)) {
6966 fprintf(stderr
, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
6967 ie
->name
, ie
->target_cmd
, ie
->host_cmd
);
6974 #if TARGET_ABI_BITS == 32
6975 static inline uint64_t target_offset64(uint32_t word0
, uint32_t word1
)
6977 #ifdef TARGET_WORDS_BIGENDIAN
6978 return ((uint64_t)word0
<< 32) | word1
;
6980 return ((uint64_t)word1
<< 32) | word0
;
6983 #else /* TARGET_ABI_BITS == 32 */
6984 static inline uint64_t target_offset64(uint64_t word0
, uint64_t word1
)
6988 #endif /* TARGET_ABI_BITS != 32 */
6990 #ifdef TARGET_NR_truncate64
6991 static inline abi_long
target_truncate64(void *cpu_env
, const char *arg1
,
6996 if (regpairs_aligned(cpu_env
, TARGET_NR_truncate64
)) {
7000 return get_errno(truncate64(arg1
, target_offset64(arg2
, arg3
)));
7004 #ifdef TARGET_NR_ftruncate64
7005 static inline abi_long
target_ftruncate64(void *cpu_env
, abi_long arg1
,
7010 if (regpairs_aligned(cpu_env
, TARGET_NR_ftruncate64
)) {
7014 return get_errno(ftruncate64(arg1
, target_offset64(arg2
, arg3
)));
7018 static inline abi_long
target_to_host_timespec(struct timespec
*host_ts
,
7019 abi_ulong target_addr
)
7021 struct target_timespec
*target_ts
;
7023 if (!lock_user_struct(VERIFY_READ
, target_ts
, target_addr
, 1))
7024 return -TARGET_EFAULT
;
7025 __get_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
7026 __get_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
7027 unlock_user_struct(target_ts
, target_addr
, 0);
7031 static inline abi_long
host_to_target_timespec(abi_ulong target_addr
,
7032 struct timespec
*host_ts
)
7034 struct target_timespec
*target_ts
;
7036 if (!lock_user_struct(VERIFY_WRITE
, target_ts
, target_addr
, 0))
7037 return -TARGET_EFAULT
;
7038 __put_user(host_ts
->tv_sec
, &target_ts
->tv_sec
);
7039 __put_user(host_ts
->tv_nsec
, &target_ts
->tv_nsec
);
7040 unlock_user_struct(target_ts
, target_addr
, 1);
7044 static inline abi_long
target_to_host_itimerspec(struct itimerspec
*host_itspec
,
7045 abi_ulong target_addr
)
7047 struct target_itimerspec
*target_itspec
;
7049 if (!lock_user_struct(VERIFY_READ
, target_itspec
, target_addr
, 1)) {
7050 return -TARGET_EFAULT
;
7053 host_itspec
->it_interval
.tv_sec
=
7054 tswapal(target_itspec
->it_interval
.tv_sec
);
7055 host_itspec
->it_interval
.tv_nsec
=
7056 tswapal(target_itspec
->it_interval
.tv_nsec
);
7057 host_itspec
->it_value
.tv_sec
= tswapal(target_itspec
->it_value
.tv_sec
);
7058 host_itspec
->it_value
.tv_nsec
= tswapal(target_itspec
->it_value
.tv_nsec
);
7060 unlock_user_struct(target_itspec
, target_addr
, 1);
7064 static inline abi_long
host_to_target_itimerspec(abi_ulong target_addr
,
7065 struct itimerspec
*host_its
)
7067 struct target_itimerspec
*target_itspec
;
7069 if (!lock_user_struct(VERIFY_WRITE
, target_itspec
, target_addr
, 0)) {
7070 return -TARGET_EFAULT
;
7073 target_itspec
->it_interval
.tv_sec
= tswapal(host_its
->it_interval
.tv_sec
);
7074 target_itspec
->it_interval
.tv_nsec
= tswapal(host_its
->it_interval
.tv_nsec
);
7076 target_itspec
->it_value
.tv_sec
= tswapal(host_its
->it_value
.tv_sec
);
7077 target_itspec
->it_value
.tv_nsec
= tswapal(host_its
->it_value
.tv_nsec
);
7079 unlock_user_struct(target_itspec
, target_addr
, 0);
7083 static inline abi_long
target_to_host_timex(struct timex
*host_tx
,
7084 abi_long target_addr
)
7086 struct target_timex
*target_tx
;
7088 if (!lock_user_struct(VERIFY_READ
, target_tx
, target_addr
, 1)) {
7089 return -TARGET_EFAULT
;
7092 __get_user(host_tx
->modes
, &target_tx
->modes
);
7093 __get_user(host_tx
->offset
, &target_tx
->offset
);
7094 __get_user(host_tx
->freq
, &target_tx
->freq
);
7095 __get_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7096 __get_user(host_tx
->esterror
, &target_tx
->esterror
);
7097 __get_user(host_tx
->status
, &target_tx
->status
);
7098 __get_user(host_tx
->constant
, &target_tx
->constant
);
7099 __get_user(host_tx
->precision
, &target_tx
->precision
);
7100 __get_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7101 __get_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
7102 __get_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
7103 __get_user(host_tx
->tick
, &target_tx
->tick
);
7104 __get_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7105 __get_user(host_tx
->jitter
, &target_tx
->jitter
);
7106 __get_user(host_tx
->shift
, &target_tx
->shift
);
7107 __get_user(host_tx
->stabil
, &target_tx
->stabil
);
7108 __get_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7109 __get_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7110 __get_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7111 __get_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7112 __get_user(host_tx
->tai
, &target_tx
->tai
);
7114 unlock_user_struct(target_tx
, target_addr
, 0);
7118 static inline abi_long
host_to_target_timex(abi_long target_addr
,
7119 struct timex
*host_tx
)
7121 struct target_timex
*target_tx
;
7123 if (!lock_user_struct(VERIFY_WRITE
, target_tx
, target_addr
, 0)) {
7124 return -TARGET_EFAULT
;
7127 __put_user(host_tx
->modes
, &target_tx
->modes
);
7128 __put_user(host_tx
->offset
, &target_tx
->offset
);
7129 __put_user(host_tx
->freq
, &target_tx
->freq
);
7130 __put_user(host_tx
->maxerror
, &target_tx
->maxerror
);
7131 __put_user(host_tx
->esterror
, &target_tx
->esterror
);
7132 __put_user(host_tx
->status
, &target_tx
->status
);
7133 __put_user(host_tx
->constant
, &target_tx
->constant
);
7134 __put_user(host_tx
->precision
, &target_tx
->precision
);
7135 __put_user(host_tx
->tolerance
, &target_tx
->tolerance
);
7136 __put_user(host_tx
->time
.tv_sec
, &target_tx
->time
.tv_sec
);
7137 __put_user(host_tx
->time
.tv_usec
, &target_tx
->time
.tv_usec
);
7138 __put_user(host_tx
->tick
, &target_tx
->tick
);
7139 __put_user(host_tx
->ppsfreq
, &target_tx
->ppsfreq
);
7140 __put_user(host_tx
->jitter
, &target_tx
->jitter
);
7141 __put_user(host_tx
->shift
, &target_tx
->shift
);
7142 __put_user(host_tx
->stabil
, &target_tx
->stabil
);
7143 __put_user(host_tx
->jitcnt
, &target_tx
->jitcnt
);
7144 __put_user(host_tx
->calcnt
, &target_tx
->calcnt
);
7145 __put_user(host_tx
->errcnt
, &target_tx
->errcnt
);
7146 __put_user(host_tx
->stbcnt
, &target_tx
->stbcnt
);
7147 __put_user(host_tx
->tai
, &target_tx
->tai
);
7149 unlock_user_struct(target_tx
, target_addr
, 1);
7154 static inline abi_long
target_to_host_sigevent(struct sigevent
*host_sevp
,
7155 abi_ulong target_addr
)
7157 struct target_sigevent
*target_sevp
;
7159 if (!lock_user_struct(VERIFY_READ
, target_sevp
, target_addr
, 1)) {
7160 return -TARGET_EFAULT
;
7163 /* This union is awkward on 64 bit systems because it has a 32 bit
7164 * integer and a pointer in it; we follow the conversion approach
7165 * used for handling sigval types in signal.c so the guest should get
7166 * the correct value back even if we did a 64 bit byteswap and it's
7167 * using the 32 bit integer.
7169 host_sevp
->sigev_value
.sival_ptr
=
7170 (void *)(uintptr_t)tswapal(target_sevp
->sigev_value
.sival_ptr
);
7171 host_sevp
->sigev_signo
=
7172 target_to_host_signal(tswap32(target_sevp
->sigev_signo
));
7173 host_sevp
->sigev_notify
= tswap32(target_sevp
->sigev_notify
);
7174 host_sevp
->_sigev_un
._tid
= tswap32(target_sevp
->_sigev_un
._tid
);
7176 unlock_user_struct(target_sevp
, target_addr
, 1);
7180 #if defined(TARGET_NR_mlockall)
7181 static inline int target_to_host_mlockall_arg(int arg
)
7185 if (arg
& TARGET_MLOCKALL_MCL_CURRENT
) {
7186 result
|= MCL_CURRENT
;
7188 if (arg
& TARGET_MLOCKALL_MCL_FUTURE
) {
7189 result
|= MCL_FUTURE
;
7195 static inline abi_long
host_to_target_stat64(void *cpu_env
,
7196 abi_ulong target_addr
,
7197 struct stat
*host_st
)
7199 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7200 if (((CPUARMState
*)cpu_env
)->eabi
) {
7201 struct target_eabi_stat64
*target_st
;
7203 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
7204 return -TARGET_EFAULT
;
7205 memset(target_st
, 0, sizeof(struct target_eabi_stat64
));
7206 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
7207 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
7208 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7209 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
7211 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
7212 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
7213 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
7214 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
7215 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
7216 __put_user(host_st
->st_size
, &target_st
->st_size
);
7217 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
7218 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
7219 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
7220 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
7221 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
7222 unlock_user_struct(target_st
, target_addr
, 1);
7226 #if defined(TARGET_HAS_STRUCT_STAT64)
7227 struct target_stat64
*target_st
;
7229 struct target_stat
*target_st
;
7232 if (!lock_user_struct(VERIFY_WRITE
, target_st
, target_addr
, 0))
7233 return -TARGET_EFAULT
;
7234 memset(target_st
, 0, sizeof(*target_st
));
7235 __put_user(host_st
->st_dev
, &target_st
->st_dev
);
7236 __put_user(host_st
->st_ino
, &target_st
->st_ino
);
7237 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7238 __put_user(host_st
->st_ino
, &target_st
->__st_ino
);
7240 __put_user(host_st
->st_mode
, &target_st
->st_mode
);
7241 __put_user(host_st
->st_nlink
, &target_st
->st_nlink
);
7242 __put_user(host_st
->st_uid
, &target_st
->st_uid
);
7243 __put_user(host_st
->st_gid
, &target_st
->st_gid
);
7244 __put_user(host_st
->st_rdev
, &target_st
->st_rdev
);
7245 /* XXX: better use of kernel struct */
7246 __put_user(host_st
->st_size
, &target_st
->st_size
);
7247 __put_user(host_st
->st_blksize
, &target_st
->st_blksize
);
7248 __put_user(host_st
->st_blocks
, &target_st
->st_blocks
);
7249 __put_user(host_st
->st_atime
, &target_st
->target_st_atime
);
7250 __put_user(host_st
->st_mtime
, &target_st
->target_st_mtime
);
7251 __put_user(host_st
->st_ctime
, &target_st
->target_st_ctime
);
7252 unlock_user_struct(target_st
, target_addr
, 1);
7258 /* ??? Using host futex calls even when target atomic operations
7259 are not really atomic probably breaks things. However implementing
7260 futexes locally would make futexes shared between multiple processes
7261 tricky. However they're probably useless because guest atomic
7262 operations won't work either. */
7263 static int do_futex(target_ulong uaddr
, int op
, int val
, target_ulong timeout
,
7264 target_ulong uaddr2
, int val3
)
7266 struct timespec ts
, *pts
;
7269 /* ??? We assume FUTEX_* constants are the same on both host
7271 #ifdef FUTEX_CMD_MASK
7272 base_op
= op
& FUTEX_CMD_MASK
;
7278 case FUTEX_WAIT_BITSET
:
7281 target_to_host_timespec(pts
, timeout
);
7285 return get_errno(safe_futex(g2h(uaddr
), op
, tswap32(val
),
7288 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
7290 return get_errno(safe_futex(g2h(uaddr
), op
, val
, NULL
, NULL
, 0));
7292 case FUTEX_CMP_REQUEUE
:
7294 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7295 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7296 But the prototype takes a `struct timespec *'; insert casts
7297 to satisfy the compiler. We do not need to tswap TIMEOUT
7298 since it's not compared to guest memory. */
7299 pts
= (struct timespec
*)(uintptr_t) timeout
;
7300 return get_errno(safe_futex(g2h(uaddr
), op
, val
, pts
,
7302 (base_op
== FUTEX_CMP_REQUEUE
7306 return -TARGET_ENOSYS
;
7309 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7310 static abi_long
do_name_to_handle_at(abi_long dirfd
, abi_long pathname
,
7311 abi_long handle
, abi_long mount_id
,
7314 struct file_handle
*target_fh
;
7315 struct file_handle
*fh
;
7319 unsigned int size
, total_size
;
7321 if (get_user_s32(size
, handle
)) {
7322 return -TARGET_EFAULT
;
7325 name
= lock_user_string(pathname
);
7327 return -TARGET_EFAULT
;
7330 total_size
= sizeof(struct file_handle
) + size
;
7331 target_fh
= lock_user(VERIFY_WRITE
, handle
, total_size
, 0);
7333 unlock_user(name
, pathname
, 0);
7334 return -TARGET_EFAULT
;
7337 fh
= g_malloc0(total_size
);
7338 fh
->handle_bytes
= size
;
7340 ret
= get_errno(name_to_handle_at(dirfd
, path(name
), fh
, &mid
, flags
));
7341 unlock_user(name
, pathname
, 0);
7343 /* man name_to_handle_at(2):
7344 * Other than the use of the handle_bytes field, the caller should treat
7345 * the file_handle structure as an opaque data type
7348 memcpy(target_fh
, fh
, total_size
);
7349 target_fh
->handle_bytes
= tswap32(fh
->handle_bytes
);
7350 target_fh
->handle_type
= tswap32(fh
->handle_type
);
7352 unlock_user(target_fh
, handle
, total_size
);
7354 if (put_user_s32(mid
, mount_id
)) {
7355 return -TARGET_EFAULT
;
7363 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7364 static abi_long
do_open_by_handle_at(abi_long mount_fd
, abi_long handle
,
7367 struct file_handle
*target_fh
;
7368 struct file_handle
*fh
;
7369 unsigned int size
, total_size
;
7372 if (get_user_s32(size
, handle
)) {
7373 return -TARGET_EFAULT
;
7376 total_size
= sizeof(struct file_handle
) + size
;
7377 target_fh
= lock_user(VERIFY_READ
, handle
, total_size
, 1);
7379 return -TARGET_EFAULT
;
7382 fh
= g_memdup(target_fh
, total_size
);
7383 fh
->handle_bytes
= size
;
7384 fh
->handle_type
= tswap32(target_fh
->handle_type
);
7386 ret
= get_errno(open_by_handle_at(mount_fd
, fh
,
7387 target_to_host_bitmask(flags
, fcntl_flags_tbl
)));
7391 unlock_user(target_fh
, handle
, total_size
);
7397 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7399 /* signalfd siginfo conversion */
7402 host_to_target_signalfd_siginfo(struct signalfd_siginfo
*tinfo
,
7403 const struct signalfd_siginfo
*info
)
7405 int sig
= host_to_target_signal(info
->ssi_signo
);
7407 /* linux/signalfd.h defines a ssi_addr_lsb
7408 * not defined in sys/signalfd.h but used by some kernels
7411 #ifdef BUS_MCEERR_AO
7412 if (tinfo
->ssi_signo
== SIGBUS
&&
7413 (tinfo
->ssi_code
== BUS_MCEERR_AR
||
7414 tinfo
->ssi_code
== BUS_MCEERR_AO
)) {
7415 uint16_t *ssi_addr_lsb
= (uint16_t *)(&info
->ssi_addr
+ 1);
7416 uint16_t *tssi_addr_lsb
= (uint16_t *)(&tinfo
->ssi_addr
+ 1);
7417 *tssi_addr_lsb
= tswap16(*ssi_addr_lsb
);
7421 tinfo
->ssi_signo
= tswap32(sig
);
7422 tinfo
->ssi_errno
= tswap32(tinfo
->ssi_errno
);
7423 tinfo
->ssi_code
= tswap32(info
->ssi_code
);
7424 tinfo
->ssi_pid
= tswap32(info
->ssi_pid
);
7425 tinfo
->ssi_uid
= tswap32(info
->ssi_uid
);
7426 tinfo
->ssi_fd
= tswap32(info
->ssi_fd
);
7427 tinfo
->ssi_tid
= tswap32(info
->ssi_tid
);
7428 tinfo
->ssi_band
= tswap32(info
->ssi_band
);
7429 tinfo
->ssi_overrun
= tswap32(info
->ssi_overrun
);
7430 tinfo
->ssi_trapno
= tswap32(info
->ssi_trapno
);
7431 tinfo
->ssi_status
= tswap32(info
->ssi_status
);
7432 tinfo
->ssi_int
= tswap32(info
->ssi_int
);
7433 tinfo
->ssi_ptr
= tswap64(info
->ssi_ptr
);
7434 tinfo
->ssi_utime
= tswap64(info
->ssi_utime
);
7435 tinfo
->ssi_stime
= tswap64(info
->ssi_stime
);
7436 tinfo
->ssi_addr
= tswap64(info
->ssi_addr
);
7439 static abi_long
host_to_target_data_signalfd(void *buf
, size_t len
)
7443 for (i
= 0; i
< len
; i
+= sizeof(struct signalfd_siginfo
)) {
7444 host_to_target_signalfd_siginfo(buf
+ i
, buf
+ i
);
7450 static TargetFdTrans target_signalfd_trans
= {
7451 .host_to_target_data
= host_to_target_data_signalfd
,
7454 static abi_long
do_signalfd4(int fd
, abi_long mask
, int flags
)
7457 target_sigset_t
*target_mask
;
7461 if (flags
& ~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
)) {
7462 return -TARGET_EINVAL
;
7464 if (!lock_user_struct(VERIFY_READ
, target_mask
, mask
, 1)) {
7465 return -TARGET_EFAULT
;
7468 target_to_host_sigset(&host_mask
, target_mask
);
7470 host_flags
= target_to_host_bitmask(flags
, fcntl_flags_tbl
);
7472 ret
= get_errno(signalfd(fd
, &host_mask
, host_flags
));
7474 fd_trans_register(ret
, &target_signalfd_trans
);
7477 unlock_user_struct(target_mask
, mask
, 0);
7483 /* Map host to target signal numbers for the wait family of syscalls.
7484 Assume all other status bits are the same. */
7485 int host_to_target_waitstatus(int status
)
7487 if (WIFSIGNALED(status
)) {
7488 return host_to_target_signal(WTERMSIG(status
)) | (status
& ~0x7f);
7490 if (WIFSTOPPED(status
)) {
7491 return (host_to_target_signal(WSTOPSIG(status
)) << 8)
7497 static int open_self_cmdline(void *cpu_env
, int fd
)
7499 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7500 struct linux_binprm
*bprm
= ((TaskState
*)cpu
->opaque
)->bprm
;
7503 for (i
= 0; i
< bprm
->argc
; i
++) {
7504 size_t len
= strlen(bprm
->argv
[i
]) + 1;
7506 if (write(fd
, bprm
->argv
[i
], len
) != len
) {
7514 static int open_self_maps(void *cpu_env
, int fd
)
7516 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7517 TaskState
*ts
= cpu
->opaque
;
7523 fp
= fopen("/proc/self/maps", "r");
7528 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7529 int fields
, dev_maj
, dev_min
, inode
;
7530 uint64_t min
, max
, offset
;
7531 char flag_r
, flag_w
, flag_x
, flag_p
;
7532 char path
[512] = "";
7533 fields
= sscanf(line
, "%"PRIx64
"-%"PRIx64
" %c%c%c%c %"PRIx64
" %x:%x %d"
7534 " %512s", &min
, &max
, &flag_r
, &flag_w
, &flag_x
,
7535 &flag_p
, &offset
, &dev_maj
, &dev_min
, &inode
, path
);
7537 if ((fields
< 10) || (fields
> 11)) {
7540 if (h2g_valid(min
)) {
7541 int flags
= page_get_flags(h2g(min
));
7542 max
= h2g_valid(max
- 1) ? max
: (uintptr_t)g2h(GUEST_ADDR_MAX
) + 1;
7543 if (page_check_range(h2g(min
), max
- min
, flags
) == -1) {
7546 if (h2g(min
) == ts
->info
->stack_limit
) {
7547 pstrcpy(path
, sizeof(path
), " [stack]");
7549 dprintf(fd
, TARGET_ABI_FMT_lx
"-" TARGET_ABI_FMT_lx
7550 " %c%c%c%c %08" PRIx64
" %02x:%02x %d %s%s\n",
7551 h2g(min
), h2g(max
- 1) + 1, flag_r
, flag_w
,
7552 flag_x
, flag_p
, offset
, dev_maj
, dev_min
, inode
,
7553 path
[0] ? " " : "", path
);
7563 static int open_self_stat(void *cpu_env
, int fd
)
7565 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7566 TaskState
*ts
= cpu
->opaque
;
7567 abi_ulong start_stack
= ts
->info
->start_stack
;
7570 for (i
= 0; i
< 44; i
++) {
7578 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
7579 } else if (i
== 1) {
7581 snprintf(buf
, sizeof(buf
), "(%s) ", ts
->bprm
->argv
[0]);
7582 } else if (i
== 27) {
7585 snprintf(buf
, sizeof(buf
), "%"PRId64
" ", val
);
7587 /* for the rest, there is MasterCard */
7588 snprintf(buf
, sizeof(buf
), "0%c", i
== 43 ? '\n' : ' ');
7592 if (write(fd
, buf
, len
) != len
) {
7600 static int open_self_auxv(void *cpu_env
, int fd
)
7602 CPUState
*cpu
= ENV_GET_CPU((CPUArchState
*)cpu_env
);
7603 TaskState
*ts
= cpu
->opaque
;
7604 abi_ulong auxv
= ts
->info
->saved_auxv
;
7605 abi_ulong len
= ts
->info
->auxv_len
;
7609 * Auxiliary vector is stored in target process stack.
7610 * read in whole auxv vector and copy it to file
7612 ptr
= lock_user(VERIFY_READ
, auxv
, len
, 0);
7616 r
= write(fd
, ptr
, len
);
7623 lseek(fd
, 0, SEEK_SET
);
7624 unlock_user(ptr
, auxv
, len
);
7630 static int is_proc_myself(const char *filename
, const char *entry
)
7632 if (!strncmp(filename
, "/proc/", strlen("/proc/"))) {
7633 filename
+= strlen("/proc/");
7634 if (!strncmp(filename
, "self/", strlen("self/"))) {
7635 filename
+= strlen("self/");
7636 } else if (*filename
>= '1' && *filename
<= '9') {
7638 snprintf(myself
, sizeof(myself
), "%d/", getpid());
7639 if (!strncmp(filename
, myself
, strlen(myself
))) {
7640 filename
+= strlen(myself
);
7647 if (!strcmp(filename
, entry
)) {
7654 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7655 static int is_proc(const char *filename
, const char *entry
)
7657 return strcmp(filename
, entry
) == 0;
7660 static int open_net_route(void *cpu_env
, int fd
)
7667 fp
= fopen("/proc/net/route", "r");
7674 read
= getline(&line
, &len
, fp
);
7675 dprintf(fd
, "%s", line
);
7679 while ((read
= getline(&line
, &len
, fp
)) != -1) {
7681 uint32_t dest
, gw
, mask
;
7682 unsigned int flags
, refcnt
, use
, metric
, mtu
, window
, irtt
;
7683 sscanf(line
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7684 iface
, &dest
, &gw
, &flags
, &refcnt
, &use
, &metric
,
7685 &mask
, &mtu
, &window
, &irtt
);
7686 dprintf(fd
, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7687 iface
, tswap32(dest
), tswap32(gw
), flags
, refcnt
, use
,
7688 metric
, tswap32(mask
), mtu
, window
, irtt
);
7698 static int do_openat(void *cpu_env
, int dirfd
, const char *pathname
, int flags
, mode_t mode
)
7701 const char *filename
;
7702 int (*fill
)(void *cpu_env
, int fd
);
7703 int (*cmp
)(const char *s1
, const char *s2
);
7705 const struct fake_open
*fake_open
;
7706 static const struct fake_open fakes
[] = {
7707 { "maps", open_self_maps
, is_proc_myself
},
7708 { "stat", open_self_stat
, is_proc_myself
},
7709 { "auxv", open_self_auxv
, is_proc_myself
},
7710 { "cmdline", open_self_cmdline
, is_proc_myself
},
7711 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7712 { "/proc/net/route", open_net_route
, is_proc
},
7714 { NULL
, NULL
, NULL
}
7717 if (is_proc_myself(pathname
, "exe")) {
7718 int execfd
= qemu_getauxval(AT_EXECFD
);
7719 return execfd
? execfd
: safe_openat(dirfd
, exec_path
, flags
, mode
);
7722 for (fake_open
= fakes
; fake_open
->filename
; fake_open
++) {
7723 if (fake_open
->cmp(pathname
, fake_open
->filename
)) {
7728 if (fake_open
->filename
) {
7730 char filename
[PATH_MAX
];
7733 /* create temporary file to map stat to */
7734 tmpdir
= getenv("TMPDIR");
7737 snprintf(filename
, sizeof(filename
), "%s/qemu-open.XXXXXX", tmpdir
);
7738 fd
= mkstemp(filename
);
7744 if ((r
= fake_open
->fill(cpu_env
, fd
))) {
7750 lseek(fd
, 0, SEEK_SET
);
7755 return safe_openat(dirfd
, path(pathname
), flags
, mode
);
7758 #define TIMER_MAGIC 0x0caf0000
7759 #define TIMER_MAGIC_MASK 0xffff0000
7761 /* Convert QEMU provided timer ID back to internal 16bit index format */
7762 static target_timer_t
get_timer_id(abi_long arg
)
7764 target_timer_t timerid
= arg
;
7766 if ((timerid
& TIMER_MAGIC_MASK
) != TIMER_MAGIC
) {
7767 return -TARGET_EINVAL
;
7772 if (timerid
>= ARRAY_SIZE(g_posix_timers
)) {
7773 return -TARGET_EINVAL
;
7779 static abi_long
swap_data_eventfd(void *buf
, size_t len
)
7781 uint64_t *counter
= buf
;
7784 if (len
< sizeof(uint64_t)) {
7788 for (i
= 0; i
< len
; i
+= sizeof(uint64_t)) {
7789 *counter
= tswap64(*counter
);
7796 static TargetFdTrans target_eventfd_trans
= {
7797 .host_to_target_data
= swap_data_eventfd
,
7798 .target_to_host_data
= swap_data_eventfd
,
7801 #if (defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)) || \
7802 (defined(CONFIG_INOTIFY1) && defined(TARGET_NR_inotify_init1) && \
7803 defined(__NR_inotify_init1))
7804 static abi_long
host_to_target_data_inotify(void *buf
, size_t len
)
7806 struct inotify_event
*ev
;
7810 for (i
= 0; i
< len
; i
+= sizeof(struct inotify_event
) + name_len
) {
7811 ev
= (struct inotify_event
*)((char *)buf
+ i
);
7814 ev
->wd
= tswap32(ev
->wd
);
7815 ev
->mask
= tswap32(ev
->mask
);
7816 ev
->cookie
= tswap32(ev
->cookie
);
7817 ev
->len
= tswap32(name_len
);
7823 static TargetFdTrans target_inotify_trans
= {
7824 .host_to_target_data
= host_to_target_data_inotify
,
7828 static int target_to_host_cpu_mask(unsigned long *host_mask
,
7830 abi_ulong target_addr
,
7833 unsigned target_bits
= sizeof(abi_ulong
) * 8;
7834 unsigned host_bits
= sizeof(*host_mask
) * 8;
7835 abi_ulong
*target_mask
;
7838 assert(host_size
>= target_size
);
7840 target_mask
= lock_user(VERIFY_READ
, target_addr
, target_size
, 1);
7842 return -TARGET_EFAULT
;
7844 memset(host_mask
, 0, host_size
);
7846 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
7847 unsigned bit
= i
* target_bits
;
7850 __get_user(val
, &target_mask
[i
]);
7851 for (j
= 0; j
< target_bits
; j
++, bit
++) {
7852 if (val
& (1UL << j
)) {
7853 host_mask
[bit
/ host_bits
] |= 1UL << (bit
% host_bits
);
7858 unlock_user(target_mask
, target_addr
, 0);
7862 static int host_to_target_cpu_mask(const unsigned long *host_mask
,
7864 abi_ulong target_addr
,
7867 unsigned target_bits
= sizeof(abi_ulong
) * 8;
7868 unsigned host_bits
= sizeof(*host_mask
) * 8;
7869 abi_ulong
*target_mask
;
7872 assert(host_size
>= target_size
);
7874 target_mask
= lock_user(VERIFY_WRITE
, target_addr
, target_size
, 0);
7876 return -TARGET_EFAULT
;
7879 for (i
= 0 ; i
< target_size
/ sizeof(abi_ulong
); i
++) {
7880 unsigned bit
= i
* target_bits
;
7883 for (j
= 0; j
< target_bits
; j
++, bit
++) {
7884 if (host_mask
[bit
/ host_bits
] & (1UL << (bit
% host_bits
))) {
7888 __put_user(val
, &target_mask
[i
]);
7891 unlock_user(target_mask
, target_addr
, target_size
);
7895 /* do_syscall() should always have a single exit point at the end so
7896 that actions, such as logging of syscall results, can be performed.
7897 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7898 abi_long
do_syscall(void *cpu_env
, int num
, abi_long arg1
,
7899 abi_long arg2
, abi_long arg3
, abi_long arg4
,
7900 abi_long arg5
, abi_long arg6
, abi_long arg7
,
7903 CPUState
*cpu
= ENV_GET_CPU(cpu_env
);
7909 #if defined(DEBUG_ERESTARTSYS)
7910 /* Debug-only code for exercising the syscall-restart code paths
7911 * in the per-architecture cpu main loops: restart every syscall
7912 * the guest makes once before letting it through.
7919 return -TARGET_ERESTARTSYS
;
7925 gemu_log("syscall %d", num
);
7927 trace_guest_user_syscall(cpu
, num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
, arg7
, arg8
);
7929 print_syscall(num
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
7932 case TARGET_NR_exit
:
7933 /* In old applications this may be used to implement _exit(2).
7934 However in threaded applictions it is used for thread termination,
7935 and _exit_group is used for application termination.
7936 Do thread termination if we have more then one thread. */
7938 if (block_signals()) {
7939 ret
= -TARGET_ERESTARTSYS
;
7945 if (CPU_NEXT(first_cpu
)) {
7948 /* Remove the CPU from the list. */
7949 QTAILQ_REMOVE(&cpus
, cpu
, node
);
7954 if (ts
->child_tidptr
) {
7955 put_user_u32(0, ts
->child_tidptr
);
7956 sys_futex(g2h(ts
->child_tidptr
), FUTEX_WAKE
, INT_MAX
,
7960 object_unref(OBJECT(cpu
));
7962 rcu_unregister_thread();
7970 gdb_exit(cpu_env
, arg1
);
7972 ret
= 0; /* avoid warning */
7974 case TARGET_NR_read
:
7978 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
7980 ret
= get_errno(safe_read(arg1
, p
, arg3
));
7982 fd_trans_host_to_target_data(arg1
)) {
7983 ret
= fd_trans_host_to_target_data(arg1
)(p
, ret
);
7985 unlock_user(p
, arg2
, ret
);
7988 case TARGET_NR_write
:
7989 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
7991 if (fd_trans_target_to_host_data(arg1
)) {
7992 void *copy
= g_malloc(arg3
);
7993 memcpy(copy
, p
, arg3
);
7994 ret
= fd_trans_target_to_host_data(arg1
)(copy
, arg3
);
7996 ret
= get_errno(safe_write(arg1
, copy
, ret
));
8000 ret
= get_errno(safe_write(arg1
, p
, arg3
));
8002 unlock_user(p
, arg2
, 0);
8004 #ifdef TARGET_NR_open
8005 case TARGET_NR_open
:
8006 if (!(p
= lock_user_string(arg1
)))
8008 ret
= get_errno(do_openat(cpu_env
, AT_FDCWD
, p
,
8009 target_to_host_bitmask(arg2
, fcntl_flags_tbl
),
8011 fd_trans_unregister(ret
);
8012 unlock_user(p
, arg1
, 0);
8015 case TARGET_NR_openat
:
8016 if (!(p
= lock_user_string(arg2
)))
8018 ret
= get_errno(do_openat(cpu_env
, arg1
, p
,
8019 target_to_host_bitmask(arg3
, fcntl_flags_tbl
),
8021 fd_trans_unregister(ret
);
8022 unlock_user(p
, arg2
, 0);
8024 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8025 case TARGET_NR_name_to_handle_at
:
8026 ret
= do_name_to_handle_at(arg1
, arg2
, arg3
, arg4
, arg5
);
8029 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8030 case TARGET_NR_open_by_handle_at
:
8031 ret
= do_open_by_handle_at(arg1
, arg2
, arg3
);
8032 fd_trans_unregister(ret
);
8035 case TARGET_NR_close
:
8036 fd_trans_unregister(arg1
);
8037 ret
= get_errno(close(arg1
));
8042 #ifdef TARGET_NR_fork
8043 case TARGET_NR_fork
:
8044 ret
= get_errno(do_fork(cpu_env
, TARGET_SIGCHLD
, 0, 0, 0, 0));
8047 #ifdef TARGET_NR_waitpid
8048 case TARGET_NR_waitpid
:
8051 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, 0));
8052 if (!is_error(ret
) && arg2
&& ret
8053 && put_user_s32(host_to_target_waitstatus(status
), arg2
))
8058 #ifdef TARGET_NR_waitid
8059 case TARGET_NR_waitid
:
8063 ret
= get_errno(safe_waitid(arg1
, arg2
, &info
, arg4
, NULL
));
8064 if (!is_error(ret
) && arg3
&& info
.si_pid
!= 0) {
8065 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_siginfo_t
), 0)))
8067 host_to_target_siginfo(p
, &info
);
8068 unlock_user(p
, arg3
, sizeof(target_siginfo_t
));
8073 #ifdef TARGET_NR_creat /* not on alpha */
8074 case TARGET_NR_creat
:
8075 if (!(p
= lock_user_string(arg1
)))
8077 ret
= get_errno(creat(p
, arg2
));
8078 fd_trans_unregister(ret
);
8079 unlock_user(p
, arg1
, 0);
8082 #ifdef TARGET_NR_link
8083 case TARGET_NR_link
:
8086 p
= lock_user_string(arg1
);
8087 p2
= lock_user_string(arg2
);
8089 ret
= -TARGET_EFAULT
;
8091 ret
= get_errno(link(p
, p2
));
8092 unlock_user(p2
, arg2
, 0);
8093 unlock_user(p
, arg1
, 0);
8097 #if defined(TARGET_NR_linkat)
8098 case TARGET_NR_linkat
:
8103 p
= lock_user_string(arg2
);
8104 p2
= lock_user_string(arg4
);
8106 ret
= -TARGET_EFAULT
;
8108 ret
= get_errno(linkat(arg1
, p
, arg3
, p2
, arg5
));
8109 unlock_user(p
, arg2
, 0);
8110 unlock_user(p2
, arg4
, 0);
8114 #ifdef TARGET_NR_unlink
8115 case TARGET_NR_unlink
:
8116 if (!(p
= lock_user_string(arg1
)))
8118 ret
= get_errno(unlink(p
));
8119 unlock_user(p
, arg1
, 0);
8122 #if defined(TARGET_NR_unlinkat)
8123 case TARGET_NR_unlinkat
:
8124 if (!(p
= lock_user_string(arg2
)))
8126 ret
= get_errno(unlinkat(arg1
, p
, arg3
));
8127 unlock_user(p
, arg2
, 0);
8130 case TARGET_NR_execve
:
8132 char **argp
, **envp
;
8135 abi_ulong guest_argp
;
8136 abi_ulong guest_envp
;
8143 for (gp
= guest_argp
; gp
; gp
+= sizeof(abi_ulong
)) {
8144 if (get_user_ual(addr
, gp
))
8152 for (gp
= guest_envp
; gp
; gp
+= sizeof(abi_ulong
)) {
8153 if (get_user_ual(addr
, gp
))
8160 argp
= g_new0(char *, argc
+ 1);
8161 envp
= g_new0(char *, envc
+ 1);
8163 for (gp
= guest_argp
, q
= argp
; gp
;
8164 gp
+= sizeof(abi_ulong
), q
++) {
8165 if (get_user_ual(addr
, gp
))
8169 if (!(*q
= lock_user_string(addr
)))
8171 total_size
+= strlen(*q
) + 1;
8175 for (gp
= guest_envp
, q
= envp
; gp
;
8176 gp
+= sizeof(abi_ulong
), q
++) {
8177 if (get_user_ual(addr
, gp
))
8181 if (!(*q
= lock_user_string(addr
)))
8183 total_size
+= strlen(*q
) + 1;
8187 if (!(p
= lock_user_string(arg1
)))
8189 /* Although execve() is not an interruptible syscall it is
8190 * a special case where we must use the safe_syscall wrapper:
8191 * if we allow a signal to happen before we make the host
8192 * syscall then we will 'lose' it, because at the point of
8193 * execve the process leaves QEMU's control. So we use the
8194 * safe syscall wrapper to ensure that we either take the
8195 * signal as a guest signal, or else it does not happen
8196 * before the execve completes and makes it the other
8197 * program's problem.
8199 ret
= get_errno(safe_execve(p
, argp
, envp
));
8200 unlock_user(p
, arg1
, 0);
8205 ret
= -TARGET_EFAULT
;
8208 for (gp
= guest_argp
, q
= argp
; *q
;
8209 gp
+= sizeof(abi_ulong
), q
++) {
8210 if (get_user_ual(addr
, gp
)
8213 unlock_user(*q
, addr
, 0);
8215 for (gp
= guest_envp
, q
= envp
; *q
;
8216 gp
+= sizeof(abi_ulong
), q
++) {
8217 if (get_user_ual(addr
, gp
)
8220 unlock_user(*q
, addr
, 0);
8227 case TARGET_NR_chdir
:
8228 if (!(p
= lock_user_string(arg1
)))
8230 ret
= get_errno(chdir(p
));
8231 unlock_user(p
, arg1
, 0);
8233 #ifdef TARGET_NR_time
8234 case TARGET_NR_time
:
8237 ret
= get_errno(time(&host_time
));
8240 && put_user_sal(host_time
, arg1
))
8245 #ifdef TARGET_NR_mknod
8246 case TARGET_NR_mknod
:
8247 if (!(p
= lock_user_string(arg1
)))
8249 ret
= get_errno(mknod(p
, arg2
, arg3
));
8250 unlock_user(p
, arg1
, 0);
8253 #if defined(TARGET_NR_mknodat)
8254 case TARGET_NR_mknodat
:
8255 if (!(p
= lock_user_string(arg2
)))
8257 ret
= get_errno(mknodat(arg1
, p
, arg3
, arg4
));
8258 unlock_user(p
, arg2
, 0);
8261 #ifdef TARGET_NR_chmod
8262 case TARGET_NR_chmod
:
8263 if (!(p
= lock_user_string(arg1
)))
8265 ret
= get_errno(chmod(p
, arg2
));
8266 unlock_user(p
, arg1
, 0);
8269 #ifdef TARGET_NR_break
8270 case TARGET_NR_break
:
8273 #ifdef TARGET_NR_oldstat
8274 case TARGET_NR_oldstat
:
8277 case TARGET_NR_lseek
:
8278 ret
= get_errno(lseek(arg1
, arg2
, arg3
));
8280 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8281 /* Alpha specific */
8282 case TARGET_NR_getxpid
:
8283 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
] = getppid();
8284 ret
= get_errno(getpid());
8287 #ifdef TARGET_NR_getpid
8288 case TARGET_NR_getpid
:
8289 ret
= get_errno(getpid());
8292 case TARGET_NR_mount
:
8294 /* need to look at the data field */
8298 p
= lock_user_string(arg1
);
8306 p2
= lock_user_string(arg2
);
8309 unlock_user(p
, arg1
, 0);
8315 p3
= lock_user_string(arg3
);
8318 unlock_user(p
, arg1
, 0);
8320 unlock_user(p2
, arg2
, 0);
8327 /* FIXME - arg5 should be locked, but it isn't clear how to
8328 * do that since it's not guaranteed to be a NULL-terminated
8332 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, NULL
);
8334 ret
= mount(p
, p2
, p3
, (unsigned long)arg4
, g2h(arg5
));
8336 ret
= get_errno(ret
);
8339 unlock_user(p
, arg1
, 0);
8341 unlock_user(p2
, arg2
, 0);
8343 unlock_user(p3
, arg3
, 0);
8347 #ifdef TARGET_NR_umount
8348 case TARGET_NR_umount
:
8349 if (!(p
= lock_user_string(arg1
)))
8351 ret
= get_errno(umount(p
));
8352 unlock_user(p
, arg1
, 0);
8355 #ifdef TARGET_NR_stime /* not on alpha */
8356 case TARGET_NR_stime
:
8359 if (get_user_sal(host_time
, arg1
))
8361 ret
= get_errno(stime(&host_time
));
8365 case TARGET_NR_ptrace
:
8367 #ifdef TARGET_NR_alarm /* not on alpha */
8368 case TARGET_NR_alarm
:
8372 #ifdef TARGET_NR_oldfstat
8373 case TARGET_NR_oldfstat
:
8376 #ifdef TARGET_NR_pause /* not on alpha */
8377 case TARGET_NR_pause
:
8378 if (!block_signals()) {
8379 sigsuspend(&((TaskState
*)cpu
->opaque
)->signal_mask
);
8381 ret
= -TARGET_EINTR
;
8384 #ifdef TARGET_NR_utime
8385 case TARGET_NR_utime
:
8387 struct utimbuf tbuf
, *host_tbuf
;
8388 struct target_utimbuf
*target_tbuf
;
8390 if (!lock_user_struct(VERIFY_READ
, target_tbuf
, arg2
, 1))
8392 tbuf
.actime
= tswapal(target_tbuf
->actime
);
8393 tbuf
.modtime
= tswapal(target_tbuf
->modtime
);
8394 unlock_user_struct(target_tbuf
, arg2
, 0);
8399 if (!(p
= lock_user_string(arg1
)))
8401 ret
= get_errno(utime(p
, host_tbuf
));
8402 unlock_user(p
, arg1
, 0);
8406 #ifdef TARGET_NR_utimes
8407 case TARGET_NR_utimes
:
8409 struct timeval
*tvp
, tv
[2];
8411 if (copy_from_user_timeval(&tv
[0], arg2
)
8412 || copy_from_user_timeval(&tv
[1],
8413 arg2
+ sizeof(struct target_timeval
)))
8419 if (!(p
= lock_user_string(arg1
)))
8421 ret
= get_errno(utimes(p
, tvp
));
8422 unlock_user(p
, arg1
, 0);
8426 #if defined(TARGET_NR_futimesat)
8427 case TARGET_NR_futimesat
:
8429 struct timeval
*tvp
, tv
[2];
8431 if (copy_from_user_timeval(&tv
[0], arg3
)
8432 || copy_from_user_timeval(&tv
[1],
8433 arg3
+ sizeof(struct target_timeval
)))
8439 if (!(p
= lock_user_string(arg2
)))
8441 ret
= get_errno(futimesat(arg1
, path(p
), tvp
));
8442 unlock_user(p
, arg2
, 0);
8446 #ifdef TARGET_NR_stty
8447 case TARGET_NR_stty
:
8450 #ifdef TARGET_NR_gtty
8451 case TARGET_NR_gtty
:
8454 #ifdef TARGET_NR_access
8455 case TARGET_NR_access
:
8456 if (!(p
= lock_user_string(arg1
)))
8458 ret
= get_errno(access(path(p
), arg2
));
8459 unlock_user(p
, arg1
, 0);
8462 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8463 case TARGET_NR_faccessat
:
8464 if (!(p
= lock_user_string(arg2
)))
8466 ret
= get_errno(faccessat(arg1
, p
, arg3
, 0));
8467 unlock_user(p
, arg2
, 0);
8470 #ifdef TARGET_NR_nice /* not on alpha */
8471 case TARGET_NR_nice
:
8472 ret
= get_errno(nice(arg1
));
8475 #ifdef TARGET_NR_ftime
8476 case TARGET_NR_ftime
:
8479 case TARGET_NR_sync
:
8483 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8484 case TARGET_NR_syncfs
:
8485 ret
= get_errno(syncfs(arg1
));
8488 case TARGET_NR_kill
:
8489 ret
= get_errno(safe_kill(arg1
, target_to_host_signal(arg2
)));
8491 #ifdef TARGET_NR_rename
8492 case TARGET_NR_rename
:
8495 p
= lock_user_string(arg1
);
8496 p2
= lock_user_string(arg2
);
8498 ret
= -TARGET_EFAULT
;
8500 ret
= get_errno(rename(p
, p2
));
8501 unlock_user(p2
, arg2
, 0);
8502 unlock_user(p
, arg1
, 0);
8506 #if defined(TARGET_NR_renameat)
8507 case TARGET_NR_renameat
:
8510 p
= lock_user_string(arg2
);
8511 p2
= lock_user_string(arg4
);
8513 ret
= -TARGET_EFAULT
;
8515 ret
= get_errno(renameat(arg1
, p
, arg3
, p2
));
8516 unlock_user(p2
, arg4
, 0);
8517 unlock_user(p
, arg2
, 0);
8521 #if defined(TARGET_NR_renameat2)
8522 case TARGET_NR_renameat2
:
8525 p
= lock_user_string(arg2
);
8526 p2
= lock_user_string(arg4
);
8528 ret
= -TARGET_EFAULT
;
8530 ret
= get_errno(sys_renameat2(arg1
, p
, arg3
, p2
, arg5
));
8532 unlock_user(p2
, arg4
, 0);
8533 unlock_user(p
, arg2
, 0);
8537 #ifdef TARGET_NR_mkdir
8538 case TARGET_NR_mkdir
:
8539 if (!(p
= lock_user_string(arg1
)))
8541 ret
= get_errno(mkdir(p
, arg2
));
8542 unlock_user(p
, arg1
, 0);
8545 #if defined(TARGET_NR_mkdirat)
8546 case TARGET_NR_mkdirat
:
8547 if (!(p
= lock_user_string(arg2
)))
8549 ret
= get_errno(mkdirat(arg1
, p
, arg3
));
8550 unlock_user(p
, arg2
, 0);
8553 #ifdef TARGET_NR_rmdir
8554 case TARGET_NR_rmdir
:
8555 if (!(p
= lock_user_string(arg1
)))
8557 ret
= get_errno(rmdir(p
));
8558 unlock_user(p
, arg1
, 0);
8562 ret
= get_errno(dup(arg1
));
8564 fd_trans_dup(arg1
, ret
);
8567 #ifdef TARGET_NR_pipe
8568 case TARGET_NR_pipe
:
8569 ret
= do_pipe(cpu_env
, arg1
, 0, 0);
8572 #ifdef TARGET_NR_pipe2
8573 case TARGET_NR_pipe2
:
8574 ret
= do_pipe(cpu_env
, arg1
,
8575 target_to_host_bitmask(arg2
, fcntl_flags_tbl
), 1);
8578 case TARGET_NR_times
:
8580 struct target_tms
*tmsp
;
8582 ret
= get_errno(times(&tms
));
8584 tmsp
= lock_user(VERIFY_WRITE
, arg1
, sizeof(struct target_tms
), 0);
8587 tmsp
->tms_utime
= tswapal(host_to_target_clock_t(tms
.tms_utime
));
8588 tmsp
->tms_stime
= tswapal(host_to_target_clock_t(tms
.tms_stime
));
8589 tmsp
->tms_cutime
= tswapal(host_to_target_clock_t(tms
.tms_cutime
));
8590 tmsp
->tms_cstime
= tswapal(host_to_target_clock_t(tms
.tms_cstime
));
8593 ret
= host_to_target_clock_t(ret
);
8596 #ifdef TARGET_NR_prof
8597 case TARGET_NR_prof
:
8600 #ifdef TARGET_NR_signal
8601 case TARGET_NR_signal
:
8604 case TARGET_NR_acct
:
8606 ret
= get_errno(acct(NULL
));
8608 if (!(p
= lock_user_string(arg1
)))
8610 ret
= get_errno(acct(path(p
)));
8611 unlock_user(p
, arg1
, 0);
8614 #ifdef TARGET_NR_umount2
8615 case TARGET_NR_umount2
:
8616 if (!(p
= lock_user_string(arg1
)))
8618 ret
= get_errno(umount2(p
, arg2
));
8619 unlock_user(p
, arg1
, 0);
8622 #ifdef TARGET_NR_lock
8623 case TARGET_NR_lock
:
8626 case TARGET_NR_ioctl
:
8627 ret
= do_ioctl(arg1
, arg2
, arg3
);
8629 #ifdef TARGET_NR_fcntl
8630 case TARGET_NR_fcntl
:
8631 ret
= do_fcntl(arg1
, arg2
, arg3
);
8634 #ifdef TARGET_NR_mpx
8638 case TARGET_NR_setpgid
:
8639 ret
= get_errno(setpgid(arg1
, arg2
));
8641 #ifdef TARGET_NR_ulimit
8642 case TARGET_NR_ulimit
:
8645 #ifdef TARGET_NR_oldolduname
8646 case TARGET_NR_oldolduname
:
8649 case TARGET_NR_umask
:
8650 ret
= get_errno(umask(arg1
));
8652 case TARGET_NR_chroot
:
8653 if (!(p
= lock_user_string(arg1
)))
8655 ret
= get_errno(chroot(p
));
8656 unlock_user(p
, arg1
, 0);
8658 #ifdef TARGET_NR_ustat
8659 case TARGET_NR_ustat
:
8662 #ifdef TARGET_NR_dup2
8663 case TARGET_NR_dup2
:
8664 ret
= get_errno(dup2(arg1
, arg2
));
8666 fd_trans_dup(arg1
, arg2
);
8670 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8671 case TARGET_NR_dup3
:
8675 if ((arg3
& ~TARGET_O_CLOEXEC
) != 0) {
8678 host_flags
= target_to_host_bitmask(arg3
, fcntl_flags_tbl
);
8679 ret
= get_errno(dup3(arg1
, arg2
, host_flags
));
8681 fd_trans_dup(arg1
, arg2
);
8686 #ifdef TARGET_NR_getppid /* not on alpha */
8687 case TARGET_NR_getppid
:
8688 ret
= get_errno(getppid());
8691 #ifdef TARGET_NR_getpgrp
8692 case TARGET_NR_getpgrp
:
8693 ret
= get_errno(getpgrp());
8696 case TARGET_NR_setsid
:
8697 ret
= get_errno(setsid());
8699 #ifdef TARGET_NR_sigaction
8700 case TARGET_NR_sigaction
:
8702 #if defined(TARGET_ALPHA)
8703 struct target_sigaction act
, oact
, *pact
= 0;
8704 struct target_old_sigaction
*old_act
;
8706 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8708 act
._sa_handler
= old_act
->_sa_handler
;
8709 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8710 act
.sa_flags
= old_act
->sa_flags
;
8711 act
.sa_restorer
= 0;
8712 unlock_user_struct(old_act
, arg2
, 0);
8715 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8716 if (!is_error(ret
) && arg3
) {
8717 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8719 old_act
->_sa_handler
= oact
._sa_handler
;
8720 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8721 old_act
->sa_flags
= oact
.sa_flags
;
8722 unlock_user_struct(old_act
, arg3
, 1);
8724 #elif defined(TARGET_MIPS)
8725 struct target_sigaction act
, oact
, *pact
, *old_act
;
8728 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8730 act
._sa_handler
= old_act
->_sa_handler
;
8731 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
.sig
[0]);
8732 act
.sa_flags
= old_act
->sa_flags
;
8733 unlock_user_struct(old_act
, arg2
, 0);
8739 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8741 if (!is_error(ret
) && arg3
) {
8742 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8744 old_act
->_sa_handler
= oact
._sa_handler
;
8745 old_act
->sa_flags
= oact
.sa_flags
;
8746 old_act
->sa_mask
.sig
[0] = oact
.sa_mask
.sig
[0];
8747 old_act
->sa_mask
.sig
[1] = 0;
8748 old_act
->sa_mask
.sig
[2] = 0;
8749 old_act
->sa_mask
.sig
[3] = 0;
8750 unlock_user_struct(old_act
, arg3
, 1);
8753 struct target_old_sigaction
*old_act
;
8754 struct target_sigaction act
, oact
, *pact
;
8756 if (!lock_user_struct(VERIFY_READ
, old_act
, arg2
, 1))
8758 act
._sa_handler
= old_act
->_sa_handler
;
8759 target_siginitset(&act
.sa_mask
, old_act
->sa_mask
);
8760 act
.sa_flags
= old_act
->sa_flags
;
8761 act
.sa_restorer
= old_act
->sa_restorer
;
8762 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8763 act
.ka_restorer
= 0;
8765 unlock_user_struct(old_act
, arg2
, 0);
8770 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8771 if (!is_error(ret
) && arg3
) {
8772 if (!lock_user_struct(VERIFY_WRITE
, old_act
, arg3
, 0))
8774 old_act
->_sa_handler
= oact
._sa_handler
;
8775 old_act
->sa_mask
= oact
.sa_mask
.sig
[0];
8776 old_act
->sa_flags
= oact
.sa_flags
;
8777 old_act
->sa_restorer
= oact
.sa_restorer
;
8778 unlock_user_struct(old_act
, arg3
, 1);
8784 case TARGET_NR_rt_sigaction
:
8786 #if defined(TARGET_ALPHA)
8787 /* For Alpha and SPARC this is a 5 argument syscall, with
8788 * a 'restorer' parameter which must be copied into the
8789 * sa_restorer field of the sigaction struct.
8790 * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8791 * and arg5 is the sigsetsize.
8792 * Alpha also has a separate rt_sigaction struct that it uses
8793 * here; SPARC uses the usual sigaction struct.
8795 struct target_rt_sigaction
*rt_act
;
8796 struct target_sigaction act
, oact
, *pact
= 0;
8798 if (arg4
!= sizeof(target_sigset_t
)) {
8799 ret
= -TARGET_EINVAL
;
8803 if (!lock_user_struct(VERIFY_READ
, rt_act
, arg2
, 1))
8805 act
._sa_handler
= rt_act
->_sa_handler
;
8806 act
.sa_mask
= rt_act
->sa_mask
;
8807 act
.sa_flags
= rt_act
->sa_flags
;
8808 act
.sa_restorer
= arg5
;
8809 unlock_user_struct(rt_act
, arg2
, 0);
8812 ret
= get_errno(do_sigaction(arg1
, pact
, &oact
));
8813 if (!is_error(ret
) && arg3
) {
8814 if (!lock_user_struct(VERIFY_WRITE
, rt_act
, arg3
, 0))
8816 rt_act
->_sa_handler
= oact
._sa_handler
;
8817 rt_act
->sa_mask
= oact
.sa_mask
;
8818 rt_act
->sa_flags
= oact
.sa_flags
;
8819 unlock_user_struct(rt_act
, arg3
, 1);
8823 target_ulong restorer
= arg4
;
8824 target_ulong sigsetsize
= arg5
;
8826 target_ulong sigsetsize
= arg4
;
8828 struct target_sigaction
*act
;
8829 struct target_sigaction
*oact
;
8831 if (sigsetsize
!= sizeof(target_sigset_t
)) {
8832 ret
= -TARGET_EINVAL
;
8836 if (!lock_user_struct(VERIFY_READ
, act
, arg2
, 1)) {
8839 #ifdef TARGET_ARCH_HAS_KA_RESTORER
8840 act
->ka_restorer
= restorer
;
8846 if (!lock_user_struct(VERIFY_WRITE
, oact
, arg3
, 0)) {
8847 ret
= -TARGET_EFAULT
;
8848 goto rt_sigaction_fail
;
8852 ret
= get_errno(do_sigaction(arg1
, act
, oact
));
8855 unlock_user_struct(act
, arg2
, 0);
8857 unlock_user_struct(oact
, arg3
, 1);
8861 #ifdef TARGET_NR_sgetmask /* not on alpha */
8862 case TARGET_NR_sgetmask
:
8865 abi_ulong target_set
;
8866 ret
= do_sigprocmask(0, NULL
, &cur_set
);
8868 host_to_target_old_sigset(&target_set
, &cur_set
);
8874 #ifdef TARGET_NR_ssetmask /* not on alpha */
8875 case TARGET_NR_ssetmask
:
8878 abi_ulong target_set
= arg1
;
8879 target_to_host_old_sigset(&set
, &target_set
);
8880 ret
= do_sigprocmask(SIG_SETMASK
, &set
, &oset
);
8882 host_to_target_old_sigset(&target_set
, &oset
);
8888 #ifdef TARGET_NR_sigprocmask
8889 case TARGET_NR_sigprocmask
:
8891 #if defined(TARGET_ALPHA)
8892 sigset_t set
, oldset
;
8897 case TARGET_SIG_BLOCK
:
8900 case TARGET_SIG_UNBLOCK
:
8903 case TARGET_SIG_SETMASK
:
8907 ret
= -TARGET_EINVAL
;
8911 target_to_host_old_sigset(&set
, &mask
);
8913 ret
= do_sigprocmask(how
, &set
, &oldset
);
8914 if (!is_error(ret
)) {
8915 host_to_target_old_sigset(&mask
, &oldset
);
8917 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0; /* force no error */
8920 sigset_t set
, oldset
, *set_ptr
;
8925 case TARGET_SIG_BLOCK
:
8928 case TARGET_SIG_UNBLOCK
:
8931 case TARGET_SIG_SETMASK
:
8935 ret
= -TARGET_EINVAL
;
8938 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8940 target_to_host_old_sigset(&set
, p
);
8941 unlock_user(p
, arg2
, 0);
8947 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8948 if (!is_error(ret
) && arg3
) {
8949 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8951 host_to_target_old_sigset(p
, &oldset
);
8952 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
8958 case TARGET_NR_rt_sigprocmask
:
8961 sigset_t set
, oldset
, *set_ptr
;
8963 if (arg4
!= sizeof(target_sigset_t
)) {
8964 ret
= -TARGET_EINVAL
;
8970 case TARGET_SIG_BLOCK
:
8973 case TARGET_SIG_UNBLOCK
:
8976 case TARGET_SIG_SETMASK
:
8980 ret
= -TARGET_EINVAL
;
8983 if (!(p
= lock_user(VERIFY_READ
, arg2
, sizeof(target_sigset_t
), 1)))
8985 target_to_host_sigset(&set
, p
);
8986 unlock_user(p
, arg2
, 0);
8992 ret
= do_sigprocmask(how
, set_ptr
, &oldset
);
8993 if (!is_error(ret
) && arg3
) {
8994 if (!(p
= lock_user(VERIFY_WRITE
, arg3
, sizeof(target_sigset_t
), 0)))
8996 host_to_target_sigset(p
, &oldset
);
8997 unlock_user(p
, arg3
, sizeof(target_sigset_t
));
9001 #ifdef TARGET_NR_sigpending
9002 case TARGET_NR_sigpending
:
9005 ret
= get_errno(sigpending(&set
));
9006 if (!is_error(ret
)) {
9007 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
9009 host_to_target_old_sigset(p
, &set
);
9010 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
9015 case TARGET_NR_rt_sigpending
:
9019 /* Yes, this check is >, not != like most. We follow the kernel's
9020 * logic and it does it like this because it implements
9021 * NR_sigpending through the same code path, and in that case
9022 * the old_sigset_t is smaller in size.
9024 if (arg2
> sizeof(target_sigset_t
)) {
9025 ret
= -TARGET_EINVAL
;
9029 ret
= get_errno(sigpending(&set
));
9030 if (!is_error(ret
)) {
9031 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, sizeof(target_sigset_t
), 0)))
9033 host_to_target_sigset(p
, &set
);
9034 unlock_user(p
, arg1
, sizeof(target_sigset_t
));
9038 #ifdef TARGET_NR_sigsuspend
9039 case TARGET_NR_sigsuspend
:
9041 TaskState
*ts
= cpu
->opaque
;
9042 #if defined(TARGET_ALPHA)
9043 abi_ulong mask
= arg1
;
9044 target_to_host_old_sigset(&ts
->sigsuspend_mask
, &mask
);
9046 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
9048 target_to_host_old_sigset(&ts
->sigsuspend_mask
, p
);
9049 unlock_user(p
, arg1
, 0);
9051 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
9053 if (ret
!= -TARGET_ERESTARTSYS
) {
9054 ts
->in_sigsuspend
= 1;
9059 case TARGET_NR_rt_sigsuspend
:
9061 TaskState
*ts
= cpu
->opaque
;
9063 if (arg2
!= sizeof(target_sigset_t
)) {
9064 ret
= -TARGET_EINVAL
;
9067 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
9069 target_to_host_sigset(&ts
->sigsuspend_mask
, p
);
9070 unlock_user(p
, arg1
, 0);
9071 ret
= get_errno(safe_rt_sigsuspend(&ts
->sigsuspend_mask
,
9073 if (ret
!= -TARGET_ERESTARTSYS
) {
9074 ts
->in_sigsuspend
= 1;
9078 case TARGET_NR_rt_sigtimedwait
:
9081 struct timespec uts
, *puts
;
9084 if (arg4
!= sizeof(target_sigset_t
)) {
9085 ret
= -TARGET_EINVAL
;
9089 if (!(p
= lock_user(VERIFY_READ
, arg1
, sizeof(target_sigset_t
), 1)))
9091 target_to_host_sigset(&set
, p
);
9092 unlock_user(p
, arg1
, 0);
9095 target_to_host_timespec(puts
, arg3
);
9099 ret
= get_errno(safe_rt_sigtimedwait(&set
, &uinfo
, puts
,
9101 if (!is_error(ret
)) {
9103 p
= lock_user(VERIFY_WRITE
, arg2
, sizeof(target_siginfo_t
),
9108 host_to_target_siginfo(p
, &uinfo
);
9109 unlock_user(p
, arg2
, sizeof(target_siginfo_t
));
9111 ret
= host_to_target_signal(ret
);
9115 case TARGET_NR_rt_sigqueueinfo
:
9119 p
= lock_user(VERIFY_READ
, arg3
, sizeof(target_siginfo_t
), 1);
9123 target_to_host_siginfo(&uinfo
, p
);
9124 unlock_user(p
, arg3
, 0);
9125 ret
= get_errno(sys_rt_sigqueueinfo(arg1
, arg2
, &uinfo
));
9128 case TARGET_NR_rt_tgsigqueueinfo
:
9132 p
= lock_user(VERIFY_READ
, arg4
, sizeof(target_siginfo_t
), 1);
9136 target_to_host_siginfo(&uinfo
, p
);
9137 unlock_user(p
, arg4
, 0);
9138 ret
= get_errno(sys_rt_tgsigqueueinfo(arg1
, arg2
, arg3
, &uinfo
));
9141 #ifdef TARGET_NR_sigreturn
9142 case TARGET_NR_sigreturn
:
9143 if (block_signals()) {
9144 ret
= -TARGET_ERESTARTSYS
;
9146 ret
= do_sigreturn(cpu_env
);
9150 case TARGET_NR_rt_sigreturn
:
9151 if (block_signals()) {
9152 ret
= -TARGET_ERESTARTSYS
;
9154 ret
= do_rt_sigreturn(cpu_env
);
9157 case TARGET_NR_sethostname
:
9158 if (!(p
= lock_user_string(arg1
)))
9160 ret
= get_errno(sethostname(p
, arg2
));
9161 unlock_user(p
, arg1
, 0);
9163 case TARGET_NR_setrlimit
:
9165 int resource
= target_to_host_resource(arg1
);
9166 struct target_rlimit
*target_rlim
;
9168 if (!lock_user_struct(VERIFY_READ
, target_rlim
, arg2
, 1))
9170 rlim
.rlim_cur
= target_to_host_rlim(target_rlim
->rlim_cur
);
9171 rlim
.rlim_max
= target_to_host_rlim(target_rlim
->rlim_max
);
9172 unlock_user_struct(target_rlim
, arg2
, 0);
9173 ret
= get_errno(setrlimit(resource
, &rlim
));
9176 case TARGET_NR_getrlimit
:
9178 int resource
= target_to_host_resource(arg1
);
9179 struct target_rlimit
*target_rlim
;
9182 ret
= get_errno(getrlimit(resource
, &rlim
));
9183 if (!is_error(ret
)) {
9184 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
9186 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
9187 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
9188 unlock_user_struct(target_rlim
, arg2
, 1);
9192 case TARGET_NR_getrusage
:
9194 struct rusage rusage
;
9195 ret
= get_errno(getrusage(arg1
, &rusage
));
9196 if (!is_error(ret
)) {
9197 ret
= host_to_target_rusage(arg2
, &rusage
);
9201 case TARGET_NR_gettimeofday
:
9204 ret
= get_errno(gettimeofday(&tv
, NULL
));
9205 if (!is_error(ret
)) {
9206 if (copy_to_user_timeval(arg1
, &tv
))
9211 case TARGET_NR_settimeofday
:
9213 struct timeval tv
, *ptv
= NULL
;
9214 struct timezone tz
, *ptz
= NULL
;
9217 if (copy_from_user_timeval(&tv
, arg1
)) {
9224 if (copy_from_user_timezone(&tz
, arg2
)) {
9230 ret
= get_errno(settimeofday(ptv
, ptz
));
9233 #if defined(TARGET_NR_select)
9234 case TARGET_NR_select
:
9235 #if defined(TARGET_WANT_NI_OLD_SELECT)
9236 /* some architectures used to have old_select here
9237 * but now ENOSYS it.
9239 ret
= -TARGET_ENOSYS
;
9240 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9241 ret
= do_old_select(arg1
);
9243 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
9247 #ifdef TARGET_NR_pselect6
9248 case TARGET_NR_pselect6
:
9250 abi_long rfd_addr
, wfd_addr
, efd_addr
, n
, ts_addr
;
9251 fd_set rfds
, wfds
, efds
;
9252 fd_set
*rfds_ptr
, *wfds_ptr
, *efds_ptr
;
9253 struct timespec ts
, *ts_ptr
;
9256 * The 6th arg is actually two args smashed together,
9257 * so we cannot use the C library.
9265 abi_ulong arg_sigset
, arg_sigsize
, *arg7
;
9266 target_sigset_t
*target_sigset
;
9274 ret
= copy_from_user_fdset_ptr(&rfds
, &rfds_ptr
, rfd_addr
, n
);
9278 ret
= copy_from_user_fdset_ptr(&wfds
, &wfds_ptr
, wfd_addr
, n
);
9282 ret
= copy_from_user_fdset_ptr(&efds
, &efds_ptr
, efd_addr
, n
);
9288 * This takes a timespec, and not a timeval, so we cannot
9289 * use the do_select() helper ...
9292 if (target_to_host_timespec(&ts
, ts_addr
)) {
9300 /* Extract the two packed args for the sigset */
9303 sig
.size
= SIGSET_T_SIZE
;
9305 arg7
= lock_user(VERIFY_READ
, arg6
, sizeof(*arg7
) * 2, 1);
9309 arg_sigset
= tswapal(arg7
[0]);
9310 arg_sigsize
= tswapal(arg7
[1]);
9311 unlock_user(arg7
, arg6
, 0);
9315 if (arg_sigsize
!= sizeof(*target_sigset
)) {
9316 /* Like the kernel, we enforce correct size sigsets */
9317 ret
= -TARGET_EINVAL
;
9320 target_sigset
= lock_user(VERIFY_READ
, arg_sigset
,
9321 sizeof(*target_sigset
), 1);
9322 if (!target_sigset
) {
9325 target_to_host_sigset(&set
, target_sigset
);
9326 unlock_user(target_sigset
, arg_sigset
, 0);
9334 ret
= get_errno(safe_pselect6(n
, rfds_ptr
, wfds_ptr
, efds_ptr
,
9337 if (!is_error(ret
)) {
9338 if (rfd_addr
&& copy_to_user_fdset(rfd_addr
, &rfds
, n
))
9340 if (wfd_addr
&& copy_to_user_fdset(wfd_addr
, &wfds
, n
))
9342 if (efd_addr
&& copy_to_user_fdset(efd_addr
, &efds
, n
))
9345 if (ts_addr
&& host_to_target_timespec(ts_addr
, &ts
))
9351 #ifdef TARGET_NR_symlink
9352 case TARGET_NR_symlink
:
9355 p
= lock_user_string(arg1
);
9356 p2
= lock_user_string(arg2
);
9358 ret
= -TARGET_EFAULT
;
9360 ret
= get_errno(symlink(p
, p2
));
9361 unlock_user(p2
, arg2
, 0);
9362 unlock_user(p
, arg1
, 0);
9366 #if defined(TARGET_NR_symlinkat)
9367 case TARGET_NR_symlinkat
:
9370 p
= lock_user_string(arg1
);
9371 p2
= lock_user_string(arg3
);
9373 ret
= -TARGET_EFAULT
;
9375 ret
= get_errno(symlinkat(p
, arg2
, p2
));
9376 unlock_user(p2
, arg3
, 0);
9377 unlock_user(p
, arg1
, 0);
9381 #ifdef TARGET_NR_oldlstat
9382 case TARGET_NR_oldlstat
:
9385 #ifdef TARGET_NR_readlink
9386 case TARGET_NR_readlink
:
9389 p
= lock_user_string(arg1
);
9390 p2
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9392 ret
= -TARGET_EFAULT
;
9394 /* Short circuit this for the magic exe check. */
9395 ret
= -TARGET_EINVAL
;
9396 } else if (is_proc_myself((const char *)p
, "exe")) {
9397 char real
[PATH_MAX
], *temp
;
9398 temp
= realpath(exec_path
, real
);
9399 /* Return value is # of bytes that we wrote to the buffer. */
9401 ret
= get_errno(-1);
9403 /* Don't worry about sign mismatch as earlier mapping
9404 * logic would have thrown a bad address error. */
9405 ret
= MIN(strlen(real
), arg3
);
9406 /* We cannot NUL terminate the string. */
9407 memcpy(p2
, real
, ret
);
9410 ret
= get_errno(readlink(path(p
), p2
, arg3
));
9412 unlock_user(p2
, arg2
, ret
);
9413 unlock_user(p
, arg1
, 0);
9417 #if defined(TARGET_NR_readlinkat)
9418 case TARGET_NR_readlinkat
:
9421 p
= lock_user_string(arg2
);
9422 p2
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
9424 ret
= -TARGET_EFAULT
;
9425 } else if (is_proc_myself((const char *)p
, "exe")) {
9426 char real
[PATH_MAX
], *temp
;
9427 temp
= realpath(exec_path
, real
);
9428 ret
= temp
== NULL
? get_errno(-1) : strlen(real
) ;
9429 snprintf((char *)p2
, arg4
, "%s", real
);
9431 ret
= get_errno(readlinkat(arg1
, path(p
), p2
, arg4
));
9433 unlock_user(p2
, arg3
, ret
);
9434 unlock_user(p
, arg2
, 0);
9438 #ifdef TARGET_NR_uselib
9439 case TARGET_NR_uselib
:
9442 #ifdef TARGET_NR_swapon
9443 case TARGET_NR_swapon
:
9444 if (!(p
= lock_user_string(arg1
)))
9446 ret
= get_errno(swapon(p
, arg2
));
9447 unlock_user(p
, arg1
, 0);
9450 case TARGET_NR_reboot
:
9451 if (arg3
== LINUX_REBOOT_CMD_RESTART2
) {
9452 /* arg4 must be ignored in all other cases */
9453 p
= lock_user_string(arg4
);
9457 ret
= get_errno(reboot(arg1
, arg2
, arg3
, p
));
9458 unlock_user(p
, arg4
, 0);
9460 ret
= get_errno(reboot(arg1
, arg2
, arg3
, NULL
));
9463 #ifdef TARGET_NR_readdir
9464 case TARGET_NR_readdir
:
9467 #ifdef TARGET_NR_mmap
9468 case TARGET_NR_mmap
:
9469 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9470 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9471 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9472 || defined(TARGET_S390X)
9475 abi_ulong v1
, v2
, v3
, v4
, v5
, v6
;
9476 if (!(v
= lock_user(VERIFY_READ
, arg1
, 6 * sizeof(abi_ulong
), 1)))
9484 unlock_user(v
, arg1
, 0);
9485 ret
= get_errno(target_mmap(v1
, v2
, v3
,
9486 target_to_host_bitmask(v4
, mmap_flags_tbl
),
9490 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9491 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9497 #ifdef TARGET_NR_mmap2
9498 case TARGET_NR_mmap2
:
9500 #define MMAP_SHIFT 12
9502 ret
= get_errno(target_mmap(arg1
, arg2
, arg3
,
9503 target_to_host_bitmask(arg4
, mmap_flags_tbl
),
9505 arg6
<< MMAP_SHIFT
));
9508 case TARGET_NR_munmap
:
9509 ret
= get_errno(target_munmap(arg1
, arg2
));
9511 case TARGET_NR_mprotect
:
9513 TaskState
*ts
= cpu
->opaque
;
9514 /* Special hack to detect libc making the stack executable. */
9515 if ((arg3
& PROT_GROWSDOWN
)
9516 && arg1
>= ts
->info
->stack_limit
9517 && arg1
<= ts
->info
->start_stack
) {
9518 arg3
&= ~PROT_GROWSDOWN
;
9519 arg2
= arg2
+ arg1
- ts
->info
->stack_limit
;
9520 arg1
= ts
->info
->stack_limit
;
9523 ret
= get_errno(target_mprotect(arg1
, arg2
, arg3
));
9525 #ifdef TARGET_NR_mremap
9526 case TARGET_NR_mremap
:
9527 ret
= get_errno(target_mremap(arg1
, arg2
, arg3
, arg4
, arg5
));
9530 /* ??? msync/mlock/munlock are broken for softmmu. */
9531 #ifdef TARGET_NR_msync
9532 case TARGET_NR_msync
:
9533 ret
= get_errno(msync(g2h(arg1
), arg2
, arg3
));
9536 #ifdef TARGET_NR_mlock
9537 case TARGET_NR_mlock
:
9538 ret
= get_errno(mlock(g2h(arg1
), arg2
));
9541 #ifdef TARGET_NR_munlock
9542 case TARGET_NR_munlock
:
9543 ret
= get_errno(munlock(g2h(arg1
), arg2
));
9546 #ifdef TARGET_NR_mlockall
9547 case TARGET_NR_mlockall
:
9548 ret
= get_errno(mlockall(target_to_host_mlockall_arg(arg1
)));
9551 #ifdef TARGET_NR_munlockall
9552 case TARGET_NR_munlockall
:
9553 ret
= get_errno(munlockall());
9556 case TARGET_NR_truncate
:
9557 if (!(p
= lock_user_string(arg1
)))
9559 ret
= get_errno(truncate(p
, arg2
));
9560 unlock_user(p
, arg1
, 0);
9562 case TARGET_NR_ftruncate
:
9563 ret
= get_errno(ftruncate(arg1
, arg2
));
9565 case TARGET_NR_fchmod
:
9566 ret
= get_errno(fchmod(arg1
, arg2
));
9568 #if defined(TARGET_NR_fchmodat)
9569 case TARGET_NR_fchmodat
:
9570 if (!(p
= lock_user_string(arg2
)))
9572 ret
= get_errno(fchmodat(arg1
, p
, arg3
, 0));
9573 unlock_user(p
, arg2
, 0);
9576 case TARGET_NR_getpriority
:
9577 /* Note that negative values are valid for getpriority, so we must
9578 differentiate based on errno settings. */
9580 ret
= getpriority(arg1
, arg2
);
9581 if (ret
== -1 && errno
!= 0) {
9582 ret
= -host_to_target_errno(errno
);
9586 /* Return value is the unbiased priority. Signal no error. */
9587 ((CPUAlphaState
*)cpu_env
)->ir
[IR_V0
] = 0;
9589 /* Return value is a biased priority to avoid negative numbers. */
9593 case TARGET_NR_setpriority
:
9594 ret
= get_errno(setpriority(arg1
, arg2
, arg3
));
9596 #ifdef TARGET_NR_profil
9597 case TARGET_NR_profil
:
9600 case TARGET_NR_statfs
:
9601 if (!(p
= lock_user_string(arg1
)))
9603 ret
= get_errno(statfs(path(p
), &stfs
));
9604 unlock_user(p
, arg1
, 0);
9606 if (!is_error(ret
)) {
9607 struct target_statfs
*target_stfs
;
9609 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg2
, 0))
9611 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9612 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9613 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9614 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9615 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9616 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9617 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9618 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9619 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9620 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9621 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9622 #ifdef _STATFS_F_FLAGS
9623 __put_user(stfs
.f_flags
, &target_stfs
->f_flags
);
9625 __put_user(0, &target_stfs
->f_flags
);
9627 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9628 unlock_user_struct(target_stfs
, arg2
, 1);
9631 case TARGET_NR_fstatfs
:
9632 ret
= get_errno(fstatfs(arg1
, &stfs
));
9633 goto convert_statfs
;
9634 #ifdef TARGET_NR_statfs64
9635 case TARGET_NR_statfs64
:
9636 if (!(p
= lock_user_string(arg1
)))
9638 ret
= get_errno(statfs(path(p
), &stfs
));
9639 unlock_user(p
, arg1
, 0);
9641 if (!is_error(ret
)) {
9642 struct target_statfs64
*target_stfs
;
9644 if (!lock_user_struct(VERIFY_WRITE
, target_stfs
, arg3
, 0))
9646 __put_user(stfs
.f_type
, &target_stfs
->f_type
);
9647 __put_user(stfs
.f_bsize
, &target_stfs
->f_bsize
);
9648 __put_user(stfs
.f_blocks
, &target_stfs
->f_blocks
);
9649 __put_user(stfs
.f_bfree
, &target_stfs
->f_bfree
);
9650 __put_user(stfs
.f_bavail
, &target_stfs
->f_bavail
);
9651 __put_user(stfs
.f_files
, &target_stfs
->f_files
);
9652 __put_user(stfs
.f_ffree
, &target_stfs
->f_ffree
);
9653 __put_user(stfs
.f_fsid
.__val
[0], &target_stfs
->f_fsid
.val
[0]);
9654 __put_user(stfs
.f_fsid
.__val
[1], &target_stfs
->f_fsid
.val
[1]);
9655 __put_user(stfs
.f_namelen
, &target_stfs
->f_namelen
);
9656 __put_user(stfs
.f_frsize
, &target_stfs
->f_frsize
);
9657 memset(target_stfs
->f_spare
, 0, sizeof(target_stfs
->f_spare
));
9658 unlock_user_struct(target_stfs
, arg3
, 1);
9661 case TARGET_NR_fstatfs64
:
9662 ret
= get_errno(fstatfs(arg1
, &stfs
));
9663 goto convert_statfs64
;
9665 #ifdef TARGET_NR_ioperm
9666 case TARGET_NR_ioperm
:
9669 #ifdef TARGET_NR_socketcall
9670 case TARGET_NR_socketcall
:
9671 ret
= do_socketcall(arg1
, arg2
);
9674 #ifdef TARGET_NR_accept
9675 case TARGET_NR_accept
:
9676 ret
= do_accept4(arg1
, arg2
, arg3
, 0);
9679 #ifdef TARGET_NR_accept4
9680 case TARGET_NR_accept4
:
9681 ret
= do_accept4(arg1
, arg2
, arg3
, arg4
);
9684 #ifdef TARGET_NR_bind
9685 case TARGET_NR_bind
:
9686 ret
= do_bind(arg1
, arg2
, arg3
);
9689 #ifdef TARGET_NR_connect
9690 case TARGET_NR_connect
:
9691 ret
= do_connect(arg1
, arg2
, arg3
);
9694 #ifdef TARGET_NR_getpeername
9695 case TARGET_NR_getpeername
:
9696 ret
= do_getpeername(arg1
, arg2
, arg3
);
9699 #ifdef TARGET_NR_getsockname
9700 case TARGET_NR_getsockname
:
9701 ret
= do_getsockname(arg1
, arg2
, arg3
);
9704 #ifdef TARGET_NR_getsockopt
9705 case TARGET_NR_getsockopt
:
9706 ret
= do_getsockopt(arg1
, arg2
, arg3
, arg4
, arg5
);
9709 #ifdef TARGET_NR_listen
9710 case TARGET_NR_listen
:
9711 ret
= get_errno(listen(arg1
, arg2
));
9714 #ifdef TARGET_NR_recv
9715 case TARGET_NR_recv
:
9716 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, 0, 0);
9719 #ifdef TARGET_NR_recvfrom
9720 case TARGET_NR_recvfrom
:
9721 ret
= do_recvfrom(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9724 #ifdef TARGET_NR_recvmsg
9725 case TARGET_NR_recvmsg
:
9726 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 0);
9729 #ifdef TARGET_NR_send
9730 case TARGET_NR_send
:
9731 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, 0, 0);
9734 #ifdef TARGET_NR_sendmsg
9735 case TARGET_NR_sendmsg
:
9736 ret
= do_sendrecvmsg(arg1
, arg2
, arg3
, 1);
9739 #ifdef TARGET_NR_sendmmsg
9740 case TARGET_NR_sendmmsg
:
9741 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 1);
9743 case TARGET_NR_recvmmsg
:
9744 ret
= do_sendrecvmmsg(arg1
, arg2
, arg3
, arg4
, 0);
9747 #ifdef TARGET_NR_sendto
9748 case TARGET_NR_sendto
:
9749 ret
= do_sendto(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9752 #ifdef TARGET_NR_shutdown
9753 case TARGET_NR_shutdown
:
9754 ret
= get_errno(shutdown(arg1
, arg2
));
9757 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9758 case TARGET_NR_getrandom
:
9759 p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
9763 ret
= get_errno(getrandom(p
, arg2
, arg3
));
9764 unlock_user(p
, arg1
, ret
);
9767 #ifdef TARGET_NR_socket
9768 case TARGET_NR_socket
:
9769 ret
= do_socket(arg1
, arg2
, arg3
);
9772 #ifdef TARGET_NR_socketpair
9773 case TARGET_NR_socketpair
:
9774 ret
= do_socketpair(arg1
, arg2
, arg3
, arg4
);
9777 #ifdef TARGET_NR_setsockopt
9778 case TARGET_NR_setsockopt
:
9779 ret
= do_setsockopt(arg1
, arg2
, arg3
, arg4
, (socklen_t
) arg5
);
9782 #if defined(TARGET_NR_syslog)
9783 case TARGET_NR_syslog
:
9788 case TARGET_SYSLOG_ACTION_CLOSE
: /* Close log */
9789 case TARGET_SYSLOG_ACTION_OPEN
: /* Open log */
9790 case TARGET_SYSLOG_ACTION_CLEAR
: /* Clear ring buffer */
9791 case TARGET_SYSLOG_ACTION_CONSOLE_OFF
: /* Disable logging */
9792 case TARGET_SYSLOG_ACTION_CONSOLE_ON
: /* Enable logging */
9793 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL
: /* Set messages level */
9794 case TARGET_SYSLOG_ACTION_SIZE_UNREAD
: /* Number of chars */
9795 case TARGET_SYSLOG_ACTION_SIZE_BUFFER
: /* Size of the buffer */
9797 ret
= get_errno(sys_syslog((int)arg1
, NULL
, (int)arg3
));
9800 case TARGET_SYSLOG_ACTION_READ
: /* Read from log */
9801 case TARGET_SYSLOG_ACTION_READ_CLEAR
: /* Read/clear msgs */
9802 case TARGET_SYSLOG_ACTION_READ_ALL
: /* Read last messages */
9804 ret
= -TARGET_EINVAL
;
9812 p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
9814 ret
= -TARGET_EFAULT
;
9817 ret
= get_errno(sys_syslog((int)arg1
, p
, (int)arg3
));
9818 unlock_user(p
, arg2
, arg3
);
9828 case TARGET_NR_setitimer
:
9830 struct itimerval value
, ovalue
, *pvalue
;
9834 if (copy_from_user_timeval(&pvalue
->it_interval
, arg2
)
9835 || copy_from_user_timeval(&pvalue
->it_value
,
9836 arg2
+ sizeof(struct target_timeval
)))
9841 ret
= get_errno(setitimer(arg1
, pvalue
, &ovalue
));
9842 if (!is_error(ret
) && arg3
) {
9843 if (copy_to_user_timeval(arg3
,
9844 &ovalue
.it_interval
)
9845 || copy_to_user_timeval(arg3
+ sizeof(struct target_timeval
),
9851 case TARGET_NR_getitimer
:
9853 struct itimerval value
;
9855 ret
= get_errno(getitimer(arg1
, &value
));
9856 if (!is_error(ret
) && arg2
) {
9857 if (copy_to_user_timeval(arg2
,
9859 || copy_to_user_timeval(arg2
+ sizeof(struct target_timeval
),
9865 #ifdef TARGET_NR_stat
9866 case TARGET_NR_stat
:
9867 if (!(p
= lock_user_string(arg1
)))
9869 ret
= get_errno(stat(path(p
), &st
));
9870 unlock_user(p
, arg1
, 0);
9873 #ifdef TARGET_NR_lstat
9874 case TARGET_NR_lstat
:
9875 if (!(p
= lock_user_string(arg1
)))
9877 ret
= get_errno(lstat(path(p
), &st
));
9878 unlock_user(p
, arg1
, 0);
9881 case TARGET_NR_fstat
:
9883 ret
= get_errno(fstat(arg1
, &st
));
9884 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9887 if (!is_error(ret
)) {
9888 struct target_stat
*target_st
;
9890 if (!lock_user_struct(VERIFY_WRITE
, target_st
, arg2
, 0))
9892 memset(target_st
, 0, sizeof(*target_st
));
9893 __put_user(st
.st_dev
, &target_st
->st_dev
);
9894 __put_user(st
.st_ino
, &target_st
->st_ino
);
9895 __put_user(st
.st_mode
, &target_st
->st_mode
);
9896 __put_user(st
.st_uid
, &target_st
->st_uid
);
9897 __put_user(st
.st_gid
, &target_st
->st_gid
);
9898 __put_user(st
.st_nlink
, &target_st
->st_nlink
);
9899 __put_user(st
.st_rdev
, &target_st
->st_rdev
);
9900 __put_user(st
.st_size
, &target_st
->st_size
);
9901 __put_user(st
.st_blksize
, &target_st
->st_blksize
);
9902 __put_user(st
.st_blocks
, &target_st
->st_blocks
);
9903 __put_user(st
.st_atime
, &target_st
->target_st_atime
);
9904 __put_user(st
.st_mtime
, &target_st
->target_st_mtime
);
9905 __put_user(st
.st_ctime
, &target_st
->target_st_ctime
);
9906 unlock_user_struct(target_st
, arg2
, 1);
9910 #ifdef TARGET_NR_olduname
9911 case TARGET_NR_olduname
:
9914 #ifdef TARGET_NR_iopl
9915 case TARGET_NR_iopl
:
9918 case TARGET_NR_vhangup
:
9919 ret
= get_errno(vhangup());
9921 #ifdef TARGET_NR_idle
9922 case TARGET_NR_idle
:
9925 #ifdef TARGET_NR_syscall
9926 case TARGET_NR_syscall
:
9927 ret
= do_syscall(cpu_env
, arg1
& 0xffff, arg2
, arg3
, arg4
, arg5
,
9928 arg6
, arg7
, arg8
, 0);
9931 case TARGET_NR_wait4
:
9934 abi_long status_ptr
= arg2
;
9935 struct rusage rusage
, *rusage_ptr
;
9936 abi_ulong target_rusage
= arg4
;
9937 abi_long rusage_err
;
9939 rusage_ptr
= &rusage
;
9942 ret
= get_errno(safe_wait4(arg1
, &status
, arg3
, rusage_ptr
));
9943 if (!is_error(ret
)) {
9944 if (status_ptr
&& ret
) {
9945 status
= host_to_target_waitstatus(status
);
9946 if (put_user_s32(status
, status_ptr
))
9949 if (target_rusage
) {
9950 rusage_err
= host_to_target_rusage(target_rusage
, &rusage
);
9958 #ifdef TARGET_NR_swapoff
9959 case TARGET_NR_swapoff
:
9960 if (!(p
= lock_user_string(arg1
)))
9962 ret
= get_errno(swapoff(p
));
9963 unlock_user(p
, arg1
, 0);
9966 case TARGET_NR_sysinfo
:
9968 struct target_sysinfo
*target_value
;
9969 struct sysinfo value
;
9970 ret
= get_errno(sysinfo(&value
));
9971 if (!is_error(ret
) && arg1
)
9973 if (!lock_user_struct(VERIFY_WRITE
, target_value
, arg1
, 0))
9975 __put_user(value
.uptime
, &target_value
->uptime
);
9976 __put_user(value
.loads
[0], &target_value
->loads
[0]);
9977 __put_user(value
.loads
[1], &target_value
->loads
[1]);
9978 __put_user(value
.loads
[2], &target_value
->loads
[2]);
9979 __put_user(value
.totalram
, &target_value
->totalram
);
9980 __put_user(value
.freeram
, &target_value
->freeram
);
9981 __put_user(value
.sharedram
, &target_value
->sharedram
);
9982 __put_user(value
.bufferram
, &target_value
->bufferram
);
9983 __put_user(value
.totalswap
, &target_value
->totalswap
);
9984 __put_user(value
.freeswap
, &target_value
->freeswap
);
9985 __put_user(value
.procs
, &target_value
->procs
);
9986 __put_user(value
.totalhigh
, &target_value
->totalhigh
);
9987 __put_user(value
.freehigh
, &target_value
->freehigh
);
9988 __put_user(value
.mem_unit
, &target_value
->mem_unit
);
9989 unlock_user_struct(target_value
, arg1
, 1);
9993 #ifdef TARGET_NR_ipc
9995 ret
= do_ipc(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
9998 #ifdef TARGET_NR_semget
9999 case TARGET_NR_semget
:
10000 ret
= get_errno(semget(arg1
, arg2
, arg3
));
10003 #ifdef TARGET_NR_semop
10004 case TARGET_NR_semop
:
10005 ret
= do_semop(arg1
, arg2
, arg3
);
10008 #ifdef TARGET_NR_semctl
10009 case TARGET_NR_semctl
:
10010 ret
= do_semctl(arg1
, arg2
, arg3
, arg4
);
10013 #ifdef TARGET_NR_msgctl
10014 case TARGET_NR_msgctl
:
10015 ret
= do_msgctl(arg1
, arg2
, arg3
);
10018 #ifdef TARGET_NR_msgget
10019 case TARGET_NR_msgget
:
10020 ret
= get_errno(msgget(arg1
, arg2
));
10023 #ifdef TARGET_NR_msgrcv
10024 case TARGET_NR_msgrcv
:
10025 ret
= do_msgrcv(arg1
, arg2
, arg3
, arg4
, arg5
);
10028 #ifdef TARGET_NR_msgsnd
10029 case TARGET_NR_msgsnd
:
10030 ret
= do_msgsnd(arg1
, arg2
, arg3
, arg4
);
10033 #ifdef TARGET_NR_shmget
10034 case TARGET_NR_shmget
:
10035 ret
= get_errno(shmget(arg1
, arg2
, arg3
));
10038 #ifdef TARGET_NR_shmctl
10039 case TARGET_NR_shmctl
:
10040 ret
= do_shmctl(arg1
, arg2
, arg3
);
10043 #ifdef TARGET_NR_shmat
10044 case TARGET_NR_shmat
:
10045 ret
= do_shmat(cpu_env
, arg1
, arg2
, arg3
);
10048 #ifdef TARGET_NR_shmdt
10049 case TARGET_NR_shmdt
:
10050 ret
= do_shmdt(arg1
);
10053 case TARGET_NR_fsync
:
10054 ret
= get_errno(fsync(arg1
));
10056 case TARGET_NR_clone
:
10057 /* Linux manages to have three different orderings for its
10058 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10059 * match the kernel's CONFIG_CLONE_* settings.
10060 * Microblaze is further special in that it uses a sixth
10061 * implicit argument to clone for the TLS pointer.
10063 #if defined(TARGET_MICROBLAZE)
10064 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg4
, arg6
, arg5
));
10065 #elif defined(TARGET_CLONE_BACKWARDS)
10066 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg4
, arg5
));
10067 #elif defined(TARGET_CLONE_BACKWARDS2)
10068 ret
= get_errno(do_fork(cpu_env
, arg2
, arg1
, arg3
, arg5
, arg4
));
10070 ret
= get_errno(do_fork(cpu_env
, arg1
, arg2
, arg3
, arg5
, arg4
));
10073 #ifdef __NR_exit_group
10074 /* new thread calls */
10075 case TARGET_NR_exit_group
:
10076 #ifdef TARGET_GPROF
10079 gdb_exit(cpu_env
, arg1
);
10080 ret
= get_errno(exit_group(arg1
));
10083 case TARGET_NR_setdomainname
:
10084 if (!(p
= lock_user_string(arg1
)))
10086 ret
= get_errno(setdomainname(p
, arg2
));
10087 unlock_user(p
, arg1
, 0);
10089 case TARGET_NR_uname
:
10090 /* no need to transcode because we use the linux syscall */
10092 struct new_utsname
* buf
;
10094 if (!lock_user_struct(VERIFY_WRITE
, buf
, arg1
, 0))
10096 ret
= get_errno(sys_uname(buf
));
10097 if (!is_error(ret
)) {
10098 /* Overwrite the native machine name with whatever is being
10100 strcpy (buf
->machine
, cpu_to_uname_machine(cpu_env
));
10101 /* Allow the user to override the reported release. */
10102 if (qemu_uname_release
&& *qemu_uname_release
) {
10103 g_strlcpy(buf
->release
, qemu_uname_release
,
10104 sizeof(buf
->release
));
10107 unlock_user_struct(buf
, arg1
, 1);
10111 case TARGET_NR_modify_ldt
:
10112 ret
= do_modify_ldt(cpu_env
, arg1
, arg2
, arg3
);
10114 #if !defined(TARGET_X86_64)
10115 case TARGET_NR_vm86old
:
10116 goto unimplemented
;
10117 case TARGET_NR_vm86
:
10118 ret
= do_vm86(cpu_env
, arg1
, arg2
);
10122 case TARGET_NR_adjtimex
:
10124 struct timex host_buf
;
10126 if (target_to_host_timex(&host_buf
, arg1
) != 0) {
10129 ret
= get_errno(adjtimex(&host_buf
));
10130 if (!is_error(ret
)) {
10131 if (host_to_target_timex(arg1
, &host_buf
) != 0) {
10137 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10138 case TARGET_NR_clock_adjtime
:
10140 struct timex htx
, *phtx
= &htx
;
10142 if (target_to_host_timex(phtx
, arg2
) != 0) {
10145 ret
= get_errno(clock_adjtime(arg1
, phtx
));
10146 if (!is_error(ret
) && phtx
) {
10147 if (host_to_target_timex(arg2
, phtx
) != 0) {
10154 #ifdef TARGET_NR_create_module
10155 case TARGET_NR_create_module
:
10157 case TARGET_NR_init_module
:
10158 case TARGET_NR_delete_module
:
10159 #ifdef TARGET_NR_get_kernel_syms
10160 case TARGET_NR_get_kernel_syms
:
10162 goto unimplemented
;
10163 case TARGET_NR_quotactl
:
10164 goto unimplemented
;
10165 case TARGET_NR_getpgid
:
10166 ret
= get_errno(getpgid(arg1
));
10168 case TARGET_NR_fchdir
:
10169 ret
= get_errno(fchdir(arg1
));
10171 #ifdef TARGET_NR_bdflush /* not on x86_64 */
10172 case TARGET_NR_bdflush
:
10173 goto unimplemented
;
10175 #ifdef TARGET_NR_sysfs
10176 case TARGET_NR_sysfs
:
10177 goto unimplemented
;
10179 case TARGET_NR_personality
:
10180 ret
= get_errno(personality(arg1
));
10182 #ifdef TARGET_NR_afs_syscall
10183 case TARGET_NR_afs_syscall
:
10184 goto unimplemented
;
10186 #ifdef TARGET_NR__llseek /* Not on alpha */
10187 case TARGET_NR__llseek
:
10190 #if !defined(__NR_llseek)
10191 res
= lseek(arg1
, ((uint64_t)arg2
<< 32) | (abi_ulong
)arg3
, arg5
);
10193 ret
= get_errno(res
);
10198 ret
= get_errno(_llseek(arg1
, arg2
, arg3
, &res
, arg5
));
10200 if ((ret
== 0) && put_user_s64(res
, arg4
)) {
10206 #ifdef TARGET_NR_getdents
10207 case TARGET_NR_getdents
:
10208 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10209 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10211 struct target_dirent
*target_dirp
;
10212 struct linux_dirent
*dirp
;
10213 abi_long count
= arg3
;
10215 dirp
= g_try_malloc(count
);
10217 ret
= -TARGET_ENOMEM
;
10221 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
10222 if (!is_error(ret
)) {
10223 struct linux_dirent
*de
;
10224 struct target_dirent
*tde
;
10226 int reclen
, treclen
;
10227 int count1
, tnamelen
;
10231 if (!(target_dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10235 reclen
= de
->d_reclen
;
10236 tnamelen
= reclen
- offsetof(struct linux_dirent
, d_name
);
10237 assert(tnamelen
>= 0);
10238 treclen
= tnamelen
+ offsetof(struct target_dirent
, d_name
);
10239 assert(count1
+ treclen
<= count
);
10240 tde
->d_reclen
= tswap16(treclen
);
10241 tde
->d_ino
= tswapal(de
->d_ino
);
10242 tde
->d_off
= tswapal(de
->d_off
);
10243 memcpy(tde
->d_name
, de
->d_name
, tnamelen
);
10244 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
10246 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
10250 unlock_user(target_dirp
, arg2
, ret
);
10256 struct linux_dirent
*dirp
;
10257 abi_long count
= arg3
;
10259 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10261 ret
= get_errno(sys_getdents(arg1
, dirp
, count
));
10262 if (!is_error(ret
)) {
10263 struct linux_dirent
*de
;
10268 reclen
= de
->d_reclen
;
10271 de
->d_reclen
= tswap16(reclen
);
10272 tswapls(&de
->d_ino
);
10273 tswapls(&de
->d_off
);
10274 de
= (struct linux_dirent
*)((char *)de
+ reclen
);
10278 unlock_user(dirp
, arg2
, ret
);
10282 /* Implement getdents in terms of getdents64 */
10284 struct linux_dirent64
*dirp
;
10285 abi_long count
= arg3
;
10287 dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0);
10291 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
10292 if (!is_error(ret
)) {
10293 /* Convert the dirent64 structs to target dirent. We do this
10294 * in-place, since we can guarantee that a target_dirent is no
10295 * larger than a dirent64; however this means we have to be
10296 * careful to read everything before writing in the new format.
10298 struct linux_dirent64
*de
;
10299 struct target_dirent
*tde
;
10304 tde
= (struct target_dirent
*)dirp
;
10306 int namelen
, treclen
;
10307 int reclen
= de
->d_reclen
;
10308 uint64_t ino
= de
->d_ino
;
10309 int64_t off
= de
->d_off
;
10310 uint8_t type
= de
->d_type
;
10312 namelen
= strlen(de
->d_name
);
10313 treclen
= offsetof(struct target_dirent
, d_name
)
10315 treclen
= QEMU_ALIGN_UP(treclen
, sizeof(abi_long
));
10317 memmove(tde
->d_name
, de
->d_name
, namelen
+ 1);
10318 tde
->d_ino
= tswapal(ino
);
10319 tde
->d_off
= tswapal(off
);
10320 tde
->d_reclen
= tswap16(treclen
);
10321 /* The target_dirent type is in what was formerly a padding
10322 * byte at the end of the structure:
10324 *(((char *)tde
) + treclen
- 1) = type
;
10326 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
10327 tde
= (struct target_dirent
*)((char *)tde
+ treclen
);
10333 unlock_user(dirp
, arg2
, ret
);
10337 #endif /* TARGET_NR_getdents */
10338 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10339 case TARGET_NR_getdents64
:
10341 struct linux_dirent64
*dirp
;
10342 abi_long count
= arg3
;
10343 if (!(dirp
= lock_user(VERIFY_WRITE
, arg2
, count
, 0)))
10345 ret
= get_errno(sys_getdents64(arg1
, dirp
, count
));
10346 if (!is_error(ret
)) {
10347 struct linux_dirent64
*de
;
10352 reclen
= de
->d_reclen
;
10355 de
->d_reclen
= tswap16(reclen
);
10356 tswap64s((uint64_t *)&de
->d_ino
);
10357 tswap64s((uint64_t *)&de
->d_off
);
10358 de
= (struct linux_dirent64
*)((char *)de
+ reclen
);
10362 unlock_user(dirp
, arg2
, ret
);
10365 #endif /* TARGET_NR_getdents64 */
10366 #if defined(TARGET_NR__newselect)
10367 case TARGET_NR__newselect
:
10368 ret
= do_select(arg1
, arg2
, arg3
, arg4
, arg5
);
10371 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
10372 # ifdef TARGET_NR_poll
10373 case TARGET_NR_poll
:
10375 # ifdef TARGET_NR_ppoll
10376 case TARGET_NR_ppoll
:
10379 struct target_pollfd
*target_pfd
;
10380 unsigned int nfds
= arg2
;
10381 struct pollfd
*pfd
;
10387 if (nfds
> (INT_MAX
/ sizeof(struct target_pollfd
))) {
10388 ret
= -TARGET_EINVAL
;
10392 target_pfd
= lock_user(VERIFY_WRITE
, arg1
,
10393 sizeof(struct target_pollfd
) * nfds
, 1);
10398 pfd
= alloca(sizeof(struct pollfd
) * nfds
);
10399 for (i
= 0; i
< nfds
; i
++) {
10400 pfd
[i
].fd
= tswap32(target_pfd
[i
].fd
);
10401 pfd
[i
].events
= tswap16(target_pfd
[i
].events
);
10406 # ifdef TARGET_NR_ppoll
10407 case TARGET_NR_ppoll
:
10409 struct timespec _timeout_ts
, *timeout_ts
= &_timeout_ts
;
10410 target_sigset_t
*target_set
;
10411 sigset_t _set
, *set
= &_set
;
10414 if (target_to_host_timespec(timeout_ts
, arg3
)) {
10415 unlock_user(target_pfd
, arg1
, 0);
10423 if (arg5
!= sizeof(target_sigset_t
)) {
10424 unlock_user(target_pfd
, arg1
, 0);
10425 ret
= -TARGET_EINVAL
;
10429 target_set
= lock_user(VERIFY_READ
, arg4
, sizeof(target_sigset_t
), 1);
10431 unlock_user(target_pfd
, arg1
, 0);
10434 target_to_host_sigset(set
, target_set
);
10439 ret
= get_errno(safe_ppoll(pfd
, nfds
, timeout_ts
,
10440 set
, SIGSET_T_SIZE
));
10442 if (!is_error(ret
) && arg3
) {
10443 host_to_target_timespec(arg3
, timeout_ts
);
10446 unlock_user(target_set
, arg4
, 0);
10451 # ifdef TARGET_NR_poll
10452 case TARGET_NR_poll
:
10454 struct timespec ts
, *pts
;
10457 /* Convert ms to secs, ns */
10458 ts
.tv_sec
= arg3
/ 1000;
10459 ts
.tv_nsec
= (arg3
% 1000) * 1000000LL;
10462 /* -ve poll() timeout means "infinite" */
10465 ret
= get_errno(safe_ppoll(pfd
, nfds
, pts
, NULL
, 0));
10470 g_assert_not_reached();
10473 if (!is_error(ret
)) {
10474 for(i
= 0; i
< nfds
; i
++) {
10475 target_pfd
[i
].revents
= tswap16(pfd
[i
].revents
);
10478 unlock_user(target_pfd
, arg1
, sizeof(struct target_pollfd
) * nfds
);
10482 case TARGET_NR_flock
:
10483 /* NOTE: the flock constant seems to be the same for every
10485 ret
= get_errno(safe_flock(arg1
, arg2
));
10487 case TARGET_NR_readv
:
10489 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10491 ret
= get_errno(safe_readv(arg1
, vec
, arg3
));
10492 unlock_iovec(vec
, arg2
, arg3
, 1);
10494 ret
= -host_to_target_errno(errno
);
10498 case TARGET_NR_writev
:
10500 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10502 ret
= get_errno(safe_writev(arg1
, vec
, arg3
));
10503 unlock_iovec(vec
, arg2
, arg3
, 0);
10505 ret
= -host_to_target_errno(errno
);
10509 #if defined(TARGET_NR_preadv)
10510 case TARGET_NR_preadv
:
10512 struct iovec
*vec
= lock_iovec(VERIFY_WRITE
, arg2
, arg3
, 0);
10514 unsigned long low
, high
;
10516 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10517 ret
= get_errno(safe_preadv(arg1
, vec
, arg3
, low
, high
));
10518 unlock_iovec(vec
, arg2
, arg3
, 1);
10520 ret
= -host_to_target_errno(errno
);
10525 #if defined(TARGET_NR_pwritev)
10526 case TARGET_NR_pwritev
:
10528 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
10530 unsigned long low
, high
;
10532 target_to_host_low_high(arg4
, arg5
, &low
, &high
);
10533 ret
= get_errno(safe_pwritev(arg1
, vec
, arg3
, low
, high
));
10534 unlock_iovec(vec
, arg2
, arg3
, 0);
10536 ret
= -host_to_target_errno(errno
);
10541 case TARGET_NR_getsid
:
10542 ret
= get_errno(getsid(arg1
));
10544 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10545 case TARGET_NR_fdatasync
:
10546 ret
= get_errno(fdatasync(arg1
));
10549 #ifdef TARGET_NR__sysctl
10550 case TARGET_NR__sysctl
:
10551 /* We don't implement this, but ENOTDIR is always a safe
10553 ret
= -TARGET_ENOTDIR
;
10556 case TARGET_NR_sched_getaffinity
:
10558 unsigned int mask_size
;
10559 unsigned long *mask
;
10562 * sched_getaffinity needs multiples of ulong, so need to take
10563 * care of mismatches between target ulong and host ulong sizes.
10565 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10566 ret
= -TARGET_EINVAL
;
10569 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10571 mask
= alloca(mask_size
);
10572 memset(mask
, 0, mask_size
);
10573 ret
= get_errno(sys_sched_getaffinity(arg1
, mask_size
, mask
));
10575 if (!is_error(ret
)) {
10577 /* More data returned than the caller's buffer will fit.
10578 * This only happens if sizeof(abi_long) < sizeof(long)
10579 * and the caller passed us a buffer holding an odd number
10580 * of abi_longs. If the host kernel is actually using the
10581 * extra 4 bytes then fail EINVAL; otherwise we can just
10582 * ignore them and only copy the interesting part.
10584 int numcpus
= sysconf(_SC_NPROCESSORS_CONF
);
10585 if (numcpus
> arg2
* 8) {
10586 ret
= -TARGET_EINVAL
;
10592 if (host_to_target_cpu_mask(mask
, mask_size
, arg3
, ret
)) {
10598 case TARGET_NR_sched_setaffinity
:
10600 unsigned int mask_size
;
10601 unsigned long *mask
;
10604 * sched_setaffinity needs multiples of ulong, so need to take
10605 * care of mismatches between target ulong and host ulong sizes.
10607 if (arg2
& (sizeof(abi_ulong
) - 1)) {
10608 ret
= -TARGET_EINVAL
;
10611 mask_size
= (arg2
+ (sizeof(*mask
) - 1)) & ~(sizeof(*mask
) - 1);
10612 mask
= alloca(mask_size
);
10614 ret
= target_to_host_cpu_mask(mask
, mask_size
, arg3
, arg2
);
10619 ret
= get_errno(sys_sched_setaffinity(arg1
, mask_size
, mask
));
10622 case TARGET_NR_getcpu
:
10624 unsigned cpu
, node
;
10625 ret
= get_errno(sys_getcpu(arg1
? &cpu
: NULL
,
10626 arg2
? &node
: NULL
,
10628 if (is_error(ret
)) {
10631 if (arg1
&& put_user_u32(cpu
, arg1
)) {
10634 if (arg2
&& put_user_u32(node
, arg2
)) {
10639 case TARGET_NR_sched_setparam
:
10641 struct sched_param
*target_schp
;
10642 struct sched_param schp
;
10645 return -TARGET_EINVAL
;
10647 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg2
, 1))
10649 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10650 unlock_user_struct(target_schp
, arg2
, 0);
10651 ret
= get_errno(sched_setparam(arg1
, &schp
));
10654 case TARGET_NR_sched_getparam
:
10656 struct sched_param
*target_schp
;
10657 struct sched_param schp
;
10660 return -TARGET_EINVAL
;
10662 ret
= get_errno(sched_getparam(arg1
, &schp
));
10663 if (!is_error(ret
)) {
10664 if (!lock_user_struct(VERIFY_WRITE
, target_schp
, arg2
, 0))
10666 target_schp
->sched_priority
= tswap32(schp
.sched_priority
);
10667 unlock_user_struct(target_schp
, arg2
, 1);
10671 case TARGET_NR_sched_setscheduler
:
10673 struct sched_param
*target_schp
;
10674 struct sched_param schp
;
10676 return -TARGET_EINVAL
;
10678 if (!lock_user_struct(VERIFY_READ
, target_schp
, arg3
, 1))
10680 schp
.sched_priority
= tswap32(target_schp
->sched_priority
);
10681 unlock_user_struct(target_schp
, arg3
, 0);
10682 ret
= get_errno(sched_setscheduler(arg1
, arg2
, &schp
));
10685 case TARGET_NR_sched_getscheduler
:
10686 ret
= get_errno(sched_getscheduler(arg1
));
10688 case TARGET_NR_sched_yield
:
10689 ret
= get_errno(sched_yield());
10691 case TARGET_NR_sched_get_priority_max
:
10692 ret
= get_errno(sched_get_priority_max(arg1
));
10694 case TARGET_NR_sched_get_priority_min
:
10695 ret
= get_errno(sched_get_priority_min(arg1
));
10697 case TARGET_NR_sched_rr_get_interval
:
10699 struct timespec ts
;
10700 ret
= get_errno(sched_rr_get_interval(arg1
, &ts
));
10701 if (!is_error(ret
)) {
10702 ret
= host_to_target_timespec(arg2
, &ts
);
10706 case TARGET_NR_nanosleep
:
10708 struct timespec req
, rem
;
10709 target_to_host_timespec(&req
, arg1
);
10710 ret
= get_errno(safe_nanosleep(&req
, &rem
));
10711 if (is_error(ret
) && arg2
) {
10712 host_to_target_timespec(arg2
, &rem
);
10716 #ifdef TARGET_NR_query_module
10717 case TARGET_NR_query_module
:
10718 goto unimplemented
;
10720 #ifdef TARGET_NR_nfsservctl
10721 case TARGET_NR_nfsservctl
:
10722 goto unimplemented
;
10724 case TARGET_NR_prctl
:
10726 case PR_GET_PDEATHSIG
:
10729 ret
= get_errno(prctl(arg1
, &deathsig
, arg3
, arg4
, arg5
));
10730 if (!is_error(ret
) && arg2
10731 && put_user_ual(deathsig
, arg2
)) {
10739 void *name
= lock_user(VERIFY_WRITE
, arg2
, 16, 1);
10743 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10744 arg3
, arg4
, arg5
));
10745 unlock_user(name
, arg2
, 16);
10750 void *name
= lock_user(VERIFY_READ
, arg2
, 16, 1);
10754 ret
= get_errno(prctl(arg1
, (unsigned long)name
,
10755 arg3
, arg4
, arg5
));
10756 unlock_user(name
, arg2
, 0);
10760 #ifdef TARGET_AARCH64
10761 case TARGET_PR_SVE_SET_VL
:
10762 /* We cannot support either PR_SVE_SET_VL_ONEXEC
10763 or PR_SVE_VL_INHERIT. Therefore, anything above
10764 ARM_MAX_VQ results in EINVAL. */
10765 ret
= -TARGET_EINVAL
;
10766 if (arm_feature(cpu_env
, ARM_FEATURE_SVE
)
10767 && arg2
>= 0 && arg2
<= ARM_MAX_VQ
* 16 && !(arg2
& 15)) {
10768 CPUARMState
*env
= cpu_env
;
10769 int old_vq
= (env
->vfp
.zcr_el
[1] & 0xf) + 1;
10770 int vq
= MAX(arg2
/ 16, 1);
10773 aarch64_sve_narrow_vq(env
, vq
);
10775 env
->vfp
.zcr_el
[1] = vq
- 1;
10779 case TARGET_PR_SVE_GET_VL
:
10780 ret
= -TARGET_EINVAL
;
10781 if (arm_feature(cpu_env
, ARM_FEATURE_SVE
)) {
10782 CPUARMState
*env
= cpu_env
;
10783 ret
= ((env
->vfp
.zcr_el
[1] & 0xf) + 1) * 16;
10786 #endif /* AARCH64 */
10787 case PR_GET_SECCOMP
:
10788 case PR_SET_SECCOMP
:
10789 /* Disable seccomp to prevent the target disabling syscalls we
10791 ret
= -TARGET_EINVAL
;
10794 /* Most prctl options have no pointer arguments */
10795 ret
= get_errno(prctl(arg1
, arg2
, arg3
, arg4
, arg5
));
10799 #ifdef TARGET_NR_arch_prctl
10800 case TARGET_NR_arch_prctl
:
10801 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10802 ret
= do_arch_prctl(cpu_env
, arg1
, arg2
);
10805 goto unimplemented
;
10808 #ifdef TARGET_NR_pread64
10809 case TARGET_NR_pread64
:
10810 if (regpairs_aligned(cpu_env
, num
)) {
10814 if (!(p
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0)))
10816 ret
= get_errno(pread64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10817 unlock_user(p
, arg2
, ret
);
10819 case TARGET_NR_pwrite64
:
10820 if (regpairs_aligned(cpu_env
, num
)) {
10824 if (!(p
= lock_user(VERIFY_READ
, arg2
, arg3
, 1)))
10826 ret
= get_errno(pwrite64(arg1
, p
, arg3
, target_offset64(arg4
, arg5
)));
10827 unlock_user(p
, arg2
, 0);
10830 case TARGET_NR_getcwd
:
10831 if (!(p
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0)))
10833 ret
= get_errno(sys_getcwd1(p
, arg2
));
10834 unlock_user(p
, arg1
, ret
);
10836 case TARGET_NR_capget
:
10837 case TARGET_NR_capset
:
10839 struct target_user_cap_header
*target_header
;
10840 struct target_user_cap_data
*target_data
= NULL
;
10841 struct __user_cap_header_struct header
;
10842 struct __user_cap_data_struct data
[2];
10843 struct __user_cap_data_struct
*dataptr
= NULL
;
10844 int i
, target_datalen
;
10845 int data_items
= 1;
10847 if (!lock_user_struct(VERIFY_WRITE
, target_header
, arg1
, 1)) {
10850 header
.version
= tswap32(target_header
->version
);
10851 header
.pid
= tswap32(target_header
->pid
);
10853 if (header
.version
!= _LINUX_CAPABILITY_VERSION
) {
10854 /* Version 2 and up takes pointer to two user_data structs */
10858 target_datalen
= sizeof(*target_data
) * data_items
;
10861 if (num
== TARGET_NR_capget
) {
10862 target_data
= lock_user(VERIFY_WRITE
, arg2
, target_datalen
, 0);
10864 target_data
= lock_user(VERIFY_READ
, arg2
, target_datalen
, 1);
10866 if (!target_data
) {
10867 unlock_user_struct(target_header
, arg1
, 0);
10871 if (num
== TARGET_NR_capset
) {
10872 for (i
= 0; i
< data_items
; i
++) {
10873 data
[i
].effective
= tswap32(target_data
[i
].effective
);
10874 data
[i
].permitted
= tswap32(target_data
[i
].permitted
);
10875 data
[i
].inheritable
= tswap32(target_data
[i
].inheritable
);
10882 if (num
== TARGET_NR_capget
) {
10883 ret
= get_errno(capget(&header
, dataptr
));
10885 ret
= get_errno(capset(&header
, dataptr
));
10888 /* The kernel always updates version for both capget and capset */
10889 target_header
->version
= tswap32(header
.version
);
10890 unlock_user_struct(target_header
, arg1
, 1);
10893 if (num
== TARGET_NR_capget
) {
10894 for (i
= 0; i
< data_items
; i
++) {
10895 target_data
[i
].effective
= tswap32(data
[i
].effective
);
10896 target_data
[i
].permitted
= tswap32(data
[i
].permitted
);
10897 target_data
[i
].inheritable
= tswap32(data
[i
].inheritable
);
10899 unlock_user(target_data
, arg2
, target_datalen
);
10901 unlock_user(target_data
, arg2
, 0);
10906 case TARGET_NR_sigaltstack
:
10907 ret
= do_sigaltstack(arg1
, arg2
, get_sp_from_cpustate((CPUArchState
*)cpu_env
));
10910 #ifdef CONFIG_SENDFILE
10911 case TARGET_NR_sendfile
:
10913 off_t
*offp
= NULL
;
10916 ret
= get_user_sal(off
, arg3
);
10917 if (is_error(ret
)) {
10922 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10923 if (!is_error(ret
) && arg3
) {
10924 abi_long ret2
= put_user_sal(off
, arg3
);
10925 if (is_error(ret2
)) {
10931 #ifdef TARGET_NR_sendfile64
10932 case TARGET_NR_sendfile64
:
10934 off_t
*offp
= NULL
;
10937 ret
= get_user_s64(off
, arg3
);
10938 if (is_error(ret
)) {
10943 ret
= get_errno(sendfile(arg1
, arg2
, offp
, arg4
));
10944 if (!is_error(ret
) && arg3
) {
10945 abi_long ret2
= put_user_s64(off
, arg3
);
10946 if (is_error(ret2
)) {
10954 case TARGET_NR_sendfile
:
10955 #ifdef TARGET_NR_sendfile64
10956 case TARGET_NR_sendfile64
:
10958 goto unimplemented
;
10961 #ifdef TARGET_NR_getpmsg
10962 case TARGET_NR_getpmsg
:
10963 goto unimplemented
;
10965 #ifdef TARGET_NR_putpmsg
10966 case TARGET_NR_putpmsg
:
10967 goto unimplemented
;
10969 #ifdef TARGET_NR_vfork
10970 case TARGET_NR_vfork
:
10971 ret
= get_errno(do_fork(cpu_env
,
10972 CLONE_VFORK
| CLONE_VM
| TARGET_SIGCHLD
,
10976 #ifdef TARGET_NR_ugetrlimit
10977 case TARGET_NR_ugetrlimit
:
10979 struct rlimit rlim
;
10980 int resource
= target_to_host_resource(arg1
);
10981 ret
= get_errno(getrlimit(resource
, &rlim
));
10982 if (!is_error(ret
)) {
10983 struct target_rlimit
*target_rlim
;
10984 if (!lock_user_struct(VERIFY_WRITE
, target_rlim
, arg2
, 0))
10986 target_rlim
->rlim_cur
= host_to_target_rlim(rlim
.rlim_cur
);
10987 target_rlim
->rlim_max
= host_to_target_rlim(rlim
.rlim_max
);
10988 unlock_user_struct(target_rlim
, arg2
, 1);
10993 #ifdef TARGET_NR_truncate64
10994 case TARGET_NR_truncate64
:
10995 if (!(p
= lock_user_string(arg1
)))
10997 ret
= target_truncate64(cpu_env
, p
, arg2
, arg3
, arg4
);
10998 unlock_user(p
, arg1
, 0);
11001 #ifdef TARGET_NR_ftruncate64
11002 case TARGET_NR_ftruncate64
:
11003 ret
= target_ftruncate64(cpu_env
, arg1
, arg2
, arg3
, arg4
);
11006 #ifdef TARGET_NR_stat64
11007 case TARGET_NR_stat64
:
11008 if (!(p
= lock_user_string(arg1
)))
11010 ret
= get_errno(stat(path(p
), &st
));
11011 unlock_user(p
, arg1
, 0);
11012 if (!is_error(ret
))
11013 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11016 #ifdef TARGET_NR_lstat64
11017 case TARGET_NR_lstat64
:
11018 if (!(p
= lock_user_string(arg1
)))
11020 ret
= get_errno(lstat(path(p
), &st
));
11021 unlock_user(p
, arg1
, 0);
11022 if (!is_error(ret
))
11023 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11026 #ifdef TARGET_NR_fstat64
11027 case TARGET_NR_fstat64
:
11028 ret
= get_errno(fstat(arg1
, &st
));
11029 if (!is_error(ret
))
11030 ret
= host_to_target_stat64(cpu_env
, arg2
, &st
);
11033 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11034 #ifdef TARGET_NR_fstatat64
11035 case TARGET_NR_fstatat64
:
11037 #ifdef TARGET_NR_newfstatat
11038 case TARGET_NR_newfstatat
:
11040 if (!(p
= lock_user_string(arg2
)))
11042 ret
= get_errno(fstatat(arg1
, path(p
), &st
, arg4
));
11043 if (!is_error(ret
))
11044 ret
= host_to_target_stat64(cpu_env
, arg3
, &st
);
11047 #ifdef TARGET_NR_lchown
11048 case TARGET_NR_lchown
:
11049 if (!(p
= lock_user_string(arg1
)))
11051 ret
= get_errno(lchown(p
, low2highuid(arg2
), low2highgid(arg3
)));
11052 unlock_user(p
, arg1
, 0);
11055 #ifdef TARGET_NR_getuid
11056 case TARGET_NR_getuid
:
11057 ret
= get_errno(high2lowuid(getuid()));
11060 #ifdef TARGET_NR_getgid
11061 case TARGET_NR_getgid
:
11062 ret
= get_errno(high2lowgid(getgid()));
11065 #ifdef TARGET_NR_geteuid
11066 case TARGET_NR_geteuid
:
11067 ret
= get_errno(high2lowuid(geteuid()));
11070 #ifdef TARGET_NR_getegid
11071 case TARGET_NR_getegid
:
11072 ret
= get_errno(high2lowgid(getegid()));
11075 case TARGET_NR_setreuid
:
11076 ret
= get_errno(setreuid(low2highuid(arg1
), low2highuid(arg2
)));
11078 case TARGET_NR_setregid
:
11079 ret
= get_errno(setregid(low2highgid(arg1
), low2highgid(arg2
)));
11081 case TARGET_NR_getgroups
:
11083 int gidsetsize
= arg1
;
11084 target_id
*target_grouplist
;
11088 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11089 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11090 if (gidsetsize
== 0)
11092 if (!is_error(ret
)) {
11093 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* sizeof(target_id
), 0);
11094 if (!target_grouplist
)
11096 for(i
= 0;i
< ret
; i
++)
11097 target_grouplist
[i
] = tswapid(high2lowgid(grouplist
[i
]));
11098 unlock_user(target_grouplist
, arg2
, gidsetsize
* sizeof(target_id
));
11102 case TARGET_NR_setgroups
:
11104 int gidsetsize
= arg1
;
11105 target_id
*target_grouplist
;
11106 gid_t
*grouplist
= NULL
;
11109 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11110 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* sizeof(target_id
), 1);
11111 if (!target_grouplist
) {
11112 ret
= -TARGET_EFAULT
;
11115 for (i
= 0; i
< gidsetsize
; i
++) {
11116 grouplist
[i
] = low2highgid(tswapid(target_grouplist
[i
]));
11118 unlock_user(target_grouplist
, arg2
, 0);
11120 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
11123 case TARGET_NR_fchown
:
11124 ret
= get_errno(fchown(arg1
, low2highuid(arg2
), low2highgid(arg3
)));
11126 #if defined(TARGET_NR_fchownat)
11127 case TARGET_NR_fchownat
:
11128 if (!(p
= lock_user_string(arg2
)))
11130 ret
= get_errno(fchownat(arg1
, p
, low2highuid(arg3
),
11131 low2highgid(arg4
), arg5
));
11132 unlock_user(p
, arg2
, 0);
11135 #ifdef TARGET_NR_setresuid
11136 case TARGET_NR_setresuid
:
11137 ret
= get_errno(sys_setresuid(low2highuid(arg1
),
11139 low2highuid(arg3
)));
11142 #ifdef TARGET_NR_getresuid
11143 case TARGET_NR_getresuid
:
11145 uid_t ruid
, euid
, suid
;
11146 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11147 if (!is_error(ret
)) {
11148 if (put_user_id(high2lowuid(ruid
), arg1
)
11149 || put_user_id(high2lowuid(euid
), arg2
)
11150 || put_user_id(high2lowuid(suid
), arg3
))
11156 #ifdef TARGET_NR_getresgid
11157 case TARGET_NR_setresgid
:
11158 ret
= get_errno(sys_setresgid(low2highgid(arg1
),
11160 low2highgid(arg3
)));
11163 #ifdef TARGET_NR_getresgid
11164 case TARGET_NR_getresgid
:
11166 gid_t rgid
, egid
, sgid
;
11167 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11168 if (!is_error(ret
)) {
11169 if (put_user_id(high2lowgid(rgid
), arg1
)
11170 || put_user_id(high2lowgid(egid
), arg2
)
11171 || put_user_id(high2lowgid(sgid
), arg3
))
11177 #ifdef TARGET_NR_chown
11178 case TARGET_NR_chown
:
11179 if (!(p
= lock_user_string(arg1
)))
11181 ret
= get_errno(chown(p
, low2highuid(arg2
), low2highgid(arg3
)));
11182 unlock_user(p
, arg1
, 0);
11185 case TARGET_NR_setuid
:
11186 ret
= get_errno(sys_setuid(low2highuid(arg1
)));
11188 case TARGET_NR_setgid
:
11189 ret
= get_errno(sys_setgid(low2highgid(arg1
)));
11191 case TARGET_NR_setfsuid
:
11192 ret
= get_errno(setfsuid(arg1
));
11194 case TARGET_NR_setfsgid
:
11195 ret
= get_errno(setfsgid(arg1
));
11198 #ifdef TARGET_NR_lchown32
11199 case TARGET_NR_lchown32
:
11200 if (!(p
= lock_user_string(arg1
)))
11202 ret
= get_errno(lchown(p
, arg2
, arg3
));
11203 unlock_user(p
, arg1
, 0);
11206 #ifdef TARGET_NR_getuid32
11207 case TARGET_NR_getuid32
:
11208 ret
= get_errno(getuid());
11212 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11213 /* Alpha specific */
11214 case TARGET_NR_getxuid
:
11218 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=euid
;
11220 ret
= get_errno(getuid());
11223 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11224 /* Alpha specific */
11225 case TARGET_NR_getxgid
:
11229 ((CPUAlphaState
*)cpu_env
)->ir
[IR_A4
]=egid
;
11231 ret
= get_errno(getgid());
11234 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11235 /* Alpha specific */
11236 case TARGET_NR_osf_getsysinfo
:
11237 ret
= -TARGET_EOPNOTSUPP
;
11239 case TARGET_GSI_IEEE_FP_CONTROL
:
11241 uint64_t swcr
, fpcr
= cpu_alpha_load_fpcr (cpu_env
);
11243 /* Copied from linux ieee_fpcr_to_swcr. */
11244 swcr
= (fpcr
>> 35) & SWCR_STATUS_MASK
;
11245 swcr
|= (fpcr
>> 36) & SWCR_MAP_DMZ
;
11246 swcr
|= (~fpcr
>> 48) & (SWCR_TRAP_ENABLE_INV
11247 | SWCR_TRAP_ENABLE_DZE
11248 | SWCR_TRAP_ENABLE_OVF
);
11249 swcr
|= (~fpcr
>> 57) & (SWCR_TRAP_ENABLE_UNF
11250 | SWCR_TRAP_ENABLE_INE
);
11251 swcr
|= (fpcr
>> 47) & SWCR_MAP_UMZ
;
11252 swcr
|= (~fpcr
>> 41) & SWCR_TRAP_ENABLE_DNO
;
11254 if (put_user_u64 (swcr
, arg2
))
11260 /* case GSI_IEEE_STATE_AT_SIGNAL:
11261 -- Not implemented in linux kernel.
11263 -- Retrieves current unaligned access state; not much used.
11264 case GSI_PROC_TYPE:
11265 -- Retrieves implver information; surely not used.
11266 case GSI_GET_HWRPB:
11267 -- Grabs a copy of the HWRPB; surely not used.
11272 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11273 /* Alpha specific */
11274 case TARGET_NR_osf_setsysinfo
:
11275 ret
= -TARGET_EOPNOTSUPP
;
11277 case TARGET_SSI_IEEE_FP_CONTROL
:
11279 uint64_t swcr
, fpcr
, orig_fpcr
;
11281 if (get_user_u64 (swcr
, arg2
)) {
11284 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11285 fpcr
= orig_fpcr
& FPCR_DYN_MASK
;
11287 /* Copied from linux ieee_swcr_to_fpcr. */
11288 fpcr
|= (swcr
& SWCR_STATUS_MASK
) << 35;
11289 fpcr
|= (swcr
& SWCR_MAP_DMZ
) << 36;
11290 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_INV
11291 | SWCR_TRAP_ENABLE_DZE
11292 | SWCR_TRAP_ENABLE_OVF
)) << 48;
11293 fpcr
|= (~swcr
& (SWCR_TRAP_ENABLE_UNF
11294 | SWCR_TRAP_ENABLE_INE
)) << 57;
11295 fpcr
|= (swcr
& SWCR_MAP_UMZ
? FPCR_UNDZ
| FPCR_UNFD
: 0);
11296 fpcr
|= (~swcr
& SWCR_TRAP_ENABLE_DNO
) << 41;
11298 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11303 case TARGET_SSI_IEEE_RAISE_EXCEPTION
:
11305 uint64_t exc
, fpcr
, orig_fpcr
;
11308 if (get_user_u64(exc
, arg2
)) {
11312 orig_fpcr
= cpu_alpha_load_fpcr(cpu_env
);
11314 /* We only add to the exception status here. */
11315 fpcr
= orig_fpcr
| ((exc
& SWCR_STATUS_MASK
) << 35);
11317 cpu_alpha_store_fpcr(cpu_env
, fpcr
);
11320 /* Old exceptions are not signaled. */
11321 fpcr
&= ~(orig_fpcr
& FPCR_STATUS_MASK
);
11323 /* If any exceptions set by this call,
11324 and are unmasked, send a signal. */
11326 if ((fpcr
& (FPCR_INE
| FPCR_INED
)) == FPCR_INE
) {
11327 si_code
= TARGET_FPE_FLTRES
;
11329 if ((fpcr
& (FPCR_UNF
| FPCR_UNFD
)) == FPCR_UNF
) {
11330 si_code
= TARGET_FPE_FLTUND
;
11332 if ((fpcr
& (FPCR_OVF
| FPCR_OVFD
)) == FPCR_OVF
) {
11333 si_code
= TARGET_FPE_FLTOVF
;
11335 if ((fpcr
& (FPCR_DZE
| FPCR_DZED
)) == FPCR_DZE
) {
11336 si_code
= TARGET_FPE_FLTDIV
;
11338 if ((fpcr
& (FPCR_INV
| FPCR_INVD
)) == FPCR_INV
) {
11339 si_code
= TARGET_FPE_FLTINV
;
11341 if (si_code
!= 0) {
11342 target_siginfo_t info
;
11343 info
.si_signo
= SIGFPE
;
11345 info
.si_code
= si_code
;
11346 info
._sifields
._sigfault
._addr
11347 = ((CPUArchState
*)cpu_env
)->pc
;
11348 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
11349 QEMU_SI_FAULT
, &info
);
11354 /* case SSI_NVPAIRS:
11355 -- Used with SSIN_UACPROC to enable unaligned accesses.
11356 case SSI_IEEE_STATE_AT_SIGNAL:
11357 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11358 -- Not implemented in linux kernel
11363 #ifdef TARGET_NR_osf_sigprocmask
11364 /* Alpha specific. */
11365 case TARGET_NR_osf_sigprocmask
:
11369 sigset_t set
, oldset
;
11372 case TARGET_SIG_BLOCK
:
11375 case TARGET_SIG_UNBLOCK
:
11378 case TARGET_SIG_SETMASK
:
11382 ret
= -TARGET_EINVAL
;
11386 target_to_host_old_sigset(&set
, &mask
);
11387 ret
= do_sigprocmask(how
, &set
, &oldset
);
11389 host_to_target_old_sigset(&mask
, &oldset
);
11396 #ifdef TARGET_NR_getgid32
11397 case TARGET_NR_getgid32
:
11398 ret
= get_errno(getgid());
11401 #ifdef TARGET_NR_geteuid32
11402 case TARGET_NR_geteuid32
:
11403 ret
= get_errno(geteuid());
11406 #ifdef TARGET_NR_getegid32
11407 case TARGET_NR_getegid32
:
11408 ret
= get_errno(getegid());
11411 #ifdef TARGET_NR_setreuid32
11412 case TARGET_NR_setreuid32
:
11413 ret
= get_errno(setreuid(arg1
, arg2
));
11416 #ifdef TARGET_NR_setregid32
11417 case TARGET_NR_setregid32
:
11418 ret
= get_errno(setregid(arg1
, arg2
));
11421 #ifdef TARGET_NR_getgroups32
11422 case TARGET_NR_getgroups32
:
11424 int gidsetsize
= arg1
;
11425 uint32_t *target_grouplist
;
11429 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11430 ret
= get_errno(getgroups(gidsetsize
, grouplist
));
11431 if (gidsetsize
== 0)
11433 if (!is_error(ret
)) {
11434 target_grouplist
= lock_user(VERIFY_WRITE
, arg2
, gidsetsize
* 4, 0);
11435 if (!target_grouplist
) {
11436 ret
= -TARGET_EFAULT
;
11439 for(i
= 0;i
< ret
; i
++)
11440 target_grouplist
[i
] = tswap32(grouplist
[i
]);
11441 unlock_user(target_grouplist
, arg2
, gidsetsize
* 4);
11446 #ifdef TARGET_NR_setgroups32
11447 case TARGET_NR_setgroups32
:
11449 int gidsetsize
= arg1
;
11450 uint32_t *target_grouplist
;
11454 grouplist
= alloca(gidsetsize
* sizeof(gid_t
));
11455 target_grouplist
= lock_user(VERIFY_READ
, arg2
, gidsetsize
* 4, 1);
11456 if (!target_grouplist
) {
11457 ret
= -TARGET_EFAULT
;
11460 for(i
= 0;i
< gidsetsize
; i
++)
11461 grouplist
[i
] = tswap32(target_grouplist
[i
]);
11462 unlock_user(target_grouplist
, arg2
, 0);
11463 ret
= get_errno(setgroups(gidsetsize
, grouplist
));
11467 #ifdef TARGET_NR_fchown32
11468 case TARGET_NR_fchown32
:
11469 ret
= get_errno(fchown(arg1
, arg2
, arg3
));
11472 #ifdef TARGET_NR_setresuid32
11473 case TARGET_NR_setresuid32
:
11474 ret
= get_errno(sys_setresuid(arg1
, arg2
, arg3
));
11477 #ifdef TARGET_NR_getresuid32
11478 case TARGET_NR_getresuid32
:
11480 uid_t ruid
, euid
, suid
;
11481 ret
= get_errno(getresuid(&ruid
, &euid
, &suid
));
11482 if (!is_error(ret
)) {
11483 if (put_user_u32(ruid
, arg1
)
11484 || put_user_u32(euid
, arg2
)
11485 || put_user_u32(suid
, arg3
))
11491 #ifdef TARGET_NR_setresgid32
11492 case TARGET_NR_setresgid32
:
11493 ret
= get_errno(sys_setresgid(arg1
, arg2
, arg3
));
11496 #ifdef TARGET_NR_getresgid32
11497 case TARGET_NR_getresgid32
:
11499 gid_t rgid
, egid
, sgid
;
11500 ret
= get_errno(getresgid(&rgid
, &egid
, &sgid
));
11501 if (!is_error(ret
)) {
11502 if (put_user_u32(rgid
, arg1
)
11503 || put_user_u32(egid
, arg2
)
11504 || put_user_u32(sgid
, arg3
))
11510 #ifdef TARGET_NR_chown32
11511 case TARGET_NR_chown32
:
11512 if (!(p
= lock_user_string(arg1
)))
11514 ret
= get_errno(chown(p
, arg2
, arg3
));
11515 unlock_user(p
, arg1
, 0);
11518 #ifdef TARGET_NR_setuid32
11519 case TARGET_NR_setuid32
:
11520 ret
= get_errno(sys_setuid(arg1
));
11523 #ifdef TARGET_NR_setgid32
11524 case TARGET_NR_setgid32
:
11525 ret
= get_errno(sys_setgid(arg1
));
11528 #ifdef TARGET_NR_setfsuid32
11529 case TARGET_NR_setfsuid32
:
11530 ret
= get_errno(setfsuid(arg1
));
11533 #ifdef TARGET_NR_setfsgid32
11534 case TARGET_NR_setfsgid32
:
11535 ret
= get_errno(setfsgid(arg1
));
11539 case TARGET_NR_pivot_root
:
11540 goto unimplemented
;
11541 #ifdef TARGET_NR_mincore
11542 case TARGET_NR_mincore
:
11545 ret
= -TARGET_ENOMEM
;
11546 a
= lock_user(VERIFY_READ
, arg1
, arg2
, 0);
11550 ret
= -TARGET_EFAULT
;
11551 p
= lock_user_string(arg3
);
11555 ret
= get_errno(mincore(a
, arg2
, p
));
11556 unlock_user(p
, arg3
, ret
);
11558 unlock_user(a
, arg1
, 0);
11562 #ifdef TARGET_NR_arm_fadvise64_64
11563 case TARGET_NR_arm_fadvise64_64
:
11564 /* arm_fadvise64_64 looks like fadvise64_64 but
11565 * with different argument order: fd, advice, offset, len
11566 * rather than the usual fd, offset, len, advice.
11567 * Note that offset and len are both 64-bit so appear as
11568 * pairs of 32-bit registers.
11570 ret
= posix_fadvise(arg1
, target_offset64(arg3
, arg4
),
11571 target_offset64(arg5
, arg6
), arg2
);
11572 ret
= -host_to_target_errno(ret
);
11576 #if TARGET_ABI_BITS == 32
11578 #ifdef TARGET_NR_fadvise64_64
11579 case TARGET_NR_fadvise64_64
:
11580 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11581 /* 6 args: fd, advice, offset (high, low), len (high, low) */
11589 /* 6 args: fd, offset (high, low), len (high, low), advice */
11590 if (regpairs_aligned(cpu_env
, num
)) {
11591 /* offset is in (3,4), len in (5,6) and advice in 7 */
11599 ret
= -host_to_target_errno(posix_fadvise(arg1
,
11600 target_offset64(arg2
, arg3
),
11601 target_offset64(arg4
, arg5
),
11606 #ifdef TARGET_NR_fadvise64
11607 case TARGET_NR_fadvise64
:
11608 /* 5 args: fd, offset (high, low), len, advice */
11609 if (regpairs_aligned(cpu_env
, num
)) {
11610 /* offset is in (3,4), len in 5 and advice in 6 */
11616 ret
= -host_to_target_errno(posix_fadvise(arg1
,
11617 target_offset64(arg2
, arg3
),
11622 #else /* not a 32-bit ABI */
11623 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11624 #ifdef TARGET_NR_fadvise64_64
11625 case TARGET_NR_fadvise64_64
:
11627 #ifdef TARGET_NR_fadvise64
11628 case TARGET_NR_fadvise64
:
11630 #ifdef TARGET_S390X
11632 case 4: arg4
= POSIX_FADV_NOREUSE
+ 1; break; /* make sure it's an invalid value */
11633 case 5: arg4
= POSIX_FADV_NOREUSE
+ 2; break; /* ditto */
11634 case 6: arg4
= POSIX_FADV_DONTNEED
; break;
11635 case 7: arg4
= POSIX_FADV_NOREUSE
; break;
11639 ret
= -host_to_target_errno(posix_fadvise(arg1
, arg2
, arg3
, arg4
));
11642 #endif /* end of 64-bit ABI fadvise handling */
11644 #ifdef TARGET_NR_madvise
11645 case TARGET_NR_madvise
:
11646 /* A straight passthrough may not be safe because qemu sometimes
11647 turns private file-backed mappings into anonymous mappings.
11648 This will break MADV_DONTNEED.
11649 This is a hint, so ignoring and returning success is ok. */
11650 ret
= get_errno(0);
11653 #if TARGET_ABI_BITS == 32
11654 case TARGET_NR_fcntl64
:
11658 from_flock64_fn
*copyfrom
= copy_from_user_flock64
;
11659 to_flock64_fn
*copyto
= copy_to_user_flock64
;
11662 if (!((CPUARMState
*)cpu_env
)->eabi
) {
11663 copyfrom
= copy_from_user_oabi_flock64
;
11664 copyto
= copy_to_user_oabi_flock64
;
11668 cmd
= target_to_host_fcntl_cmd(arg2
);
11669 if (cmd
== -TARGET_EINVAL
) {
11675 case TARGET_F_GETLK64
:
11676 ret
= copyfrom(&fl
, arg3
);
11680 ret
= get_errno(fcntl(arg1
, cmd
, &fl
));
11682 ret
= copyto(arg3
, &fl
);
11686 case TARGET_F_SETLK64
:
11687 case TARGET_F_SETLKW64
:
11688 ret
= copyfrom(&fl
, arg3
);
11692 ret
= get_errno(safe_fcntl(arg1
, cmd
, &fl
));
11695 ret
= do_fcntl(arg1
, arg2
, arg3
);
11701 #ifdef TARGET_NR_cacheflush
11702 case TARGET_NR_cacheflush
:
11703 /* self-modifying code is handled automatically, so nothing needed */
11707 #ifdef TARGET_NR_security
11708 case TARGET_NR_security
:
11709 goto unimplemented
;
11711 #ifdef TARGET_NR_getpagesize
11712 case TARGET_NR_getpagesize
:
11713 ret
= TARGET_PAGE_SIZE
;
11716 case TARGET_NR_gettid
:
11717 ret
= get_errno(gettid());
11719 #ifdef TARGET_NR_readahead
11720 case TARGET_NR_readahead
:
11721 #if TARGET_ABI_BITS == 32
11722 if (regpairs_aligned(cpu_env
, num
)) {
11727 ret
= get_errno(readahead(arg1
, target_offset64(arg2
, arg3
) , arg4
));
11729 ret
= get_errno(readahead(arg1
, arg2
, arg3
));
11734 #ifdef TARGET_NR_setxattr
11735 case TARGET_NR_listxattr
:
11736 case TARGET_NR_llistxattr
:
11740 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11742 ret
= -TARGET_EFAULT
;
11746 p
= lock_user_string(arg1
);
11748 if (num
== TARGET_NR_listxattr
) {
11749 ret
= get_errno(listxattr(p
, b
, arg3
));
11751 ret
= get_errno(llistxattr(p
, b
, arg3
));
11754 ret
= -TARGET_EFAULT
;
11756 unlock_user(p
, arg1
, 0);
11757 unlock_user(b
, arg2
, arg3
);
11760 case TARGET_NR_flistxattr
:
11764 b
= lock_user(VERIFY_WRITE
, arg2
, arg3
, 0);
11766 ret
= -TARGET_EFAULT
;
11770 ret
= get_errno(flistxattr(arg1
, b
, arg3
));
11771 unlock_user(b
, arg2
, arg3
);
11774 case TARGET_NR_setxattr
:
11775 case TARGET_NR_lsetxattr
:
11777 void *p
, *n
, *v
= 0;
11779 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11781 ret
= -TARGET_EFAULT
;
11785 p
= lock_user_string(arg1
);
11786 n
= lock_user_string(arg2
);
11788 if (num
== TARGET_NR_setxattr
) {
11789 ret
= get_errno(setxattr(p
, n
, v
, arg4
, arg5
));
11791 ret
= get_errno(lsetxattr(p
, n
, v
, arg4
, arg5
));
11794 ret
= -TARGET_EFAULT
;
11796 unlock_user(p
, arg1
, 0);
11797 unlock_user(n
, arg2
, 0);
11798 unlock_user(v
, arg3
, 0);
11801 case TARGET_NR_fsetxattr
:
11805 v
= lock_user(VERIFY_READ
, arg3
, arg4
, 1);
11807 ret
= -TARGET_EFAULT
;
11811 n
= lock_user_string(arg2
);
11813 ret
= get_errno(fsetxattr(arg1
, n
, v
, arg4
, arg5
));
11815 ret
= -TARGET_EFAULT
;
11817 unlock_user(n
, arg2
, 0);
11818 unlock_user(v
, arg3
, 0);
11821 case TARGET_NR_getxattr
:
11822 case TARGET_NR_lgetxattr
:
11824 void *p
, *n
, *v
= 0;
11826 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11828 ret
= -TARGET_EFAULT
;
11832 p
= lock_user_string(arg1
);
11833 n
= lock_user_string(arg2
);
11835 if (num
== TARGET_NR_getxattr
) {
11836 ret
= get_errno(getxattr(p
, n
, v
, arg4
));
11838 ret
= get_errno(lgetxattr(p
, n
, v
, arg4
));
11841 ret
= -TARGET_EFAULT
;
11843 unlock_user(p
, arg1
, 0);
11844 unlock_user(n
, arg2
, 0);
11845 unlock_user(v
, arg3
, arg4
);
11848 case TARGET_NR_fgetxattr
:
11852 v
= lock_user(VERIFY_WRITE
, arg3
, arg4
, 0);
11854 ret
= -TARGET_EFAULT
;
11858 n
= lock_user_string(arg2
);
11860 ret
= get_errno(fgetxattr(arg1
, n
, v
, arg4
));
11862 ret
= -TARGET_EFAULT
;
11864 unlock_user(n
, arg2
, 0);
11865 unlock_user(v
, arg3
, arg4
);
11868 case TARGET_NR_removexattr
:
11869 case TARGET_NR_lremovexattr
:
11872 p
= lock_user_string(arg1
);
11873 n
= lock_user_string(arg2
);
11875 if (num
== TARGET_NR_removexattr
) {
11876 ret
= get_errno(removexattr(p
, n
));
11878 ret
= get_errno(lremovexattr(p
, n
));
11881 ret
= -TARGET_EFAULT
;
11883 unlock_user(p
, arg1
, 0);
11884 unlock_user(n
, arg2
, 0);
11887 case TARGET_NR_fremovexattr
:
11890 n
= lock_user_string(arg2
);
11892 ret
= get_errno(fremovexattr(arg1
, n
));
11894 ret
= -TARGET_EFAULT
;
11896 unlock_user(n
, arg2
, 0);
11900 #endif /* CONFIG_ATTR */
11901 #ifdef TARGET_NR_set_thread_area
11902 case TARGET_NR_set_thread_area
:
11903 #if defined(TARGET_MIPS)
11904 ((CPUMIPSState
*) cpu_env
)->active_tc
.CP0_UserLocal
= arg1
;
11907 #elif defined(TARGET_CRIS)
11909 ret
= -TARGET_EINVAL
;
11911 ((CPUCRISState
*) cpu_env
)->pregs
[PR_PID
] = arg1
;
11915 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11916 ret
= do_set_thread_area(cpu_env
, arg1
);
11918 #elif defined(TARGET_M68K)
11920 TaskState
*ts
= cpu
->opaque
;
11921 ts
->tp_value
= arg1
;
11926 goto unimplemented_nowarn
;
11929 #ifdef TARGET_NR_get_thread_area
11930 case TARGET_NR_get_thread_area
:
11931 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11932 ret
= do_get_thread_area(cpu_env
, arg1
);
11934 #elif defined(TARGET_M68K)
11936 TaskState
*ts
= cpu
->opaque
;
11937 ret
= ts
->tp_value
;
11941 goto unimplemented_nowarn
;
11944 #ifdef TARGET_NR_getdomainname
11945 case TARGET_NR_getdomainname
:
11946 goto unimplemented_nowarn
;
11949 #ifdef TARGET_NR_clock_settime
11950 case TARGET_NR_clock_settime
:
11952 struct timespec ts
;
11954 ret
= target_to_host_timespec(&ts
, arg2
);
11955 if (!is_error(ret
)) {
11956 ret
= get_errno(clock_settime(arg1
, &ts
));
11961 #ifdef TARGET_NR_clock_gettime
11962 case TARGET_NR_clock_gettime
:
11964 struct timespec ts
;
11965 ret
= get_errno(clock_gettime(arg1
, &ts
));
11966 if (!is_error(ret
)) {
11967 ret
= host_to_target_timespec(arg2
, &ts
);
11972 #ifdef TARGET_NR_clock_getres
11973 case TARGET_NR_clock_getres
:
11975 struct timespec ts
;
11976 ret
= get_errno(clock_getres(arg1
, &ts
));
11977 if (!is_error(ret
)) {
11978 host_to_target_timespec(arg2
, &ts
);
11983 #ifdef TARGET_NR_clock_nanosleep
11984 case TARGET_NR_clock_nanosleep
:
11986 struct timespec ts
;
11987 target_to_host_timespec(&ts
, arg3
);
11988 ret
= get_errno(safe_clock_nanosleep(arg1
, arg2
,
11989 &ts
, arg4
? &ts
: NULL
));
11991 host_to_target_timespec(arg4
, &ts
);
11993 #if defined(TARGET_PPC)
11994 /* clock_nanosleep is odd in that it returns positive errno values.
11995 * On PPC, CR0 bit 3 should be set in such a situation. */
11996 if (ret
&& ret
!= -TARGET_ERESTARTSYS
) {
11997 ((CPUPPCState
*)cpu_env
)->crf
[0] |= 1;
12004 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12005 case TARGET_NR_set_tid_address
:
12006 ret
= get_errno(set_tid_address((int *)g2h(arg1
)));
12010 case TARGET_NR_tkill
:
12011 ret
= get_errno(safe_tkill((int)arg1
, target_to_host_signal(arg2
)));
12014 case TARGET_NR_tgkill
:
12015 ret
= get_errno(safe_tgkill((int)arg1
, (int)arg2
,
12016 target_to_host_signal(arg3
)));
12019 #ifdef TARGET_NR_set_robust_list
12020 case TARGET_NR_set_robust_list
:
12021 case TARGET_NR_get_robust_list
:
12022 /* The ABI for supporting robust futexes has userspace pass
12023 * the kernel a pointer to a linked list which is updated by
12024 * userspace after the syscall; the list is walked by the kernel
12025 * when the thread exits. Since the linked list in QEMU guest
12026 * memory isn't a valid linked list for the host and we have
12027 * no way to reliably intercept the thread-death event, we can't
12028 * support these. Silently return ENOSYS so that guest userspace
12029 * falls back to a non-robust futex implementation (which should
12030 * be OK except in the corner case of the guest crashing while
12031 * holding a mutex that is shared with another process via
12034 goto unimplemented_nowarn
;
12037 #if defined(TARGET_NR_utimensat)
12038 case TARGET_NR_utimensat
:
12040 struct timespec
*tsp
, ts
[2];
12044 target_to_host_timespec(ts
, arg3
);
12045 target_to_host_timespec(ts
+1, arg3
+sizeof(struct target_timespec
));
12049 ret
= get_errno(sys_utimensat(arg1
, NULL
, tsp
, arg4
));
12051 if (!(p
= lock_user_string(arg2
))) {
12052 ret
= -TARGET_EFAULT
;
12055 ret
= get_errno(sys_utimensat(arg1
, path(p
), tsp
, arg4
));
12056 unlock_user(p
, arg2
, 0);
12061 case TARGET_NR_futex
:
12062 ret
= do_futex(arg1
, arg2
, arg3
, arg4
, arg5
, arg6
);
12064 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12065 case TARGET_NR_inotify_init
:
12066 ret
= get_errno(sys_inotify_init());
12068 fd_trans_register(ret
, &target_inotify_trans
);
12072 #ifdef CONFIG_INOTIFY1
12073 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12074 case TARGET_NR_inotify_init1
:
12075 ret
= get_errno(sys_inotify_init1(target_to_host_bitmask(arg1
,
12076 fcntl_flags_tbl
)));
12078 fd_trans_register(ret
, &target_inotify_trans
);
12083 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12084 case TARGET_NR_inotify_add_watch
:
12085 p
= lock_user_string(arg2
);
12086 ret
= get_errno(sys_inotify_add_watch(arg1
, path(p
), arg3
));
12087 unlock_user(p
, arg2
, 0);
12090 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12091 case TARGET_NR_inotify_rm_watch
:
12092 ret
= get_errno(sys_inotify_rm_watch(arg1
, arg2
));
12096 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12097 case TARGET_NR_mq_open
:
12099 struct mq_attr posix_mq_attr
;
12100 struct mq_attr
*pposix_mq_attr
;
12103 host_flags
= target_to_host_bitmask(arg2
, fcntl_flags_tbl
);
12104 pposix_mq_attr
= NULL
;
12106 if (copy_from_user_mq_attr(&posix_mq_attr
, arg4
) != 0) {
12109 pposix_mq_attr
= &posix_mq_attr
;
12111 p
= lock_user_string(arg1
- 1);
12115 ret
= get_errno(mq_open(p
, host_flags
, arg3
, pposix_mq_attr
));
12116 unlock_user (p
, arg1
, 0);
12120 case TARGET_NR_mq_unlink
:
12121 p
= lock_user_string(arg1
- 1);
12123 ret
= -TARGET_EFAULT
;
12126 ret
= get_errno(mq_unlink(p
));
12127 unlock_user (p
, arg1
, 0);
12130 case TARGET_NR_mq_timedsend
:
12132 struct timespec ts
;
12134 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
12136 target_to_host_timespec(&ts
, arg5
);
12137 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, &ts
));
12138 host_to_target_timespec(arg5
, &ts
);
12140 ret
= get_errno(safe_mq_timedsend(arg1
, p
, arg3
, arg4
, NULL
));
12142 unlock_user (p
, arg2
, arg3
);
12146 case TARGET_NR_mq_timedreceive
:
12148 struct timespec ts
;
12151 p
= lock_user (VERIFY_READ
, arg2
, arg3
, 1);
12153 target_to_host_timespec(&ts
, arg5
);
12154 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12156 host_to_target_timespec(arg5
, &ts
);
12158 ret
= get_errno(safe_mq_timedreceive(arg1
, p
, arg3
,
12161 unlock_user (p
, arg2
, arg3
);
12163 put_user_u32(prio
, arg4
);
12167 /* Not implemented for now... */
12168 /* case TARGET_NR_mq_notify: */
12171 case TARGET_NR_mq_getsetattr
:
12173 struct mq_attr posix_mq_attr_in
, posix_mq_attr_out
;
12176 copy_from_user_mq_attr(&posix_mq_attr_in
, arg2
);
12177 ret
= get_errno(mq_setattr(arg1
, &posix_mq_attr_in
,
12178 &posix_mq_attr_out
));
12179 } else if (arg3
!= 0) {
12180 ret
= get_errno(mq_getattr(arg1
, &posix_mq_attr_out
));
12182 if (ret
== 0 && arg3
!= 0) {
12183 copy_to_user_mq_attr(arg3
, &posix_mq_attr_out
);
12189 #ifdef CONFIG_SPLICE
12190 #ifdef TARGET_NR_tee
12191 case TARGET_NR_tee
:
12193 ret
= get_errno(tee(arg1
,arg2
,arg3
,arg4
));
12197 #ifdef TARGET_NR_splice
12198 case TARGET_NR_splice
:
12200 loff_t loff_in
, loff_out
;
12201 loff_t
*ploff_in
= NULL
, *ploff_out
= NULL
;
12203 if (get_user_u64(loff_in
, arg2
)) {
12206 ploff_in
= &loff_in
;
12209 if (get_user_u64(loff_out
, arg4
)) {
12212 ploff_out
= &loff_out
;
12214 ret
= get_errno(splice(arg1
, ploff_in
, arg3
, ploff_out
, arg5
, arg6
));
12216 if (put_user_u64(loff_in
, arg2
)) {
12221 if (put_user_u64(loff_out
, arg4
)) {
12228 #ifdef TARGET_NR_vmsplice
12229 case TARGET_NR_vmsplice
:
12231 struct iovec
*vec
= lock_iovec(VERIFY_READ
, arg2
, arg3
, 1);
12233 ret
= get_errno(vmsplice(arg1
, vec
, arg3
, arg4
));
12234 unlock_iovec(vec
, arg2
, arg3
, 0);
12236 ret
= -host_to_target_errno(errno
);
12241 #endif /* CONFIG_SPLICE */
12242 #ifdef CONFIG_EVENTFD
12243 #if defined(TARGET_NR_eventfd)
12244 case TARGET_NR_eventfd
:
12245 ret
= get_errno(eventfd(arg1
, 0));
12247 fd_trans_register(ret
, &target_eventfd_trans
);
12251 #if defined(TARGET_NR_eventfd2)
12252 case TARGET_NR_eventfd2
:
12254 int host_flags
= arg2
& (~(TARGET_O_NONBLOCK
| TARGET_O_CLOEXEC
));
12255 if (arg2
& TARGET_O_NONBLOCK
) {
12256 host_flags
|= O_NONBLOCK
;
12258 if (arg2
& TARGET_O_CLOEXEC
) {
12259 host_flags
|= O_CLOEXEC
;
12261 ret
= get_errno(eventfd(arg1
, host_flags
));
12263 fd_trans_register(ret
, &target_eventfd_trans
);
12268 #endif /* CONFIG_EVENTFD */
12269 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12270 case TARGET_NR_fallocate
:
12271 #if TARGET_ABI_BITS == 32
12272 ret
= get_errno(fallocate(arg1
, arg2
, target_offset64(arg3
, arg4
),
12273 target_offset64(arg5
, arg6
)));
12275 ret
= get_errno(fallocate(arg1
, arg2
, arg3
, arg4
));
12279 #if defined(CONFIG_SYNC_FILE_RANGE)
12280 #if defined(TARGET_NR_sync_file_range)
12281 case TARGET_NR_sync_file_range
:
12282 #if TARGET_ABI_BITS == 32
12283 #if defined(TARGET_MIPS)
12284 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12285 target_offset64(arg5
, arg6
), arg7
));
12287 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg2
, arg3
),
12288 target_offset64(arg4
, arg5
), arg6
));
12289 #endif /* !TARGET_MIPS */
12291 ret
= get_errno(sync_file_range(arg1
, arg2
, arg3
, arg4
));
12295 #if defined(TARGET_NR_sync_file_range2)
12296 case TARGET_NR_sync_file_range2
:
12297 /* This is like sync_file_range but the arguments are reordered */
12298 #if TARGET_ABI_BITS == 32
12299 ret
= get_errno(sync_file_range(arg1
, target_offset64(arg3
, arg4
),
12300 target_offset64(arg5
, arg6
), arg2
));
12302 ret
= get_errno(sync_file_range(arg1
, arg3
, arg4
, arg2
));
12307 #if defined(TARGET_NR_signalfd4)
12308 case TARGET_NR_signalfd4
:
12309 ret
= do_signalfd4(arg1
, arg2
, arg4
);
12312 #if defined(TARGET_NR_signalfd)
12313 case TARGET_NR_signalfd
:
12314 ret
= do_signalfd4(arg1
, arg2
, 0);
12317 #if defined(CONFIG_EPOLL)
12318 #if defined(TARGET_NR_epoll_create)
12319 case TARGET_NR_epoll_create
:
12320 ret
= get_errno(epoll_create(arg1
));
12323 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12324 case TARGET_NR_epoll_create1
:
12325 ret
= get_errno(epoll_create1(arg1
));
12328 #if defined(TARGET_NR_epoll_ctl)
12329 case TARGET_NR_epoll_ctl
:
12331 struct epoll_event ep
;
12332 struct epoll_event
*epp
= 0;
12334 struct target_epoll_event
*target_ep
;
12335 if (!lock_user_struct(VERIFY_READ
, target_ep
, arg4
, 1)) {
12338 ep
.events
= tswap32(target_ep
->events
);
12339 /* The epoll_data_t union is just opaque data to the kernel,
12340 * so we transfer all 64 bits across and need not worry what
12341 * actual data type it is.
12343 ep
.data
.u64
= tswap64(target_ep
->data
.u64
);
12344 unlock_user_struct(target_ep
, arg4
, 0);
12347 ret
= get_errno(epoll_ctl(arg1
, arg2
, arg3
, epp
));
12352 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12353 #if defined(TARGET_NR_epoll_wait)
12354 case TARGET_NR_epoll_wait
:
12356 #if defined(TARGET_NR_epoll_pwait)
12357 case TARGET_NR_epoll_pwait
:
12360 struct target_epoll_event
*target_ep
;
12361 struct epoll_event
*ep
;
12363 int maxevents
= arg3
;
12364 int timeout
= arg4
;
12366 if (maxevents
<= 0 || maxevents
> TARGET_EP_MAX_EVENTS
) {
12367 ret
= -TARGET_EINVAL
;
12371 target_ep
= lock_user(VERIFY_WRITE
, arg2
,
12372 maxevents
* sizeof(struct target_epoll_event
), 1);
12377 ep
= g_try_new(struct epoll_event
, maxevents
);
12379 unlock_user(target_ep
, arg2
, 0);
12380 ret
= -TARGET_ENOMEM
;
12385 #if defined(TARGET_NR_epoll_pwait)
12386 case TARGET_NR_epoll_pwait
:
12388 target_sigset_t
*target_set
;
12389 sigset_t _set
, *set
= &_set
;
12392 if (arg6
!= sizeof(target_sigset_t
)) {
12393 ret
= -TARGET_EINVAL
;
12397 target_set
= lock_user(VERIFY_READ
, arg5
,
12398 sizeof(target_sigset_t
), 1);
12400 ret
= -TARGET_EFAULT
;
12403 target_to_host_sigset(set
, target_set
);
12404 unlock_user(target_set
, arg5
, 0);
12409 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12410 set
, SIGSET_T_SIZE
));
12414 #if defined(TARGET_NR_epoll_wait)
12415 case TARGET_NR_epoll_wait
:
12416 ret
= get_errno(safe_epoll_pwait(epfd
, ep
, maxevents
, timeout
,
12421 ret
= -TARGET_ENOSYS
;
12423 if (!is_error(ret
)) {
12425 for (i
= 0; i
< ret
; i
++) {
12426 target_ep
[i
].events
= tswap32(ep
[i
].events
);
12427 target_ep
[i
].data
.u64
= tswap64(ep
[i
].data
.u64
);
12429 unlock_user(target_ep
, arg2
,
12430 ret
* sizeof(struct target_epoll_event
));
12432 unlock_user(target_ep
, arg2
, 0);
12439 #ifdef TARGET_NR_prlimit64
12440 case TARGET_NR_prlimit64
:
12442 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12443 struct target_rlimit64
*target_rnew
, *target_rold
;
12444 struct host_rlimit64 rnew
, rold
, *rnewp
= 0;
12445 int resource
= target_to_host_resource(arg2
);
12447 if (!lock_user_struct(VERIFY_READ
, target_rnew
, arg3
, 1)) {
12450 rnew
.rlim_cur
= tswap64(target_rnew
->rlim_cur
);
12451 rnew
.rlim_max
= tswap64(target_rnew
->rlim_max
);
12452 unlock_user_struct(target_rnew
, arg3
, 0);
12456 ret
= get_errno(sys_prlimit64(arg1
, resource
, rnewp
, arg4
? &rold
: 0));
12457 if (!is_error(ret
) && arg4
) {
12458 if (!lock_user_struct(VERIFY_WRITE
, target_rold
, arg4
, 1)) {
12461 target_rold
->rlim_cur
= tswap64(rold
.rlim_cur
);
12462 target_rold
->rlim_max
= tswap64(rold
.rlim_max
);
12463 unlock_user_struct(target_rold
, arg4
, 1);
12468 #ifdef TARGET_NR_gethostname
12469 case TARGET_NR_gethostname
:
12471 char *name
= lock_user(VERIFY_WRITE
, arg1
, arg2
, 0);
12473 ret
= get_errno(gethostname(name
, arg2
));
12474 unlock_user(name
, arg1
, arg2
);
12476 ret
= -TARGET_EFAULT
;
12481 #ifdef TARGET_NR_atomic_cmpxchg_32
12482 case TARGET_NR_atomic_cmpxchg_32
:
12484 /* should use start_exclusive from main.c */
12485 abi_ulong mem_value
;
12486 if (get_user_u32(mem_value
, arg6
)) {
12487 target_siginfo_t info
;
12488 info
.si_signo
= SIGSEGV
;
12490 info
.si_code
= TARGET_SEGV_MAPERR
;
12491 info
._sifields
._sigfault
._addr
= arg6
;
12492 queue_signal((CPUArchState
*)cpu_env
, info
.si_signo
,
12493 QEMU_SI_FAULT
, &info
);
12497 if (mem_value
== arg2
)
12498 put_user_u32(arg1
, arg6
);
12503 #ifdef TARGET_NR_atomic_barrier
12504 case TARGET_NR_atomic_barrier
:
12506 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
12512 #ifdef TARGET_NR_timer_create
12513 case TARGET_NR_timer_create
:
12515 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12517 struct sigevent host_sevp
= { {0}, }, *phost_sevp
= NULL
;
12520 int timer_index
= next_free_host_timer();
12522 if (timer_index
< 0) {
12523 ret
= -TARGET_EAGAIN
;
12525 timer_t
*phtimer
= g_posix_timers
+ timer_index
;
12528 phost_sevp
= &host_sevp
;
12529 ret
= target_to_host_sigevent(phost_sevp
, arg2
);
12535 ret
= get_errno(timer_create(clkid
, phost_sevp
, phtimer
));
12539 if (put_user(TIMER_MAGIC
| timer_index
, arg3
, target_timer_t
)) {
12548 #ifdef TARGET_NR_timer_settime
12549 case TARGET_NR_timer_settime
:
12551 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12552 * struct itimerspec * old_value */
12553 target_timer_t timerid
= get_timer_id(arg1
);
12557 } else if (arg3
== 0) {
12558 ret
= -TARGET_EINVAL
;
12560 timer_t htimer
= g_posix_timers
[timerid
];
12561 struct itimerspec hspec_new
= {{0},}, hspec_old
= {{0},};
12563 if (target_to_host_itimerspec(&hspec_new
, arg3
)) {
12567 timer_settime(htimer
, arg2
, &hspec_new
, &hspec_old
));
12568 if (arg4
&& host_to_target_itimerspec(arg4
, &hspec_old
)) {
12576 #ifdef TARGET_NR_timer_gettime
12577 case TARGET_NR_timer_gettime
:
12579 /* args: timer_t timerid, struct itimerspec *curr_value */
12580 target_timer_t timerid
= get_timer_id(arg1
);
12584 } else if (!arg2
) {
12585 ret
= -TARGET_EFAULT
;
12587 timer_t htimer
= g_posix_timers
[timerid
];
12588 struct itimerspec hspec
;
12589 ret
= get_errno(timer_gettime(htimer
, &hspec
));
12591 if (host_to_target_itimerspec(arg2
, &hspec
)) {
12592 ret
= -TARGET_EFAULT
;
12599 #ifdef TARGET_NR_timer_getoverrun
12600 case TARGET_NR_timer_getoverrun
:
12602 /* args: timer_t timerid */
12603 target_timer_t timerid
= get_timer_id(arg1
);
12608 timer_t htimer
= g_posix_timers
[timerid
];
12609 ret
= get_errno(timer_getoverrun(htimer
));
12611 fd_trans_unregister(ret
);
12616 #ifdef TARGET_NR_timer_delete
12617 case TARGET_NR_timer_delete
:
12619 /* args: timer_t timerid */
12620 target_timer_t timerid
= get_timer_id(arg1
);
12625 timer_t htimer
= g_posix_timers
[timerid
];
12626 ret
= get_errno(timer_delete(htimer
));
12627 g_posix_timers
[timerid
] = 0;
12633 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12634 case TARGET_NR_timerfd_create
:
12635 ret
= get_errno(timerfd_create(arg1
,
12636 target_to_host_bitmask(arg2
, fcntl_flags_tbl
)));
12640 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12641 case TARGET_NR_timerfd_gettime
:
12643 struct itimerspec its_curr
;
12645 ret
= get_errno(timerfd_gettime(arg1
, &its_curr
));
12647 if (arg2
&& host_to_target_itimerspec(arg2
, &its_curr
)) {
12654 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12655 case TARGET_NR_timerfd_settime
:
12657 struct itimerspec its_new
, its_old
, *p_new
;
12660 if (target_to_host_itimerspec(&its_new
, arg3
)) {
12668 ret
= get_errno(timerfd_settime(arg1
, arg2
, p_new
, &its_old
));
12670 if (arg4
&& host_to_target_itimerspec(arg4
, &its_old
)) {
12677 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12678 case TARGET_NR_ioprio_get
:
12679 ret
= get_errno(ioprio_get(arg1
, arg2
));
12683 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12684 case TARGET_NR_ioprio_set
:
12685 ret
= get_errno(ioprio_set(arg1
, arg2
, arg3
));
12689 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12690 case TARGET_NR_setns
:
12691 ret
= get_errno(setns(arg1
, arg2
));
12694 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12695 case TARGET_NR_unshare
:
12696 ret
= get_errno(unshare(arg1
));
12699 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12700 case TARGET_NR_kcmp
:
12701 ret
= get_errno(kcmp(arg1
, arg2
, arg3
, arg4
, arg5
));
12707 gemu_log("qemu: Unsupported syscall: %d\n", num
);
12708 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
12709 unimplemented_nowarn
:
12711 ret
= -TARGET_ENOSYS
;
12716 gemu_log(" = " TARGET_ABI_FMT_ld
"\n", ret
);
12719 print_syscall_ret(num
, ret
);
12720 trace_guest_user_syscall_ret(cpu
, num
, ret
);
12723 ret
= -TARGET_EFAULT
;