/*
 * Scrape provenance (gitweb page header, kept for reference):
 *   hmp: Remove dead code in hmp_qemu_io()
 *   [qemu.git] / linux-user / syscall.c
 *   blob 7aa2c1d7206f371f7cb3ed60813ee3c93e0e1f33
 */
/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #ifdef __ia64__
39 int __clone2(int (*fn)(void *), void *child_stack_base,
40 size_t stack_size, int flags, void *arg, ...);
41 #endif
42 #include <sys/socket.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <sys/poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include "qemu-common.h"
59 #ifdef CONFIG_TIMERFD
60 #include <sys/timerfd.h>
61 #endif
62 #ifdef TARGET_GPROF
63 #include <sys/gmon.h>
64 #endif
65 #ifdef CONFIG_EVENTFD
66 #include <sys/eventfd.h>
67 #endif
68 #ifdef CONFIG_EPOLL
69 #include <sys/epoll.h>
70 #endif
71 #ifdef CONFIG_ATTR
72 #include "qemu/xattr.h"
73 #endif
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
76 #endif
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
90 #include <linux/kd.h>
91 #include <linux/mtio.h>
92 #include <linux/fs.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #include <linux/vt.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
107 #include <linux/if_bridge.h>
108 #endif
109 #include <linux/audit.h>
110 #include "linux_loop.h"
111 #include "uname.h"
113 #include "qemu.h"
#ifndef CLONE_IO
#define CLONE_IO                0x80000000      /* Clone io context */
#endif

/* We can't directly call the host clone syscall, because this will
 * badly confuse libc (breaking mutexes, for example). So we must
 * divide clone flags into:
 *  * flag combinations that look like pthread_create()
 *  * flag combinations that look like fork()
 *  * flags we can implement within QEMU itself
 *  * flags we can't support and will return an error for
 */
/* For thread creation, all these flags must be present; for
 * fork, none must be present.
 */
#define CLONE_THREAD_FLAGS                              \
    (CLONE_VM | CLONE_FS | CLONE_FILES |                \
     CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)

/* These flags are ignored:
 * CLONE_DETACHED is now ignored by the kernel;
 * CLONE_IO is just an optimisation hint to the I/O scheduler
 */
#define CLONE_IGNORED_FLAGS                     \
    (CLONE_DETACHED | CLONE_IO)

/* Flags for fork which we can implement within QEMU itself */
#define CLONE_OPTIONAL_FORK_FLAGS               \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)

/* Flags for thread creation which we can implement within QEMU itself */
#define CLONE_OPTIONAL_THREAD_FLAGS                             \
    (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
     CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)

#define CLONE_INVALID_FORK_FLAGS                                        \
    (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))

#define CLONE_INVALID_THREAD_FLAGS                                      \
    (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
       CLONE_IGNORED_FLAGS))

/* CLONE_VFORK is special cased early in do_fork(). The other flag bits
 * have almost all been allocated. We cannot support any of
 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
 * The checks against the invalid thread masks above will catch these.
 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
 */

//#define DEBUG
/* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
 * once. This exercises the codepaths for restart.
 */
//#define DEBUG_ERESTARTSYS

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH  _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])

/* Discard any libc-provided _syscallN macros; we define our own below. */
#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6
/* Macros generating thin static wrappers around the raw host syscall()
 * for 0..6 argument syscalls.  Each wrapper returns the raw syscall
 * result (host errno convention). */
#define _syscall0(type,name)            \
static type name (void)                 \
{                                       \
    return syscall(__NR_##name);        \
}

#define _syscall1(type,name,type1,arg1)         \
static type name (type1 arg1)                   \
{                                               \
    return syscall(__NR_##name, arg1);          \
}

#define _syscall2(type,name,type1,arg1,type2,arg2)      \
static type name (type1 arg1,type2 arg2)                \
{                                                       \
    return syscall(__NR_##name, arg1, arg2);            \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)   \
static type name (type1 arg1,type2 arg2,type3 arg3)             \
{                                                               \
    return syscall(__NR_##name, arg1, arg2, arg3);              \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)  \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)            \
{                                                                         \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4);                  \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,  \
                  type5,arg5)                                             \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{                                                                         \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);            \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,  \
                  type5,arg5,type6,arg6)                                  \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6)                                             \
{                                                                         \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);      \
}
/* Aliases so the _syscallN-generated sys_* wrappers resolve to the
 * corresponding host syscall numbers. */
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch

#if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
    defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif
253 #ifdef __NR_gettid
254 _syscall0(int, gettid)
255 #else
256 /* This is a replacement for the host gettid() and must return a host
257 errno. */
258 static int gettid(void) {
259 return -ENOSYS;
261 #endif
262 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
263 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
264 #endif
265 #if !defined(__NR_getdents) || \
266 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
267 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
268 #endif
269 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
270 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
271 loff_t *, res, uint, wh);
272 #endif
273 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
274 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
275 #ifdef __NR_exit_group
276 _syscall1(int,exit_group,int,error_code)
277 #endif
278 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
279 _syscall1(int,set_tid_address,int *,tidptr)
280 #endif
281 #if defined(TARGET_NR_futex) && defined(__NR_futex)
282 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
283 const struct timespec *,timeout,int *,uaddr2,int,val3)
284 #endif
285 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
286 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
287 unsigned long *, user_mask_ptr);
288 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
289 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
290 unsigned long *, user_mask_ptr);
291 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
292 void *, arg);
293 _syscall2(int, capget, struct __user_cap_header_struct *, header,
294 struct __user_cap_data_struct *, data);
295 _syscall2(int, capset, struct __user_cap_header_struct *, header,
296 struct __user_cap_data_struct *, data);
297 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
298 _syscall2(int, ioprio_get, int, which, int, who)
299 #endif
300 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
301 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
302 #endif
303 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
304 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
305 #endif
307 static bitmask_transtbl fcntl_flags_tbl[] = {
308 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
309 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
310 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
311 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
312 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
313 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
314 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
315 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
316 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
317 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
318 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
319 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
320 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
321 #if defined(O_DIRECT)
322 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
323 #endif
324 #if defined(O_NOATIME)
325 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
326 #endif
327 #if defined(O_CLOEXEC)
328 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
329 #endif
330 #if defined(O_PATH)
331 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
332 #endif
333 /* Don't terminate the list prematurely on 64-bit host+guest. */
334 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
335 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
336 #endif
337 { 0, 0, 0, 0 }
/* QEMU-local copy of the kernel's IFLA_BR_* rtnetlink bridge attribute
 * numbering, so netlink translation does not depend on host header age. */
enum {
    QEMU_IFLA_BR_UNSPEC,
    QEMU_IFLA_BR_FORWARD_DELAY,
    QEMU_IFLA_BR_HELLO_TIME,
    QEMU_IFLA_BR_MAX_AGE,
    QEMU_IFLA_BR_AGEING_TIME,
    QEMU_IFLA_BR_STP_STATE,
    QEMU_IFLA_BR_PRIORITY,
    QEMU_IFLA_BR_VLAN_FILTERING,
    QEMU_IFLA_BR_VLAN_PROTOCOL,
    QEMU_IFLA_BR_GROUP_FWD_MASK,
    QEMU_IFLA_BR_ROOT_ID,
    QEMU_IFLA_BR_BRIDGE_ID,
    QEMU_IFLA_BR_ROOT_PORT,
    QEMU_IFLA_BR_ROOT_PATH_COST,
    QEMU_IFLA_BR_TOPOLOGY_CHANGE,
    QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
    QEMU_IFLA_BR_HELLO_TIMER,
    QEMU_IFLA_BR_TCN_TIMER,
    QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER,
    QEMU_IFLA_BR_GC_TIMER,
    QEMU_IFLA_BR_GROUP_ADDR,
    QEMU_IFLA_BR_FDB_FLUSH,
    QEMU_IFLA_BR_MCAST_ROUTER,
    QEMU_IFLA_BR_MCAST_SNOOPING,
    QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR,
    QEMU_IFLA_BR_MCAST_QUERIER,
    QEMU_IFLA_BR_MCAST_HASH_ELASTICITY,
    QEMU_IFLA_BR_MCAST_HASH_MAX,
    QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT,
    QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT,
    QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL,
    QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL,
    QEMU_IFLA_BR_MCAST_QUERIER_INTVL,
    QEMU_IFLA_BR_MCAST_QUERY_INTVL,
    QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
    QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
    QEMU_IFLA_BR_NF_CALL_IPTABLES,
    QEMU_IFLA_BR_NF_CALL_IP6TABLES,
    QEMU_IFLA_BR_NF_CALL_ARPTABLES,
    QEMU_IFLA_BR_VLAN_DEFAULT_PVID,
    QEMU_IFLA_BR_PAD,
    QEMU_IFLA_BR_VLAN_STATS_ENABLED,
    QEMU_IFLA_BR_MCAST_STATS_ENABLED,
    QEMU___IFLA_BR_MAX,
};
/* QEMU-local copy of the kernel's IFLA_* link attribute numbering. */
enum {
    QEMU_IFLA_UNSPEC,
    QEMU_IFLA_ADDRESS,
    QEMU_IFLA_BROADCAST,
    QEMU_IFLA_IFNAME,
    QEMU_IFLA_MTU,
    QEMU_IFLA_LINK,
    QEMU_IFLA_QDISC,
    QEMU_IFLA_STATS,
    QEMU_IFLA_COST,
    QEMU_IFLA_PRIORITY,
    QEMU_IFLA_MASTER,
    QEMU_IFLA_WIRELESS,
    QEMU_IFLA_PROTINFO,
    QEMU_IFLA_TXQLEN,
    QEMU_IFLA_MAP,
    QEMU_IFLA_WEIGHT,
    QEMU_IFLA_OPERSTATE,
    QEMU_IFLA_LINKMODE,
    QEMU_IFLA_LINKINFO,
    QEMU_IFLA_NET_NS_PID,
    QEMU_IFLA_IFALIAS,
    QEMU_IFLA_NUM_VF,
    QEMU_IFLA_VFINFO_LIST,
    QEMU_IFLA_STATS64,
    QEMU_IFLA_VF_PORTS,
    QEMU_IFLA_PORT_SELF,
    QEMU_IFLA_AF_SPEC,
    QEMU_IFLA_GROUP,
    QEMU_IFLA_NET_NS_FD,
    QEMU_IFLA_EXT_MASK,
    QEMU_IFLA_PROMISCUITY,
    QEMU_IFLA_NUM_TX_QUEUES,
    QEMU_IFLA_NUM_RX_QUEUES,
    QEMU_IFLA_CARRIER,
    QEMU_IFLA_PHYS_PORT_ID,
    QEMU_IFLA_CARRIER_CHANGES,
    QEMU_IFLA_PHYS_SWITCH_ID,
    QEMU_IFLA_LINK_NETNSID,
    QEMU_IFLA_PHYS_PORT_NAME,
    QEMU_IFLA_PROTO_DOWN,
    QEMU_IFLA_GSO_MAX_SEGS,
    QEMU_IFLA_GSO_MAX_SIZE,
    QEMU_IFLA_PAD,
    QEMU_IFLA_XDP,
    QEMU___IFLA_MAX
};
/* QEMU-local copy of the kernel's IFLA_BRPORT_* bridge-port attributes. */
enum {
    QEMU_IFLA_BRPORT_UNSPEC,
    QEMU_IFLA_BRPORT_STATE,
    QEMU_IFLA_BRPORT_PRIORITY,
    QEMU_IFLA_BRPORT_COST,
    QEMU_IFLA_BRPORT_MODE,
    QEMU_IFLA_BRPORT_GUARD,
    QEMU_IFLA_BRPORT_PROTECT,
    QEMU_IFLA_BRPORT_FAST_LEAVE,
    QEMU_IFLA_BRPORT_LEARNING,
    QEMU_IFLA_BRPORT_UNICAST_FLOOD,
    QEMU_IFLA_BRPORT_PROXYARP,
    QEMU_IFLA_BRPORT_LEARNING_SYNC,
    QEMU_IFLA_BRPORT_PROXYARP_WIFI,
    QEMU_IFLA_BRPORT_ROOT_ID,
    QEMU_IFLA_BRPORT_BRIDGE_ID,
    QEMU_IFLA_BRPORT_DESIGNATED_PORT,
    QEMU_IFLA_BRPORT_DESIGNATED_COST,
    QEMU_IFLA_BRPORT_ID,
    QEMU_IFLA_BRPORT_NO,
    QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
    QEMU_IFLA_BRPORT_CONFIG_PENDING,
    QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER,
    QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER,
    QEMU_IFLA_BRPORT_HOLD_TIMER,
    QEMU_IFLA_BRPORT_FLUSH,
    QEMU_IFLA_BRPORT_MULTICAST_ROUTER,
    QEMU_IFLA_BRPORT_PAD,
    QEMU___IFLA_BRPORT_MAX
};
/* QEMU-local copies of the kernel's IFLA_INFO_*, IFLA_INET_* and
 * IFLA_INET6_* nested-attribute numberings. */
enum {
    QEMU_IFLA_INFO_UNSPEC,
    QEMU_IFLA_INFO_KIND,
    QEMU_IFLA_INFO_DATA,
    QEMU_IFLA_INFO_XSTATS,
    QEMU_IFLA_INFO_SLAVE_KIND,
    QEMU_IFLA_INFO_SLAVE_DATA,
    QEMU___IFLA_INFO_MAX,
};

enum {
    QEMU_IFLA_INET_UNSPEC,
    QEMU_IFLA_INET_CONF,
    QEMU___IFLA_INET_MAX,
};

enum {
    QEMU_IFLA_INET6_UNSPEC,
    QEMU_IFLA_INET6_FLAGS,
    QEMU_IFLA_INET6_CONF,
    QEMU_IFLA_INET6_STATS,
    QEMU_IFLA_INET6_MCAST,
    QEMU_IFLA_INET6_CACHEINFO,
    QEMU_IFLA_INET6_ICMP6STATS,
    QEMU_IFLA_INET6_TOKEN,
    QEMU_IFLA_INET6_ADDR_GEN_MODE,
    QEMU___IFLA_INET6_MAX
};
495 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
496 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
497 typedef struct TargetFdTrans {
498 TargetFdDataFunc host_to_target_data;
499 TargetFdDataFunc target_to_host_data;
500 TargetFdAddrFunc target_to_host_addr;
501 } TargetFdTrans;
503 static TargetFdTrans **target_fd_trans;
505 static unsigned int target_fd_max;
507 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
509 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
510 return target_fd_trans[fd]->target_to_host_data;
512 return NULL;
515 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
517 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
518 return target_fd_trans[fd]->host_to_target_data;
520 return NULL;
523 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
525 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
526 return target_fd_trans[fd]->target_to_host_addr;
528 return NULL;
531 static void fd_trans_register(int fd, TargetFdTrans *trans)
533 unsigned int oldmax;
535 if (fd >= target_fd_max) {
536 oldmax = target_fd_max;
537 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
538 target_fd_trans = g_renew(TargetFdTrans *,
539 target_fd_trans, target_fd_max);
540 memset((void *)(target_fd_trans + oldmax), 0,
541 (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
543 target_fd_trans[fd] = trans;
546 static void fd_trans_unregister(int fd)
548 if (fd >= 0 && fd < target_fd_max) {
549 target_fd_trans[fd] = NULL;
553 static void fd_trans_dup(int oldfd, int newfd)
555 fd_trans_unregister(newfd);
556 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
557 fd_trans_register(newfd, target_fd_trans[oldfd]);
/* getcwd() wrapper matching the kernel syscall convention: on success
 * return the string length including the NUL terminator; on failure
 * return -1 with errno set by getcwd(). */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return (-1);
    }
    return strlen(buf)+1;
}
#ifdef TARGET_NR_utimensat
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Host kernel lacks utimensat; report ENOSYS in libc style. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
  return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
  return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
  return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
  return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY  */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};

_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maxiumum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

/* Claim and return the index of the first free slot in g_posix_timers[],
 * or -1 if all 32 slots are in use. */
static inline int next_free_host_timer(void)
{
    int k ;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
/* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
672 #define ERRNO_TABLE_SIZE 1200
674 /* target_to_host_errno_table[] is initialized from
675 * host_to_target_errno_table[] in syscall_init(). */
676 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
680 * This list is the union of errno values overridden in asm-<arch>/errno.h
681 * minus the errnos that are not actually generic to all archs.
683 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
684 [EAGAIN] = TARGET_EAGAIN,
685 [EIDRM] = TARGET_EIDRM,
686 [ECHRNG] = TARGET_ECHRNG,
687 [EL2NSYNC] = TARGET_EL2NSYNC,
688 [EL3HLT] = TARGET_EL3HLT,
689 [EL3RST] = TARGET_EL3RST,
690 [ELNRNG] = TARGET_ELNRNG,
691 [EUNATCH] = TARGET_EUNATCH,
692 [ENOCSI] = TARGET_ENOCSI,
693 [EL2HLT] = TARGET_EL2HLT,
694 [EDEADLK] = TARGET_EDEADLK,
695 [ENOLCK] = TARGET_ENOLCK,
696 [EBADE] = TARGET_EBADE,
697 [EBADR] = TARGET_EBADR,
698 [EXFULL] = TARGET_EXFULL,
699 [ENOANO] = TARGET_ENOANO,
700 [EBADRQC] = TARGET_EBADRQC,
701 [EBADSLT] = TARGET_EBADSLT,
702 [EBFONT] = TARGET_EBFONT,
703 [ENOSTR] = TARGET_ENOSTR,
704 [ENODATA] = TARGET_ENODATA,
705 [ETIME] = TARGET_ETIME,
706 [ENOSR] = TARGET_ENOSR,
707 [ENONET] = TARGET_ENONET,
708 [ENOPKG] = TARGET_ENOPKG,
709 [EREMOTE] = TARGET_EREMOTE,
710 [ENOLINK] = TARGET_ENOLINK,
711 [EADV] = TARGET_EADV,
712 [ESRMNT] = TARGET_ESRMNT,
713 [ECOMM] = TARGET_ECOMM,
714 [EPROTO] = TARGET_EPROTO,
715 [EDOTDOT] = TARGET_EDOTDOT,
716 [EMULTIHOP] = TARGET_EMULTIHOP,
717 [EBADMSG] = TARGET_EBADMSG,
718 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
719 [EOVERFLOW] = TARGET_EOVERFLOW,
720 [ENOTUNIQ] = TARGET_ENOTUNIQ,
721 [EBADFD] = TARGET_EBADFD,
722 [EREMCHG] = TARGET_EREMCHG,
723 [ELIBACC] = TARGET_ELIBACC,
724 [ELIBBAD] = TARGET_ELIBBAD,
725 [ELIBSCN] = TARGET_ELIBSCN,
726 [ELIBMAX] = TARGET_ELIBMAX,
727 [ELIBEXEC] = TARGET_ELIBEXEC,
728 [EILSEQ] = TARGET_EILSEQ,
729 [ENOSYS] = TARGET_ENOSYS,
730 [ELOOP] = TARGET_ELOOP,
731 [ERESTART] = TARGET_ERESTART,
732 [ESTRPIPE] = TARGET_ESTRPIPE,
733 [ENOTEMPTY] = TARGET_ENOTEMPTY,
734 [EUSERS] = TARGET_EUSERS,
735 [ENOTSOCK] = TARGET_ENOTSOCK,
736 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
737 [EMSGSIZE] = TARGET_EMSGSIZE,
738 [EPROTOTYPE] = TARGET_EPROTOTYPE,
739 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
740 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
741 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
742 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
743 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
744 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
745 [EADDRINUSE] = TARGET_EADDRINUSE,
746 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
747 [ENETDOWN] = TARGET_ENETDOWN,
748 [ENETUNREACH] = TARGET_ENETUNREACH,
749 [ENETRESET] = TARGET_ENETRESET,
750 [ECONNABORTED] = TARGET_ECONNABORTED,
751 [ECONNRESET] = TARGET_ECONNRESET,
752 [ENOBUFS] = TARGET_ENOBUFS,
753 [EISCONN] = TARGET_EISCONN,
754 [ENOTCONN] = TARGET_ENOTCONN,
755 [EUCLEAN] = TARGET_EUCLEAN,
756 [ENOTNAM] = TARGET_ENOTNAM,
757 [ENAVAIL] = TARGET_ENAVAIL,
758 [EISNAM] = TARGET_EISNAM,
759 [EREMOTEIO] = TARGET_EREMOTEIO,
760 [ESHUTDOWN] = TARGET_ESHUTDOWN,
761 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
762 [ETIMEDOUT] = TARGET_ETIMEDOUT,
763 [ECONNREFUSED] = TARGET_ECONNREFUSED,
764 [EHOSTDOWN] = TARGET_EHOSTDOWN,
765 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
766 [EALREADY] = TARGET_EALREADY,
767 [EINPROGRESS] = TARGET_EINPROGRESS,
768 [ESTALE] = TARGET_ESTALE,
769 [ECANCELED] = TARGET_ECANCELED,
770 [ENOMEDIUM] = TARGET_ENOMEDIUM,
771 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
772 #ifdef ENOKEY
773 [ENOKEY] = TARGET_ENOKEY,
774 #endif
775 #ifdef EKEYEXPIRED
776 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
777 #endif
778 #ifdef EKEYREVOKED
779 [EKEYREVOKED] = TARGET_EKEYREVOKED,
780 #endif
781 #ifdef EKEYREJECTED
782 [EKEYREJECTED] = TARGET_EKEYREJECTED,
783 #endif
784 #ifdef EOWNERDEAD
785 [EOWNERDEAD] = TARGET_EOWNERDEAD,
786 #endif
787 #ifdef ENOTRECOVERABLE
788 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
789 #endif
792 static inline int host_to_target_errno(int err)
794 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
795 host_to_target_errno_table[err]) {
796 return host_to_target_errno_table[err];
798 return err;
801 static inline int target_to_host_errno(int err)
803 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
804 target_to_host_errno_table[err]) {
805 return target_to_host_errno_table[err];
807 return err;
810 static inline abi_long get_errno(abi_long ret)
812 if (ret == -1)
813 return -host_to_target_errno(errno);
814 else
815 return ret;
818 static inline int is_error(abi_long ret)
820 return (abi_ulong)ret >= (abi_ulong)(-4096);
823 const char *target_strerror(int err)
825 if (err == TARGET_ERESTARTSYS) {
826 return "To be restarted";
828 if (err == TARGET_QEMU_ESIGRETURN) {
829 return "Successful exit from sigreturn";
832 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
833 return NULL;
835 return strerror(target_to_host_errno(err));
/* Macros generating safe_NAME() wrappers that route through
 * safe_syscall() (defined elsewhere in QEMU) for 0..6 argument
 * syscalls. */
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
885 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
886 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
887 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
888 int, flags, mode_t, mode)
889 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
890 struct rusage *, rusage)
891 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
892 int, options, struct rusage *, rusage)
893 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
894 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
895 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
896 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
897 struct timespec *, tsp, const sigset_t *, sigmask,
898 size_t, sigsetsize)
899 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
900 int, maxevents, int, timeout, const sigset_t *, sigmask,
901 size_t, sigsetsize)
902 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
903 const struct timespec *,timeout,int *,uaddr2,int,val3)
904 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
905 safe_syscall2(int, kill, pid_t, pid, int, sig)
906 safe_syscall2(int, tkill, int, tid, int, sig)
907 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
908 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
909 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
910 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
911 socklen_t, addrlen)
912 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
913 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
914 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
915 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
916 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
917 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
918 safe_syscall2(int, flock, int, fd, int, operation)
919 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
920 const struct timespec *, uts, size_t, sigsetsize)
921 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
922 int, flags)
923 safe_syscall2(int, nanosleep, const struct timespec *, req,
924 struct timespec *, rem)
925 #ifdef TARGET_NR_clock_nanosleep
926 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
927 const struct timespec *, req, struct timespec *, rem)
928 #endif
929 #ifdef __NR_msgsnd
930 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
931 int, flags)
932 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
933 long, msgtype, int, flags)
934 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
935 unsigned, nsops, const struct timespec *, timeout)
936 #else
937 /* This host kernel architecture uses a single ipc syscall; fake up
938 * wrappers for the sub-operations to hide this implementation detail.
939 * Annoyingly we can't include linux/ipc.h to get the constant definitions
940 * for the call parameter because some structs in there conflict with the
941 * sys/ipc.h ones. So we just define them here, and rely on them being
942 * the same for all host architectures.
944 #define Q_SEMTIMEDOP 4
945 #define Q_MSGSND 11
946 #define Q_MSGRCV 12
947 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
949 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
950 void *, ptr, long, fifth)
951 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
953 return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
955 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
957 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
959 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
960 const struct timespec *timeout)
962 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
963 (long)timeout);
965 #endif
966 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
967 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
968 size_t, len, unsigned, prio, const struct timespec *, timeout)
969 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
970 size_t, len, unsigned *, prio, const struct timespec *, timeout)
971 #endif
972 /* We do ioctl like this rather than via safe_syscall3 to preserve the
973 * "third argument might be integer or pointer or not present" behaviour of
974 * the libc function.
976 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
977 /* Similarly for fcntl. Note that callers must always:
978 * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
979 * use the flock64 struct rather than unsuffixed flock
980 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
982 #ifdef __NR_fcntl64
983 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
984 #else
985 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
986 #endif
988 static inline int host_to_target_sock_type(int host_type)
990 int target_type;
992 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
993 case SOCK_DGRAM:
994 target_type = TARGET_SOCK_DGRAM;
995 break;
996 case SOCK_STREAM:
997 target_type = TARGET_SOCK_STREAM;
998 break;
999 default:
1000 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
1001 break;
1004 #if defined(SOCK_CLOEXEC)
1005 if (host_type & SOCK_CLOEXEC) {
1006 target_type |= TARGET_SOCK_CLOEXEC;
1008 #endif
1010 #if defined(SOCK_NONBLOCK)
1011 if (host_type & SOCK_NONBLOCK) {
1012 target_type |= TARGET_SOCK_NONBLOCK;
1014 #endif
1016 return target_type;
/* Guest heap state: the current program break, the initial break set
 * at load time, and the host-page-aligned top of the memory currently
 * reserved for the heap.
 */
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;
1023 void target_set_brk(abi_ulong new_brk)
1025 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
1026 brk_page = HOST_PAGE_ALIGN(target_brk);
1029 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
1030 #define DEBUGF_BRK(message, args...)
/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    /* brk(0) is a query: report the current break without changing it. */
    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    /* Refuse to move the break below the initial break. */
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done... */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages. */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages. Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contains garbage data due to a previous heap usage (grown
         * then shrunken). */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value. */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
/* Unmarshal a guest fd_set (an array of abi_ulong bit words) into a
 * host fd_set covering n descriptors.
 * Returns 0 on success or -TARGET_EFAULT if guest memory is unreadable.
 */
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    /* Number of abi_ulong words needed to hold n bits. */
    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}
1142 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1143 abi_ulong target_fds_addr,
1144 int n)
1146 if (target_fds_addr) {
1147 if (copy_from_user_fdset(fds, target_fds_addr, n))
1148 return -TARGET_EFAULT;
1149 *fds_ptr = fds;
1150 } else {
1151 *fds_ptr = NULL;
1153 return 0;
/* Marshal a host fd_set back into the guest's array of abi_ulong bit
 * words at target_fds_addr, covering n descriptors.
 * Returns 0 on success or -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* accumulate one guest word, bit per descriptor */
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
1186 #if defined(__alpha__)
1187 #define HOST_HZ 1024
1188 #else
1189 #define HOST_HZ 100
1190 #endif
/* Rescale a clock tick count from the host's HZ to the target's HZ. */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    /* Multiply in 64 bits to avoid overflow before the division. */
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
/* Copy a host struct rusage out to guest memory, byte-swapping each
 * field. Returns 0 on success or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
1231 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1233 abi_ulong target_rlim_swap;
1234 rlim_t result;
1236 target_rlim_swap = tswapal(target_rlim);
1237 if (target_rlim_swap == TARGET_RLIM_INFINITY)
1238 return RLIM_INFINITY;
1240 result = target_rlim_swap;
1241 if (target_rlim_swap != (rlim_t)result)
1242 return RLIM_INFINITY;
1244 return result;
1247 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1249 abi_ulong target_rlim_swap;
1250 abi_ulong result;
1252 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1253 target_rlim_swap = TARGET_RLIM_INFINITY;
1254 else
1255 target_rlim_swap = rlim;
1256 result = tswapal(target_rlim_swap);
1258 return result;
/* Map a target RLIMIT_* resource number to the host constant.
 * Unknown codes are passed through unchanged for the host syscall
 * to reject.
 */
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
/* Fetch a struct timeval from guest memory, byte-swapping each field.
 * Returns 0 on success or -TARGET_EFAULT.
 */
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
/* Store a struct timeval into guest memory, byte-swapping each field.
 * Returns 0 on success or -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
/* Fetch a struct timezone from guest memory, byte-swapping each field.
 * Returns 0 on success or -TARGET_EFAULT.
 */
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
1348 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1349 #include <mqueue.h>
/* Fetch a struct mq_attr from guest memory, byte-swapping each field.
 * Returns 0 on success or -TARGET_EFAULT.
 */
static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}
/* Store a struct mq_attr into guest memory, byte-swapping each field.
 * Returns 0 on success or -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
1388 #endif
1390 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /* Pull the three (optional) fd sets in from guest memory. */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /* Convert the guest timeval (if any) to a timespec for pselect6. */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        /* Write the (possibly modified) sets and remaining timeout back. */
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
1448 #if defined(TARGET_WANT_OLD_SYS_SELECT)
/* The old-style select() passes its five arguments packed into a
 * struct in guest memory rather than individually; unpack them and
 * forward to do_select().
 */
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
1469 #endif
1470 #endif
/* Create a pipe with flags; returns -ENOSYS when the host has no pipe2(). */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}
/* Implement the guest pipe/pipe2 syscalls.  For plain pipe() some
 * targets return the second descriptor in a CPU register instead of
 * writing both to memory.
 */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
/* Copy an ip_mreq/ip_mreqn socket option from guest memory.  A len
 * shorter than target_ip_mreqn means the guest passed a plain ip_mreq,
 * so imr_ifindex is left untouched.  The IPv4 addresses are already in
 * network byte order and are copied as-is.
 * Returns 0 on success or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
/* Copy a sockaddr from guest memory into host format, byte-swapping
 * the family and any family-specific fields (netlink, packet).  An fd
 * with a registered address translator delegates entirely to it.
 * Returns 0 on success or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* NOTE(review): cp[len] reads one byte past the locked
             * region when len equals the locked length — presumably
             * safe in practice because lock_user rounds up; verify. */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
/* Copy a host sockaddr out to guest memory, byte-swapping the family
 * and any family-specific fields (netlink, packet).
 * Returns 0 on success or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    /* Only swap sa_family when the buffer is large enough to hold it. */
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
        struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
/* Convert the ancillary-data (cmsg) chain of a guest msghdr into the
 * host msghdr's control buffer, translating SCM_RIGHTS fds and
 * SCM_CREDENTIALS; anything else is copied verbatim with a warning.
 * Returns 0 on success or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length = total cmsg length minus the (target) header. */
        int len = tswapal(target_cmsg->cmsg_len)
            - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            gemu_log("Host cmsg overflow\n");
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* Passed file descriptors: swap each int individually. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
/* Convert the ancillary-data (cmsg) chain of a host msghdr back into
 * the guest msghdr's control buffer, translating SCM_RIGHTS,
 * SO_TIMESTAMP and SCM_CREDENTIALS payloads and reporting truncation
 * via MSG_CTRUNC, as the kernel's put_cmsg() does.
 * Returns 0 on success or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        tgt_len = TARGET_CMSG_LEN(len);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
        default:
            break;
        }

        if (msg_controllen < tgt_len) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen;
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(tgt_len);
        tgt_space = TARGET_CMSG_SPACE(len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
/* Byte-swap every field of a netlink message header in place.  Each
 * swap is its own inverse, so the same helper serves both conversion
 * directions.
 */
static void tswap_nlmsghdr(struct nlmsghdr *hdr)
{
    hdr->nlmsg_pid = tswap32(hdr->nlmsg_pid);
    hdr->nlmsg_seq = tswap32(hdr->nlmsg_seq);
    hdr->nlmsg_flags = tswap16(hdr->nlmsg_flags);
    hdr->nlmsg_type = tswap16(hdr->nlmsg_type);
    hdr->nlmsg_len = tswap32(hdr->nlmsg_len);
}
/* Walk a buffer of host netlink messages, byte-swapping each header
 * and delegating payload conversion of ordinary messages to the
 * callback.  NLMSG_DONE and NLMSG_ERROR end the walk.
 * Returns 0, or the callback's negative error.
 */
static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*host_to_target_nlmsg)
                                              (struct nlmsghdr *))
{
    uint32_t nlmsg_len;
    abi_long ret;

    while (len > sizeof(struct nlmsghdr)) {

        /* Save the (host-order) length before the header is swapped. */
        nlmsg_len = nlh->nlmsg_len;
        if (nlmsg_len < sizeof(struct nlmsghdr) ||
            nlmsg_len > len) {
            break;
        }

        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            tswap_nlmsghdr(nlh);
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            tswap_nlmsghdr(&e->msg);
            tswap_nlmsghdr(nlh);
            return 0;
        }
        default:
            ret = host_to_target_nlmsg(nlh);
            if (ret < 0) {
                tswap_nlmsghdr(nlh);
                return ret;
            }
            break;
        }
        tswap_nlmsghdr(nlh);
        len -= NLMSG_ALIGN(nlmsg_len);
        nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
    }
    return 0;
}
/* Walk a buffer of guest netlink messages, byte-swapping each header
 * (validated in target order first) and delegating payload conversion
 * of ordinary messages to the callback.  NLMSG_DONE and NLMSG_ERROR
 * end the walk.  Returns 0, or the callback's negative error.
 */
static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*target_to_host_nlmsg)
                                              (struct nlmsghdr *))
{
    int ret;

    while (len > sizeof(struct nlmsghdr)) {
        if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
            tswap32(nlh->nlmsg_len) > len) {
            break;
        }
        tswap_nlmsghdr(nlh);
        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            tswap_nlmsghdr(&e->msg);
            return 0;
        }
        default:
            ret = target_to_host_nlmsg(nlh);
            if (ret < 0) {
                return ret;
            }
        }
        len -= NLMSG_ALIGN(nlh->nlmsg_len);
        nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
    }
    return 0;
}
1939 #ifdef CONFIG_RTNETLINK
/* Walk a run of host netlink attributes, letting the callback convert
 * each payload (with access to the caller's context) before the
 * attribute header itself is byte-swapped.
 * Returns 0, or the callback's negative error.
 */
static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
                                               size_t len, void *context,
                                               abi_long (*host_to_target_nlattr)
                                               (struct nlattr *,
                                                void *context))
{
    unsigned short nla_len;
    abi_long ret;

    while (len > sizeof(struct nlattr)) {
        /* Save the host-order length before the header is swapped. */
        nla_len = nlattr->nla_len;
        if (nla_len < sizeof(struct nlattr) ||
            nla_len > len) {
            break;
        }
        ret = host_to_target_nlattr(nlattr, context);
        nlattr->nla_len = tswap16(nlattr->nla_len);
        nlattr->nla_type = tswap16(nlattr->nla_type);
        if (ret < 0) {
            return ret;
        }
        len -= NLA_ALIGN(nla_len);
        nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
    }
    return 0;
}
/* Walk a run of host rtnetlink attributes, letting the callback
 * convert each payload before the attribute header itself is
 * byte-swapped.  Returns 0, or the callback's negative error.
 */
static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
                                               size_t len,
                                               abi_long (*host_to_target_rtattr)
                                               (struct rtattr *))
{
    unsigned short rta_len;
    abi_long ret;

    while (len > sizeof(struct rtattr)) {
        /* Save the host-order length before the header is swapped. */
        rta_len = rtattr->rta_len;
        if (rta_len < sizeof(struct rtattr) ||
            rta_len > len) {
            break;
        }
        ret = host_to_target_rtattr(rtattr);
        rtattr->rta_len = tswap16(rtattr->rta_len);
        rtattr->rta_type = tswap16(rtattr->rta_type);
        if (ret < 0) {
            return ret;
        }
        len -= RTA_ALIGN(rta_len);
        rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
    }
    return 0;
}
1993 #define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
/* Byte-swap the payload of one IFLA_BR_* bridge attribute in place,
 * dispatching on the attribute type to pick the payload width.
 * Attributes with no payload, raw-byte payloads, or unknown types are
 * left untouched.  Always returns 0.
 */
static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
                                                  void *context)
{
    uint16_t *u16;
    uint32_t *u32;
    uint64_t *u64;

    switch (nlattr->nla_type) {
    /* no data */
    case QEMU_IFLA_BR_FDB_FLUSH:
        break;
    /* binary */
    case QEMU_IFLA_BR_GROUP_ADDR:
        break;
    /* uint8_t */
    case QEMU_IFLA_BR_VLAN_FILTERING:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
    case QEMU_IFLA_BR_MCAST_ROUTER:
    case QEMU_IFLA_BR_MCAST_SNOOPING:
    case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
    case QEMU_IFLA_BR_MCAST_QUERIER:
    case QEMU_IFLA_BR_NF_CALL_IPTABLES:
    case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
    case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
        break;
    /* uint16_t */
    case QEMU_IFLA_BR_PRIORITY:
    case QEMU_IFLA_BR_VLAN_PROTOCOL:
    case QEMU_IFLA_BR_GROUP_FWD_MASK:
    case QEMU_IFLA_BR_ROOT_PORT:
    case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
        u16 = NLA_DATA(nlattr);
        *u16 = tswap16(*u16);
        break;
    /* uint32_t */
    case QEMU_IFLA_BR_FORWARD_DELAY:
    case QEMU_IFLA_BR_HELLO_TIME:
    case QEMU_IFLA_BR_MAX_AGE:
    case QEMU_IFLA_BR_AGEING_TIME:
    case QEMU_IFLA_BR_STP_STATE:
    case QEMU_IFLA_BR_ROOT_PATH_COST:
    case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
    case QEMU_IFLA_BR_MCAST_HASH_MAX:
    case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
    case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint64_t */
    case QEMU_IFLA_BR_HELLO_TIMER:
    case QEMU_IFLA_BR_TCN_TIMER:
    case QEMU_IFLA_BR_GC_TIMER:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
    case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
    case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
    case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
        u64 = NLA_DATA(nlattr);
        *u64 = tswap64(*u64);
        break;
    /* ifla_bridge_id: uin8_t[] */
    case QEMU_IFLA_BR_ROOT_ID:
    case QEMU_IFLA_BR_BRIDGE_ID:
        break;
    default:
        gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}
/* Byte-swap the payload of one IFLA_BRPORT_* bridge-port attribute in
 * place, dispatching on the attribute type to pick the payload width.
 * Single-byte, raw-byte and unknown attributes are left untouched.
 * Always returns 0.
 */
static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
                                                        void *context)
{
    uint16_t *u16;
    uint32_t *u32;
    uint64_t *u64;

    switch (nlattr->nla_type) {
    /* uint8_t */
    case QEMU_IFLA_BRPORT_STATE:
    case QEMU_IFLA_BRPORT_MODE:
    case QEMU_IFLA_BRPORT_GUARD:
    case QEMU_IFLA_BRPORT_PROTECT:
    case QEMU_IFLA_BRPORT_FAST_LEAVE:
    case QEMU_IFLA_BRPORT_LEARNING:
    case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
    case QEMU_IFLA_BRPORT_PROXYARP:
    case QEMU_IFLA_BRPORT_LEARNING_SYNC:
    case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
    case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
    case QEMU_IFLA_BRPORT_CONFIG_PENDING:
    case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
        break;
    /* uint16_t */
    case QEMU_IFLA_BRPORT_PRIORITY:
    case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
    case QEMU_IFLA_BRPORT_DESIGNATED_COST:
    case QEMU_IFLA_BRPORT_ID:
    case QEMU_IFLA_BRPORT_NO:
        u16 = NLA_DATA(nlattr);
        *u16 = tswap16(*u16);
        break;
    /* uin32_t */
    case QEMU_IFLA_BRPORT_COST:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint64_t */
    case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
    case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
    case QEMU_IFLA_BRPORT_HOLD_TIMER:
        u64 = NLA_DATA(nlattr);
        *u64 = tswap64(*u64);
        break;
    /* ifla_bridge_id: uint8_t[] */
    case QEMU_IFLA_BRPORT_ROOT_ID:
    case QEMU_IFLA_BRPORT_BRIDGE_ID:
        break;
    default:
        gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}
/*
 * Carries the IFLA_INFO_KIND / IFLA_INFO_SLAVE_KIND strings between
 * attributes of one IFLA_LINKINFO nest, so that the later
 * IFLA_INFO_DATA / IFLA_INFO_SLAVE_DATA attributes can be dispatched
 * on the link kind ("bridge", ...).
 */
struct linkinfo_context {
    int len;            /* payload length of the kind attribute */
    char *name;         /* kind string; points into the nlattr payload */
    int slave_len;      /* payload length of the slave kind attribute */
    char *slave_name;   /* slave kind string; points into the payload */
};
/*
 * Byte-swap one IFLA_LINKINFO nested attribute from host to target
 * order.  KIND strings are recorded in @context (a linkinfo_context)
 * so the matching DATA attribute can be dispatched; only the "bridge"
 * kind is currently converted.
 */
static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
                                                    void *context)
{
    struct linkinfo_context *li_context = context;

    switch (nlattr->nla_type) {
    /* string */
    case QEMU_IFLA_INFO_KIND:
        li_context->name = NLA_DATA(nlattr);
        li_context->len = nlattr->nla_len - NLA_HDRLEN;
        break;
    case QEMU_IFLA_INFO_SLAVE_KIND:
        li_context->slave_name = NLA_DATA(nlattr);
        li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
        break;
    /* stats */
    case QEMU_IFLA_INFO_XSTATS:
        /* FIXME: only used by CAN */
        break;
    /* nested */
    case QEMU_IFLA_INFO_DATA:
        /* NOTE(review): assumes INFO_KIND was seen earlier in the nest so
         * li_context->name is non-NULL -- TODO confirm kernel ordering. */
        if (strncmp(li_context->name, "bridge",
                    li_context->len) == 0) {
            return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
                                                  nlattr->nla_len,
                                                  NULL,
                                                  host_to_target_data_bridge_nlattr);
        } else {
            gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
        }
        break;
    case QEMU_IFLA_INFO_SLAVE_DATA:
        if (strncmp(li_context->slave_name, "bridge",
                    li_context->slave_len) == 0) {
            return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
                                                  nlattr->nla_len,
                                                  NULL,
                                                  host_to_target_slave_data_bridge_nlattr);
        } else {
            gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
                     li_context->slave_name);
        }
        break;
    default:
        gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
        break;
    }

    return 0;
}
2182 static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
2183 void *context)
2185 uint32_t *u32;
2186 int i;
2188 switch (nlattr->nla_type) {
2189 case QEMU_IFLA_INET_CONF:
2190 u32 = NLA_DATA(nlattr);
2191 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2192 i++) {
2193 u32[i] = tswap32(u32[i]);
2195 break;
2196 default:
2197 gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
2199 return 0;
/*
 * Byte-swap the payload of one AF_INET6 netlink attribute from host
 * to target order, in place.  @context is unused; always returns 0.
 */
static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
                                                 void *context)
{
    uint32_t *u32;
    uint64_t *u64;
    struct ifla_cacheinfo *ci;
    int i;

    switch (nlattr->nla_type) {
    /* binaries: opaque byte strings, no swapping */
    case QEMU_IFLA_INET6_TOKEN:
        break;
    /* uint8_t */
    case QEMU_IFLA_INET6_ADDR_GEN_MODE:
        break;
    /* uint32_t */
    case QEMU_IFLA_INET6_FLAGS:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint32_t[] */
    case QEMU_IFLA_INET6_CONF:
        u32 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
             i++) {
            u32[i] = tswap32(u32[i]);
        }
        break;
    /* ifla_cacheinfo: four 32-bit fields */
    case QEMU_IFLA_INET6_CACHEINFO:
        ci = NLA_DATA(nlattr);
        ci->max_reasm_len = tswap32(ci->max_reasm_len);
        ci->tstamp = tswap32(ci->tstamp);
        ci->reachable_time = tswap32(ci->reachable_time);
        ci->retrans_time = tswap32(ci->retrans_time);
        break;
    /* uint64_t[] */
    case QEMU_IFLA_INET6_STATS:
    case QEMU_IFLA_INET6_ICMP6STATS:
        u64 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
             i++) {
            u64[i] = tswap64(u64[i]);
        }
        break;
    default:
        gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
    }
    return 0;
}
2253 static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
2254 void *context)
2256 switch (nlattr->nla_type) {
2257 case AF_INET:
2258 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2259 NULL,
2260 host_to_target_data_inet_nlattr);
2261 case AF_INET6:
2262 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2263 NULL,
2264 host_to_target_data_inet6_nlattr);
2265 default:
2266 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
2267 break;
2269 return 0;
/*
 * Byte-swap one IFLA_* link attribute from host to target order, in
 * place.  Nested LINKINFO / AF_SPEC attributes recurse through
 * host_to_target_for_each_nlattr().  Returns the nested walk's result
 * for nested types, otherwise 0; unknown types are only logged.
 */
static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct rtnl_link_stats *st;
    struct rtnl_link_stats64 *st64;
    struct rtnl_link_ifmap *map;
    struct linkinfo_context li_context;

    switch (rtattr->rta_type) {
    /* binary stream */
    case QEMU_IFLA_ADDRESS:
    case QEMU_IFLA_BROADCAST:
    /* string */
    case QEMU_IFLA_IFNAME:
    case QEMU_IFLA_QDISC:
        break;
    /* uint8_t */
    case QEMU_IFLA_OPERSTATE:
    case QEMU_IFLA_LINKMODE:
    case QEMU_IFLA_CARRIER:
    case QEMU_IFLA_PROTO_DOWN:
        break;
    /* uint32_t */
    case QEMU_IFLA_MTU:
    case QEMU_IFLA_LINK:
    case QEMU_IFLA_WEIGHT:
    case QEMU_IFLA_TXQLEN:
    case QEMU_IFLA_CARRIER_CHANGES:
    case QEMU_IFLA_NUM_RX_QUEUES:
    case QEMU_IFLA_NUM_TX_QUEUES:
    case QEMU_IFLA_PROMISCUITY:
    case QEMU_IFLA_EXT_MASK:
    case QEMU_IFLA_LINK_NETNSID:
    case QEMU_IFLA_GROUP:
    case QEMU_IFLA_MASTER:
    case QEMU_IFLA_NUM_VF:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct rtnl_link_stats: every field is a 32-bit counter */
    case QEMU_IFLA_STATS:
        st = RTA_DATA(rtattr);
        st->rx_packets = tswap32(st->rx_packets);
        st->tx_packets = tswap32(st->tx_packets);
        st->rx_bytes = tswap32(st->rx_bytes);
        st->tx_bytes = tswap32(st->tx_bytes);
        st->rx_errors = tswap32(st->rx_errors);
        st->tx_errors = tswap32(st->tx_errors);
        st->rx_dropped = tswap32(st->rx_dropped);
        st->tx_dropped = tswap32(st->tx_dropped);
        st->multicast = tswap32(st->multicast);
        st->collisions = tswap32(st->collisions);

        /* detailed rx_errors: */
        st->rx_length_errors = tswap32(st->rx_length_errors);
        st->rx_over_errors = tswap32(st->rx_over_errors);
        st->rx_crc_errors = tswap32(st->rx_crc_errors);
        st->rx_frame_errors = tswap32(st->rx_frame_errors);
        st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
        st->rx_missed_errors = tswap32(st->rx_missed_errors);

        /* detailed tx_errors */
        st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
        st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
        st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
        st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
        st->tx_window_errors = tswap32(st->tx_window_errors);

        /* for cslip etc */
        st->rx_compressed = tswap32(st->rx_compressed);
        st->tx_compressed = tswap32(st->tx_compressed);
        break;
    /* struct rtnl_link_stats64: same counters, 64-bit wide */
    case QEMU_IFLA_STATS64:
        st64 = RTA_DATA(rtattr);
        st64->rx_packets = tswap64(st64->rx_packets);
        st64->tx_packets = tswap64(st64->tx_packets);
        st64->rx_bytes = tswap64(st64->rx_bytes);
        st64->tx_bytes = tswap64(st64->tx_bytes);
        st64->rx_errors = tswap64(st64->rx_errors);
        st64->tx_errors = tswap64(st64->tx_errors);
        st64->rx_dropped = tswap64(st64->rx_dropped);
        st64->tx_dropped = tswap64(st64->tx_dropped);
        st64->multicast = tswap64(st64->multicast);
        st64->collisions = tswap64(st64->collisions);

        /* detailed rx_errors: */
        st64->rx_length_errors = tswap64(st64->rx_length_errors);
        st64->rx_over_errors = tswap64(st64->rx_over_errors);
        st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
        st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
        st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
        st64->rx_missed_errors = tswap64(st64->rx_missed_errors);

        /* detailed tx_errors */
        st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
        st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
        st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
        st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
        st64->tx_window_errors = tswap64(st64->tx_window_errors);

        /* for cslip etc */
        st64->rx_compressed = tswap64(st64->rx_compressed);
        st64->tx_compressed = tswap64(st64->tx_compressed);
        break;
    /* struct rtnl_link_ifmap */
    case QEMU_IFLA_MAP:
        map = RTA_DATA(rtattr);
        map->mem_start = tswap64(map->mem_start);
        map->mem_end = tswap64(map->mem_end);
        map->base_addr = tswap64(map->base_addr);
        map->irq = tswap16(map->irq);
        break;
    /* nested */
    case QEMU_IFLA_LINKINFO:
        memset(&li_context, 0, sizeof(li_context));
        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
                                              &li_context,
                                              host_to_target_data_linkinfo_nlattr);
    case QEMU_IFLA_AF_SPEC:
        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
                                              NULL,
                                              host_to_target_data_spec_nlattr);
    default:
        gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
/*
 * Byte-swap one IFA_* address attribute from host to target order, in
 * place.  Always returns 0; unknown types are only logged.
 */
static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct ifa_cacheinfo *ci;

    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case IFA_ADDRESS:
    case IFA_LOCAL:
        break;
    /* string */
    case IFA_LABEL:
        break;
    /* u32 */
    case IFA_FLAGS:
    case IFA_BROADCAST:
        /* NOTE(review): IFA_BROADCAST carries a network-order address;
         * swapping it as a u32 looks suspect -- TODO verify against the
         * kernel's rtnetlink attribute definitions. */
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct ifa_cacheinfo: four 32-bit fields */
    case IFA_CACHEINFO:
        ci = RTA_DATA(rtattr);
        ci->ifa_prefered = tswap32(ci->ifa_prefered);
        ci->ifa_valid = tswap32(ci->ifa_valid);
        ci->cstamp = tswap32(ci->cstamp);
        ci->tstamp = tswap32(ci->tstamp);
        break;
    default:
        gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
/*
 * Byte-swap one RTA_* route attribute from host to target order, in
 * place.  Always returns 0; unknown types are only logged.
 */
static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case RTA_GATEWAY:
    case RTA_DST:
    case RTA_PREFSRC:
        break;
    /* u32 */
    case RTA_PRIORITY:
    case RTA_TABLE:
    case RTA_OIF:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    default:
        gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
/* Byte-swap every link (IFLA_*) attribute in a host-format message. */
static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
                                           uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_link_rtattr);
}
/* Byte-swap every address (IFA_*) attribute in a host-format message. */
static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
                                           uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_addr_rtattr);
}
/* Byte-swap every route (RTA_*) attribute in a host-format message. */
static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
                                            uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_route_rtattr);
}
/*
 * Byte-swap one NETLINK_ROUTE message body (fixed header plus rtattr
 * chain) from host to target order.  Messages shorter than their
 * fixed header are left untouched; unsupported message types yield
 * -TARGET_EINVAL.
 */
static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
{
    uint32_t nlmsg_len;
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    nlmsg_len = nlh->nlmsg_len;
    switch (nlh->nlmsg_type) {
    case RTM_NEWLINK:
    case RTM_DELLINK:
    case RTM_GETLINK:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
            ifi = NLMSG_DATA(nlh);
            ifi->ifi_type = tswap16(ifi->ifi_type);
            ifi->ifi_index = tswap32(ifi->ifi_index);
            ifi->ifi_flags = tswap32(ifi->ifi_flags);
            ifi->ifi_change = tswap32(ifi->ifi_change);
            host_to_target_link_rtattr(IFLA_RTA(ifi),
                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
        }
        break;
    case RTM_NEWADDR:
    case RTM_DELADDR:
    case RTM_GETADDR:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
            ifa = NLMSG_DATA(nlh);
            ifa->ifa_index = tswap32(ifa->ifa_index);
            host_to_target_addr_rtattr(IFA_RTA(ifa),
                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
        }
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
    case RTM_GETROUTE:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
            rtm = NLMSG_DATA(nlh);
            rtm->rtm_flags = tswap32(rtm->rtm_flags);
            host_to_target_route_rtattr(RTM_RTA(rtm),
                                        nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
        }
        break;
    default:
        return -TARGET_EINVAL;
    }
    return 0;
}
/* Convert a whole NETLINK_ROUTE reply buffer from host to target order. */
static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
                                                  size_t len)
{
    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
}
/*
 * Walk a target-format rtattr chain, converting each attribute header
 * to host byte order and then applying @target_to_host_rtattr to it.
 * Stops silently on a truncated or malformed attribute.  Returns the
 * first negative callback result, otherwise 0.
 */
static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
                                               size_t len,
                                               abi_long (*target_to_host_rtattr)
                                               (struct rtattr *))
{
    abi_long ret;

    while (len >= sizeof(struct rtattr)) {
        /* header fields are still target-endian; validate before use */
        if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
            tswap16(rtattr->rta_len) > len) {
            break;
        }
        rtattr->rta_len = tswap16(rtattr->rta_len);
        rtattr->rta_type = tswap16(rtattr->rta_type);
        ret = target_to_host_rtattr(rtattr);
        if (ret < 0) {
            return ret;
        }
        /* NOTE(review): RTA_ALIGN(rta_len) can exceed the remaining len
         * by up to 3 pad bytes, making this size_t subtraction wrap --
         * TODO confirm callers always pass RTA_ALIGN-padded buffers. */
        len -= RTA_ALIGN(rtattr->rta_len);
        rtattr = (struct rtattr *)(((char *)rtattr) +
                                   RTA_ALIGN(rtattr->rta_len));
    }

    return 0;
}
/*
 * Convert one target link (IFLA_*) attribute payload to host order.
 * No attribute types are handled yet; everything is logged and passed
 * through unchanged.
 */
static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
{
    switch (rtattr->rta_type) {
    default:
        gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
/*
 * Convert one target address (IFA_*) attribute payload to host order.
 * Address payloads are family-dependent binaries and need no swapping.
 */
static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
{
    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case IFA_LOCAL:
    case IFA_ADDRESS:
        break;
    default:
        gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
/*
 * Convert one target route (RTA_*) attribute payload to host order,
 * in place.  Always returns 0; unknown types are only logged.
 */
static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    switch (rtattr->rta_type) {
    /* binary: depends on family type */
    case RTA_DST:
    case RTA_SRC:
    case RTA_GATEWAY:
        break;
    /* u32 */
    case RTA_OIF:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    default:
        gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
/* Convert every link (IFLA_*) attribute of a target message to host order. */
static void target_to_host_link_rtattr(struct rtattr *rtattr,
                                       uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_link_rtattr);
}
/* Convert every address (IFA_*) attribute of a target message to host order. */
static void target_to_host_addr_rtattr(struct rtattr *rtattr,
                                       uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_addr_rtattr);
}
/* Convert every route (RTA_*) attribute of a target message to host order. */
static void target_to_host_route_rtattr(struct rtattr *rtattr,
                                        uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_route_rtattr);
}
/*
 * Convert one NETLINK_ROUTE request body from target to host order.
 * GET requests carry no fixed header payload and pass through;
 * unsupported message types yield -TARGET_EOPNOTSUPP.
 */
static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
{
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    switch (nlh->nlmsg_type) {
    case RTM_GETLINK:
        break;
    case RTM_NEWLINK:
    case RTM_DELLINK:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
            ifi = NLMSG_DATA(nlh);
            ifi->ifi_type = tswap16(ifi->ifi_type);
            ifi->ifi_index = tswap32(ifi->ifi_index);
            ifi->ifi_flags = tswap32(ifi->ifi_flags);
            ifi->ifi_change = tswap32(ifi->ifi_change);
            target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
                                       NLMSG_LENGTH(sizeof(*ifi)));
        }
        break;
    case RTM_GETADDR:
    case RTM_NEWADDR:
    case RTM_DELADDR:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
            ifa = NLMSG_DATA(nlh);
            ifa->ifa_index = tswap32(ifa->ifa_index);
            target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
                                       NLMSG_LENGTH(sizeof(*ifa)));
        }
        break;
    case RTM_GETROUTE:
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
            rtm = NLMSG_DATA(nlh);
            rtm->rtm_flags = tswap32(rtm->rtm_flags);
            target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
                                        NLMSG_LENGTH(sizeof(*rtm)));
        }
        break;
    default:
        return -TARGET_EOPNOTSUPP;
    }
    return 0;
}
/* Convert a whole NETLINK_ROUTE request buffer from target to host order. */
static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
{
    return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
}
2677 #endif /* CONFIG_RTNETLINK */
/*
 * Convert one NETLINK_AUDIT reply from host to target order.
 * No message types are handled yet: everything is logged and rejected
 * with -TARGET_EINVAL (the trailing return 0 is unreachable).
 */
static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
{
    switch (nlh->nlmsg_type) {
    default:
        gemu_log("Unknown host audit message type %d\n",
                 nlh->nlmsg_type);
        return -TARGET_EINVAL;
    }

    return 0;
}
/* Convert a whole NETLINK_AUDIT reply buffer from host to target order. */
static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
                                                  size_t len)
{
    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
}
/*
 * Convert one NETLINK_AUDIT request from target to host order.  The
 * user message ranges carry free-form payload and pass through; any
 * other type is logged and rejected with -TARGET_EINVAL.
 */
static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
{
    switch (nlh->nlmsg_type) {
    case AUDIT_USER:
    case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
    case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
        break;
    default:
        gemu_log("Unknown target audit message type %d\n",
                 nlh->nlmsg_type);
        return -TARGET_EINVAL;
    }

    return 0;
}
/* Convert a whole NETLINK_AUDIT request buffer from target to host order. */
static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
{
    return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
}
/*
 * do_setsockopt() Must return target values and target errnos.
 *
 * Translates a guest setsockopt(2): converts target option levels,
 * option names and option payloads to their host equivalents, then
 * forwards to the host setsockopt().
 */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            /* int-valued options; the kernel also accepts a single byte */
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            /* accept either the short ip_mreq or the full ip_mreqn form */
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            /* payload passed through unchanged -- presumably the target
             * and host struct layouts match; NOTE(review): lock_user()
             * result is not checked for NULL here -- TODO confirm. */
            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
            val = 0;
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        default:
            goto unimplemented;
        }
        break;
    case SOL_RAW:
        switch (optname) {
        case ICMP_FILTER:
            /* struct icmp_filter takes an u32 value */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }

            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;

        default:
            goto unimplemented;
        }
        break;
    case TARGET_SOL_SOCKET:
        switch (optname) {
        case TARGET_SO_RCVTIMEO:
        {
                struct timeval tv;

                optname = SO_RCVTIMEO;

                /* shared tail for SO_RCVTIMEO and SO_SNDTIMEO (below) */
set_timeout:
                if (optlen != sizeof(struct target_timeval)) {
                    return -TARGET_EINVAL;
                }

                if (copy_from_user_timeval(&tv, optval_addr)) {
                    return -TARGET_EFAULT;
                }

                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                &tv, sizeof(tv)));
                return ret;
        }
        case TARGET_SO_SNDTIMEO:
                optname = SO_SNDTIMEO;
                goto set_timeout;
        case TARGET_SO_ATTACH_FILTER:
        {
                struct target_sock_fprog *tfprog;
                struct target_sock_filter *tfilter;
                struct sock_fprog fprog;
                struct sock_filter *filter;
                int i;

                if (optlen != sizeof(*tfprog)) {
                    return -TARGET_EINVAL;
                }
                if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
                    return -TARGET_EFAULT;
                }
                if (!lock_user_struct(VERIFY_READ, tfilter,
                                      tswapal(tfprog->filter), 0)) {
                    unlock_user_struct(tfprog, optval_addr, 1);
                    return -TARGET_EFAULT;
                }

                /* deep-copy the BPF program, swapping each instruction */
                fprog.len = tswap16(tfprog->len);
                filter = g_try_new(struct sock_filter, fprog.len);
                if (filter == NULL) {
                    unlock_user_struct(tfilter, tfprog->filter, 1);
                    unlock_user_struct(tfprog, optval_addr, 1);
                    return -TARGET_ENOMEM;
                }
                for (i = 0; i < fprog.len; i++) {
                    filter[i].code = tswap16(tfilter[i].code);
                    filter[i].jt = tfilter[i].jt;
                    filter[i].jf = tfilter[i].jf;
                    filter[i].k = tswap32(tfilter[i].k);
                }
                fprog.filter = filter;

                ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                                SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
                g_free(filter);

                unlock_user_struct(tfilter, tfprog->filter, 1);
                unlock_user_struct(tfprog, optval_addr, 1);
                return ret;
        }
        case TARGET_SO_BINDTODEVICE:
        {
                char *dev_ifname, *addr_ifname;

                /* silently truncate over-long interface names */
                if (optlen > IFNAMSIZ - 1) {
                    optlen = IFNAMSIZ - 1;
                }
                dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
                if (!dev_ifname) {
                    return -TARGET_EFAULT;
                }
                optname = SO_BINDTODEVICE;
                addr_ifname = alloca(IFNAMSIZ);
                memcpy(addr_ifname, dev_ifname, optlen);
                addr_ifname[optlen] = 0;
                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                           addr_ifname, optlen));
                unlock_user (dev_ifname, optval_addr, 0);
                return ret;
        }
            /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
                optname = SO_DEBUG;
                break;
        case TARGET_SO_REUSEADDR:
                optname = SO_REUSEADDR;
                break;
        case TARGET_SO_TYPE:
                optname = SO_TYPE;
                break;
        case TARGET_SO_ERROR:
                optname = SO_ERROR;
                break;
        case TARGET_SO_DONTROUTE:
                optname = SO_DONTROUTE;
                break;
        case TARGET_SO_BROADCAST:
                optname = SO_BROADCAST;
                break;
        case TARGET_SO_SNDBUF:
                optname = SO_SNDBUF;
                break;
        case TARGET_SO_SNDBUFFORCE:
                optname = SO_SNDBUFFORCE;
                break;
        case TARGET_SO_RCVBUF:
                optname = SO_RCVBUF;
                break;
        case TARGET_SO_RCVBUFFORCE:
                optname = SO_RCVBUFFORCE;
                break;
        case TARGET_SO_KEEPALIVE:
                optname = SO_KEEPALIVE;
                break;
        case TARGET_SO_OOBINLINE:
                optname = SO_OOBINLINE;
                break;
        case TARGET_SO_NO_CHECK:
                optname = SO_NO_CHECK;
                break;
        case TARGET_SO_PRIORITY:
                optname = SO_PRIORITY;
                break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
                optname = SO_BSDCOMPAT;
                break;
#endif
        case TARGET_SO_PASSCRED:
                optname = SO_PASSCRED;
                break;
        case TARGET_SO_PASSSEC:
                optname = SO_PASSSEC;
                break;
        case TARGET_SO_TIMESTAMP:
                optname = SO_TIMESTAMP;
                break;
        case TARGET_SO_RCVLOWAT:
                optname = SO_RCVLOWAT;
                break;
        /* NOTE(review): the following break is unreachable dead code */
        break;
        default:
            goto unimplemented;
        }
        /* shared tail: all remaining SOL_SOCKET options take an int */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
    default:
    unimplemented:
        gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}
/*
 * do_getsockopt() Must return target values and target errnos.
 *
 * Translates a guest getsockopt(2): maps target levels/option names
 * to host ones, performs the host call, then writes the result back
 * to guest memory in target format.  Note the cross-switch labels:
 * int_case handles every int-valued option, unimplemented the rest.
 */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            /* copy back at most what the guest asked for */
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        /* write back as u32 or a single byte, as the guest requested */
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                /* small values fit in the single byte the guest offered */
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
/*
 * Build a host struct iovec array from a guest iovec array at
 * @target_addr, locking each guest buffer into host memory.
 * @type is VERIFY_READ/VERIFY_WRITE; @copy controls whether guest
 * data is copied in.  On failure returns NULL with errno set; on
 * success the caller must release with unlock_iovec().
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            /* clamp so the summed length never exceeds max_len */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* unwind: unlock every buffer locked so far (entries with a
     * positive guest length are exactly those we locked) */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
/*
 * Release a host iovec created by lock_iovec(): unlock each guest
 * buffer (writing data back when @copy is set) and free the vector.
 * Stops at the first negative guest length, mirroring lock_iovec().
 */
static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                         abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec) {
        for (i = 0; i < count; i++) {
            abi_ulong base = tswapal(target_vec[i].iov_base);
            abi_long len = tswapal(target_vec[i].iov_len);
            if (len < 0) {
                break;
            }
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
        unlock_user(target_vec, target_addr, 0);
    }

    g_free(vec);
}
/*
 * Translate the target's SOCK_* type and flag bits into host values.
 * On success *type is replaced with the host encoding and 0 is
 * returned; a flag the host cannot express yields -TARGET_EINVAL.
 */
static inline int target_to_host_sock_type(int *type)
{
    int host_type = 0;
    int target_type = *type;

    switch (target_type & TARGET_SOCK_TYPE_MASK) {
    case TARGET_SOCK_DGRAM:
        host_type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        host_type = SOCK_STREAM;
        break;
    default:
        /* other types passed through -- presumably numbered the same
         * on target and host */
        host_type = target_type & TARGET_SOCK_TYPE_MASK;
        break;
    }
    if (target_type & TARGET_SOCK_CLOEXEC) {
#if defined(SOCK_CLOEXEC)
        host_type |= SOCK_CLOEXEC;
#else
        return -TARGET_EINVAL;
#endif
    }
    if (target_type & TARGET_SOCK_NONBLOCK) {
#if defined(SOCK_NONBLOCK)
        host_type |= SOCK_NONBLOCK;
#elif !defined(O_NONBLOCK)
        /* with O_NONBLOCK available, sock_flags_fixup() emulates it */
        return -TARGET_EINVAL;
#endif
    }
    *type = host_type;
    return 0;
}
3328 /* Try to emulate socket type flags after socket creation. */
3329 static int sock_flags_fixup(int fd, int target_type)
3331 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3332 if (target_type & TARGET_SOCK_NONBLOCK) {
3333 int flags = fcntl(fd, F_GETFL);
3334 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3335 close(fd);
3336 return -TARGET_EINVAL;
3339 #endif
3340 return fd;
3343 static abi_long packet_target_to_host_sockaddr(void *host_addr,
3344 abi_ulong target_addr,
3345 socklen_t len)
3347 struct sockaddr *addr = host_addr;
3348 struct target_sockaddr *target_saddr;
3350 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
3351 if (!target_saddr) {
3352 return -TARGET_EFAULT;
3355 memcpy(addr, target_saddr, len);
3356 addr->sa_family = tswap16(target_saddr->sa_family);
3357 /* spkt_protocol is big-endian */
3359 unlock_user(target_saddr, target_addr, 0);
3360 return 0;
3363 static TargetFdTrans target_packet_trans = {
3364 .target_to_host_addr = packet_target_to_host_sockaddr,
#ifdef CONFIG_RTNETLINK
/* Translate a guest NETLINK_ROUTE message buffer to host byte order.
 * Returns the (unchanged) length on success, a negative errno otherwise. */
static abi_long netlink_route_target_to_host(void *buf, size_t len)
{
    abi_long ret = target_to_host_nlmsg_route(buf, len);

    return (ret < 0) ? ret : (abi_long)len;
}

/* Translate a host NETLINK_ROUTE message buffer to guest byte order. */
static abi_long netlink_route_host_to_target(void *buf, size_t len)
{
    abi_long ret = host_to_target_nlmsg_route(buf, len);

    return (ret < 0) ? ret : (abi_long)len;
}

/* fd translator for NETLINK_ROUTE sockets: swap payloads both ways. */
static TargetFdTrans target_netlink_route_trans = {
    .target_to_host_data = netlink_route_target_to_host,
    .host_to_target_data = netlink_route_host_to_target,
};
#endif /* CONFIG_RTNETLINK */
3398 static abi_long netlink_audit_target_to_host(void *buf, size_t len)
3400 abi_long ret;
3402 ret = target_to_host_nlmsg_audit(buf, len);
3403 if (ret < 0) {
3404 return ret;
3407 return len;
3410 static abi_long netlink_audit_host_to_target(void *buf, size_t len)
3412 abi_long ret;
3414 ret = host_to_target_nlmsg_audit(buf, len);
3415 if (ret < 0) {
3416 return ret;
3419 return len;
3422 static TargetFdTrans target_netlink_audit_trans = {
3423 .target_to_host_data = netlink_audit_target_to_host,
3424 .host_to_target_data = netlink_audit_host_to_target,
3427 /* do_socket() Must return target values and target errnos. */
3428 static abi_long do_socket(int domain, int type, int protocol)
3430 int target_type = type;
3431 int ret;
3433 ret = target_to_host_sock_type(&type);
3434 if (ret) {
3435 return ret;
3438 if (domain == PF_NETLINK && !(
3439 #ifdef CONFIG_RTNETLINK
3440 protocol == NETLINK_ROUTE ||
3441 #endif
3442 protocol == NETLINK_KOBJECT_UEVENT ||
3443 protocol == NETLINK_AUDIT)) {
3444 return -EPFNOSUPPORT;
3447 if (domain == AF_PACKET ||
3448 (domain == AF_INET && type == SOCK_PACKET)) {
3449 protocol = tswap16(protocol);
3452 ret = get_errno(socket(domain, type, protocol));
3453 if (ret >= 0) {
3454 ret = sock_flags_fixup(ret, target_type);
3455 if (type == SOCK_PACKET) {
3456 /* Manage an obsolete case :
3457 * if socket type is SOCK_PACKET, bind by name
3459 fd_trans_register(ret, &target_packet_trans);
3460 } else if (domain == PF_NETLINK) {
3461 switch (protocol) {
3462 #ifdef CONFIG_RTNETLINK
3463 case NETLINK_ROUTE:
3464 fd_trans_register(ret, &target_netlink_route_trans);
3465 break;
3466 #endif
3467 case NETLINK_KOBJECT_UEVENT:
3468 /* nothing to do: messages are strings */
3469 break;
3470 case NETLINK_AUDIT:
3471 fd_trans_register(ret, &target_netlink_audit_trans);
3472 break;
3473 default:
3474 g_assert_not_reached();
3478 return ret;
3481 /* do_bind() Must return target values and target errnos. */
3482 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3483 socklen_t addrlen)
3485 void *addr;
3486 abi_long ret;
3488 if ((int)addrlen < 0) {
3489 return -TARGET_EINVAL;
3492 addr = alloca(addrlen+1);
3494 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3495 if (ret)
3496 return ret;
3498 return get_errno(bind(sockfd, addr, addrlen));
3501 /* do_connect() Must return target values and target errnos. */
3502 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3503 socklen_t addrlen)
3505 void *addr;
3506 abi_long ret;
3508 if ((int)addrlen < 0) {
3509 return -TARGET_EINVAL;
3512 addr = alloca(addrlen+1);
3514 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3515 if (ret)
3516 return ret;
3518 return get_errno(safe_connect(sockfd, addr, addrlen));
3521 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3522 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3523 int flags, int send)
3525 abi_long ret, len;
3526 struct msghdr msg;
3527 abi_ulong count;
3528 struct iovec *vec;
3529 abi_ulong target_vec;
3531 if (msgp->msg_name) {
3532 msg.msg_namelen = tswap32(msgp->msg_namelen);
3533 msg.msg_name = alloca(msg.msg_namelen+1);
3534 ret = target_to_host_sockaddr(fd, msg.msg_name,
3535 tswapal(msgp->msg_name),
3536 msg.msg_namelen);
3537 if (ret == -TARGET_EFAULT) {
3538 /* For connected sockets msg_name and msg_namelen must
3539 * be ignored, so returning EFAULT immediately is wrong.
3540 * Instead, pass a bad msg_name to the host kernel, and
3541 * let it decide whether to return EFAULT or not.
3543 msg.msg_name = (void *)-1;
3544 } else if (ret) {
3545 goto out2;
3547 } else {
3548 msg.msg_name = NULL;
3549 msg.msg_namelen = 0;
3551 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3552 msg.msg_control = alloca(msg.msg_controllen);
3553 msg.msg_flags = tswap32(msgp->msg_flags);
3555 count = tswapal(msgp->msg_iovlen);
3556 target_vec = tswapal(msgp->msg_iov);
3558 if (count > IOV_MAX) {
3559 /* sendrcvmsg returns a different errno for this condition than
3560 * readv/writev, so we must catch it here before lock_iovec() does.
3562 ret = -TARGET_EMSGSIZE;
3563 goto out2;
3566 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3567 target_vec, count, send);
3568 if (vec == NULL) {
3569 ret = -host_to_target_errno(errno);
3570 goto out2;
3572 msg.msg_iovlen = count;
3573 msg.msg_iov = vec;
3575 if (send) {
3576 if (fd_trans_target_to_host_data(fd)) {
3577 void *host_msg;
3579 host_msg = g_malloc(msg.msg_iov->iov_len);
3580 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3581 ret = fd_trans_target_to_host_data(fd)(host_msg,
3582 msg.msg_iov->iov_len);
3583 if (ret >= 0) {
3584 msg.msg_iov->iov_base = host_msg;
3585 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3587 g_free(host_msg);
3588 } else {
3589 ret = target_to_host_cmsg(&msg, msgp);
3590 if (ret == 0) {
3591 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3594 } else {
3595 ret = get_errno(safe_recvmsg(fd, &msg, flags));
3596 if (!is_error(ret)) {
3597 len = ret;
3598 if (fd_trans_host_to_target_data(fd)) {
3599 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3600 len);
3601 } else {
3602 ret = host_to_target_cmsg(msgp, &msg);
3604 if (!is_error(ret)) {
3605 msgp->msg_namelen = tswap32(msg.msg_namelen);
3606 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3607 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3608 msg.msg_name, msg.msg_namelen);
3609 if (ret) {
3610 goto out;
3614 ret = len;
3619 out:
3620 unlock_iovec(vec, target_vec, count, !send);
3621 out2:
3622 return ret;
3625 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3626 int flags, int send)
3628 abi_long ret;
3629 struct target_msghdr *msgp;
3631 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3632 msgp,
3633 target_msg,
3634 send ? 1 : 0)) {
3635 return -TARGET_EFAULT;
3637 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3638 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3639 return ret;
/* We don't rely on the C library to have sendmmsg/recvmmsg support,
 * so it might not have this *mmsg-specific flag either.
 */
#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000
#endif
3649 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3650 unsigned int vlen, unsigned int flags,
3651 int send)
3653 struct target_mmsghdr *mmsgp;
3654 abi_long ret = 0;
3655 int i;
3657 if (vlen > UIO_MAXIOV) {
3658 vlen = UIO_MAXIOV;
3661 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3662 if (!mmsgp) {
3663 return -TARGET_EFAULT;
3666 for (i = 0; i < vlen; i++) {
3667 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3668 if (is_error(ret)) {
3669 break;
3671 mmsgp[i].msg_len = tswap32(ret);
3672 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3673 if (flags & MSG_WAITFORONE) {
3674 flags |= MSG_DONTWAIT;
3678 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3680 /* Return number of datagrams sent if we sent any at all;
3681 * otherwise return the error.
3683 if (i) {
3684 return i;
3686 return ret;
3689 /* do_accept4() Must return target values and target errnos. */
3690 static abi_long do_accept4(int fd, abi_ulong target_addr,
3691 abi_ulong target_addrlen_addr, int flags)
3693 socklen_t addrlen;
3694 void *addr;
3695 abi_long ret;
3696 int host_flags;
3698 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3700 if (target_addr == 0) {
3701 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3704 /* linux returns EINVAL if addrlen pointer is invalid */
3705 if (get_user_u32(addrlen, target_addrlen_addr))
3706 return -TARGET_EINVAL;
3708 if ((int)addrlen < 0) {
3709 return -TARGET_EINVAL;
3712 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3713 return -TARGET_EINVAL;
3715 addr = alloca(addrlen);
3717 ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
3718 if (!is_error(ret)) {
3719 host_to_target_sockaddr(target_addr, addr, addrlen);
3720 if (put_user_u32(addrlen, target_addrlen_addr))
3721 ret = -TARGET_EFAULT;
3723 return ret;
3726 /* do_getpeername() Must return target values and target errnos. */
3727 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3728 abi_ulong target_addrlen_addr)
3730 socklen_t addrlen;
3731 void *addr;
3732 abi_long ret;
3734 if (get_user_u32(addrlen, target_addrlen_addr))
3735 return -TARGET_EFAULT;
3737 if ((int)addrlen < 0) {
3738 return -TARGET_EINVAL;
3741 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3742 return -TARGET_EFAULT;
3744 addr = alloca(addrlen);
3746 ret = get_errno(getpeername(fd, addr, &addrlen));
3747 if (!is_error(ret)) {
3748 host_to_target_sockaddr(target_addr, addr, addrlen);
3749 if (put_user_u32(addrlen, target_addrlen_addr))
3750 ret = -TARGET_EFAULT;
3752 return ret;
3755 /* do_getsockname() Must return target values and target errnos. */
3756 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3757 abi_ulong target_addrlen_addr)
3759 socklen_t addrlen;
3760 void *addr;
3761 abi_long ret;
3763 if (get_user_u32(addrlen, target_addrlen_addr))
3764 return -TARGET_EFAULT;
3766 if ((int)addrlen < 0) {
3767 return -TARGET_EINVAL;
3770 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3771 return -TARGET_EFAULT;
3773 addr = alloca(addrlen);
3775 ret = get_errno(getsockname(fd, addr, &addrlen));
3776 if (!is_error(ret)) {
3777 host_to_target_sockaddr(target_addr, addr, addrlen);
3778 if (put_user_u32(addrlen, target_addrlen_addr))
3779 ret = -TARGET_EFAULT;
3781 return ret;
3784 /* do_socketpair() Must return target values and target errnos. */
3785 static abi_long do_socketpair(int domain, int type, int protocol,
3786 abi_ulong target_tab_addr)
3788 int tab[2];
3789 abi_long ret;
3791 target_to_host_sock_type(&type);
3793 ret = get_errno(socketpair(domain, type, protocol, tab));
3794 if (!is_error(ret)) {
3795 if (put_user_s32(tab[0], target_tab_addr)
3796 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3797 ret = -TARGET_EFAULT;
3799 return ret;
3802 /* do_sendto() Must return target values and target errnos. */
3803 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3804 abi_ulong target_addr, socklen_t addrlen)
3806 void *addr;
3807 void *host_msg;
3808 void *copy_msg = NULL;
3809 abi_long ret;
3811 if ((int)addrlen < 0) {
3812 return -TARGET_EINVAL;
3815 host_msg = lock_user(VERIFY_READ, msg, len, 1);
3816 if (!host_msg)
3817 return -TARGET_EFAULT;
3818 if (fd_trans_target_to_host_data(fd)) {
3819 copy_msg = host_msg;
3820 host_msg = g_malloc(len);
3821 memcpy(host_msg, copy_msg, len);
3822 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3823 if (ret < 0) {
3824 goto fail;
3827 if (target_addr) {
3828 addr = alloca(addrlen+1);
3829 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3830 if (ret) {
3831 goto fail;
3833 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3834 } else {
3835 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3837 fail:
3838 if (copy_msg) {
3839 g_free(host_msg);
3840 host_msg = copy_msg;
3842 unlock_user(host_msg, msg, 0);
3843 return ret;
3846 /* do_recvfrom() Must return target values and target errnos. */
3847 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3848 abi_ulong target_addr,
3849 abi_ulong target_addrlen)
3851 socklen_t addrlen;
3852 void *addr;
3853 void *host_msg;
3854 abi_long ret;
3856 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3857 if (!host_msg)
3858 return -TARGET_EFAULT;
3859 if (target_addr) {
3860 if (get_user_u32(addrlen, target_addrlen)) {
3861 ret = -TARGET_EFAULT;
3862 goto fail;
3864 if ((int)addrlen < 0) {
3865 ret = -TARGET_EINVAL;
3866 goto fail;
3868 addr = alloca(addrlen);
3869 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3870 addr, &addrlen));
3871 } else {
3872 addr = NULL; /* To keep compiler quiet. */
3873 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3875 if (!is_error(ret)) {
3876 if (fd_trans_host_to_target_data(fd)) {
3877 ret = fd_trans_host_to_target_data(fd)(host_msg, ret);
3879 if (target_addr) {
3880 host_to_target_sockaddr(target_addr, addr, addrlen);
3881 if (put_user_u32(addrlen, target_addrlen)) {
3882 ret = -TARGET_EFAULT;
3883 goto fail;
3886 unlock_user(host_msg, msg, len);
3887 } else {
3888 fail:
3889 unlock_user(host_msg, msg, 0);
3891 return ret;
#ifdef TARGET_NR_socketcall
/* do_socketcall() Must return target values and target errnos.
 *
 * Demultiplexer for the legacy socketcall(2) entry point: reads the
 * per-call argument vector from guest memory, then dispatches to the
 * corresponding do_* helper.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned ac[] = { /* number of arguments per call */
        [SOCKOP_socket] = 3,      /* domain, type, protocol */
        [SOCKOP_bind] = 3,        /* sockfd, addr, addrlen */
        [SOCKOP_connect] = 3,     /* sockfd, addr, addrlen */
        [SOCKOP_listen] = 2,      /* sockfd, backlog */
        [SOCKOP_accept] = 3,      /* sockfd, addr, addrlen */
        [SOCKOP_accept4] = 4,     /* sockfd, addr, addrlen, flags */
        [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
        [SOCKOP_socketpair] = 4,  /* domain, type, protocol, tab */
        [SOCKOP_send] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_recv] = 4,        /* sockfd, msg, len, flags */
        [SOCKOP_sendto] = 6,      /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_recvfrom] = 6,    /* sockfd, msg, len, flags, addr, addrlen */
        [SOCKOP_shutdown] = 2,    /* sockfd, how */
        [SOCKOP_sendmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_recvmsg] = 3,     /* sockfd, msg, flags */
        [SOCKOP_sendmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_recvmmsg] = 4,    /* sockfd, msgvec, vlen, flags */
        [SOCKOP_setsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
        [SOCKOP_getsockopt] = 5,  /* sockfd, level, optname, optval, optlen */
    };
    abi_long a[6]; /* max 6 args */

    /* first, collect the arguments in a[] according to ac[] */
    if (num >= 0 && num < ARRAY_SIZE(ac)) {
        unsigned i;
        assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
        for (i = 0; i < ac[num]; ++i) {
            if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
                return -TARGET_EFAULT;
            }
        }
    }

    /* now when we have the args, actually handle the call */
    switch (num) {
    case SOCKOP_socket: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case SOCKOP_bind: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case SOCKOP_connect: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case SOCKOP_listen: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case SOCKOP_accept: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case SOCKOP_getsockname: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case SOCKOP_getpeername: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case SOCKOP_socketpair: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case SOCKOP_send: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_recv: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case SOCKOP_shutdown: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case SOCKOP_sendmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case SOCKOP_recvmsg: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case SOCKOP_sendmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    case SOCKOP_recvmmsg: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_ENOSYS;
    }
}
#endif
3982 #define N_SHM_REGIONS 32
3984 static struct shm_region {
3985 abi_ulong start;
3986 abi_ulong size;
3987 bool in_use;
3988 } shm_regions[N_SHM_REGIONS];
3990 #ifndef TARGET_SEMID64_DS
3991 /* asm-generic version of this struct */
3992 struct target_semid64_ds
3994 struct target_ipc_perm sem_perm;
3995 abi_ulong sem_otime;
3996 #if TARGET_ABI_BITS == 32
3997 abi_ulong __unused1;
3998 #endif
3999 abi_ulong sem_ctime;
4000 #if TARGET_ABI_BITS == 32
4001 abi_ulong __unused2;
4002 #endif
4003 abi_ulong sem_nsems;
4004 abi_ulong __unused3;
4005 abi_ulong __unused4;
4007 #endif
4009 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
4010 abi_ulong target_addr)
4012 struct target_ipc_perm *target_ip;
4013 struct target_semid64_ds *target_sd;
4015 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4016 return -TARGET_EFAULT;
4017 target_ip = &(target_sd->sem_perm);
4018 host_ip->__key = tswap32(target_ip->__key);
4019 host_ip->uid = tswap32(target_ip->uid);
4020 host_ip->gid = tswap32(target_ip->gid);
4021 host_ip->cuid = tswap32(target_ip->cuid);
4022 host_ip->cgid = tswap32(target_ip->cgid);
4023 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4024 host_ip->mode = tswap32(target_ip->mode);
4025 #else
4026 host_ip->mode = tswap16(target_ip->mode);
4027 #endif
4028 #if defined(TARGET_PPC)
4029 host_ip->__seq = tswap32(target_ip->__seq);
4030 #else
4031 host_ip->__seq = tswap16(target_ip->__seq);
4032 #endif
4033 unlock_user_struct(target_sd, target_addr, 0);
4034 return 0;
4037 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
4038 struct ipc_perm *host_ip)
4040 struct target_ipc_perm *target_ip;
4041 struct target_semid64_ds *target_sd;
4043 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4044 return -TARGET_EFAULT;
4045 target_ip = &(target_sd->sem_perm);
4046 target_ip->__key = tswap32(host_ip->__key);
4047 target_ip->uid = tswap32(host_ip->uid);
4048 target_ip->gid = tswap32(host_ip->gid);
4049 target_ip->cuid = tswap32(host_ip->cuid);
4050 target_ip->cgid = tswap32(host_ip->cgid);
4051 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4052 target_ip->mode = tswap32(host_ip->mode);
4053 #else
4054 target_ip->mode = tswap16(host_ip->mode);
4055 #endif
4056 #if defined(TARGET_PPC)
4057 target_ip->__seq = tswap32(host_ip->__seq);
4058 #else
4059 target_ip->__seq = tswap16(host_ip->__seq);
4060 #endif
4061 unlock_user_struct(target_sd, target_addr, 1);
4062 return 0;
4065 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
4066 abi_ulong target_addr)
4068 struct target_semid64_ds *target_sd;
4070 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4071 return -TARGET_EFAULT;
4072 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
4073 return -TARGET_EFAULT;
4074 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
4075 host_sd->sem_otime = tswapal(target_sd->sem_otime);
4076 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
4077 unlock_user_struct(target_sd, target_addr, 0);
4078 return 0;
4081 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
4082 struct semid_ds *host_sd)
4084 struct target_semid64_ds *target_sd;
4086 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4087 return -TARGET_EFAULT;
4088 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
4089 return -TARGET_EFAULT;
4090 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
4091 target_sd->sem_otime = tswapal(host_sd->sem_otime);
4092 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
4093 unlock_user_struct(target_sd, target_addr, 1);
4094 return 0;
/* Guest layout of struct seminfo (returned by semctl IPC_INFO/SEM_INFO).
 * Field order mirrors the host's <sys/sem.h> definition. */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
4110 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
4111 struct seminfo *host_seminfo)
4113 struct target_seminfo *target_seminfo;
4114 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
4115 return -TARGET_EFAULT;
4116 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
4117 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
4118 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
4119 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
4120 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
4121 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
4122 __put_user(host_seminfo->semume, &target_seminfo->semume);
4123 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
4124 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
4125 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
4126 unlock_user_struct(target_seminfo, target_addr, 1);
4127 return 0;
4130 union semun {
4131 int val;
4132 struct semid_ds *buf;
4133 unsigned short *array;
4134 struct seminfo *__buf;
4137 union target_semun {
4138 int val;
4139 abi_ulong buf;
4140 abi_ulong array;
4141 abi_ulong __buf;
4144 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
4145 abi_ulong target_addr)
4147 int nsems;
4148 unsigned short *array;
4149 union semun semun;
4150 struct semid_ds semid_ds;
4151 int i, ret;
4153 semun.buf = &semid_ds;
4155 ret = semctl(semid, 0, IPC_STAT, semun);
4156 if (ret == -1)
4157 return get_errno(ret);
4159 nsems = semid_ds.sem_nsems;
4161 *host_array = g_try_new(unsigned short, nsems);
4162 if (!*host_array) {
4163 return -TARGET_ENOMEM;
4165 array = lock_user(VERIFY_READ, target_addr,
4166 nsems*sizeof(unsigned short), 1);
4167 if (!array) {
4168 g_free(*host_array);
4169 return -TARGET_EFAULT;
4172 for(i=0; i<nsems; i++) {
4173 __get_user((*host_array)[i], &array[i]);
4175 unlock_user(array, target_addr, 0);
4177 return 0;
4180 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4181 unsigned short **host_array)
4183 int nsems;
4184 unsigned short *array;
4185 union semun semun;
4186 struct semid_ds semid_ds;
4187 int i, ret;
4189 semun.buf = &semid_ds;
4191 ret = semctl(semid, 0, IPC_STAT, semun);
4192 if (ret == -1)
4193 return get_errno(ret);
4195 nsems = semid_ds.sem_nsems;
4197 array = lock_user(VERIFY_WRITE, target_addr,
4198 nsems*sizeof(unsigned short), 0);
4199 if (!array)
4200 return -TARGET_EFAULT;
4202 for(i=0; i<nsems; i++) {
4203 __put_user((*host_array)[i], &array[i]);
4205 g_free(*host_array);
4206 unlock_user(array, target_addr, 1);
4208 return 0;
4211 static inline abi_long do_semctl(int semid, int semnum, int cmd,
4212 abi_ulong target_arg)
4214 union target_semun target_su = { .buf = target_arg };
4215 union semun arg;
4216 struct semid_ds dsarg;
4217 unsigned short *array = NULL;
4218 struct seminfo seminfo;
4219 abi_long ret = -TARGET_EINVAL;
4220 abi_long err;
4221 cmd &= 0xff;
4223 switch( cmd ) {
4224 case GETVAL:
4225 case SETVAL:
4226 /* In 64 bit cross-endian situations, we will erroneously pick up
4227 * the wrong half of the union for the "val" element. To rectify
4228 * this, the entire 8-byte structure is byteswapped, followed by
4229 * a swap of the 4 byte val field. In other cases, the data is
4230 * already in proper host byte order. */
4231 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4232 target_su.buf = tswapal(target_su.buf);
4233 arg.val = tswap32(target_su.val);
4234 } else {
4235 arg.val = target_su.val;
4237 ret = get_errno(semctl(semid, semnum, cmd, arg));
4238 break;
4239 case GETALL:
4240 case SETALL:
4241 err = target_to_host_semarray(semid, &array, target_su.array);
4242 if (err)
4243 return err;
4244 arg.array = array;
4245 ret = get_errno(semctl(semid, semnum, cmd, arg));
4246 err = host_to_target_semarray(semid, target_su.array, &array);
4247 if (err)
4248 return err;
4249 break;
4250 case IPC_STAT:
4251 case IPC_SET:
4252 case SEM_STAT:
4253 err = target_to_host_semid_ds(&dsarg, target_su.buf);
4254 if (err)
4255 return err;
4256 arg.buf = &dsarg;
4257 ret = get_errno(semctl(semid, semnum, cmd, arg));
4258 err = host_to_target_semid_ds(target_su.buf, &dsarg);
4259 if (err)
4260 return err;
4261 break;
4262 case IPC_INFO:
4263 case SEM_INFO:
4264 arg.__buf = &seminfo;
4265 ret = get_errno(semctl(semid, semnum, cmd, arg));
4266 err = host_to_target_seminfo(target_su.__buf, &seminfo);
4267 if (err)
4268 return err;
4269 break;
4270 case IPC_RMID:
4271 case GETPID:
4272 case GETNCNT:
4273 case GETZCNT:
4274 ret = get_errno(semctl(semid, semnum, cmd, NULL));
4275 break;
4278 return ret;
/* Guest layout of struct sembuf (one semop operation). */
struct target_sembuf {
    unsigned short sem_num;   /* semaphore index in the set */
    short sem_op;             /* operation: add, subtract, or wait-for-zero */
    short sem_flg;            /* IPC_NOWAIT / SEM_UNDO flags */
};
4287 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4288 abi_ulong target_addr,
4289 unsigned nsops)
4291 struct target_sembuf *target_sembuf;
4292 int i;
4294 target_sembuf = lock_user(VERIFY_READ, target_addr,
4295 nsops*sizeof(struct target_sembuf), 1);
4296 if (!target_sembuf)
4297 return -TARGET_EFAULT;
4299 for(i=0; i<nsops; i++) {
4300 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4301 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4302 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4305 unlock_user(target_sembuf, target_addr, 0);
4307 return 0;
4310 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
4312 struct sembuf sops[nsops];
4314 if (target_to_host_sembuf(sops, ptr, nsops))
4315 return -TARGET_EFAULT;
4317 return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
4320 struct target_msqid_ds
4322 struct target_ipc_perm msg_perm;
4323 abi_ulong msg_stime;
4324 #if TARGET_ABI_BITS == 32
4325 abi_ulong __unused1;
4326 #endif
4327 abi_ulong msg_rtime;
4328 #if TARGET_ABI_BITS == 32
4329 abi_ulong __unused2;
4330 #endif
4331 abi_ulong msg_ctime;
4332 #if TARGET_ABI_BITS == 32
4333 abi_ulong __unused3;
4334 #endif
4335 abi_ulong __msg_cbytes;
4336 abi_ulong msg_qnum;
4337 abi_ulong msg_qbytes;
4338 abi_ulong msg_lspid;
4339 abi_ulong msg_lrpid;
4340 abi_ulong __unused4;
4341 abi_ulong __unused5;
4344 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4345 abi_ulong target_addr)
4347 struct target_msqid_ds *target_md;
4349 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4350 return -TARGET_EFAULT;
4351 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4352 return -TARGET_EFAULT;
4353 host_md->msg_stime = tswapal(target_md->msg_stime);
4354 host_md->msg_rtime = tswapal(target_md->msg_rtime);
4355 host_md->msg_ctime = tswapal(target_md->msg_ctime);
4356 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4357 host_md->msg_qnum = tswapal(target_md->msg_qnum);
4358 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4359 host_md->msg_lspid = tswapal(target_md->msg_lspid);
4360 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4361 unlock_user_struct(target_md, target_addr, 0);
4362 return 0;
4365 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4366 struct msqid_ds *host_md)
4368 struct target_msqid_ds *target_md;
4370 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4371 return -TARGET_EFAULT;
4372 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4373 return -TARGET_EFAULT;
4374 target_md->msg_stime = tswapal(host_md->msg_stime);
4375 target_md->msg_rtime = tswapal(host_md->msg_rtime);
4376 target_md->msg_ctime = tswapal(host_md->msg_ctime);
4377 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4378 target_md->msg_qnum = tswapal(host_md->msg_qnum);
4379 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4380 target_md->msg_lspid = tswapal(host_md->msg_lspid);
4381 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4382 unlock_user_struct(target_md, target_addr, 1);
4383 return 0;
/* Guest layout of struct msginfo (returned by msgctl IPC_INFO/MSG_INFO). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4397 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4398 struct msginfo *host_msginfo)
4400 struct target_msginfo *target_msginfo;
4401 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4402 return -TARGET_EFAULT;
4403 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4404 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4405 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4406 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4407 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4408 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4409 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4410 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4411 unlock_user_struct(target_msginfo, target_addr, 1);
4412 return 0;
/* Emulate msgctl(2): convert the msqid_ds / msginfo argument between
 * guest and host layout around the host syscall.  Returns a target
 * errno; unknown commands fall through and return -TARGET_EINVAL. */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;  /* low byte is the command; high bits carry version flags */

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        /* write the (possibly updated) struct back even for IPC_SET */
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* kernel fills a struct msginfo through the msqid_ds pointer */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
/* Guest view of struct msgbuf: an abi_long message type followed by the
 * payload.  mtext is declared [1] but is variable-length in practice. */
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};
/* Emulate msgsnd(2).  msgp is a guest pointer to a target_msgbuf; the
 * message is copied into a host msgbuf (byte-swapping mtype) before
 * calling the host.  Returns a target errno. */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* sizeof(long) covers the host mtype field that precedes mtext */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
/* Emulate msgrcv(2).  Receives into a host msgbuf, then copies the
 * payload and the byte-swapped mtype back into the guest's
 * target_msgbuf at msgp.  Returns the received length or a target
 * errno. */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));

    if (ret > 0) {
        /* copy the payload directly after the guest mtype field */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    /* target_mb is always valid here: a lock failure returned early above */
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);  /* g_free(NULL) is a no-op on the ENOMEM path */
    return ret;
}
4522 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4523 abi_ulong target_addr)
4525 struct target_shmid_ds *target_sd;
4527 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4528 return -TARGET_EFAULT;
4529 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4530 return -TARGET_EFAULT;
4531 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4532 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4533 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4534 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4535 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4536 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4537 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4538 unlock_user_struct(target_sd, target_addr, 0);
4539 return 0;
4542 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4543 struct shmid_ds *host_sd)
4545 struct target_shmid_ds *target_sd;
4547 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4548 return -TARGET_EFAULT;
4549 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4550 return -TARGET_EFAULT;
4551 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4552 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4553 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4554 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4555 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4556 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4557 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4558 unlock_user_struct(target_sd, target_addr, 1);
4559 return 0;
/* Guest-layout mirror of the host's struct shminfo (shmctl IPC_INFO). */
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};
4570 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4571 struct shminfo *host_shminfo)
4573 struct target_shminfo *target_shminfo;
4574 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4575 return -TARGET_EFAULT;
4576 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4577 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4578 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4579 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4580 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4581 unlock_user_struct(target_shminfo, target_addr, 1);
4582 return 0;
/* Guest-layout mirror of the host's struct shm_info (shmctl SHM_INFO). */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};
4594 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4595 struct shm_info *host_shm_info)
4597 struct target_shm_info *target_shm_info;
4598 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4599 return -TARGET_EFAULT;
4600 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4601 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4602 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4603 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4604 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4605 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4606 unlock_user_struct(target_shm_info, target_addr, 1);
4607 return 0;
/* Emulate shmctl(2): translate the shmid_ds / shminfo / shm_info
 * argument between guest and host layouts around the host call.
 * Returns a target errno; unknown commands yield -TARGET_EINVAL. */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;  /* low byte is the command; high bits carry version flags */

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        /* write the struct back even for IPC_SET */
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* kernel fills a struct shminfo through the shmid_ds pointer */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        /* likewise, a struct shm_info */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        /* no data argument for these commands */
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    /* cpu_env is unused in this generic fallback */
    return TARGET_PAGE_SIZE;
}
#endif
/* Emulate shmat(2).  Attaches the segment at the guest-requested
 * address (or finds a free VMA if shmaddr is 0), updates the guest
 * page flags, and records the mapping in shm_regions[] so do_shmdt()
 * can undo it.  Returns the guest attach address or a target errno. */
static inline abi_ulong do_shmat(CPUArchState *cpu_env,
                                 int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;
    abi_ulong shmlba;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            /* round the address down to the alignment boundary */
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }

    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        /* no address given: pick a free region in the guest map */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* remember the region so do_shmdt() can clear the page flags later;
     * if the table is full the attach still succeeds but is untracked */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}
/* Emulate shmdt(2): drop the bookkeeping entry recorded by do_shmat()
 * (clearing the guest page flags) and detach the host mapping. */
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;

    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
            shm_regions[i].in_use = false;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
            break;
        }
    }

    return get_errno(shmdt(g2h(shmaddr)));
}
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings. */
/* do_ipc() must return target values and target errnos.
 *
 * Dispatcher for the multiplexed ipc(2) syscall: the low 16 bits of
 * 'call' select the operation, the high 16 bits carry an ABI version.
 */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* old-style ABI: ptr points at a {msgp, msgtyp} pair */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* the attach address is returned through *third */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

    /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
/* kernel structure types definitions */

/* First expansion of syscall_types.h: an X-macro pass that builds an
 * enum with one STRUCT_<name> constant per structure description. */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

/* Second expansion: emit a struct_<name>_def argtype array describing
 * each structure's fields for the thunk conversion code; SPECIAL
 * structures are converted by hand and get no table. */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL
typedef struct IOCTLEntry IOCTLEntry;

/* Hook type for ioctls that need custom argument conversion. */
typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg);

struct IOCTLEntry {
    int target_cmd;            /* ioctl number as seen by the guest */
    unsigned int host_cmd;     /* corresponding host ioctl number */
    const char *name;
    int access;                /* IOC_R / IOC_W / IOC_RW */
    do_ioctl_fn *do_ioctl;     /* non-NULL => custom handler */
    const argtype arg_type[5];
};

#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

/* Size of the scratch buffer used for converted ioctl arguments. */
#define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

/* Custom handler for FS_IOC_FIEMAP.  Returns a target errno. */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
4983 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4984 int fd, int cmd, abi_long arg)
4986 const argtype *arg_type = ie->arg_type;
4987 int target_size;
4988 void *argptr;
4989 int ret;
4990 struct ifconf *host_ifconf;
4991 uint32_t outbufsz;
4992 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4993 int target_ifreq_size;
4994 int nb_ifreq;
4995 int free_buf = 0;
4996 int i;
4997 int target_ifc_len;
4998 abi_long target_ifc_buf;
4999 int host_ifc_len;
5000 char *host_ifc_buf;
5002 assert(arg_type[0] == TYPE_PTR);
5003 assert(ie->access == IOC_RW);
5005 arg_type++;
5006 target_size = thunk_type_size(arg_type, 0);
5008 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5009 if (!argptr)
5010 return -TARGET_EFAULT;
5011 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5012 unlock_user(argptr, arg, 0);
5014 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
5015 target_ifc_len = host_ifconf->ifc_len;
5016 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
5018 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
5019 nb_ifreq = target_ifc_len / target_ifreq_size;
5020 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
5022 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
5023 if (outbufsz > MAX_STRUCT_SIZE) {
5024 /* We can't fit all the extents into the fixed size buffer.
5025 * Allocate one that is large enough and use it instead.
5027 host_ifconf = malloc(outbufsz);
5028 if (!host_ifconf) {
5029 return -TARGET_ENOMEM;
5031 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
5032 free_buf = 1;
5034 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
5036 host_ifconf->ifc_len = host_ifc_len;
5037 host_ifconf->ifc_buf = host_ifc_buf;
5039 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
5040 if (!is_error(ret)) {
5041 /* convert host ifc_len to target ifc_len */
5043 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
5044 target_ifc_len = nb_ifreq * target_ifreq_size;
5045 host_ifconf->ifc_len = target_ifc_len;
5047 /* restore target ifc_buf */
5049 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
5051 /* copy struct ifconf to target user */
5053 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5054 if (!argptr)
5055 return -TARGET_EFAULT;
5056 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
5057 unlock_user(argptr, arg, target_size);
5059 /* copy ifreq[] to target user */
5061 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
5062 for (i = 0; i < nb_ifreq ; i++) {
5063 thunk_convert(argptr + i * target_ifreq_size,
5064 host_ifc_buf + i * sizeof(struct ifreq),
5065 ifreq_arg_type, THUNK_TARGET);
5067 unlock_user(argptr, target_ifc_buf, target_ifc_len);
5070 if (free_buf) {
5071 free(host_ifconf);
5074 return ret;
/* Custom handler for device-mapper (DM_*) ioctls.  Each command takes a
 * struct dm_ioctl header followed by a command-specific payload at
 * data_start; both the header and payload need guest<->host conversion,
 * and the payload may exceed MAX_STRUCT_SIZE, so the data is staged in
 * a heap buffer sized from the guest's data_size.  Returns a target
 * errno. */
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            int cmd, abi_long arg)
{
    void *argptr;
    struct dm_ioctl *host_dm;
    abi_long guest_data;
    uint32_t guest_data_size;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    abi_long ret;
    void *big_buf = NULL;
    char *host_data;

    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);
    buf_temp = big_buf;
    host_dm = big_buf;

    guest_data = arg + host_dm->data_start;
    if ((guest_data - arg) < 0) {
        /* guard against a guest-supplied data_start wrapping the address */
        ret = -TARGET_EINVAL;
        goto out;
    }
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char*)host_dm + host_dm->data_start;

    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }

    /* Phase 1: convert the guest payload into host_data for commands
     * that carry input data. */
    switch (ie->host_cmd) {
    case DM_REMOVE_ALL:
    case DM_LIST_DEVICES:
    case DM_DEV_CREATE:
    case DM_DEV_REMOVE:
    case DM_DEV_SUSPEND:
    case DM_DEV_STATUS:
    case DM_DEV_WAIT:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_TABLE_DEPS:
    case DM_LIST_VERSIONS:
        /* no input data */
        break;
    case DM_DEV_RENAME:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr, guest_data_size);
        break;
    case DM_TARGET_MSG:
        memcpy(host_data, argptr, guest_data_size);
        /* the leading 64-bit sector number needs byte swapping */
        *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
        break;
    case DM_TABLE_LOAD:
    {
        /* payload is a chain of dm_target_spec structs, each followed
         * by a parameter string and linked by the 'next' offset */
        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(arg_type, 0);
        int i;

        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;
            uint32_t next;
            int slen;

            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
            slen = strlen((char*)gspec + spec_size) + 1;
            next = spec->next;
            spec->next = sizeof(*spec) + slen;
            strcpy((char*)&spec[1], gspec + spec_size);
            gspec += next;
            cur_data += spec->next;
        }
        break;
    }
    default:
        ret = -TARGET_EINVAL;
        unlock_user(argptr, guest_data, 0);
        goto out;
    }
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        /* NOTE(review): argptr is not NULL-checked here; an unmapped
         * guest buffer would be dereferenced below — verify against
         * upstream fixes. */
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        /* Phase 2: convert the host result payload back to the guest. */
        switch (ie->host_cmd) {
        case DM_REMOVE_ALL:
        case DM_DEV_CREATE:
        case DM_DEV_REMOVE:
        case DM_DEV_RENAME:
        case DM_DEV_SUSPEND:
        case DM_DEV_STATUS:
        case DM_TABLE_LOAD:
        case DM_TABLE_CLEAR:
        case DM_TARGET_MSG:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
            break;
        case DM_LIST_DEVICES:
        {
            struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */

            while (1) {
                uint32_t next = nl->next;
                if (next) {
                    nl->next = nl_size + (strlen(nl->name) + 1);
                }
                if (remaining_data < nl->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
                cur_data += nl->next;
                remaining_data -= nl->next;
                if (!next) {
                    break;
                }
                nl = (void*)nl + next;
            }
            break;
        }
        case DM_DEV_WAIT:
        case DM_TABLE_STATUS:
        {
            struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
            int spec_size = thunk_type_size(arg_type, 0);
            int i;

            for (i = 0; i < host_dm->target_count; i++) {
                uint32_t next = spec->next;
                int slen = strlen((char*)&spec[1]) + 1;
                spec->next = (cur_data - argptr) + spec_size + slen;
                if (guest_data_size < spec->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
                strcpy(cur_data + spec_size, (char*)&spec[1]);
                cur_data = argptr + spec->next;
                spec = (void*)host_dm + host_dm->data_start + next;
            }
            break;
        }
        case DM_TABLE_DEPS:
        {
            /* payload: a 32-bit count (padded to 8 bytes) then count
             * 64-bit device numbers */
            void *hdata = (void*)host_dm + host_dm->data_start;
            int count = *(uint32_t*)hdata;
            uint64_t *hdev = hdata + 8;
            uint64_t *gdev = argptr + 8;
            int i;

            *(uint32_t*)argptr = tswap32(count);
            for (i = 0; i < count; i++) {
                *gdev = tswap64(*hdev);
                gdev++;
                hdev++;
            }
            break;
        }
        case DM_LIST_VERSIONS:
        {
            struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
            int vers_size = thunk_type_size(arg_type, 0);

            while (1) {
                uint32_t next = vers->next;
                if (next) {
                    vers->next = vers_size + (strlen(vers->name) + 1);
                }
                if (remaining_data < vers->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
                strcpy(cur_data + vers_size, vers->name);
                cur_data += vers->next;
                remaining_data -= vers->next;
                if (!next) {
                    break;
                }
                vers = (void*)vers + next;
            }
            break;
        }
        default:
            unlock_user(argptr, guest_data, 0);
            ret = -TARGET_EINVAL;
            goto out;
        }
        unlock_user(argptr, guest_data, guest_data_size);
    }

    /* finally convert the (possibly modified) dm_ioctl header back */
    argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
    unlock_user(argptr, arg, target_size);

out:
    g_free(big_buf);
    return ret;
}
/* Custom handler for BLKPG.  struct blkpg_ioctl_arg embeds a pointer
 * (data) to a struct blkpg_partition in guest memory, so the payload
 * has to be fetched and converted separately, then the pointer patched
 * to the host-side copy.  Returns a target errno. */
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
5360 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5361 int fd, int cmd, abi_long arg)
5363 const argtype *arg_type = ie->arg_type;
5364 const StructEntry *se;
5365 const argtype *field_types;
5366 const int *dst_offsets, *src_offsets;
5367 int target_size;
5368 void *argptr;
5369 abi_ulong *target_rt_dev_ptr;
5370 unsigned long *host_rt_dev_ptr;
5371 abi_long ret;
5372 int i;
5374 assert(ie->access == IOC_W);
5375 assert(*arg_type == TYPE_PTR);
5376 arg_type++;
5377 assert(*arg_type == TYPE_STRUCT);
5378 target_size = thunk_type_size(arg_type, 0);
5379 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5380 if (!argptr) {
5381 return -TARGET_EFAULT;
5383 arg_type++;
5384 assert(*arg_type == (int)STRUCT_rtentry);
5385 se = struct_entries + *arg_type++;
5386 assert(se->convert[0] == NULL);
5387 /* convert struct here to be able to catch rt_dev string */
5388 field_types = se->field_types;
5389 dst_offsets = se->field_offsets[THUNK_HOST];
5390 src_offsets = se->field_offsets[THUNK_TARGET];
5391 for (i = 0; i < se->nb_fields; i++) {
5392 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5393 assert(*field_types == TYPE_PTRVOID);
5394 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5395 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5396 if (*target_rt_dev_ptr != 0) {
5397 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5398 tswapal(*target_rt_dev_ptr));
5399 if (!*host_rt_dev_ptr) {
5400 unlock_user(argptr, arg, 0);
5401 return -TARGET_EFAULT;
5403 } else {
5404 *host_rt_dev_ptr = 0;
5406 field_types++;
5407 continue;
5409 field_types = thunk_convert(buf_temp + dst_offsets[i],
5410 argptr + src_offsets[i],
5411 field_types, THUNK_HOST);
5413 unlock_user(argptr, arg, 0);
5415 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5416 if (*host_rt_dev_ptr != 0) {
5417 unlock_user((void *)*host_rt_dev_ptr,
5418 *target_rt_dev_ptr, 0);
5420 return ret;
/* Custom handler for KDSIGACCEPT: the ioctl argument is a signal
 * number, which must be remapped from guest to host numbering before
 * the host call.  Returns a target errno. */
static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int sig = target_to_host_signal(arg);
    return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
}
/* Table of supported ioctls, generated from ioctls.h via the IOCTL /
 * IOCTL_SPECIAL X-macros and terminated by an all-zero sentinel.
 * IOCTL_SPECIAL entries carry a custom do_ioctl handler. */
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
#include "ioctls.h"
    { 0, 0, },
};
/* ??? Implement proper locking for ioctls. */
/* do_ioctl() Must return target values and target errnos.
 *
 * Generic ioctl dispatcher: looks the command up in ioctl_entries,
 * delegates to a custom handler if one is registered, and otherwise
 * converts the argument according to the entry's argtype/access mode
 * using the thunk machinery and a fixed-size scratch buffer.
 */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* linear scan of the sentinel-terminated table */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
#if defined(DEBUG)
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
#endif
    if (ie->do_ioctl) {
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
        /* argument is passed through unconverted */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* host fills buf_temp; convert it out to the guest */
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* convert the guest argument in; nothing comes back */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* convert in, call, convert the result back out */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
/* Bit-for-bit mapping of termios input-mode (c_iflag) flags between
 * guest and host; terminated by an all-zero sentinel. */
static const bitmask_transtbl iflag_tbl[] = {
        { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
        { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
        { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
        { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
        { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
        { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
        { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
        { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
        { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
        { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
        { TARGET_IXON, TARGET_IXON, IXON, IXON },
        { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
        { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
        { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
        { 0, 0, 0, 0 }
};
/* Mapping of termios output-mode (c_oflag) flags between guest and
 * host.  The *DLY entries are multi-bit delay fields, so the mask and
 * value columns differ; terminated by an all-zero sentinel. */
static const bitmask_transtbl oflag_tbl[] = {
        { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
        { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
        { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
        { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
        { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
        { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
        { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
        { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
        { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
        { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
        { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
        { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
        { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
        { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
        { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
        { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
        { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
        { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
        { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
        { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
        { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
        { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
        { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
        { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
        { 0, 0, 0, 0 }
};
5572 static const bitmask_transtbl cflag_tbl[] = {
5573 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5574 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5575 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5576 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5577 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5578 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5579 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5580 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5581 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5582 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5583 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5584 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5585 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5586 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5587 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5588 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5589 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5590 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5591 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5592 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5593 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5594 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5595 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5596 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5597 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5598 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5599 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5600 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5601 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5602 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5603 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5604 { 0, 0, 0, 0 }
5607 static const bitmask_transtbl lflag_tbl[] = {
5608 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5609 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5610 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5611 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5612 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5613 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5614 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5615 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5616 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5617 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5618 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5619 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5620 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5621 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5622 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5623 { 0, 0, 0, 0 }
5626 static void target_to_host_termios (void *dst, const void *src)
5628 struct host_termios *host = dst;
5629 const struct target_termios *target = src;
5631 host->c_iflag =
5632 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5633 host->c_oflag =
5634 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5635 host->c_cflag =
5636 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5637 host->c_lflag =
5638 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5639 host->c_line = target->c_line;
5641 memset(host->c_cc, 0, sizeof(host->c_cc));
5642 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5643 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5644 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5645 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5646 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5647 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5648 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5649 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5650 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5651 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5652 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5653 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5654 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5655 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5656 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5657 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5658 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5661 static void host_to_target_termios (void *dst, const void *src)
5663 struct target_termios *target = dst;
5664 const struct host_termios *host = src;
5666 target->c_iflag =
5667 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5668 target->c_oflag =
5669 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5670 target->c_cflag =
5671 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5672 target->c_lflag =
5673 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5674 target->c_line = host->c_line;
5676 memset(target->c_cc, 0, sizeof(target->c_cc));
5677 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5678 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5679 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5680 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5681 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5682 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5683 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5684 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5685 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5686 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5687 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5688 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5689 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5690 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5691 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5692 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5693 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5696 static const StructEntry struct_termios_def = {
5697 .convert = { host_to_target_termios, target_to_host_termios },
5698 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5699 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5702 static bitmask_transtbl mmap_flags_tbl[] = {
5703 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5704 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5705 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5706 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
5707 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
5708 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
5709 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
5710 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5711 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
5712 MAP_NORESERVE },
5713 { 0, 0, 0, 0 }
5716 #if defined(TARGET_I386)
5718 /* NOTE: there is really one LDT for all the threads */
5719 static uint8_t *ldt_table;
5721 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5723 int size;
5724 void *p;
5726 if (!ldt_table)
5727 return 0;
5728 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5729 if (size > bytecount)
5730 size = bytecount;
5731 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5732 if (!p)
5733 return -TARGET_EFAULT;
5734 /* ??? Should this by byteswapped? */
5735 memcpy(p, ldt_table, size);
5736 unlock_user(p, ptr, size);
5737 return size;
5740 /* XXX: add locking support */
5741 static abi_long write_ldt(CPUX86State *env,
5742 abi_ulong ptr, unsigned long bytecount, int oldmode)
5744 struct target_modify_ldt_ldt_s ldt_info;
5745 struct target_modify_ldt_ldt_s *target_ldt_info;
5746 int seg_32bit, contents, read_exec_only, limit_in_pages;
5747 int seg_not_present, useable, lm;
5748 uint32_t *lp, entry_1, entry_2;
5750 if (bytecount != sizeof(ldt_info))
5751 return -TARGET_EINVAL;
5752 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5753 return -TARGET_EFAULT;
5754 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5755 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5756 ldt_info.limit = tswap32(target_ldt_info->limit);
5757 ldt_info.flags = tswap32(target_ldt_info->flags);
5758 unlock_user_struct(target_ldt_info, ptr, 0);
5760 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5761 return -TARGET_EINVAL;
5762 seg_32bit = ldt_info.flags & 1;
5763 contents = (ldt_info.flags >> 1) & 3;
5764 read_exec_only = (ldt_info.flags >> 3) & 1;
5765 limit_in_pages = (ldt_info.flags >> 4) & 1;
5766 seg_not_present = (ldt_info.flags >> 5) & 1;
5767 useable = (ldt_info.flags >> 6) & 1;
5768 #ifdef TARGET_ABI32
5769 lm = 0;
5770 #else
5771 lm = (ldt_info.flags >> 7) & 1;
5772 #endif
5773 if (contents == 3) {
5774 if (oldmode)
5775 return -TARGET_EINVAL;
5776 if (seg_not_present == 0)
5777 return -TARGET_EINVAL;
5779 /* allocate the LDT */
5780 if (!ldt_table) {
5781 env->ldt.base = target_mmap(0,
5782 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5783 PROT_READ|PROT_WRITE,
5784 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5785 if (env->ldt.base == -1)
5786 return -TARGET_ENOMEM;
5787 memset(g2h(env->ldt.base), 0,
5788 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5789 env->ldt.limit = 0xffff;
5790 ldt_table = g2h(env->ldt.base);
5793 /* NOTE: same code as Linux kernel */
5794 /* Allow LDTs to be cleared by the user. */
5795 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5796 if (oldmode ||
5797 (contents == 0 &&
5798 read_exec_only == 1 &&
5799 seg_32bit == 0 &&
5800 limit_in_pages == 0 &&
5801 seg_not_present == 1 &&
5802 useable == 0 )) {
5803 entry_1 = 0;
5804 entry_2 = 0;
5805 goto install;
5809 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5810 (ldt_info.limit & 0x0ffff);
5811 entry_2 = (ldt_info.base_addr & 0xff000000) |
5812 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5813 (ldt_info.limit & 0xf0000) |
5814 ((read_exec_only ^ 1) << 9) |
5815 (contents << 10) |
5816 ((seg_not_present ^ 1) << 15) |
5817 (seg_32bit << 22) |
5818 (limit_in_pages << 23) |
5819 (lm << 21) |
5820 0x7000;
5821 if (!oldmode)
5822 entry_2 |= (useable << 20);
5824 /* Install the new entry ... */
5825 install:
5826 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5827 lp[0] = tswap32(entry_1);
5828 lp[1] = tswap32(entry_2);
5829 return 0;
5832 /* specific and weird i386 syscalls */
5833 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5834 unsigned long bytecount)
5836 abi_long ret;
5838 switch (func) {
5839 case 0:
5840 ret = read_ldt(ptr, bytecount);
5841 break;
5842 case 1:
5843 ret = write_ldt(env, ptr, bytecount, 1);
5844 break;
5845 case 0x11:
5846 ret = write_ldt(env, ptr, bytecount, 0);
5847 break;
5848 default:
5849 ret = -TARGET_ENOSYS;
5850 break;
5852 return ret;
5855 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5856 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5858 uint64_t *gdt_table = g2h(env->gdt.base);
5859 struct target_modify_ldt_ldt_s ldt_info;
5860 struct target_modify_ldt_ldt_s *target_ldt_info;
5861 int seg_32bit, contents, read_exec_only, limit_in_pages;
5862 int seg_not_present, useable, lm;
5863 uint32_t *lp, entry_1, entry_2;
5864 int i;
5866 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5867 if (!target_ldt_info)
5868 return -TARGET_EFAULT;
5869 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5870 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5871 ldt_info.limit = tswap32(target_ldt_info->limit);
5872 ldt_info.flags = tswap32(target_ldt_info->flags);
5873 if (ldt_info.entry_number == -1) {
5874 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5875 if (gdt_table[i] == 0) {
5876 ldt_info.entry_number = i;
5877 target_ldt_info->entry_number = tswap32(i);
5878 break;
5882 unlock_user_struct(target_ldt_info, ptr, 1);
5884 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5885 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5886 return -TARGET_EINVAL;
5887 seg_32bit = ldt_info.flags & 1;
5888 contents = (ldt_info.flags >> 1) & 3;
5889 read_exec_only = (ldt_info.flags >> 3) & 1;
5890 limit_in_pages = (ldt_info.flags >> 4) & 1;
5891 seg_not_present = (ldt_info.flags >> 5) & 1;
5892 useable = (ldt_info.flags >> 6) & 1;
5893 #ifdef TARGET_ABI32
5894 lm = 0;
5895 #else
5896 lm = (ldt_info.flags >> 7) & 1;
5897 #endif
5899 if (contents == 3) {
5900 if (seg_not_present == 0)
5901 return -TARGET_EINVAL;
5904 /* NOTE: same code as Linux kernel */
5905 /* Allow LDTs to be cleared by the user. */
5906 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5907 if ((contents == 0 &&
5908 read_exec_only == 1 &&
5909 seg_32bit == 0 &&
5910 limit_in_pages == 0 &&
5911 seg_not_present == 1 &&
5912 useable == 0 )) {
5913 entry_1 = 0;
5914 entry_2 = 0;
5915 goto install;
5919 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5920 (ldt_info.limit & 0x0ffff);
5921 entry_2 = (ldt_info.base_addr & 0xff000000) |
5922 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5923 (ldt_info.limit & 0xf0000) |
5924 ((read_exec_only ^ 1) << 9) |
5925 (contents << 10) |
5926 ((seg_not_present ^ 1) << 15) |
5927 (seg_32bit << 22) |
5928 (limit_in_pages << 23) |
5929 (useable << 20) |
5930 (lm << 21) |
5931 0x7000;
5933 /* Install the new entry ... */
5934 install:
5935 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5936 lp[0] = tswap32(entry_1);
5937 lp[1] = tswap32(entry_2);
5938 return 0;
5941 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5943 struct target_modify_ldt_ldt_s *target_ldt_info;
5944 uint64_t *gdt_table = g2h(env->gdt.base);
5945 uint32_t base_addr, limit, flags;
5946 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5947 int seg_not_present, useable, lm;
5948 uint32_t *lp, entry_1, entry_2;
5950 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5951 if (!target_ldt_info)
5952 return -TARGET_EFAULT;
5953 idx = tswap32(target_ldt_info->entry_number);
5954 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5955 idx > TARGET_GDT_ENTRY_TLS_MAX) {
5956 unlock_user_struct(target_ldt_info, ptr, 1);
5957 return -TARGET_EINVAL;
5959 lp = (uint32_t *)(gdt_table + idx);
5960 entry_1 = tswap32(lp[0]);
5961 entry_2 = tswap32(lp[1]);
5963 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5964 contents = (entry_2 >> 10) & 3;
5965 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5966 seg_32bit = (entry_2 >> 22) & 1;
5967 limit_in_pages = (entry_2 >> 23) & 1;
5968 useable = (entry_2 >> 20) & 1;
5969 #ifdef TARGET_ABI32
5970 lm = 0;
5971 #else
5972 lm = (entry_2 >> 21) & 1;
5973 #endif
5974 flags = (seg_32bit << 0) | (contents << 1) |
5975 (read_exec_only << 3) | (limit_in_pages << 4) |
5976 (seg_not_present << 5) | (useable << 6) | (lm << 7);
5977 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
5978 base_addr = (entry_1 >> 16) |
5979 (entry_2 & 0xff000000) |
5980 ((entry_2 & 0xff) << 16);
5981 target_ldt_info->base_addr = tswapal(base_addr);
5982 target_ldt_info->limit = tswap32(limit);
5983 target_ldt_info->flags = tswap32(flags);
5984 unlock_user_struct(target_ldt_info, ptr, 1);
5985 return 0;
5987 #endif /* TARGET_I386 && TARGET_ABI32 */
5989 #ifndef TARGET_ABI32
5990 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5992 abi_long ret = 0;
5993 abi_ulong val;
5994 int idx;
5996 switch(code) {
5997 case TARGET_ARCH_SET_GS:
5998 case TARGET_ARCH_SET_FS:
5999 if (code == TARGET_ARCH_SET_GS)
6000 idx = R_GS;
6001 else
6002 idx = R_FS;
6003 cpu_x86_load_seg(env, idx, 0);
6004 env->segs[idx].base = addr;
6005 break;
6006 case TARGET_ARCH_GET_GS:
6007 case TARGET_ARCH_GET_FS:
6008 if (code == TARGET_ARCH_GET_GS)
6009 idx = R_GS;
6010 else
6011 idx = R_FS;
6012 val = env->segs[idx].base;
6013 if (put_user(val, addr, abi_ulong))
6014 ret = -TARGET_EFAULT;
6015 break;
6016 default:
6017 ret = -TARGET_EINVAL;
6018 break;
6020 return ret;
6022 #endif
6024 #endif /* defined(TARGET_I386) */
6026 #define NEW_STACK_SIZE 0x40000
6029 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6030 typedef struct {
6031 CPUArchState *env;
6032 pthread_mutex_t mutex;
6033 pthread_cond_t cond;
6034 pthread_t thread;
6035 uint32_t tid;
6036 abi_ulong child_tidptr;
6037 abi_ulong parent_tidptr;
6038 sigset_t sigmask;
6039 } new_thread_info;
6041 static void *clone_func(void *arg)
6043 new_thread_info *info = arg;
6044 CPUArchState *env;
6045 CPUState *cpu;
6046 TaskState *ts;
6048 rcu_register_thread();
6049 env = info->env;
6050 cpu = ENV_GET_CPU(env);
6051 thread_cpu = cpu;
6052 ts = (TaskState *)cpu->opaque;
6053 info->tid = gettid();
6054 cpu->host_tid = info->tid;
6055 task_settid(ts);
6056 if (info->child_tidptr)
6057 put_user_u32(info->tid, info->child_tidptr);
6058 if (info->parent_tidptr)
6059 put_user_u32(info->tid, info->parent_tidptr);
6060 /* Enable signals. */
6061 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6062 /* Signal to the parent that we're ready. */
6063 pthread_mutex_lock(&info->mutex);
6064 pthread_cond_broadcast(&info->cond);
6065 pthread_mutex_unlock(&info->mutex);
6066 /* Wait until the parent has finshed initializing the tls state. */
6067 pthread_mutex_lock(&clone_lock);
6068 pthread_mutex_unlock(&clone_lock);
6069 cpu_loop(env);
6070 /* never exits */
6071 return NULL;
6074 /* do_fork() Must return host values and target errnos (unlike most
6075 do_*() functions). */
6076 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6077 abi_ulong parent_tidptr, target_ulong newtls,
6078 abi_ulong child_tidptr)
6080 CPUState *cpu = ENV_GET_CPU(env);
6081 int ret;
6082 TaskState *ts;
6083 CPUState *new_cpu;
6084 CPUArchState *new_env;
6085 sigset_t sigmask;
6087 flags &= ~CLONE_IGNORED_FLAGS;
6089 /* Emulate vfork() with fork() */
6090 if (flags & CLONE_VFORK)
6091 flags &= ~(CLONE_VFORK | CLONE_VM);
6093 if (flags & CLONE_VM) {
6094 TaskState *parent_ts = (TaskState *)cpu->opaque;
6095 new_thread_info info;
6096 pthread_attr_t attr;
6098 if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6099 (flags & CLONE_INVALID_THREAD_FLAGS)) {
6100 return -TARGET_EINVAL;
6103 ts = g_new0(TaskState, 1);
6104 init_task_state(ts);
6105 /* we create a new CPU instance. */
6106 new_env = cpu_copy(env);
6107 /* Init regs that differ from the parent. */
6108 cpu_clone_regs(new_env, newsp);
6109 new_cpu = ENV_GET_CPU(new_env);
6110 new_cpu->opaque = ts;
6111 ts->bprm = parent_ts->bprm;
6112 ts->info = parent_ts->info;
6113 ts->signal_mask = parent_ts->signal_mask;
6115 if (flags & CLONE_CHILD_CLEARTID) {
6116 ts->child_tidptr = child_tidptr;
6119 if (flags & CLONE_SETTLS) {
6120 cpu_set_tls (new_env, newtls);
6123 /* Grab a mutex so that thread setup appears atomic. */
6124 pthread_mutex_lock(&clone_lock);
6126 memset(&info, 0, sizeof(info));
6127 pthread_mutex_init(&info.mutex, NULL);
6128 pthread_mutex_lock(&info.mutex);
6129 pthread_cond_init(&info.cond, NULL);
6130 info.env = new_env;
6131 if (flags & CLONE_CHILD_SETTID) {
6132 info.child_tidptr = child_tidptr;
6134 if (flags & CLONE_PARENT_SETTID) {
6135 info.parent_tidptr = parent_tidptr;
6138 ret = pthread_attr_init(&attr);
6139 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6140 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6141 /* It is not safe to deliver signals until the child has finished
6142 initializing, so temporarily block all signals. */
6143 sigfillset(&sigmask);
6144 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6146 ret = pthread_create(&info.thread, &attr, clone_func, &info);
6147 /* TODO: Free new CPU state if thread creation failed. */
6149 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6150 pthread_attr_destroy(&attr);
6151 if (ret == 0) {
6152 /* Wait for the child to initialize. */
6153 pthread_cond_wait(&info.cond, &info.mutex);
6154 ret = info.tid;
6155 } else {
6156 ret = -1;
6158 pthread_mutex_unlock(&info.mutex);
6159 pthread_cond_destroy(&info.cond);
6160 pthread_mutex_destroy(&info.mutex);
6161 pthread_mutex_unlock(&clone_lock);
6162 } else {
6163 /* if no CLONE_VM, we consider it is a fork */
6164 if (flags & CLONE_INVALID_FORK_FLAGS) {
6165 return -TARGET_EINVAL;
6168 /* We can't support custom termination signals */
6169 if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6170 return -TARGET_EINVAL;
6173 if (block_signals()) {
6174 return -TARGET_ERESTARTSYS;
6177 fork_start();
6178 ret = fork();
6179 if (ret == 0) {
6180 /* Child Process. */
6181 rcu_after_fork();
6182 cpu_clone_regs(env, newsp);
6183 fork_end(1);
6184 /* There is a race condition here. The parent process could
6185 theoretically read the TID in the child process before the child
6186 tid is set. This would require using either ptrace
6187 (not implemented) or having *_tidptr to point at a shared memory
6188 mapping. We can't repeat the spinlock hack used above because
6189 the child process gets its own copy of the lock. */
6190 if (flags & CLONE_CHILD_SETTID)
6191 put_user_u32(gettid(), child_tidptr);
6192 if (flags & CLONE_PARENT_SETTID)
6193 put_user_u32(gettid(), parent_tidptr);
6194 ts = (TaskState *)cpu->opaque;
6195 if (flags & CLONE_SETTLS)
6196 cpu_set_tls (env, newtls);
6197 if (flags & CLONE_CHILD_CLEARTID)
6198 ts->child_tidptr = child_tidptr;
6199 } else {
6200 fork_end(0);
6203 return ret;
6206 /* warning : doesn't handle linux specific flags... */
6207 static int target_to_host_fcntl_cmd(int cmd)
6209 switch(cmd) {
6210 case TARGET_F_DUPFD:
6211 case TARGET_F_GETFD:
6212 case TARGET_F_SETFD:
6213 case TARGET_F_GETFL:
6214 case TARGET_F_SETFL:
6215 return cmd;
6216 case TARGET_F_GETLK:
6217 return F_GETLK64;
6218 case TARGET_F_SETLK:
6219 return F_SETLK64;
6220 case TARGET_F_SETLKW:
6221 return F_SETLKW64;
6222 case TARGET_F_GETOWN:
6223 return F_GETOWN;
6224 case TARGET_F_SETOWN:
6225 return F_SETOWN;
6226 case TARGET_F_GETSIG:
6227 return F_GETSIG;
6228 case TARGET_F_SETSIG:
6229 return F_SETSIG;
6230 #if TARGET_ABI_BITS == 32
6231 case TARGET_F_GETLK64:
6232 return F_GETLK64;
6233 case TARGET_F_SETLK64:
6234 return F_SETLK64;
6235 case TARGET_F_SETLKW64:
6236 return F_SETLKW64;
6237 #endif
6238 case TARGET_F_SETLEASE:
6239 return F_SETLEASE;
6240 case TARGET_F_GETLEASE:
6241 return F_GETLEASE;
6242 #ifdef F_DUPFD_CLOEXEC
6243 case TARGET_F_DUPFD_CLOEXEC:
6244 return F_DUPFD_CLOEXEC;
6245 #endif
6246 case TARGET_F_NOTIFY:
6247 return F_NOTIFY;
6248 #ifdef F_GETOWN_EX
6249 case TARGET_F_GETOWN_EX:
6250 return F_GETOWN_EX;
6251 #endif
6252 #ifdef F_SETOWN_EX
6253 case TARGET_F_SETOWN_EX:
6254 return F_SETOWN_EX;
6255 #endif
6256 #ifdef F_SETPIPE_SZ
6257 case TARGET_F_SETPIPE_SZ:
6258 return F_SETPIPE_SZ;
6259 case TARGET_F_GETPIPE_SZ:
6260 return F_GETPIPE_SZ;
6261 #endif
6262 default:
6263 return -TARGET_EINVAL;
6265 return -TARGET_EINVAL;
6268 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
6269 static const bitmask_transtbl flock_tbl[] = {
6270 TRANSTBL_CONVERT(F_RDLCK),
6271 TRANSTBL_CONVERT(F_WRLCK),
6272 TRANSTBL_CONVERT(F_UNLCK),
6273 TRANSTBL_CONVERT(F_EXLCK),
6274 TRANSTBL_CONVERT(F_SHLCK),
6275 { 0, 0, 0, 0 }
6278 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6279 abi_ulong target_flock_addr)
6281 struct target_flock *target_fl;
6282 short l_type;
6284 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6285 return -TARGET_EFAULT;
6288 __get_user(l_type, &target_fl->l_type);
6289 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6290 __get_user(fl->l_whence, &target_fl->l_whence);
6291 __get_user(fl->l_start, &target_fl->l_start);
6292 __get_user(fl->l_len, &target_fl->l_len);
6293 __get_user(fl->l_pid, &target_fl->l_pid);
6294 unlock_user_struct(target_fl, target_flock_addr, 0);
6295 return 0;
6298 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6299 const struct flock64 *fl)
6301 struct target_flock *target_fl;
6302 short l_type;
6304 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6305 return -TARGET_EFAULT;
6308 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6309 __put_user(l_type, &target_fl->l_type);
6310 __put_user(fl->l_whence, &target_fl->l_whence);
6311 __put_user(fl->l_start, &target_fl->l_start);
6312 __put_user(fl->l_len, &target_fl->l_len);
6313 __put_user(fl->l_pid, &target_fl->l_pid);
6314 unlock_user_struct(target_fl, target_flock_addr, 1);
6315 return 0;
6318 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6319 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6321 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6322 static inline abi_long copy_from_user_eabi_flock64(struct flock64 *fl,
6323 abi_ulong target_flock_addr)
6325 struct target_eabi_flock64 *target_fl;
6326 short l_type;
6328 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6329 return -TARGET_EFAULT;
6332 __get_user(l_type, &target_fl->l_type);
6333 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6334 __get_user(fl->l_whence, &target_fl->l_whence);
6335 __get_user(fl->l_start, &target_fl->l_start);
6336 __get_user(fl->l_len, &target_fl->l_len);
6337 __get_user(fl->l_pid, &target_fl->l_pid);
6338 unlock_user_struct(target_fl, target_flock_addr, 0);
6339 return 0;
6342 static inline abi_long copy_to_user_eabi_flock64(abi_ulong target_flock_addr,
6343 const struct flock64 *fl)
6345 struct target_eabi_flock64 *target_fl;
6346 short l_type;
6348 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6349 return -TARGET_EFAULT;
6352 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6353 __put_user(l_type, &target_fl->l_type);
6354 __put_user(fl->l_whence, &target_fl->l_whence);
6355 __put_user(fl->l_start, &target_fl->l_start);
6356 __put_user(fl->l_len, &target_fl->l_len);
6357 __put_user(fl->l_pid, &target_fl->l_pid);
6358 unlock_user_struct(target_fl, target_flock_addr, 1);
6359 return 0;
6361 #endif
6363 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6364 abi_ulong target_flock_addr)
6366 struct target_flock64 *target_fl;
6367 short l_type;
6369 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6370 return -TARGET_EFAULT;
6373 __get_user(l_type, &target_fl->l_type);
6374 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6375 __get_user(fl->l_whence, &target_fl->l_whence);
6376 __get_user(fl->l_start, &target_fl->l_start);
6377 __get_user(fl->l_len, &target_fl->l_len);
6378 __get_user(fl->l_pid, &target_fl->l_pid);
6379 unlock_user_struct(target_fl, target_flock_addr, 0);
6380 return 0;
6383 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6384 const struct flock64 *fl)
6386 struct target_flock64 *target_fl;
6387 short l_type;
6389 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6390 return -TARGET_EFAULT;
6393 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6394 __put_user(l_type, &target_fl->l_type);
6395 __put_user(fl->l_whence, &target_fl->l_whence);
6396 __put_user(fl->l_start, &target_fl->l_start);
6397 __put_user(fl->l_len, &target_fl->l_len);
6398 __put_user(fl->l_pid, &target_fl->l_pid);
6399 unlock_user_struct(target_fl, target_flock_addr, 1);
6400 return 0;
6403 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6405 struct flock64 fl64;
6406 #ifdef F_GETOWN_EX
6407 struct f_owner_ex fox;
6408 struct target_f_owner_ex *target_fox;
6409 #endif
6410 abi_long ret;
6411 int host_cmd = target_to_host_fcntl_cmd(cmd);
6413 if (host_cmd == -TARGET_EINVAL)
6414 return host_cmd;
6416 switch(cmd) {
6417 case TARGET_F_GETLK:
6418 ret = copy_from_user_flock(&fl64, arg);
6419 if (ret) {
6420 return ret;
6422 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6423 if (ret == 0) {
6424 ret = copy_to_user_flock(arg, &fl64);
6426 break;
6428 case TARGET_F_SETLK:
6429 case TARGET_F_SETLKW:
6430 ret = copy_from_user_flock(&fl64, arg);
6431 if (ret) {
6432 return ret;
6434 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6435 break;
6437 case TARGET_F_GETLK64:
6438 ret = copy_from_user_flock64(&fl64, arg);
6439 if (ret) {
6440 return ret;
6442 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6443 if (ret == 0) {
6444 ret = copy_to_user_flock64(arg, &fl64);
6446 break;
6447 case TARGET_F_SETLK64:
6448 case TARGET_F_SETLKW64:
6449 ret = copy_from_user_flock64(&fl64, arg);
6450 if (ret) {
6451 return ret;
6453 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6454 break;
6456 case TARGET_F_GETFL:
6457 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6458 if (ret >= 0) {
6459 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6461 break;
6463 case TARGET_F_SETFL:
6464 ret = get_errno(safe_fcntl(fd, host_cmd,
6465 target_to_host_bitmask(arg,
6466 fcntl_flags_tbl)));
6467 break;
6469 #ifdef F_GETOWN_EX
6470 case TARGET_F_GETOWN_EX:
6471 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6472 if (ret >= 0) {
6473 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6474 return -TARGET_EFAULT;
6475 target_fox->type = tswap32(fox.type);
6476 target_fox->pid = tswap32(fox.pid);
6477 unlock_user_struct(target_fox, arg, 1);
6479 break;
6480 #endif
6482 #ifdef F_SETOWN_EX
6483 case TARGET_F_SETOWN_EX:
6484 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6485 return -TARGET_EFAULT;
6486 fox.type = tswap32(target_fox->type);
6487 fox.pid = tswap32(target_fox->pid);
6488 unlock_user_struct(target_fox, arg, 0);
6489 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6490 break;
6491 #endif
6493 case TARGET_F_SETOWN:
6494 case TARGET_F_GETOWN:
6495 case TARGET_F_SETSIG:
6496 case TARGET_F_GETSIG:
6497 case TARGET_F_SETLEASE:
6498 case TARGET_F_GETLEASE:
6499 case TARGET_F_SETPIPE_SZ:
6500 case TARGET_F_GETPIPE_SZ:
6501 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6502 break;
6504 default:
6505 ret = get_errno(safe_fcntl(fd, cmd, arg));
6506 break;
6508 return ret;
#ifdef USE_UID16

/* With 16-bit uid syscalls, clamp 32-bit ids into the representable
 * range: anything above 65535 becomes the overflow id 65534. */
static inline int high2lowuid(int uid)
{
    return (uid > 65535) ? 65534 : uid;
}

static inline int high2lowgid(int gid)
{
    return (gid > 65535) ? 65534 : gid;
}

/* Widen a 16-bit id, preserving the special -1 ("no change") value. */
static inline int low2highuid(int uid)
{
    return ((int16_t)uid == -1) ? -1 : uid;
}

static inline int low2highgid(int gid)
{
    return ((int16_t)gid == -1) ? -1 : gid;
}

static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */

/* Full 32-bit uid syscalls: all conversions are identity. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
6577 /* We must do direct syscalls for setting UID/GID, because we want to
6578 * implement the Linux system call semantics of "change only for this thread",
6579 * not the libc/POSIX semantics of "change for all threads in process".
6580 * (See http://ewontfix.com/17/ for more details.)
6581 * We use the 32-bit version of the syscalls if present; if it is not
6582 * then either the host architecture supports 32-bit UIDs natively with
6583 * the standard syscall, or the 16-bit UID is the best we can do.
/* Prefer the explicit 32-bit-UID syscall numbers where the host kernel
 * defines them; otherwise fall back to the plain (possibly 16-bit)
 * variants. */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

/* Raw syscall wrappers: the libc wrappers would broadcast the ID change
 * to every thread of the process, not the calling thread only (see the
 * comment above). */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
/* One-time setup for the syscall layer: register the thunk struct
 * descriptors, build the target->host errno table, and patch the size
 * field of ioctl numbers whose payload size is only known from the
 * thunk type description.
 */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            /* An all-ones size field marks "fill in from the thunk";
             * that only makes sense for pointer arguments. */
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                             (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
#if TARGET_ABI_BITS == 32
/* Combine the two guest registers carrying a 64-bit value into one
 * uint64_t; which register holds the high half depends on the guest's
 * endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABIs pass the value in a single register; word1 is unused. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */

#ifdef TARGET_NR_truncate64
/* truncate64: the 64-bit length arrives as a register pair.  Some ABIs
 * require such pairs to start on an even register, in which case the
 * pair is in arg3/arg4 rather than arg2/arg3. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif

#ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair alignment fixup as truncate64. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
/* Copy a guest struct timespec at target_addr into *host_ts,
 * byte-swapping the fields.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
        return -TARGET_EFAULT;
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}

/* Copy *host_ts out to the guest struct timespec at target_addr,
 * byte-swapping the fields.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
6733 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6734 abi_ulong target_addr)
6736 struct target_itimerspec *target_itspec;
6738 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6739 return -TARGET_EFAULT;
6742 host_itspec->it_interval.tv_sec =
6743 tswapal(target_itspec->it_interval.tv_sec);
6744 host_itspec->it_interval.tv_nsec =
6745 tswapal(target_itspec->it_interval.tv_nsec);
6746 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6747 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6749 unlock_user_struct(target_itspec, target_addr, 1);
6750 return 0;
6753 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6754 struct itimerspec *host_its)
6756 struct target_itimerspec *target_itspec;
6758 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6759 return -TARGET_EFAULT;
6762 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6763 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6765 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6766 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6768 unlock_user_struct(target_itspec, target_addr, 0);
6769 return 0;
/* Convert a guest struct sigevent at target_addr into *host_sevp.
 * Returns 0 or -TARGET_EFAULT.
 * NOTE(review): host_sevp is not zeroed first, so padding and any
 * fields not listed below are left uninitialized — confirm callers
 * never rely on them.
 */
static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
                                               abi_ulong target_addr)
{
    struct target_sigevent *target_sevp;

    if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* This union is awkward on 64 bit systems because it has a 32 bit
     * integer and a pointer in it; we follow the conversion approach
     * used for handling sigval types in signal.c so the guest should get
     * the correct value back even if we did a 64 bit byteswap and it's
     * using the 32 bit integer.
     */
    host_sevp->sigev_value.sival_ptr =
        (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
    host_sevp->sigev_signo =
        target_to_host_signal(tswap32(target_sevp->sigev_signo));
    host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
    /* glibc-internal member carrying the SIGEV_THREAD_ID target tid */
    host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);

    /* NOTE(review): struct was locked VERIFY_READ, yet the unlock asks
     * for a copy-back (flag 1); harmless but 0 would be correct. */
    unlock_user_struct(target_sevp, target_addr, 1);
    return 0;
}
#if defined(TARGET_NR_mlockall)
/* Translate the guest's MCL_* flag bits into the host's values. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int host_arg = 0;

    if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
        host_arg |= MCL_CURRENT;
    }
    if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
        host_arg |= MCL_FUTURE;
    }

    return host_arg;
}
#endif
/* Copy a host struct stat out to the guest's stat64 layout at
 * target_addr.  ARM EABI32 guests use their own target_eabi_stat64
 * layout; all other targets use target_stat64 (or target_stat when the
 * target has no separate 64-bit layout).  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        /* zero first: the guest layout has padding/unused fields */
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things. However implementing
   futexes locally would make futexes shared between multiple processes
   tricky. However they're probably useless because guest atomic
   operations won't work either. */
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target. */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            /* NOTE(review): the -TARGET_EFAULT result of this
             * conversion is ignored — confirm that is intended. */
            target_to_host_timespec(pts, timeout);
        } else {
            pts = NULL;
        }
        /* val is compared against guest memory, so swap it to host order */
        return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
                         pts, NULL, val3));
    case FUTEX_WAKE:
        return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_FD:
        return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler. We do not need to tswap TIMEOUT
           since it's not compared to guest memory. */
        pts = (struct timespec *)(uintptr_t) timeout;
        return get_errno(safe_futex(g2h(uaddr), op, val, pts,
                                    g2h(uaddr2),
                                    (base_op == FUTEX_CMP_REQUEUE
                                     ? tswap32(val3)
                                     : val3)));
    default:
        return -TARGET_ENOSYS;
    }
}
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Implement name_to_handle_at(2): read the guest-supplied handle_bytes,
 * call the host syscall into a scratch file_handle, then copy the
 * (opaque) result plus byte-swapped header fields and the mount id back
 * to the guest.  Returns the host result or -TARGET_EFAULT.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* first field of struct file_handle is handle_bytes */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif

#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Implement open_by_handle_at(2): duplicate the guest file_handle with
 * the header fields swapped to host order, translate the open flags,
 * and call the host syscall.  Returns the new fd or a -TARGET_ errno.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
7015 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7017 /* signalfd siginfo conversion */
/* Convert one signalfd_siginfo record from host to guest format.
 * NOTE: tinfo and info may alias (the only caller converts in place),
 * so every host-order value is read through 'info' before the matching
 * 'tinfo' field is overwritten.  The previous code read ssi_errno (and
 * the SIGBUS check) through 'tinfo', which only worked because of that
 * aliasing; reading uniformly from 'info' is correct either way.
 */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */
#ifdef BUS_MCEERR_AO
    if (info->ssi_signo == SIGBUS &&
        (info->ssi_code == BUS_MCEERR_AR ||
         info->ssi_code == BUS_MCEERR_AO)) {
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    tinfo->ssi_errno = tswap32(info->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
7057 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
7059 int i;
7061 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
7062 host_to_target_signalfd_siginfo(buf + i, buf + i);
7065 return len;
/* fd translator attached to signalfds so reads get converted records. */
static TargetFdTrans target_signalfd_trans = {
    .host_to_target_data = host_to_target_data_signalfd,
};

/* Common implementation for signalfd and signalfd4: validate the flags,
 * convert the guest sigset, call the host, and register the read-side
 * data translator on success.
 */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
#endif

/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same. */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* low 7 bits carry the signal; preserve the core-dump bit etc. */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* stop signal lives in bits 8-15 */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    return status;
}
/* Fill fd with a copy of /proc/self/cmdline, dropping the first
 * NUL-terminated string (the path of the qemu binary itself) so the
 * guest sees its own command line.  Returns 0 on success, -1 with
 * errno set on read/write failure (also removes a dead store: the old
 * code pointlessly assigned the result of close() to fd_orig on the
 * error path).
 */
static int open_self_cmdline(void *cpu_env, int fd)
{
    int fd_orig = -1;
    bool word_skipped = false;

    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;
    }

    for (;;) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            int e = errno;
            close(fd_orig);
            errno = e;          /* preserve the read() error, not close()'s */
            return -1;
        }
        if (nb_read == 0) {
            break;              /* EOF */
        }

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command. */
            cp_buf = memchr(buf, 0, nb_read);
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;
            }
        }

        if (word_skipped) {
            if (write(fd, cp_buf, nb_read) != nb_read) {
                int e = errno;
                close(fd_orig);
                errno = e;
                return -1;
            }
        }
    }

    return close(fd_orig);
}
7165 static int open_self_maps(void *cpu_env, int fd)
7167 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7168 TaskState *ts = cpu->opaque;
7169 FILE *fp;
7170 char *line = NULL;
7171 size_t len = 0;
7172 ssize_t read;
7174 fp = fopen("/proc/self/maps", "r");
7175 if (fp == NULL) {
7176 return -1;
7179 while ((read = getline(&line, &len, fp)) != -1) {
7180 int fields, dev_maj, dev_min, inode;
7181 uint64_t min, max, offset;
7182 char flag_r, flag_w, flag_x, flag_p;
7183 char path[512] = "";
7184 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
7185 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
7186 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
7188 if ((fields < 10) || (fields > 11)) {
7189 continue;
7191 if (h2g_valid(min)) {
7192 int flags = page_get_flags(h2g(min));
7193 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
7194 if (page_check_range(h2g(min), max - min, flags) == -1) {
7195 continue;
7197 if (h2g(min) == ts->info->stack_limit) {
7198 pstrcpy(path, sizeof(path), " [stack]");
7200 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
7201 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7202 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7203 flag_x, flag_p, offset, dev_maj, dev_min, inode,
7204 path[0] ? " " : "", path);
7208 free(line);
7209 fclose(fp);
7211 return 0;
/* Fill fd with a minimal /proc/self/stat: real pid (field 0), guest
 * argv[0] (field 1) and guest stack base (field 27); all 41 remaining
 * fields are emitted as 0.  Returns 0, or -1 on a short write.
 */
static int open_self_stat(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong start_stack = ts->info->start_stack;
    int i;

    for (i = 0; i < 44; i++) {
        char buf[128];
        int len;
        uint64_t val = 0;

        if (i == 0) {
            /* pid */
            val = getpid();
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else if (i == 1) {
            /* app name */
            snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
        } else if (i == 27) {
            /* stack bottom */
            val = start_stack;
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else {
            /* for the rest, there is MasterCard */
            snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
        }

        len = strlen(buf);
        if (write(fd, buf, len) != len) {
            return -1;
        }
    }

    return 0;
}

/* Fill fd with the guest's ELF auxiliary vector, which the loader saved
 * on the guest stack at startup.  Always returns 0 (best effort).
 */
static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        /* rewind so the reader starts at offset 0 */
        lseek(fd, 0, SEEK_SET);
        /* NOTE(review): 'ptr' and 'len' were advanced/decremented by the
         * write loop, so this unlock no longer describes the original
         * region — harmless for a read-only lock, but confirm. */
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
/* Return 1 iff filename names 'entry' under this process's own /proc
 * directory, i.e. "/proc/self/<entry>" or "/proc/<our pid>/<entry>".
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    const char *rest = filename;

    if (strncmp(rest, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    rest += strlen("/proc/");

    if (strncmp(rest, "self/", strlen("self/")) == 0) {
        rest += strlen("self/");
    } else if (*rest >= '1' && *rest <= '9') {
        /* a numeric directory only counts if it is our own pid */
        char myself[80];

        snprintf(myself, sizeof(myself), "%d/", getpid());
        if (strncmp(rest, myself, strlen(myself)) != 0) {
            return 0;
        }
        rest += strlen(myself);
    } else {
        return 0;
    }

    return strcmp(rest, entry) == 0;
}
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-path comparator used by the fake-open table for absolute /proc
 * entries (contrast is_proc_myself, which handles the /proc/self forms).
 */
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}
7311 static int open_net_route(void *cpu_env, int fd)
7313 FILE *fp;
7314 char *line = NULL;
7315 size_t len = 0;
7316 ssize_t read;
7318 fp = fopen("/proc/net/route", "r");
7319 if (fp == NULL) {
7320 return -1;
7323 /* read header */
7325 read = getline(&line, &len, fp);
7326 dprintf(fd, "%s", line);
7328 /* read routes */
7330 while ((read = getline(&line, &len, fp)) != -1) {
7331 char iface[16];
7332 uint32_t dest, gw, mask;
7333 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7334 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7335 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7336 &mask, &mtu, &window, &irtt);
7337 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7338 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7339 metric, tswap32(mask), mtu, window, irtt);
7342 free(line);
7343 fclose(fp);
7345 return 0;
7347 #endif
/* openat(2) implementation.  Certain /proc paths get special handling:
 * "/proc/self/exe" is redirected to the real guest executable, and the
 * entries in fakes[] are synthesized into an unlinked temporary file so
 * the guest reads guest-appropriate contents instead of QEMU's own.
 * Everything else falls through to the host openat.
 */
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;                       /* name to match */
        int (*fill)(void *cpu_env, int fd);         /* content generator */
        int (*cmp)(const char *s1, const char *s2); /* match predicate */
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    if (is_proc_myself(pathname, "exe")) {
        /* reuse the fd the loader got from AT_EXECFD when available */
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* unlink immediately: the fd keeps the file alive */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
/* Timer IDs handed to the guest are an index into g_posix_timers[]
 * tagged with TIMER_MAGIC in the upper 16 bits. */
#define TIMER_MAGIC 0x0caf0000
#define TIMER_MAGIC_MASK 0xffff0000

/* Convert QEMU provided timer ID back to internal 16bit index format */
static target_timer_t get_timer_id(abi_long arg)
{
    target_timer_t timerid = arg;

    /* reject IDs that do not carry the magic tag */
    if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
        return -TARGET_EINVAL;
    }

    timerid &= 0xffff;

    /* reject indices beyond the static timer table */
    if (timerid >= ARRAY_SIZE(g_posix_timers)) {
        return -TARGET_EINVAL;
    }

    return timerid;
}
7430 /* do_syscall() should always have a single exit point at the end so
7431 that actions, such as logging of syscall results, can be performed.
7432 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7433 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
7434 abi_long arg2, abi_long arg3, abi_long arg4,
7435 abi_long arg5, abi_long arg6, abi_long arg7,
7436 abi_long arg8)
7438 CPUState *cpu = ENV_GET_CPU(cpu_env);
7439 abi_long ret;
7440 struct stat st;
7441 struct statfs stfs;
7442 void *p;
7444 #if defined(DEBUG_ERESTARTSYS)
7445 /* Debug-only code for exercising the syscall-restart code paths
7446 * in the per-architecture cpu main loops: restart every syscall
7447 * the guest makes once before letting it through.
7450 static int flag;
7452 flag = !flag;
7453 if (flag) {
7454 return -TARGET_ERESTARTSYS;
7457 #endif
7459 #ifdef DEBUG
7460 gemu_log("syscall %d", num);
7461 #endif
7462 trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
7463 if(do_strace)
7464 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
7466 switch(num) {
7467 case TARGET_NR_exit:
7468 /* In old applications this may be used to implement _exit(2).
7469 However in threaded applictions it is used for thread termination,
7470 and _exit_group is used for application termination.
7471 Do thread termination if we have more then one thread. */
7473 if (block_signals()) {
7474 ret = -TARGET_ERESTARTSYS;
7475 break;
7478 if (CPU_NEXT(first_cpu)) {
7479 TaskState *ts;
7481 cpu_list_lock();
7482 /* Remove the CPU from the list. */
7483 QTAILQ_REMOVE(&cpus, cpu, node);
7484 cpu_list_unlock();
7485 ts = cpu->opaque;
7486 if (ts->child_tidptr) {
7487 put_user_u32(0, ts->child_tidptr);
7488 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7489 NULL, NULL, 0);
7491 thread_cpu = NULL;
7492 object_unref(OBJECT(cpu));
7493 g_free(ts);
7494 rcu_unregister_thread();
7495 pthread_exit(NULL);
7497 #ifdef TARGET_GPROF
7498 _mcleanup();
7499 #endif
7500 gdb_exit(cpu_env, arg1);
7501 _exit(arg1);
7502 ret = 0; /* avoid warning */
7503 break;
7504 case TARGET_NR_read:
7505 if (arg3 == 0)
7506 ret = 0;
7507 else {
7508 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7509 goto efault;
7510 ret = get_errno(safe_read(arg1, p, arg3));
7511 if (ret >= 0 &&
7512 fd_trans_host_to_target_data(arg1)) {
7513 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7515 unlock_user(p, arg2, ret);
7517 break;
7518 case TARGET_NR_write:
7519 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7520 goto efault;
7521 ret = get_errno(safe_write(arg1, p, arg3));
7522 unlock_user(p, arg2, 0);
7523 break;
7524 #ifdef TARGET_NR_open
7525 case TARGET_NR_open:
7526 if (!(p = lock_user_string(arg1)))
7527 goto efault;
7528 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7529 target_to_host_bitmask(arg2, fcntl_flags_tbl),
7530 arg3));
7531 fd_trans_unregister(ret);
7532 unlock_user(p, arg1, 0);
7533 break;
7534 #endif
7535 case TARGET_NR_openat:
7536 if (!(p = lock_user_string(arg2)))
7537 goto efault;
7538 ret = get_errno(do_openat(cpu_env, arg1, p,
7539 target_to_host_bitmask(arg3, fcntl_flags_tbl),
7540 arg4));
7541 fd_trans_unregister(ret);
7542 unlock_user(p, arg2, 0);
7543 break;
7544 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7545 case TARGET_NR_name_to_handle_at:
7546 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7547 break;
7548 #endif
7549 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7550 case TARGET_NR_open_by_handle_at:
7551 ret = do_open_by_handle_at(arg1, arg2, arg3);
7552 fd_trans_unregister(ret);
7553 break;
7554 #endif
7555 case TARGET_NR_close:
7556 fd_trans_unregister(arg1);
7557 ret = get_errno(close(arg1));
7558 break;
7559 case TARGET_NR_brk:
7560 ret = do_brk(arg1);
7561 break;
7562 #ifdef TARGET_NR_fork
7563 case TARGET_NR_fork:
7564 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
7565 break;
7566 #endif
7567 #ifdef TARGET_NR_waitpid
7568 case TARGET_NR_waitpid:
7570 int status;
7571 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7572 if (!is_error(ret) && arg2 && ret
7573 && put_user_s32(host_to_target_waitstatus(status), arg2))
7574 goto efault;
7576 break;
7577 #endif
7578 #ifdef TARGET_NR_waitid
7579 case TARGET_NR_waitid:
7581 siginfo_t info;
7582 info.si_pid = 0;
7583 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7584 if (!is_error(ret) && arg3 && info.si_pid != 0) {
7585 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7586 goto efault;
7587 host_to_target_siginfo(p, &info);
7588 unlock_user(p, arg3, sizeof(target_siginfo_t));
7591 break;
7592 #endif
7593 #ifdef TARGET_NR_creat /* not on alpha */
7594 case TARGET_NR_creat:
7595 if (!(p = lock_user_string(arg1)))
7596 goto efault;
7597 ret = get_errno(creat(p, arg2));
7598 fd_trans_unregister(ret);
7599 unlock_user(p, arg1, 0);
7600 break;
7601 #endif
7602 #ifdef TARGET_NR_link
7603 case TARGET_NR_link:
7605 void * p2;
7606 p = lock_user_string(arg1);
7607 p2 = lock_user_string(arg2);
7608 if (!p || !p2)
7609 ret = -TARGET_EFAULT;
7610 else
7611 ret = get_errno(link(p, p2));
7612 unlock_user(p2, arg2, 0);
7613 unlock_user(p, arg1, 0);
7615 break;
7616 #endif
7617 #if defined(TARGET_NR_linkat)
7618 case TARGET_NR_linkat:
7620 void * p2 = NULL;
7621 if (!arg2 || !arg4)
7622 goto efault;
7623 p = lock_user_string(arg2);
7624 p2 = lock_user_string(arg4);
7625 if (!p || !p2)
7626 ret = -TARGET_EFAULT;
7627 else
7628 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7629 unlock_user(p, arg2, 0);
7630 unlock_user(p2, arg4, 0);
7632 break;
7633 #endif
7634 #ifdef TARGET_NR_unlink
7635 case TARGET_NR_unlink:
7636 if (!(p = lock_user_string(arg1)))
7637 goto efault;
7638 ret = get_errno(unlink(p));
7639 unlock_user(p, arg1, 0);
7640 break;
7641 #endif
7642 #if defined(TARGET_NR_unlinkat)
7643 case TARGET_NR_unlinkat:
7644 if (!(p = lock_user_string(arg2)))
7645 goto efault;
7646 ret = get_errno(unlinkat(arg1, p, arg3));
7647 unlock_user(p, arg2, 0);
7648 break;
7649 #endif
7650 case TARGET_NR_execve:
7652 char **argp, **envp;
7653 int argc, envc;
7654 abi_ulong gp;
7655 abi_ulong guest_argp;
7656 abi_ulong guest_envp;
7657 abi_ulong addr;
7658 char **q;
7659 int total_size = 0;
7661 argc = 0;
7662 guest_argp = arg2;
7663 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7664 if (get_user_ual(addr, gp))
7665 goto efault;
7666 if (!addr)
7667 break;
7668 argc++;
7670 envc = 0;
7671 guest_envp = arg3;
7672 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7673 if (get_user_ual(addr, gp))
7674 goto efault;
7675 if (!addr)
7676 break;
7677 envc++;
7680 argp = alloca((argc + 1) * sizeof(void *));
7681 envp = alloca((envc + 1) * sizeof(void *));
7683 for (gp = guest_argp, q = argp; gp;
7684 gp += sizeof(abi_ulong), q++) {
7685 if (get_user_ual(addr, gp))
7686 goto execve_efault;
7687 if (!addr)
7688 break;
7689 if (!(*q = lock_user_string(addr)))
7690 goto execve_efault;
7691 total_size += strlen(*q) + 1;
7693 *q = NULL;
7695 for (gp = guest_envp, q = envp; gp;
7696 gp += sizeof(abi_ulong), q++) {
7697 if (get_user_ual(addr, gp))
7698 goto execve_efault;
7699 if (!addr)
7700 break;
7701 if (!(*q = lock_user_string(addr)))
7702 goto execve_efault;
7703 total_size += strlen(*q) + 1;
7705 *q = NULL;
7707 if (!(p = lock_user_string(arg1)))
7708 goto execve_efault;
7709 /* Although execve() is not an interruptible syscall it is
7710 * a special case where we must use the safe_syscall wrapper:
7711 * if we allow a signal to happen before we make the host
7712 * syscall then we will 'lose' it, because at the point of
7713 * execve the process leaves QEMU's control. So we use the
7714 * safe syscall wrapper to ensure that we either take the
7715 * signal as a guest signal, or else it does not happen
7716 * before the execve completes and makes it the other
7717 * program's problem.
7719 ret = get_errno(safe_execve(p, argp, envp));
7720 unlock_user(p, arg1, 0);
7722 goto execve_end;
7724 execve_efault:
7725 ret = -TARGET_EFAULT;
7727 execve_end:
7728 for (gp = guest_argp, q = argp; *q;
7729 gp += sizeof(abi_ulong), q++) {
7730 if (get_user_ual(addr, gp)
7731 || !addr)
7732 break;
7733 unlock_user(*q, addr, 0);
7735 for (gp = guest_envp, q = envp; *q;
7736 gp += sizeof(abi_ulong), q++) {
7737 if (get_user_ual(addr, gp)
7738 || !addr)
7739 break;
7740 unlock_user(*q, addr, 0);
7743 break;
7744 case TARGET_NR_chdir:
7745 if (!(p = lock_user_string(arg1)))
7746 goto efault;
7747 ret = get_errno(chdir(p));
7748 unlock_user(p, arg1, 0);
7749 break;
7750 #ifdef TARGET_NR_time
7751 case TARGET_NR_time:
7753 time_t host_time;
7754 ret = get_errno(time(&host_time));
7755 if (!is_error(ret)
7756 && arg1
7757 && put_user_sal(host_time, arg1))
7758 goto efault;
7760 break;
7761 #endif
7762 #ifdef TARGET_NR_mknod
7763 case TARGET_NR_mknod:
7764 if (!(p = lock_user_string(arg1)))
7765 goto efault;
7766 ret = get_errno(mknod(p, arg2, arg3));
7767 unlock_user(p, arg1, 0);
7768 break;
7769 #endif
7770 #if defined(TARGET_NR_mknodat)
7771 case TARGET_NR_mknodat:
7772 if (!(p = lock_user_string(arg2)))
7773 goto efault;
7774 ret = get_errno(mknodat(arg1, p, arg3, arg4));
7775 unlock_user(p, arg2, 0);
7776 break;
7777 #endif
7778 #ifdef TARGET_NR_chmod
7779 case TARGET_NR_chmod:
7780 if (!(p = lock_user_string(arg1)))
7781 goto efault;
7782 ret = get_errno(chmod(p, arg2));
7783 unlock_user(p, arg1, 0);
7784 break;
7785 #endif
7786 #ifdef TARGET_NR_break
7787 case TARGET_NR_break:
7788 goto unimplemented;
7789 #endif
7790 #ifdef TARGET_NR_oldstat
7791 case TARGET_NR_oldstat:
7792 goto unimplemented;
7793 #endif
7794 case TARGET_NR_lseek:
7795 ret = get_errno(lseek(arg1, arg2, arg3));
7796 break;
7797 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7798 /* Alpha specific */
7799 case TARGET_NR_getxpid:
7800 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7801 ret = get_errno(getpid());
7802 break;
7803 #endif
7804 #ifdef TARGET_NR_getpid
7805 case TARGET_NR_getpid:
7806 ret = get_errno(getpid());
7807 break;
7808 #endif
7809 case TARGET_NR_mount:
7811 /* need to look at the data field */
7812 void *p2, *p3;
7814 if (arg1) {
7815 p = lock_user_string(arg1);
7816 if (!p) {
7817 goto efault;
7819 } else {
7820 p = NULL;
7823 p2 = lock_user_string(arg2);
7824 if (!p2) {
7825 if (arg1) {
7826 unlock_user(p, arg1, 0);
7828 goto efault;
7831 if (arg3) {
7832 p3 = lock_user_string(arg3);
7833 if (!p3) {
7834 if (arg1) {
7835 unlock_user(p, arg1, 0);
7837 unlock_user(p2, arg2, 0);
7838 goto efault;
7840 } else {
7841 p3 = NULL;
7844 /* FIXME - arg5 should be locked, but it isn't clear how to
7845 * do that since it's not guaranteed to be a NULL-terminated
7846 * string.
7848 if (!arg5) {
7849 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7850 } else {
7851 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7853 ret = get_errno(ret);
7855 if (arg1) {
7856 unlock_user(p, arg1, 0);
7858 unlock_user(p2, arg2, 0);
7859 if (arg3) {
7860 unlock_user(p3, arg3, 0);
7863 break;
7864 #ifdef TARGET_NR_umount
7865 case TARGET_NR_umount:
7866 if (!(p = lock_user_string(arg1)))
7867 goto efault;
7868 ret = get_errno(umount(p));
7869 unlock_user(p, arg1, 0);
7870 break;
7871 #endif
7872 #ifdef TARGET_NR_stime /* not on alpha */
7873 case TARGET_NR_stime:
7875 time_t host_time;
7876 if (get_user_sal(host_time, arg1))
7877 goto efault;
7878 ret = get_errno(stime(&host_time));
7880 break;
7881 #endif
7882 case TARGET_NR_ptrace:
7883 goto unimplemented;
7884 #ifdef TARGET_NR_alarm /* not on alpha */
7885 case TARGET_NR_alarm:
7886 ret = alarm(arg1);
7887 break;
7888 #endif
7889 #ifdef TARGET_NR_oldfstat
7890 case TARGET_NR_oldfstat:
7891 goto unimplemented;
7892 #endif
7893 #ifdef TARGET_NR_pause /* not on alpha */
7894 case TARGET_NR_pause:
7895 if (!block_signals()) {
7896 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7898 ret = -TARGET_EINTR;
7899 break;
7900 #endif
7901 #ifdef TARGET_NR_utime
7902 case TARGET_NR_utime:
7904 struct utimbuf tbuf, *host_tbuf;
7905 struct target_utimbuf *target_tbuf;
7906 if (arg2) {
7907 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7908 goto efault;
7909 tbuf.actime = tswapal(target_tbuf->actime);
7910 tbuf.modtime = tswapal(target_tbuf->modtime);
7911 unlock_user_struct(target_tbuf, arg2, 0);
7912 host_tbuf = &tbuf;
7913 } else {
7914 host_tbuf = NULL;
7916 if (!(p = lock_user_string(arg1)))
7917 goto efault;
7918 ret = get_errno(utime(p, host_tbuf));
7919 unlock_user(p, arg1, 0);
7921 break;
7922 #endif
7923 #ifdef TARGET_NR_utimes
7924 case TARGET_NR_utimes:
7926 struct timeval *tvp, tv[2];
7927 if (arg2) {
7928 if (copy_from_user_timeval(&tv[0], arg2)
7929 || copy_from_user_timeval(&tv[1],
7930 arg2 + sizeof(struct target_timeval)))
7931 goto efault;
7932 tvp = tv;
7933 } else {
7934 tvp = NULL;
7936 if (!(p = lock_user_string(arg1)))
7937 goto efault;
7938 ret = get_errno(utimes(p, tvp));
7939 unlock_user(p, arg1, 0);
7941 break;
7942 #endif
7943 #if defined(TARGET_NR_futimesat)
7944 case TARGET_NR_futimesat:
7946 struct timeval *tvp, tv[2];
7947 if (arg3) {
7948 if (copy_from_user_timeval(&tv[0], arg3)
7949 || copy_from_user_timeval(&tv[1],
7950 arg3 + sizeof(struct target_timeval)))
7951 goto efault;
7952 tvp = tv;
7953 } else {
7954 tvp = NULL;
7956 if (!(p = lock_user_string(arg2)))
7957 goto efault;
7958 ret = get_errno(futimesat(arg1, path(p), tvp));
7959 unlock_user(p, arg2, 0);
7961 break;
7962 #endif
7963 #ifdef TARGET_NR_stty
7964 case TARGET_NR_stty:
7965 goto unimplemented;
7966 #endif
7967 #ifdef TARGET_NR_gtty
7968 case TARGET_NR_gtty:
7969 goto unimplemented;
7970 #endif
7971 #ifdef TARGET_NR_access
7972 case TARGET_NR_access:
7973 if (!(p = lock_user_string(arg1)))
7974 goto efault;
7975 ret = get_errno(access(path(p), arg2));
7976 unlock_user(p, arg1, 0);
7977 break;
7978 #endif
7979 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7980 case TARGET_NR_faccessat:
7981 if (!(p = lock_user_string(arg2)))
7982 goto efault;
7983 ret = get_errno(faccessat(arg1, p, arg3, 0));
7984 unlock_user(p, arg2, 0);
7985 break;
7986 #endif
7987 #ifdef TARGET_NR_nice /* not on alpha */
7988 case TARGET_NR_nice:
7989 ret = get_errno(nice(arg1));
7990 break;
7991 #endif
7992 #ifdef TARGET_NR_ftime
7993 case TARGET_NR_ftime:
7994 goto unimplemented;
7995 #endif
7996 case TARGET_NR_sync:
7997 sync();
7998 ret = 0;
7999 break;
8000 case TARGET_NR_kill:
8001 ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8002 break;
8003 #ifdef TARGET_NR_rename
8004 case TARGET_NR_rename:
8006 void *p2;
8007 p = lock_user_string(arg1);
8008 p2 = lock_user_string(arg2);
8009 if (!p || !p2)
8010 ret = -TARGET_EFAULT;
8011 else
8012 ret = get_errno(rename(p, p2));
8013 unlock_user(p2, arg2, 0);
8014 unlock_user(p, arg1, 0);
8016 break;
8017 #endif
8018 #if defined(TARGET_NR_renameat)
8019 case TARGET_NR_renameat:
8021 void *p2;
8022 p = lock_user_string(arg2);
8023 p2 = lock_user_string(arg4);
8024 if (!p || !p2)
8025 ret = -TARGET_EFAULT;
8026 else
8027 ret = get_errno(renameat(arg1, p, arg3, p2));
8028 unlock_user(p2, arg4, 0);
8029 unlock_user(p, arg2, 0);
8031 break;
8032 #endif
8033 #ifdef TARGET_NR_mkdir
8034 case TARGET_NR_mkdir:
8035 if (!(p = lock_user_string(arg1)))
8036 goto efault;
8037 ret = get_errno(mkdir(p, arg2));
8038 unlock_user(p, arg1, 0);
8039 break;
8040 #endif
8041 #if defined(TARGET_NR_mkdirat)
8042 case TARGET_NR_mkdirat:
8043 if (!(p = lock_user_string(arg2)))
8044 goto efault;
8045 ret = get_errno(mkdirat(arg1, p, arg3));
8046 unlock_user(p, arg2, 0);
8047 break;
8048 #endif
8049 #ifdef TARGET_NR_rmdir
8050 case TARGET_NR_rmdir:
8051 if (!(p = lock_user_string(arg1)))
8052 goto efault;
8053 ret = get_errno(rmdir(p));
8054 unlock_user(p, arg1, 0);
8055 break;
8056 #endif
8057 case TARGET_NR_dup:
8058 ret = get_errno(dup(arg1));
8059 if (ret >= 0) {
8060 fd_trans_dup(arg1, ret);
8062 break;
8063 #ifdef TARGET_NR_pipe
8064 case TARGET_NR_pipe:
8065 ret = do_pipe(cpu_env, arg1, 0, 0);
8066 break;
8067 #endif
8068 #ifdef TARGET_NR_pipe2
8069 case TARGET_NR_pipe2:
8070 ret = do_pipe(cpu_env, arg1,
8071 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8072 break;
8073 #endif
8074 case TARGET_NR_times:
8076 struct target_tms *tmsp;
8077 struct tms tms;
8078 ret = get_errno(times(&tms));
8079 if (arg1) {
8080 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8081 if (!tmsp)
8082 goto efault;
8083 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8084 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8085 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8086 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8088 if (!is_error(ret))
8089 ret = host_to_target_clock_t(ret);
8091 break;
8092 #ifdef TARGET_NR_prof
8093 case TARGET_NR_prof:
8094 goto unimplemented;
8095 #endif
8096 #ifdef TARGET_NR_signal
8097 case TARGET_NR_signal:
8098 goto unimplemented;
8099 #endif
8100 case TARGET_NR_acct:
8101 if (arg1 == 0) {
8102 ret = get_errno(acct(NULL));
8103 } else {
8104 if (!(p = lock_user_string(arg1)))
8105 goto efault;
8106 ret = get_errno(acct(path(p)));
8107 unlock_user(p, arg1, 0);
8109 break;
8110 #ifdef TARGET_NR_umount2
8111 case TARGET_NR_umount2:
8112 if (!(p = lock_user_string(arg1)))
8113 goto efault;
8114 ret = get_errno(umount2(p, arg2));
8115 unlock_user(p, arg1, 0);
8116 break;
8117 #endif
8118 #ifdef TARGET_NR_lock
8119 case TARGET_NR_lock:
8120 goto unimplemented;
8121 #endif
8122 case TARGET_NR_ioctl:
8123 ret = do_ioctl(arg1, arg2, arg3);
8124 break;
8125 case TARGET_NR_fcntl:
8126 ret = do_fcntl(arg1, arg2, arg3);
8127 break;
8128 #ifdef TARGET_NR_mpx
8129 case TARGET_NR_mpx:
8130 goto unimplemented;
8131 #endif
8132 case TARGET_NR_setpgid:
8133 ret = get_errno(setpgid(arg1, arg2));
8134 break;
8135 #ifdef TARGET_NR_ulimit
8136 case TARGET_NR_ulimit:
8137 goto unimplemented;
8138 #endif
8139 #ifdef TARGET_NR_oldolduname
8140 case TARGET_NR_oldolduname:
8141 goto unimplemented;
8142 #endif
8143 case TARGET_NR_umask:
8144 ret = get_errno(umask(arg1));
8145 break;
8146 case TARGET_NR_chroot:
8147 if (!(p = lock_user_string(arg1)))
8148 goto efault;
8149 ret = get_errno(chroot(p));
8150 unlock_user(p, arg1, 0);
8151 break;
8152 #ifdef TARGET_NR_ustat
8153 case TARGET_NR_ustat:
8154 goto unimplemented;
8155 #endif
8156 #ifdef TARGET_NR_dup2
8157 case TARGET_NR_dup2:
8158 ret = get_errno(dup2(arg1, arg2));
8159 if (ret >= 0) {
8160 fd_trans_dup(arg1, arg2);
8162 break;
8163 #endif
8164 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8165 case TARGET_NR_dup3:
8166 ret = get_errno(dup3(arg1, arg2, arg3));
8167 if (ret >= 0) {
8168 fd_trans_dup(arg1, arg2);
8170 break;
8171 #endif
8172 #ifdef TARGET_NR_getppid /* not on alpha */
8173 case TARGET_NR_getppid:
8174 ret = get_errno(getppid());
8175 break;
8176 #endif
8177 #ifdef TARGET_NR_getpgrp
8178 case TARGET_NR_getpgrp:
8179 ret = get_errno(getpgrp());
8180 break;
8181 #endif
8182 case TARGET_NR_setsid:
8183 ret = get_errno(setsid());
8184 break;
8185 #ifdef TARGET_NR_sigaction
8186 case TARGET_NR_sigaction:
8188 #if defined(TARGET_ALPHA)
8189 struct target_sigaction act, oact, *pact = 0;
8190 struct target_old_sigaction *old_act;
8191 if (arg2) {
8192 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8193 goto efault;
8194 act._sa_handler = old_act->_sa_handler;
8195 target_siginitset(&act.sa_mask, old_act->sa_mask);
8196 act.sa_flags = old_act->sa_flags;
8197 act.sa_restorer = 0;
8198 unlock_user_struct(old_act, arg2, 0);
8199 pact = &act;
8201 ret = get_errno(do_sigaction(arg1, pact, &oact));
8202 if (!is_error(ret) && arg3) {
8203 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8204 goto efault;
8205 old_act->_sa_handler = oact._sa_handler;
8206 old_act->sa_mask = oact.sa_mask.sig[0];
8207 old_act->sa_flags = oact.sa_flags;
8208 unlock_user_struct(old_act, arg3, 1);
8210 #elif defined(TARGET_MIPS)
8211 struct target_sigaction act, oact, *pact, *old_act;
8213 if (arg2) {
8214 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8215 goto efault;
8216 act._sa_handler = old_act->_sa_handler;
8217 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8218 act.sa_flags = old_act->sa_flags;
8219 unlock_user_struct(old_act, arg2, 0);
8220 pact = &act;
8221 } else {
8222 pact = NULL;
8225 ret = get_errno(do_sigaction(arg1, pact, &oact));
8227 if (!is_error(ret) && arg3) {
8228 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8229 goto efault;
8230 old_act->_sa_handler = oact._sa_handler;
8231 old_act->sa_flags = oact.sa_flags;
8232 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8233 old_act->sa_mask.sig[1] = 0;
8234 old_act->sa_mask.sig[2] = 0;
8235 old_act->sa_mask.sig[3] = 0;
8236 unlock_user_struct(old_act, arg3, 1);
8238 #else
8239 struct target_old_sigaction *old_act;
8240 struct target_sigaction act, oact, *pact;
8241 if (arg2) {
8242 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8243 goto efault;
8244 act._sa_handler = old_act->_sa_handler;
8245 target_siginitset(&act.sa_mask, old_act->sa_mask);
8246 act.sa_flags = old_act->sa_flags;
8247 act.sa_restorer = old_act->sa_restorer;
8248 unlock_user_struct(old_act, arg2, 0);
8249 pact = &act;
8250 } else {
8251 pact = NULL;
8253 ret = get_errno(do_sigaction(arg1, pact, &oact));
8254 if (!is_error(ret) && arg3) {
8255 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8256 goto efault;
8257 old_act->_sa_handler = oact._sa_handler;
8258 old_act->sa_mask = oact.sa_mask.sig[0];
8259 old_act->sa_flags = oact.sa_flags;
8260 old_act->sa_restorer = oact.sa_restorer;
8261 unlock_user_struct(old_act, arg3, 1);
8263 #endif
8265 break;
8266 #endif
8267 case TARGET_NR_rt_sigaction:
8269 #if defined(TARGET_ALPHA)
8270 struct target_sigaction act, oact, *pact = 0;
8271 struct target_rt_sigaction *rt_act;
8273 if (arg4 != sizeof(target_sigset_t)) {
8274 ret = -TARGET_EINVAL;
8275 break;
8277 if (arg2) {
8278 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8279 goto efault;
8280 act._sa_handler = rt_act->_sa_handler;
8281 act.sa_mask = rt_act->sa_mask;
8282 act.sa_flags = rt_act->sa_flags;
8283 act.sa_restorer = arg5;
8284 unlock_user_struct(rt_act, arg2, 0);
8285 pact = &act;
8287 ret = get_errno(do_sigaction(arg1, pact, &oact));
8288 if (!is_error(ret) && arg3) {
8289 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8290 goto efault;
8291 rt_act->_sa_handler = oact._sa_handler;
8292 rt_act->sa_mask = oact.sa_mask;
8293 rt_act->sa_flags = oact.sa_flags;
8294 unlock_user_struct(rt_act, arg3, 1);
8296 #else
8297 struct target_sigaction *act;
8298 struct target_sigaction *oact;
8300 if (arg4 != sizeof(target_sigset_t)) {
8301 ret = -TARGET_EINVAL;
8302 break;
8304 if (arg2) {
8305 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
8306 goto efault;
8307 } else
8308 act = NULL;
8309 if (arg3) {
8310 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8311 ret = -TARGET_EFAULT;
8312 goto rt_sigaction_fail;
8314 } else
8315 oact = NULL;
8316 ret = get_errno(do_sigaction(arg1, act, oact));
8317 rt_sigaction_fail:
8318 if (act)
8319 unlock_user_struct(act, arg2, 0);
8320 if (oact)
8321 unlock_user_struct(oact, arg3, 1);
8322 #endif
8324 break;
8325 #ifdef TARGET_NR_sgetmask /* not on alpha */
8326 case TARGET_NR_sgetmask:
8328 sigset_t cur_set;
8329 abi_ulong target_set;
8330 ret = do_sigprocmask(0, NULL, &cur_set);
8331 if (!ret) {
8332 host_to_target_old_sigset(&target_set, &cur_set);
8333 ret = target_set;
8336 break;
8337 #endif
8338 #ifdef TARGET_NR_ssetmask /* not on alpha */
8339 case TARGET_NR_ssetmask:
8341 sigset_t set, oset, cur_set;
8342 abi_ulong target_set = arg1;
8343 /* We only have one word of the new mask so we must read
8344 * the rest of it with do_sigprocmask() and OR in this word.
8345 * We are guaranteed that a do_sigprocmask() that only queries
8346 * the signal mask will not fail.
8348 ret = do_sigprocmask(0, NULL, &cur_set);
8349 assert(!ret);
8350 target_to_host_old_sigset(&set, &target_set);
8351 sigorset(&set, &set, &cur_set);
8352 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8353 if (!ret) {
8354 host_to_target_old_sigset(&target_set, &oset);
8355 ret = target_set;
8358 break;
8359 #endif
8360 #ifdef TARGET_NR_sigprocmask
8361 case TARGET_NR_sigprocmask:
8363 #if defined(TARGET_ALPHA)
8364 sigset_t set, oldset;
8365 abi_ulong mask;
8366 int how;
8368 switch (arg1) {
8369 case TARGET_SIG_BLOCK:
8370 how = SIG_BLOCK;
8371 break;
8372 case TARGET_SIG_UNBLOCK:
8373 how = SIG_UNBLOCK;
8374 break;
8375 case TARGET_SIG_SETMASK:
8376 how = SIG_SETMASK;
8377 break;
8378 default:
8379 ret = -TARGET_EINVAL;
8380 goto fail;
8382 mask = arg2;
8383 target_to_host_old_sigset(&set, &mask);
8385 ret = do_sigprocmask(how, &set, &oldset);
8386 if (!is_error(ret)) {
8387 host_to_target_old_sigset(&mask, &oldset);
8388 ret = mask;
8389 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8391 #else
8392 sigset_t set, oldset, *set_ptr;
8393 int how;
8395 if (arg2) {
8396 switch (arg1) {
8397 case TARGET_SIG_BLOCK:
8398 how = SIG_BLOCK;
8399 break;
8400 case TARGET_SIG_UNBLOCK:
8401 how = SIG_UNBLOCK;
8402 break;
8403 case TARGET_SIG_SETMASK:
8404 how = SIG_SETMASK;
8405 break;
8406 default:
8407 ret = -TARGET_EINVAL;
8408 goto fail;
8410 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8411 goto efault;
8412 target_to_host_old_sigset(&set, p);
8413 unlock_user(p, arg2, 0);
8414 set_ptr = &set;
8415 } else {
8416 how = 0;
8417 set_ptr = NULL;
8419 ret = do_sigprocmask(how, set_ptr, &oldset);
8420 if (!is_error(ret) && arg3) {
8421 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8422 goto efault;
8423 host_to_target_old_sigset(p, &oldset);
8424 unlock_user(p, arg3, sizeof(target_sigset_t));
8426 #endif
8428 break;
8429 #endif
8430 case TARGET_NR_rt_sigprocmask:
8432 int how = arg1;
8433 sigset_t set, oldset, *set_ptr;
8435 if (arg4 != sizeof(target_sigset_t)) {
8436 ret = -TARGET_EINVAL;
8437 break;
8440 if (arg2) {
8441 switch(how) {
8442 case TARGET_SIG_BLOCK:
8443 how = SIG_BLOCK;
8444 break;
8445 case TARGET_SIG_UNBLOCK:
8446 how = SIG_UNBLOCK;
8447 break;
8448 case TARGET_SIG_SETMASK:
8449 how = SIG_SETMASK;
8450 break;
8451 default:
8452 ret = -TARGET_EINVAL;
8453 goto fail;
8455 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8456 goto efault;
8457 target_to_host_sigset(&set, p);
8458 unlock_user(p, arg2, 0);
8459 set_ptr = &set;
8460 } else {
8461 how = 0;
8462 set_ptr = NULL;
8464 ret = do_sigprocmask(how, set_ptr, &oldset);
8465 if (!is_error(ret) && arg3) {
8466 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8467 goto efault;
8468 host_to_target_sigset(p, &oldset);
8469 unlock_user(p, arg3, sizeof(target_sigset_t));
8472 break;
8473 #ifdef TARGET_NR_sigpending
8474 case TARGET_NR_sigpending:
8476 sigset_t set;
8477 ret = get_errno(sigpending(&set));
8478 if (!is_error(ret)) {
8479 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8480 goto efault;
8481 host_to_target_old_sigset(p, &set);
8482 unlock_user(p, arg1, sizeof(target_sigset_t));
8485 break;
8486 #endif
8487 case TARGET_NR_rt_sigpending:
8489 sigset_t set;
8491 /* Yes, this check is >, not != like most. We follow the kernel's
8492 * logic and it does it like this because it implements
8493 * NR_sigpending through the same code path, and in that case
8494 * the old_sigset_t is smaller in size.
8496 if (arg2 > sizeof(target_sigset_t)) {
8497 ret = -TARGET_EINVAL;
8498 break;
8501 ret = get_errno(sigpending(&set));
8502 if (!is_error(ret)) {
8503 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8504 goto efault;
8505 host_to_target_sigset(p, &set);
8506 unlock_user(p, arg1, sizeof(target_sigset_t));
8509 break;
8510 #ifdef TARGET_NR_sigsuspend
8511 case TARGET_NR_sigsuspend:
8513 TaskState *ts = cpu->opaque;
8514 #if defined(TARGET_ALPHA)
8515 abi_ulong mask = arg1;
8516 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8517 #else
8518 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8519 goto efault;
8520 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8521 unlock_user(p, arg1, 0);
8522 #endif
8523 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8524 SIGSET_T_SIZE));
8525 if (ret != -TARGET_ERESTARTSYS) {
8526 ts->in_sigsuspend = 1;
8529 break;
8530 #endif
8531 case TARGET_NR_rt_sigsuspend:
8533 TaskState *ts = cpu->opaque;
8535 if (arg2 != sizeof(target_sigset_t)) {
8536 ret = -TARGET_EINVAL;
8537 break;
8539 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8540 goto efault;
8541 target_to_host_sigset(&ts->sigsuspend_mask, p);
8542 unlock_user(p, arg1, 0);
8543 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8544 SIGSET_T_SIZE));
8545 if (ret != -TARGET_ERESTARTSYS) {
8546 ts->in_sigsuspend = 1;
8549 break;
8550 case TARGET_NR_rt_sigtimedwait:
8552 sigset_t set;
8553 struct timespec uts, *puts;
8554 siginfo_t uinfo;
8556 if (arg4 != sizeof(target_sigset_t)) {
8557 ret = -TARGET_EINVAL;
8558 break;
8561 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8562 goto efault;
8563 target_to_host_sigset(&set, p);
8564 unlock_user(p, arg1, 0);
8565 if (arg3) {
8566 puts = &uts;
8567 target_to_host_timespec(puts, arg3);
8568 } else {
8569 puts = NULL;
8571 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8572 SIGSET_T_SIZE));
8573 if (!is_error(ret)) {
8574 if (arg2) {
8575 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8577 if (!p) {
8578 goto efault;
8580 host_to_target_siginfo(p, &uinfo);
8581 unlock_user(p, arg2, sizeof(target_siginfo_t));
8583 ret = host_to_target_signal(ret);
8586 break;
8587 case TARGET_NR_rt_sigqueueinfo:
8589 siginfo_t uinfo;
8591 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8592 if (!p) {
8593 goto efault;
8595 target_to_host_siginfo(&uinfo, p);
8596 unlock_user(p, arg1, 0);
8597 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8599 break;
8600 #ifdef TARGET_NR_sigreturn
8601 case TARGET_NR_sigreturn:
8602 if (block_signals()) {
8603 ret = -TARGET_ERESTARTSYS;
8604 } else {
8605 ret = do_sigreturn(cpu_env);
8607 break;
8608 #endif
8609 case TARGET_NR_rt_sigreturn:
8610 if (block_signals()) {
8611 ret = -TARGET_ERESTARTSYS;
8612 } else {
8613 ret = do_rt_sigreturn(cpu_env);
8615 break;
8616 case TARGET_NR_sethostname:
8617 if (!(p = lock_user_string(arg1)))
8618 goto efault;
8619 ret = get_errno(sethostname(p, arg2));
8620 unlock_user(p, arg1, 0);
8621 break;
8622 case TARGET_NR_setrlimit:
8624 int resource = target_to_host_resource(arg1);
8625 struct target_rlimit *target_rlim;
8626 struct rlimit rlim;
8627 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8628 goto efault;
8629 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8630 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8631 unlock_user_struct(target_rlim, arg2, 0);
8632 ret = get_errno(setrlimit(resource, &rlim));
8634 break;
8635 case TARGET_NR_getrlimit:
8637 int resource = target_to_host_resource(arg1);
8638 struct target_rlimit *target_rlim;
8639 struct rlimit rlim;
8641 ret = get_errno(getrlimit(resource, &rlim));
8642 if (!is_error(ret)) {
8643 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8644 goto efault;
8645 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8646 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8647 unlock_user_struct(target_rlim, arg2, 1);
8650 break;
8651 case TARGET_NR_getrusage:
8653 struct rusage rusage;
8654 ret = get_errno(getrusage(arg1, &rusage));
8655 if (!is_error(ret)) {
8656 ret = host_to_target_rusage(arg2, &rusage);
8659 break;
8660 case TARGET_NR_gettimeofday:
8662 struct timeval tv;
8663 ret = get_errno(gettimeofday(&tv, NULL));
8664 if (!is_error(ret)) {
8665 if (copy_to_user_timeval(arg1, &tv))
8666 goto efault;
8669 break;
8670 case TARGET_NR_settimeofday:
8672 struct timeval tv, *ptv = NULL;
8673 struct timezone tz, *ptz = NULL;
8675 if (arg1) {
8676 if (copy_from_user_timeval(&tv, arg1)) {
8677 goto efault;
8679 ptv = &tv;
8682 if (arg2) {
8683 if (copy_from_user_timezone(&tz, arg2)) {
8684 goto efault;
8686 ptz = &tz;
8689 ret = get_errno(settimeofday(ptv, ptz));
8691 break;
8692 #if defined(TARGET_NR_select)
8693 case TARGET_NR_select:
8694 #if defined(TARGET_WANT_NI_OLD_SELECT)
8695 /* some architectures used to have old_select here
8696 * but now ENOSYS it.
8698 ret = -TARGET_ENOSYS;
8699 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8700 ret = do_old_select(arg1);
8701 #else
8702 ret = do_select(arg1, arg2, arg3, arg4, arg5);
8703 #endif
8704 break;
8705 #endif
8706 #ifdef TARGET_NR_pselect6
8707 case TARGET_NR_pselect6:
8709 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8710 fd_set rfds, wfds, efds;
8711 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8712 struct timespec ts, *ts_ptr;
8715 * The 6th arg is actually two args smashed together,
8716 * so we cannot use the C library.
8718 sigset_t set;
8719 struct {
8720 sigset_t *set;
8721 size_t size;
8722 } sig, *sig_ptr;
8724 abi_ulong arg_sigset, arg_sigsize, *arg7;
8725 target_sigset_t *target_sigset;
8727 n = arg1;
8728 rfd_addr = arg2;
8729 wfd_addr = arg3;
8730 efd_addr = arg4;
8731 ts_addr = arg5;
8733 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8734 if (ret) {
8735 goto fail;
8737 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8738 if (ret) {
8739 goto fail;
8741 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8742 if (ret) {
8743 goto fail;
8747 * This takes a timespec, and not a timeval, so we cannot
8748 * use the do_select() helper ...
8750 if (ts_addr) {
8751 if (target_to_host_timespec(&ts, ts_addr)) {
8752 goto efault;
8754 ts_ptr = &ts;
8755 } else {
8756 ts_ptr = NULL;
8759 /* Extract the two packed args for the sigset */
8760 if (arg6) {
8761 sig_ptr = &sig;
8762 sig.size = SIGSET_T_SIZE;
8764 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8765 if (!arg7) {
8766 goto efault;
8768 arg_sigset = tswapal(arg7[0]);
8769 arg_sigsize = tswapal(arg7[1]);
8770 unlock_user(arg7, arg6, 0);
8772 if (arg_sigset) {
8773 sig.set = &set;
8774 if (arg_sigsize != sizeof(*target_sigset)) {
8775 /* Like the kernel, we enforce correct size sigsets */
8776 ret = -TARGET_EINVAL;
8777 goto fail;
8779 target_sigset = lock_user(VERIFY_READ, arg_sigset,
8780 sizeof(*target_sigset), 1);
8781 if (!target_sigset) {
8782 goto efault;
8784 target_to_host_sigset(&set, target_sigset);
8785 unlock_user(target_sigset, arg_sigset, 0);
8786 } else {
8787 sig.set = NULL;
8789 } else {
8790 sig_ptr = NULL;
8793 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8794 ts_ptr, sig_ptr));
8796 if (!is_error(ret)) {
8797 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8798 goto efault;
8799 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8800 goto efault;
8801 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8802 goto efault;
8804 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8805 goto efault;
8808 break;
8809 #endif
8810 #ifdef TARGET_NR_symlink
8811 case TARGET_NR_symlink:
8813 void *p2;
8814 p = lock_user_string(arg1);
8815 p2 = lock_user_string(arg2);
8816 if (!p || !p2)
8817 ret = -TARGET_EFAULT;
8818 else
8819 ret = get_errno(symlink(p, p2));
8820 unlock_user(p2, arg2, 0);
8821 unlock_user(p, arg1, 0);
8823 break;
8824 #endif
8825 #if defined(TARGET_NR_symlinkat)
8826 case TARGET_NR_symlinkat:
8828 void *p2;
8829 p = lock_user_string(arg1);
8830 p2 = lock_user_string(arg3);
8831 if (!p || !p2)
8832 ret = -TARGET_EFAULT;
8833 else
8834 ret = get_errno(symlinkat(p, arg2, p2));
8835 unlock_user(p2, arg3, 0);
8836 unlock_user(p, arg1, 0);
8838 break;
8839 #endif
8840 #ifdef TARGET_NR_oldlstat
8841 case TARGET_NR_oldlstat:
8842 goto unimplemented;
8843 #endif
8844 #ifdef TARGET_NR_readlink
8845 case TARGET_NR_readlink:
8847 void *p2;
8848 p = lock_user_string(arg1);
8849 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8850 if (!p || !p2) {
8851 ret = -TARGET_EFAULT;
8852 } else if (!arg3) {
8853 /* Short circuit this for the magic exe check. */
8854 ret = -TARGET_EINVAL;
8855 } else if (is_proc_myself((const char *)p, "exe")) {
8856 char real[PATH_MAX], *temp;
8857 temp = realpath(exec_path, real);
8858 /* Return value is # of bytes that we wrote to the buffer. */
8859 if (temp == NULL) {
8860 ret = get_errno(-1);
8861 } else {
8862 /* Don't worry about sign mismatch as earlier mapping
8863 * logic would have thrown a bad address error. */
8864 ret = MIN(strlen(real), arg3);
8865 /* We cannot NUL terminate the string. */
8866 memcpy(p2, real, ret);
8868 } else {
8869 ret = get_errno(readlink(path(p), p2, arg3));
8871 unlock_user(p2, arg2, ret);
8872 unlock_user(p, arg1, 0);
8874 break;
8875 #endif
8876 #if defined(TARGET_NR_readlinkat)
8877 case TARGET_NR_readlinkat:
8879 void *p2;
8880 p = lock_user_string(arg2);
8881 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8882 if (!p || !p2) {
8883 ret = -TARGET_EFAULT;
8884 } else if (is_proc_myself((const char *)p, "exe")) {
8885 char real[PATH_MAX], *temp;
8886 temp = realpath(exec_path, real);
8887 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
8888 snprintf((char *)p2, arg4, "%s", real);
8889 } else {
8890 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8892 unlock_user(p2, arg3, ret);
8893 unlock_user(p, arg2, 0);
8895 break;
8896 #endif
8897 #ifdef TARGET_NR_uselib
8898 case TARGET_NR_uselib:
8899 goto unimplemented;
8900 #endif
8901 #ifdef TARGET_NR_swapon
8902 case TARGET_NR_swapon:
8903 if (!(p = lock_user_string(arg1)))
8904 goto efault;
8905 ret = get_errno(swapon(p, arg2));
8906 unlock_user(p, arg1, 0);
8907 break;
8908 #endif
8909 case TARGET_NR_reboot:
8910 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8911 /* arg4 must be ignored in all other cases */
8912 p = lock_user_string(arg4);
8913 if (!p) {
8914 goto efault;
8916 ret = get_errno(reboot(arg1, arg2, arg3, p));
8917 unlock_user(p, arg4, 0);
8918 } else {
8919 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8921 break;
8922 #ifdef TARGET_NR_readdir
8923 case TARGET_NR_readdir:
8924 goto unimplemented;
8925 #endif
8926 #ifdef TARGET_NR_mmap
8927 case TARGET_NR_mmap:
8928 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8929 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8930 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8931 || defined(TARGET_S390X)
8933 abi_ulong *v;
8934 abi_ulong v1, v2, v3, v4, v5, v6;
8935 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8936 goto efault;
8937 v1 = tswapal(v[0]);
8938 v2 = tswapal(v[1]);
8939 v3 = tswapal(v[2]);
8940 v4 = tswapal(v[3]);
8941 v5 = tswapal(v[4]);
8942 v6 = tswapal(v[5]);
8943 unlock_user(v, arg1, 0);
8944 ret = get_errno(target_mmap(v1, v2, v3,
8945 target_to_host_bitmask(v4, mmap_flags_tbl),
8946 v5, v6));
8948 #else
8949 ret = get_errno(target_mmap(arg1, arg2, arg3,
8950 target_to_host_bitmask(arg4, mmap_flags_tbl),
8951 arg5,
8952 arg6));
8953 #endif
8954 break;
8955 #endif
8956 #ifdef TARGET_NR_mmap2
8957 case TARGET_NR_mmap2:
8958 #ifndef MMAP_SHIFT
8959 #define MMAP_SHIFT 12
8960 #endif
8961 ret = get_errno(target_mmap(arg1, arg2, arg3,
8962 target_to_host_bitmask(arg4, mmap_flags_tbl),
8963 arg5,
8964 arg6 << MMAP_SHIFT));
8965 break;
8966 #endif
8967 case TARGET_NR_munmap:
8968 ret = get_errno(target_munmap(arg1, arg2));
8969 break;
8970 case TARGET_NR_mprotect:
8972 TaskState *ts = cpu->opaque;
8973 /* Special hack to detect libc making the stack executable. */
8974 if ((arg3 & PROT_GROWSDOWN)
8975 && arg1 >= ts->info->stack_limit
8976 && arg1 <= ts->info->start_stack) {
8977 arg3 &= ~PROT_GROWSDOWN;
8978 arg2 = arg2 + arg1 - ts->info->stack_limit;
8979 arg1 = ts->info->stack_limit;
8982 ret = get_errno(target_mprotect(arg1, arg2, arg3));
8983 break;
8984 #ifdef TARGET_NR_mremap
8985 case TARGET_NR_mremap:
8986 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
8987 break;
8988 #endif
8989 /* ??? msync/mlock/munlock are broken for softmmu. */
8990 #ifdef TARGET_NR_msync
8991 case TARGET_NR_msync:
8992 ret = get_errno(msync(g2h(arg1), arg2, arg3));
8993 break;
8994 #endif
8995 #ifdef TARGET_NR_mlock
8996 case TARGET_NR_mlock:
8997 ret = get_errno(mlock(g2h(arg1), arg2));
8998 break;
8999 #endif
9000 #ifdef TARGET_NR_munlock
9001 case TARGET_NR_munlock:
9002 ret = get_errno(munlock(g2h(arg1), arg2));
9003 break;
9004 #endif
9005 #ifdef TARGET_NR_mlockall
9006 case TARGET_NR_mlockall:
9007 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9008 break;
9009 #endif
9010 #ifdef TARGET_NR_munlockall
9011 case TARGET_NR_munlockall:
9012 ret = get_errno(munlockall());
9013 break;
9014 #endif
9015 case TARGET_NR_truncate:
9016 if (!(p = lock_user_string(arg1)))
9017 goto efault;
9018 ret = get_errno(truncate(p, arg2));
9019 unlock_user(p, arg1, 0);
9020 break;
9021 case TARGET_NR_ftruncate:
9022 ret = get_errno(ftruncate(arg1, arg2));
9023 break;
9024 case TARGET_NR_fchmod:
9025 ret = get_errno(fchmod(arg1, arg2));
9026 break;
9027 #if defined(TARGET_NR_fchmodat)
9028 case TARGET_NR_fchmodat:
9029 if (!(p = lock_user_string(arg2)))
9030 goto efault;
9031 ret = get_errno(fchmodat(arg1, p, arg3, 0));
9032 unlock_user(p, arg2, 0);
9033 break;
9034 #endif
9035 case TARGET_NR_getpriority:
9036 /* Note that negative values are valid for getpriority, so we must
9037 differentiate based on errno settings. */
9038 errno = 0;
9039 ret = getpriority(arg1, arg2);
9040 if (ret == -1 && errno != 0) {
9041 ret = -host_to_target_errno(errno);
9042 break;
9044 #ifdef TARGET_ALPHA
9045 /* Return value is the unbiased priority. Signal no error. */
9046 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9047 #else
9048 /* Return value is a biased priority to avoid negative numbers. */
9049 ret = 20 - ret;
9050 #endif
9051 break;
9052 case TARGET_NR_setpriority:
9053 ret = get_errno(setpriority(arg1, arg2, arg3));
9054 break;
9055 #ifdef TARGET_NR_profil
9056 case TARGET_NR_profil:
9057 goto unimplemented;
9058 #endif
9059 case TARGET_NR_statfs:
9060 if (!(p = lock_user_string(arg1)))
9061 goto efault;
9062 ret = get_errno(statfs(path(p), &stfs));
9063 unlock_user(p, arg1, 0);
9064 convert_statfs:
9065 if (!is_error(ret)) {
9066 struct target_statfs *target_stfs;
9068 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9069 goto efault;
9070 __put_user(stfs.f_type, &target_stfs->f_type);
9071 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9072 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9073 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9074 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9075 __put_user(stfs.f_files, &target_stfs->f_files);
9076 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9077 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9078 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9079 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9080 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9081 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9082 unlock_user_struct(target_stfs, arg2, 1);
9084 break;
9085 case TARGET_NR_fstatfs:
9086 ret = get_errno(fstatfs(arg1, &stfs));
9087 goto convert_statfs;
9088 #ifdef TARGET_NR_statfs64
9089 case TARGET_NR_statfs64:
9090 if (!(p = lock_user_string(arg1)))
9091 goto efault;
9092 ret = get_errno(statfs(path(p), &stfs));
9093 unlock_user(p, arg1, 0);
9094 convert_statfs64:
9095 if (!is_error(ret)) {
9096 struct target_statfs64 *target_stfs;
9098 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9099 goto efault;
9100 __put_user(stfs.f_type, &target_stfs->f_type);
9101 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9102 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9103 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9104 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9105 __put_user(stfs.f_files, &target_stfs->f_files);
9106 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9107 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9108 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9109 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9110 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9111 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9112 unlock_user_struct(target_stfs, arg3, 1);
9114 break;
9115 case TARGET_NR_fstatfs64:
9116 ret = get_errno(fstatfs(arg1, &stfs));
9117 goto convert_statfs64;
9118 #endif
9119 #ifdef TARGET_NR_ioperm
9120 case TARGET_NR_ioperm:
9121 goto unimplemented;
9122 #endif
9123 #ifdef TARGET_NR_socketcall
9124 case TARGET_NR_socketcall:
9125 ret = do_socketcall(arg1, arg2);
9126 break;
9127 #endif
9128 #ifdef TARGET_NR_accept
9129 case TARGET_NR_accept:
9130 ret = do_accept4(arg1, arg2, arg3, 0);
9131 break;
9132 #endif
9133 #ifdef TARGET_NR_accept4
9134 case TARGET_NR_accept4:
9135 ret = do_accept4(arg1, arg2, arg3, arg4);
9136 break;
9137 #endif
9138 #ifdef TARGET_NR_bind
9139 case TARGET_NR_bind:
9140 ret = do_bind(arg1, arg2, arg3);
9141 break;
9142 #endif
9143 #ifdef TARGET_NR_connect
9144 case TARGET_NR_connect:
9145 ret = do_connect(arg1, arg2, arg3);
9146 break;
9147 #endif
9148 #ifdef TARGET_NR_getpeername
9149 case TARGET_NR_getpeername:
9150 ret = do_getpeername(arg1, arg2, arg3);
9151 break;
9152 #endif
9153 #ifdef TARGET_NR_getsockname
9154 case TARGET_NR_getsockname:
9155 ret = do_getsockname(arg1, arg2, arg3);
9156 break;
9157 #endif
9158 #ifdef TARGET_NR_getsockopt
9159 case TARGET_NR_getsockopt:
9160 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9161 break;
9162 #endif
9163 #ifdef TARGET_NR_listen
9164 case TARGET_NR_listen:
9165 ret = get_errno(listen(arg1, arg2));
9166 break;
9167 #endif
9168 #ifdef TARGET_NR_recv
9169 case TARGET_NR_recv:
9170 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9171 break;
9172 #endif
9173 #ifdef TARGET_NR_recvfrom
9174 case TARGET_NR_recvfrom:
9175 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9176 break;
9177 #endif
9178 #ifdef TARGET_NR_recvmsg
9179 case TARGET_NR_recvmsg:
9180 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
9181 break;
9182 #endif
9183 #ifdef TARGET_NR_send
9184 case TARGET_NR_send:
9185 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9186 break;
9187 #endif
9188 #ifdef TARGET_NR_sendmsg
9189 case TARGET_NR_sendmsg:
9190 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
9191 break;
9192 #endif
9193 #ifdef TARGET_NR_sendmmsg
9194 case TARGET_NR_sendmmsg:
9195 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9196 break;
9197 case TARGET_NR_recvmmsg:
9198 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9199 break;
9200 #endif
9201 #ifdef TARGET_NR_sendto
9202 case TARGET_NR_sendto:
9203 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9204 break;
9205 #endif
9206 #ifdef TARGET_NR_shutdown
9207 case TARGET_NR_shutdown:
9208 ret = get_errno(shutdown(arg1, arg2));
9209 break;
9210 #endif
9211 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9212 case TARGET_NR_getrandom:
9213 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9214 if (!p) {
9215 goto efault;
9217 ret = get_errno(getrandom(p, arg2, arg3));
9218 unlock_user(p, arg1, ret);
9219 break;
9220 #endif
9221 #ifdef TARGET_NR_socket
9222 case TARGET_NR_socket:
9223 ret = do_socket(arg1, arg2, arg3);
9224 fd_trans_unregister(ret);
9225 break;
9226 #endif
9227 #ifdef TARGET_NR_socketpair
9228 case TARGET_NR_socketpair:
9229 ret = do_socketpair(arg1, arg2, arg3, arg4);
9230 break;
9231 #endif
9232 #ifdef TARGET_NR_setsockopt
9233 case TARGET_NR_setsockopt:
9234 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9235 break;
9236 #endif
9238 case TARGET_NR_syslog:
9239 if (!(p = lock_user_string(arg2)))
9240 goto efault;
9241 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9242 unlock_user(p, arg2, 0);
9243 break;
9245 case TARGET_NR_setitimer:
9247 struct itimerval value, ovalue, *pvalue;
9249 if (arg2) {
9250 pvalue = &value;
9251 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9252 || copy_from_user_timeval(&pvalue->it_value,
9253 arg2 + sizeof(struct target_timeval)))
9254 goto efault;
9255 } else {
9256 pvalue = NULL;
9258 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9259 if (!is_error(ret) && arg3) {
9260 if (copy_to_user_timeval(arg3,
9261 &ovalue.it_interval)
9262 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9263 &ovalue.it_value))
9264 goto efault;
9267 break;
9268 case TARGET_NR_getitimer:
9270 struct itimerval value;
9272 ret = get_errno(getitimer(arg1, &value));
9273 if (!is_error(ret) && arg2) {
9274 if (copy_to_user_timeval(arg2,
9275 &value.it_interval)
9276 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9277 &value.it_value))
9278 goto efault;
9281 break;
9282 #ifdef TARGET_NR_stat
9283 case TARGET_NR_stat:
9284 if (!(p = lock_user_string(arg1)))
9285 goto efault;
9286 ret = get_errno(stat(path(p), &st));
9287 unlock_user(p, arg1, 0);
9288 goto do_stat;
9289 #endif
9290 #ifdef TARGET_NR_lstat
9291 case TARGET_NR_lstat:
9292 if (!(p = lock_user_string(arg1)))
9293 goto efault;
9294 ret = get_errno(lstat(path(p), &st));
9295 unlock_user(p, arg1, 0);
9296 goto do_stat;
9297 #endif
9298 case TARGET_NR_fstat:
9300 ret = get_errno(fstat(arg1, &st));
9301 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9302 do_stat:
9303 #endif
9304 if (!is_error(ret)) {
9305 struct target_stat *target_st;
9307 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9308 goto efault;
9309 memset(target_st, 0, sizeof(*target_st));
9310 __put_user(st.st_dev, &target_st->st_dev);
9311 __put_user(st.st_ino, &target_st->st_ino);
9312 __put_user(st.st_mode, &target_st->st_mode);
9313 __put_user(st.st_uid, &target_st->st_uid);
9314 __put_user(st.st_gid, &target_st->st_gid);
9315 __put_user(st.st_nlink, &target_st->st_nlink);
9316 __put_user(st.st_rdev, &target_st->st_rdev);
9317 __put_user(st.st_size, &target_st->st_size);
9318 __put_user(st.st_blksize, &target_st->st_blksize);
9319 __put_user(st.st_blocks, &target_st->st_blocks);
9320 __put_user(st.st_atime, &target_st->target_st_atime);
9321 __put_user(st.st_mtime, &target_st->target_st_mtime);
9322 __put_user(st.st_ctime, &target_st->target_st_ctime);
9323 unlock_user_struct(target_st, arg2, 1);
9326 break;
9327 #ifdef TARGET_NR_olduname
9328 case TARGET_NR_olduname:
9329 goto unimplemented;
9330 #endif
9331 #ifdef TARGET_NR_iopl
9332 case TARGET_NR_iopl:
9333 goto unimplemented;
9334 #endif
9335 case TARGET_NR_vhangup:
9336 ret = get_errno(vhangup());
9337 break;
9338 #ifdef TARGET_NR_idle
9339 case TARGET_NR_idle:
9340 goto unimplemented;
9341 #endif
9342 #ifdef TARGET_NR_syscall
9343 case TARGET_NR_syscall:
9344 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9345 arg6, arg7, arg8, 0);
9346 break;
9347 #endif
9348 case TARGET_NR_wait4:
9350 int status;
9351 abi_long status_ptr = arg2;
9352 struct rusage rusage, *rusage_ptr;
9353 abi_ulong target_rusage = arg4;
9354 abi_long rusage_err;
9355 if (target_rusage)
9356 rusage_ptr = &rusage;
9357 else
9358 rusage_ptr = NULL;
9359 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9360 if (!is_error(ret)) {
9361 if (status_ptr && ret) {
9362 status = host_to_target_waitstatus(status);
9363 if (put_user_s32(status, status_ptr))
9364 goto efault;
9366 if (target_rusage) {
9367 rusage_err = host_to_target_rusage(target_rusage, &rusage);
9368 if (rusage_err) {
9369 ret = rusage_err;
9374 break;
9375 #ifdef TARGET_NR_swapoff
9376 case TARGET_NR_swapoff:
9377 if (!(p = lock_user_string(arg1)))
9378 goto efault;
9379 ret = get_errno(swapoff(p));
9380 unlock_user(p, arg1, 0);
9381 break;
9382 #endif
9383 case TARGET_NR_sysinfo:
9385 struct target_sysinfo *target_value;
9386 struct sysinfo value;
9387 ret = get_errno(sysinfo(&value));
9388 if (!is_error(ret) && arg1)
9390 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9391 goto efault;
9392 __put_user(value.uptime, &target_value->uptime);
9393 __put_user(value.loads[0], &target_value->loads[0]);
9394 __put_user(value.loads[1], &target_value->loads[1]);
9395 __put_user(value.loads[2], &target_value->loads[2]);
9396 __put_user(value.totalram, &target_value->totalram);
9397 __put_user(value.freeram, &target_value->freeram);
9398 __put_user(value.sharedram, &target_value->sharedram);
9399 __put_user(value.bufferram, &target_value->bufferram);
9400 __put_user(value.totalswap, &target_value->totalswap);
9401 __put_user(value.freeswap, &target_value->freeswap);
9402 __put_user(value.procs, &target_value->procs);
9403 __put_user(value.totalhigh, &target_value->totalhigh);
9404 __put_user(value.freehigh, &target_value->freehigh);
9405 __put_user(value.mem_unit, &target_value->mem_unit);
9406 unlock_user_struct(target_value, arg1, 1);
9409 break;
9410 #ifdef TARGET_NR_ipc
9411 case TARGET_NR_ipc:
9412 ret = do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9413 break;
9414 #endif
9415 #ifdef TARGET_NR_semget
9416 case TARGET_NR_semget:
9417 ret = get_errno(semget(arg1, arg2, arg3));
9418 break;
9419 #endif
9420 #ifdef TARGET_NR_semop
9421 case TARGET_NR_semop:
9422 ret = do_semop(arg1, arg2, arg3);
9423 break;
9424 #endif
9425 #ifdef TARGET_NR_semctl
9426 case TARGET_NR_semctl:
9427 ret = do_semctl(arg1, arg2, arg3, arg4);
9428 break;
9429 #endif
9430 #ifdef TARGET_NR_msgctl
9431 case TARGET_NR_msgctl:
9432 ret = do_msgctl(arg1, arg2, arg3);
9433 break;
9434 #endif
9435 #ifdef TARGET_NR_msgget
9436 case TARGET_NR_msgget:
9437 ret = get_errno(msgget(arg1, arg2));
9438 break;
9439 #endif
9440 #ifdef TARGET_NR_msgrcv
9441 case TARGET_NR_msgrcv:
9442 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9443 break;
9444 #endif
9445 #ifdef TARGET_NR_msgsnd
9446 case TARGET_NR_msgsnd:
9447 ret = do_msgsnd(arg1, arg2, arg3, arg4);
9448 break;
9449 #endif
9450 #ifdef TARGET_NR_shmget
9451 case TARGET_NR_shmget:
9452 ret = get_errno(shmget(arg1, arg2, arg3));
9453 break;
9454 #endif
9455 #ifdef TARGET_NR_shmctl
9456 case TARGET_NR_shmctl:
9457 ret = do_shmctl(arg1, arg2, arg3);
9458 break;
9459 #endif
9460 #ifdef TARGET_NR_shmat
9461 case TARGET_NR_shmat:
9462 ret = do_shmat(cpu_env, arg1, arg2, arg3);
9463 break;
9464 #endif
9465 #ifdef TARGET_NR_shmdt
9466 case TARGET_NR_shmdt:
9467 ret = do_shmdt(arg1);
9468 break;
9469 #endif
9470 case TARGET_NR_fsync:
9471 ret = get_errno(fsync(arg1));
9472 break;
9473 case TARGET_NR_clone:
9474 /* Linux manages to have three different orderings for its
9475 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9476 * match the kernel's CONFIG_CLONE_* settings.
9477 * Microblaze is further special in that it uses a sixth
9478 * implicit argument to clone for the TLS pointer.
9480 #if defined(TARGET_MICROBLAZE)
9481 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9482 #elif defined(TARGET_CLONE_BACKWARDS)
9483 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9484 #elif defined(TARGET_CLONE_BACKWARDS2)
9485 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9486 #else
9487 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9488 #endif
9489 break;
9490 #ifdef __NR_exit_group
9491 /* new thread calls */
9492 case TARGET_NR_exit_group:
9493 #ifdef TARGET_GPROF
9494 _mcleanup();
9495 #endif
9496 gdb_exit(cpu_env, arg1);
9497 ret = get_errno(exit_group(arg1));
9498 break;
9499 #endif
9500 case TARGET_NR_setdomainname:
9501 if (!(p = lock_user_string(arg1)))
9502 goto efault;
9503 ret = get_errno(setdomainname(p, arg2));
9504 unlock_user(p, arg1, 0);
9505 break;
9506 case TARGET_NR_uname:
9507 /* no need to transcode because we use the linux syscall */
9509 struct new_utsname * buf;
9511 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9512 goto efault;
9513 ret = get_errno(sys_uname(buf));
9514 if (!is_error(ret)) {
9515 /* Overwrite the native machine name with whatever is being
9516 emulated. */
9517 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
9518 /* Allow the user to override the reported release. */
9519 if (qemu_uname_release && *qemu_uname_release) {
9520 g_strlcpy(buf->release, qemu_uname_release,
9521 sizeof(buf->release));
9524 unlock_user_struct(buf, arg1, 1);
9526 break;
9527 #ifdef TARGET_I386
9528 case TARGET_NR_modify_ldt:
9529 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
9530 break;
9531 #if !defined(TARGET_X86_64)
9532 case TARGET_NR_vm86old:
9533 goto unimplemented;
9534 case TARGET_NR_vm86:
9535 ret = do_vm86(cpu_env, arg1, arg2);
9536 break;
9537 #endif
9538 #endif
9539 case TARGET_NR_adjtimex:
9540 goto unimplemented;
9541 #ifdef TARGET_NR_create_module
9542 case TARGET_NR_create_module:
9543 #endif
9544 case TARGET_NR_init_module:
9545 case TARGET_NR_delete_module:
9546 #ifdef TARGET_NR_get_kernel_syms
9547 case TARGET_NR_get_kernel_syms:
9548 #endif
9549 goto unimplemented;
9550 case TARGET_NR_quotactl:
9551 goto unimplemented;
9552 case TARGET_NR_getpgid:
9553 ret = get_errno(getpgid(arg1));
9554 break;
9555 case TARGET_NR_fchdir:
9556 ret = get_errno(fchdir(arg1));
9557 break;
9558 #ifdef TARGET_NR_bdflush /* not on x86_64 */
9559 case TARGET_NR_bdflush:
9560 goto unimplemented;
9561 #endif
9562 #ifdef TARGET_NR_sysfs
9563 case TARGET_NR_sysfs:
9564 goto unimplemented;
9565 #endif
9566 case TARGET_NR_personality:
9567 ret = get_errno(personality(arg1));
9568 break;
9569 #ifdef TARGET_NR_afs_syscall
9570 case TARGET_NR_afs_syscall:
9571 goto unimplemented;
9572 #endif
9573 #ifdef TARGET_NR__llseek /* Not on alpha */
9574 case TARGET_NR__llseek:
9576 int64_t res;
9577 #if !defined(__NR_llseek)
9578 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9579 if (res == -1) {
9580 ret = get_errno(res);
9581 } else {
9582 ret = 0;
9584 #else
9585 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9586 #endif
9587 if ((ret == 0) && put_user_s64(res, arg4)) {
9588 goto efault;
9591 break;
9592 #endif
9593 #ifdef TARGET_NR_getdents
9594 case TARGET_NR_getdents:
9595 #ifdef __NR_getdents
9596 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9598 struct target_dirent *target_dirp;
9599 struct linux_dirent *dirp;
9600 abi_long count = arg3;
9602 dirp = g_try_malloc(count);
9603 if (!dirp) {
9604 ret = -TARGET_ENOMEM;
9605 goto fail;
9608 ret = get_errno(sys_getdents(arg1, dirp, count));
9609 if (!is_error(ret)) {
9610 struct linux_dirent *de;
9611 struct target_dirent *tde;
9612 int len = ret;
9613 int reclen, treclen;
9614 int count1, tnamelen;
9616 count1 = 0;
9617 de = dirp;
9618 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9619 goto efault;
9620 tde = target_dirp;
9621 while (len > 0) {
9622 reclen = de->d_reclen;
9623 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9624 assert(tnamelen >= 0);
9625 treclen = tnamelen + offsetof(struct target_dirent, d_name);
9626 assert(count1 + treclen <= count);
9627 tde->d_reclen = tswap16(treclen);
9628 tde->d_ino = tswapal(de->d_ino);
9629 tde->d_off = tswapal(de->d_off);
9630 memcpy(tde->d_name, de->d_name, tnamelen);
9631 de = (struct linux_dirent *)((char *)de + reclen);
9632 len -= reclen;
9633 tde = (struct target_dirent *)((char *)tde + treclen);
9634 count1 += treclen;
9636 ret = count1;
9637 unlock_user(target_dirp, arg2, ret);
9639 g_free(dirp);
9641 #else
9643 struct linux_dirent *dirp;
9644 abi_long count = arg3;
9646 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9647 goto efault;
9648 ret = get_errno(sys_getdents(arg1, dirp, count));
9649 if (!is_error(ret)) {
9650 struct linux_dirent *de;
9651 int len = ret;
9652 int reclen;
9653 de = dirp;
9654 while (len > 0) {
9655 reclen = de->d_reclen;
9656 if (reclen > len)
9657 break;
9658 de->d_reclen = tswap16(reclen);
9659 tswapls(&de->d_ino);
9660 tswapls(&de->d_off);
9661 de = (struct linux_dirent *)((char *)de + reclen);
9662 len -= reclen;
9665 unlock_user(dirp, arg2, ret);
9667 #endif
9668 #else
9669 /* Implement getdents in terms of getdents64 */
9671 struct linux_dirent64 *dirp;
9672 abi_long count = arg3;
9674 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9675 if (!dirp) {
9676 goto efault;
9678 ret = get_errno(sys_getdents64(arg1, dirp, count));
9679 if (!is_error(ret)) {
9680 /* Convert the dirent64 structs to target dirent. We do this
9681 * in-place, since we can guarantee that a target_dirent is no
9682 * larger than a dirent64; however this means we have to be
9683 * careful to read everything before writing in the new format.
9685 struct linux_dirent64 *de;
9686 struct target_dirent *tde;
9687 int len = ret;
9688 int tlen = 0;
9690 de = dirp;
9691 tde = (struct target_dirent *)dirp;
9692 while (len > 0) {
9693 int namelen, treclen;
9694 int reclen = de->d_reclen;
9695 uint64_t ino = de->d_ino;
9696 int64_t off = de->d_off;
9697 uint8_t type = de->d_type;
9699 namelen = strlen(de->d_name);
9700 treclen = offsetof(struct target_dirent, d_name)
9701 + namelen + 2;
9702 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9704 memmove(tde->d_name, de->d_name, namelen + 1);
9705 tde->d_ino = tswapal(ino);
9706 tde->d_off = tswapal(off);
9707 tde->d_reclen = tswap16(treclen);
9708 /* The target_dirent type is in what was formerly a padding
9709 * byte at the end of the structure:
9711 *(((char *)tde) + treclen - 1) = type;
9713 de = (struct linux_dirent64 *)((char *)de + reclen);
9714 tde = (struct target_dirent *)((char *)tde + treclen);
9715 len -= reclen;
9716 tlen += treclen;
9718 ret = tlen;
9720 unlock_user(dirp, arg2, ret);
9722 #endif
9723 break;
9724 #endif /* TARGET_NR_getdents */
9725 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9726 case TARGET_NR_getdents64:
9728 struct linux_dirent64 *dirp;
9729 abi_long count = arg3;
9730 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9731 goto efault;
9732 ret = get_errno(sys_getdents64(arg1, dirp, count));
9733 if (!is_error(ret)) {
9734 struct linux_dirent64 *de;
9735 int len = ret;
9736 int reclen;
9737 de = dirp;
9738 while (len > 0) {
9739 reclen = de->d_reclen;
9740 if (reclen > len)
9741 break;
9742 de->d_reclen = tswap16(reclen);
9743 tswap64s((uint64_t *)&de->d_ino);
9744 tswap64s((uint64_t *)&de->d_off);
9745 de = (struct linux_dirent64 *)((char *)de + reclen);
9746 len -= reclen;
9749 unlock_user(dirp, arg2, ret);
9751 break;
9752 #endif /* TARGET_NR_getdents64 */
9753 #if defined(TARGET_NR__newselect)
9754 case TARGET_NR__newselect:
9755 ret = do_select(arg1, arg2, arg3, arg4, arg5);
9756 break;
9757 #endif
9758 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9759 # ifdef TARGET_NR_poll
9760 case TARGET_NR_poll:
9761 # endif
9762 # ifdef TARGET_NR_ppoll
9763 case TARGET_NR_ppoll:
9764 # endif
9766 struct target_pollfd *target_pfd;
9767 unsigned int nfds = arg2;
9768 struct pollfd *pfd;
9769 unsigned int i;
9771 pfd = NULL;
9772 target_pfd = NULL;
9773 if (nfds) {
9774 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9775 ret = -TARGET_EINVAL;
9776 break;
9779 target_pfd = lock_user(VERIFY_WRITE, arg1,
9780 sizeof(struct target_pollfd) * nfds, 1);
9781 if (!target_pfd) {
9782 goto efault;
9785 pfd = alloca(sizeof(struct pollfd) * nfds);
9786 for (i = 0; i < nfds; i++) {
9787 pfd[i].fd = tswap32(target_pfd[i].fd);
9788 pfd[i].events = tswap16(target_pfd[i].events);
9792 switch (num) {
9793 # ifdef TARGET_NR_ppoll
9794 case TARGET_NR_ppoll:
9796 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9797 target_sigset_t *target_set;
9798 sigset_t _set, *set = &_set;
9800 if (arg3) {
9801 if (target_to_host_timespec(timeout_ts, arg3)) {
9802 unlock_user(target_pfd, arg1, 0);
9803 goto efault;
9805 } else {
9806 timeout_ts = NULL;
9809 if (arg4) {
9810 if (arg5 != sizeof(target_sigset_t)) {
9811 unlock_user(target_pfd, arg1, 0);
9812 ret = -TARGET_EINVAL;
9813 break;
9816 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9817 if (!target_set) {
9818 unlock_user(target_pfd, arg1, 0);
9819 goto efault;
9821 target_to_host_sigset(set, target_set);
9822 } else {
9823 set = NULL;
9826 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9827 set, SIGSET_T_SIZE));
9829 if (!is_error(ret) && arg3) {
9830 host_to_target_timespec(arg3, timeout_ts);
9832 if (arg4) {
9833 unlock_user(target_set, arg4, 0);
9835 break;
9837 # endif
9838 # ifdef TARGET_NR_poll
9839 case TARGET_NR_poll:
9841 struct timespec ts, *pts;
9843 if (arg3 >= 0) {
9844 /* Convert ms to secs, ns */
9845 ts.tv_sec = arg3 / 1000;
9846 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9847 pts = &ts;
9848 } else {
9849 /* -ve poll() timeout means "infinite" */
9850 pts = NULL;
9852 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9853 break;
9855 # endif
9856 default:
9857 g_assert_not_reached();
9860 if (!is_error(ret)) {
9861 for(i = 0; i < nfds; i++) {
9862 target_pfd[i].revents = tswap16(pfd[i].revents);
9865 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9867 break;
9868 #endif
9869 case TARGET_NR_flock:
9870 /* NOTE: the flock constant seems to be the same for every
9871 Linux platform */
9872 ret = get_errno(safe_flock(arg1, arg2));
9873 break;
9874 case TARGET_NR_readv:
9876 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9877 if (vec != NULL) {
9878 ret = get_errno(safe_readv(arg1, vec, arg3));
9879 unlock_iovec(vec, arg2, arg3, 1);
9880 } else {
9881 ret = -host_to_target_errno(errno);
9884 break;
9885 case TARGET_NR_writev:
9887 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9888 if (vec != NULL) {
9889 ret = get_errno(safe_writev(arg1, vec, arg3));
9890 unlock_iovec(vec, arg2, arg3, 0);
9891 } else {
9892 ret = -host_to_target_errno(errno);
9895 break;
9896 case TARGET_NR_getsid:
9897 ret = get_errno(getsid(arg1));
9898 break;
9899 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
9900 case TARGET_NR_fdatasync:
9901 ret = get_errno(fdatasync(arg1));
9902 break;
9903 #endif
9904 #ifdef TARGET_NR__sysctl
9905 case TARGET_NR__sysctl:
9906 /* We don't implement this, but ENOTDIR is always a safe
9907 return value. */
9908 ret = -TARGET_ENOTDIR;
9909 break;
9910 #endif
9911 case TARGET_NR_sched_getaffinity:
9913 unsigned int mask_size;
9914 unsigned long *mask;
9917 * sched_getaffinity needs multiples of ulong, so need to take
9918 * care of mismatches between target ulong and host ulong sizes.
9920 if (arg2 & (sizeof(abi_ulong) - 1)) {
9921 ret = -TARGET_EINVAL;
9922 break;
9924 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9926 mask = alloca(mask_size);
9927 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9929 if (!is_error(ret)) {
9930 if (ret > arg2) {
9931 /* More data returned than the caller's buffer will fit.
9932 * This only happens if sizeof(abi_long) < sizeof(long)
9933 * and the caller passed us a buffer holding an odd number
9934 * of abi_longs. If the host kernel is actually using the
9935 * extra 4 bytes then fail EINVAL; otherwise we can just
9936 * ignore them and only copy the interesting part.
9938 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9939 if (numcpus > arg2 * 8) {
9940 ret = -TARGET_EINVAL;
9941 break;
9943 ret = arg2;
9946 if (copy_to_user(arg3, mask, ret)) {
9947 goto efault;
9951 break;
9952 case TARGET_NR_sched_setaffinity:
9954 unsigned int mask_size;
9955 unsigned long *mask;
9958 * sched_setaffinity needs multiples of ulong, so need to take
9959 * care of mismatches between target ulong and host ulong sizes.
9961 if (arg2 & (sizeof(abi_ulong) - 1)) {
9962 ret = -TARGET_EINVAL;
9963 break;
9965 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9967 mask = alloca(mask_size);
9968 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
9969 goto efault;
9971 memcpy(mask, p, arg2);
9972 unlock_user_struct(p, arg2, 0);
9974 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9976 break;
9977 case TARGET_NR_sched_setparam:
9979 struct sched_param *target_schp;
9980 struct sched_param schp;
9982 if (arg2 == 0) {
9983 return -TARGET_EINVAL;
9985 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9986 goto efault;
9987 schp.sched_priority = tswap32(target_schp->sched_priority);
9988 unlock_user_struct(target_schp, arg2, 0);
9989 ret = get_errno(sched_setparam(arg1, &schp));
9991 break;
9992 case TARGET_NR_sched_getparam:
9994 struct sched_param *target_schp;
9995 struct sched_param schp;
9997 if (arg2 == 0) {
9998 return -TARGET_EINVAL;
10000 ret = get_errno(sched_getparam(arg1, &schp));
10001 if (!is_error(ret)) {
10002 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10003 goto efault;
10004 target_schp->sched_priority = tswap32(schp.sched_priority);
10005 unlock_user_struct(target_schp, arg2, 1);
10008 break;
10009 case TARGET_NR_sched_setscheduler:
10011 struct sched_param *target_schp;
10012 struct sched_param schp;
10013 if (arg3 == 0) {
10014 return -TARGET_EINVAL;
10016 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10017 goto efault;
10018 schp.sched_priority = tswap32(target_schp->sched_priority);
10019 unlock_user_struct(target_schp, arg3, 0);
10020 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
10022 break;
10023 case TARGET_NR_sched_getscheduler:
10024 ret = get_errno(sched_getscheduler(arg1));
10025 break;
10026 case TARGET_NR_sched_yield:
10027 ret = get_errno(sched_yield());
10028 break;
10029 case TARGET_NR_sched_get_priority_max:
10030 ret = get_errno(sched_get_priority_max(arg1));
10031 break;
10032 case TARGET_NR_sched_get_priority_min:
10033 ret = get_errno(sched_get_priority_min(arg1));
10034 break;
10035 case TARGET_NR_sched_rr_get_interval:
10037 struct timespec ts;
10038 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10039 if (!is_error(ret)) {
10040 ret = host_to_target_timespec(arg2, &ts);
10043 break;
10044 case TARGET_NR_nanosleep:
10046 struct timespec req, rem;
10047 target_to_host_timespec(&req, arg1);
10048 ret = get_errno(safe_nanosleep(&req, &rem));
10049 if (is_error(ret) && arg2) {
10050 host_to_target_timespec(arg2, &rem);
10053 break;
10054 #ifdef TARGET_NR_query_module
10055 case TARGET_NR_query_module:
10056 goto unimplemented;
10057 #endif
10058 #ifdef TARGET_NR_nfsservctl
10059 case TARGET_NR_nfsservctl:
10060 goto unimplemented;
10061 #endif
10062 case TARGET_NR_prctl:
10063 switch (arg1) {
10064 case PR_GET_PDEATHSIG:
10066 int deathsig;
10067 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10068 if (!is_error(ret) && arg2
10069 && put_user_ual(deathsig, arg2)) {
10070 goto efault;
10072 break;
10074 #ifdef PR_GET_NAME
10075 case PR_GET_NAME:
10077 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10078 if (!name) {
10079 goto efault;
10081 ret = get_errno(prctl(arg1, (unsigned long)name,
10082 arg3, arg4, arg5));
10083 unlock_user(name, arg2, 16);
10084 break;
10086 case PR_SET_NAME:
10088 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10089 if (!name) {
10090 goto efault;
10092 ret = get_errno(prctl(arg1, (unsigned long)name,
10093 arg3, arg4, arg5));
10094 unlock_user(name, arg2, 0);
10095 break;
10097 #endif
10098 default:
10099 /* Most prctl options have no pointer arguments */
10100 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10101 break;
10103 break;
10104 #ifdef TARGET_NR_arch_prctl
10105 case TARGET_NR_arch_prctl:
10106 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10107 ret = do_arch_prctl(cpu_env, arg1, arg2);
10108 break;
10109 #else
10110 goto unimplemented;
10111 #endif
10112 #endif
10113 #ifdef TARGET_NR_pread64
10114 case TARGET_NR_pread64:
10115 if (regpairs_aligned(cpu_env)) {
10116 arg4 = arg5;
10117 arg5 = arg6;
10119 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
10120 goto efault;
10121 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10122 unlock_user(p, arg2, ret);
10123 break;
10124 case TARGET_NR_pwrite64:
10125 if (regpairs_aligned(cpu_env)) {
10126 arg4 = arg5;
10127 arg5 = arg6;
10129 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
10130 goto efault;
10131 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10132 unlock_user(p, arg2, 0);
10133 break;
10134 #endif
    case TARGET_NR_getcwd:
        /* getcwd(2): copy the current working directory string into the
         * guest buffer at arg1 (capacity arg2 bytes). */
        if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
            goto efault;
        ret = get_errno(sys_getcwd1(p, arg2));
        /* On success ret is the string length incl. NUL, so that many
         * bytes are copied back to the guest. */
        unlock_user(p, arg1, ret);
        break;
10141 case TARGET_NR_capget:
10142 case TARGET_NR_capset:
10144 struct target_user_cap_header *target_header;
10145 struct target_user_cap_data *target_data = NULL;
10146 struct __user_cap_header_struct header;
10147 struct __user_cap_data_struct data[2];
10148 struct __user_cap_data_struct *dataptr = NULL;
10149 int i, target_datalen;
10150 int data_items = 1;
10152 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10153 goto efault;
10155 header.version = tswap32(target_header->version);
10156 header.pid = tswap32(target_header->pid);
10158 if (header.version != _LINUX_CAPABILITY_VERSION) {
10159 /* Version 2 and up takes pointer to two user_data structs */
10160 data_items = 2;
10163 target_datalen = sizeof(*target_data) * data_items;
10165 if (arg2) {
10166 if (num == TARGET_NR_capget) {
10167 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10168 } else {
10169 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10171 if (!target_data) {
10172 unlock_user_struct(target_header, arg1, 0);
10173 goto efault;
10176 if (num == TARGET_NR_capset) {
10177 for (i = 0; i < data_items; i++) {
10178 data[i].effective = tswap32(target_data[i].effective);
10179 data[i].permitted = tswap32(target_data[i].permitted);
10180 data[i].inheritable = tswap32(target_data[i].inheritable);
10184 dataptr = data;
10187 if (num == TARGET_NR_capget) {
10188 ret = get_errno(capget(&header, dataptr));
10189 } else {
10190 ret = get_errno(capset(&header, dataptr));
10193 /* The kernel always updates version for both capget and capset */
10194 target_header->version = tswap32(header.version);
10195 unlock_user_struct(target_header, arg1, 1);
10197 if (arg2) {
10198 if (num == TARGET_NR_capget) {
10199 for (i = 0; i < data_items; i++) {
10200 target_data[i].effective = tswap32(data[i].effective);
10201 target_data[i].permitted = tswap32(data[i].permitted);
10202 target_data[i].inheritable = tswap32(data[i].inheritable);
10204 unlock_user(target_data, arg2, target_datalen);
10205 } else {
10206 unlock_user(target_data, arg2, 0);
10209 break;
    case TARGET_NR_sigaltstack:
        /* sigaltstack(2): handled by the signal code, which needs the
         * current guest stack pointer to validate the new alt stack. */
        ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
        break;
10215 #ifdef CONFIG_SENDFILE
10216 case TARGET_NR_sendfile:
10218 off_t *offp = NULL;
10219 off_t off;
10220 if (arg3) {
10221 ret = get_user_sal(off, arg3);
10222 if (is_error(ret)) {
10223 break;
10225 offp = &off;
10227 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10228 if (!is_error(ret) && arg3) {
10229 abi_long ret2 = put_user_sal(off, arg3);
10230 if (is_error(ret2)) {
10231 ret = ret2;
10234 break;
10236 #ifdef TARGET_NR_sendfile64
10237 case TARGET_NR_sendfile64:
10239 off_t *offp = NULL;
10240 off_t off;
10241 if (arg3) {
10242 ret = get_user_s64(off, arg3);
10243 if (is_error(ret)) {
10244 break;
10246 offp = &off;
10248 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10249 if (!is_error(ret) && arg3) {
10250 abi_long ret2 = put_user_s64(off, arg3);
10251 if (is_error(ret2)) {
10252 ret = ret2;
10255 break;
10257 #endif
10258 #else
10259 case TARGET_NR_sendfile:
10260 #ifdef TARGET_NR_sendfile64
10261 case TARGET_NR_sendfile64:
10262 #endif
10263 goto unimplemented;
10264 #endif
10266 #ifdef TARGET_NR_getpmsg
10267 case TARGET_NR_getpmsg:
10268 goto unimplemented;
10269 #endif
10270 #ifdef TARGET_NR_putpmsg
10271 case TARGET_NR_putpmsg:
10272 goto unimplemented;
10273 #endif
#ifdef TARGET_NR_vfork
    case TARGET_NR_vfork:
        /* vfork(2): implemented through the common do_fork() path with the
         * vfork clone flags; no separate stack/TLS arguments are passed. */
        ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
                        0, 0, 0, 0));
        break;
#endif
10280 #ifdef TARGET_NR_ugetrlimit
10281 case TARGET_NR_ugetrlimit:
10283 struct rlimit rlim;
10284 int resource = target_to_host_resource(arg1);
10285 ret = get_errno(getrlimit(resource, &rlim));
10286 if (!is_error(ret)) {
10287 struct target_rlimit *target_rlim;
10288 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10289 goto efault;
10290 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10291 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10292 unlock_user_struct(target_rlim, arg2, 1);
10294 break;
10296 #endif
#ifdef TARGET_NR_truncate64
    case TARGET_NR_truncate64:
        /* truncate64(2): pathname in arg1; the 64-bit length is assembled
         * from the remaining args by target_truncate64() (the register
         * pairing is ABI-dependent). */
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
        unlock_user(p, arg1, 0);
        break;
#endif
#ifdef TARGET_NR_ftruncate64
    case TARGET_NR_ftruncate64:
        /* ftruncate64(2): fd in arg1; 64-bit length decoded from the
         * remaining args by target_ftruncate64() (ABI-dependent pairing). */
        ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
        break;
#endif
#ifdef TARGET_NR_stat64
    case TARGET_NR_stat64:
        /* stat64(2): stat the (sysroot-remapped, via path()) file, then
         * convert the host struct stat to the target stat64 layout at arg2. */
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        break;
#endif
#ifdef TARGET_NR_lstat64
    case TARGET_NR_lstat64:
        /* lstat64(2): like stat64 but does not follow a final symlink. */
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        break;
#endif
#ifdef TARGET_NR_fstat64
    case TARGET_NR_fstat64:
        /* fstat64(2): stat by fd, then marshal into the target layout. */
        ret = get_errno(fstat(arg1, &st));
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        break;
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
#ifdef TARGET_NR_fstatat64
    case TARGET_NR_fstatat64:
#endif
#ifdef TARGET_NR_newfstatat
    case TARGET_NR_newfstatat:
#endif
        /* fstatat(2)/newfstatat: dirfd in arg1, pathname in arg2, output
         * buffer in arg3, flags (e.g. AT_SYMLINK_NOFOLLOW) passed through
         * unchanged in arg4. */
        if (!(p = lock_user_string(arg2)))
            goto efault;
        ret = get_errno(fstatat(arg1, path(p), &st, arg4));
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg3, &st);
        break;
#endif
#ifdef TARGET_NR_lchown
    case TARGET_NR_lchown:
        /* lchown(2), legacy 16-bit-ID variant: guest IDs are widened to
         * host width via low2high*(). */
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        break;
#endif
#ifdef TARGET_NR_getuid
    case TARGET_NR_getuid:
        /* Legacy ID getters: host IDs are narrowed to the target's legacy
         * (16-bit) ID width via high2low*(). */
        ret = get_errno(high2lowuid(getuid()));
        break;
#endif
#ifdef TARGET_NR_getgid
    case TARGET_NR_getgid:
        ret = get_errno(high2lowgid(getgid()));
        break;
#endif
#ifdef TARGET_NR_geteuid
    case TARGET_NR_geteuid:
        ret = get_errno(high2lowuid(geteuid()));
        break;
#endif
#ifdef TARGET_NR_getegid
    case TARGET_NR_getegid:
        ret = get_errno(high2lowgid(getegid()));
        break;
#endif
    case TARGET_NR_setreuid:
        /* setreuid(2)/setregid(2), legacy variants: widen the guest's
         * 16-bit IDs via low2high*() before calling the host. */
        ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
        break;
    case TARGET_NR_setregid:
        ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
        break;
10385 case TARGET_NR_getgroups:
10387 int gidsetsize = arg1;
10388 target_id *target_grouplist;
10389 gid_t *grouplist;
10390 int i;
10392 grouplist = alloca(gidsetsize * sizeof(gid_t));
10393 ret = get_errno(getgroups(gidsetsize, grouplist));
10394 if (gidsetsize == 0)
10395 break;
10396 if (!is_error(ret)) {
10397 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10398 if (!target_grouplist)
10399 goto efault;
10400 for(i = 0;i < ret; i++)
10401 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10402 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10405 break;
10406 case TARGET_NR_setgroups:
10408 int gidsetsize = arg1;
10409 target_id *target_grouplist;
10410 gid_t *grouplist = NULL;
10411 int i;
10412 if (gidsetsize) {
10413 grouplist = alloca(gidsetsize * sizeof(gid_t));
10414 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10415 if (!target_grouplist) {
10416 ret = -TARGET_EFAULT;
10417 goto fail;
10419 for (i = 0; i < gidsetsize; i++) {
10420 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10422 unlock_user(target_grouplist, arg2, 0);
10424 ret = get_errno(setgroups(gidsetsize, grouplist));
10426 break;
    case TARGET_NR_fchown:
        /* fchown(2), legacy variant: widen 16-bit guest IDs. */
        ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
        break;
#if defined(TARGET_NR_fchownat)
    case TARGET_NR_fchownat:
        /* fchownat(2): dirfd arg1, pathname arg2, uid/gid widened,
         * flags in arg5 passed through unchanged. */
        if (!(p = lock_user_string(arg2)))
            goto efault;
        ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
                                 low2highgid(arg4), arg5));
        unlock_user(p, arg2, 0);
        break;
#endif
#ifdef TARGET_NR_setresuid
    case TARGET_NR_setresuid:
        /* setresuid(2): set real, effective and saved UIDs; legacy 16-bit
         * guest IDs are widened via low2highuid(). */
        ret = get_errno(sys_setresuid(low2highuid(arg1),
                                      low2highuid(arg2),
                                      low2highuid(arg3)));
        break;
#endif
10446 #ifdef TARGET_NR_getresuid
10447 case TARGET_NR_getresuid:
10449 uid_t ruid, euid, suid;
10450 ret = get_errno(getresuid(&ruid, &euid, &suid));
10451 if (!is_error(ret)) {
10452 if (put_user_id(high2lowuid(ruid), arg1)
10453 || put_user_id(high2lowuid(euid), arg2)
10454 || put_user_id(high2lowuid(suid), arg3))
10455 goto efault;
10458 break;
10459 #endif
#ifdef TARGET_NR_setresgid
    case TARGET_NR_setresgid:
        /* setresgid(2): set real, effective and saved GIDs; legacy 16-bit
         * guest IDs are widened via low2highgid().
         * Fix: this case was previously guarded by the wrong macro
         * (TARGET_NR_getresgid, a copy-paste from the getter below), which
         * would silently drop the case on a target defining setresgid but
         * not getresgid. */
        ret = get_errno(sys_setresgid(low2highgid(arg1),
                                      low2highgid(arg2),
                                      low2highgid(arg3)));
        break;
#endif
10467 #ifdef TARGET_NR_getresgid
10468 case TARGET_NR_getresgid:
10470 gid_t rgid, egid, sgid;
10471 ret = get_errno(getresgid(&rgid, &egid, &sgid));
10472 if (!is_error(ret)) {
10473 if (put_user_id(high2lowgid(rgid), arg1)
10474 || put_user_id(high2lowgid(egid), arg2)
10475 || put_user_id(high2lowgid(sgid), arg3))
10476 goto efault;
10479 break;
10480 #endif
#ifdef TARGET_NR_chown
    case TARGET_NR_chown:
        /* chown(2), legacy variant: widen 16-bit guest IDs. */
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        break;
#endif
    case TARGET_NR_setuid:
        /* setuid(2)/setgid(2), legacy variants: widen 16-bit guest IDs;
         * sys_* wrappers call the raw syscalls directly. */
        ret = get_errno(sys_setuid(low2highuid(arg1)));
        break;
    case TARGET_NR_setgid:
        ret = get_errno(sys_setgid(low2highgid(arg1)));
        break;
    case TARGET_NR_setfsuid:
        /* setfsuid(2)/setfsgid(2): NOTE(review) these calls return the
         * previous fs ID rather than an errno; the value is passed back
         * to the guest unchanged. */
        ret = get_errno(setfsuid(arg1));
        break;
    case TARGET_NR_setfsgid:
        ret = get_errno(setfsgid(arg1));
        break;
#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        /* 32-bit ID variants: IDs are passed through without the
         * low2high/high2low 16-bit conversion. */
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        break;
#endif
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        ret = get_errno(getuid());
        break;
#endif
10516 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10517 /* Alpha specific */
10518 case TARGET_NR_getxuid:
10520 uid_t euid;
10521 euid=geteuid();
10522 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
10524 ret = get_errno(getuid());
10525 break;
10526 #endif
10527 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10528 /* Alpha specific */
10529 case TARGET_NR_getxgid:
10531 uid_t egid;
10532 egid=getegid();
10533 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
10535 ret = get_errno(getgid());
10536 break;
10537 #endif
10538 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10539 /* Alpha specific */
10540 case TARGET_NR_osf_getsysinfo:
10541 ret = -TARGET_EOPNOTSUPP;
10542 switch (arg1) {
10543 case TARGET_GSI_IEEE_FP_CONTROL:
10545 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
10547 /* Copied from linux ieee_fpcr_to_swcr. */
10548 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
10549 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
10550 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
10551 | SWCR_TRAP_ENABLE_DZE
10552 | SWCR_TRAP_ENABLE_OVF);
10553 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
10554 | SWCR_TRAP_ENABLE_INE);
10555 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
10556 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
10558 if (put_user_u64 (swcr, arg2))
10559 goto efault;
10560 ret = 0;
10562 break;
10564 /* case GSI_IEEE_STATE_AT_SIGNAL:
10565 -- Not implemented in linux kernel.
10566 case GSI_UACPROC:
10567 -- Retrieves current unaligned access state; not much used.
10568 case GSI_PROC_TYPE:
10569 -- Retrieves implver information; surely not used.
10570 case GSI_GET_HWRPB:
10571 -- Grabs a copy of the HWRPB; surely not used.
10574 break;
10575 #endif
10576 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10577 /* Alpha specific */
10578 case TARGET_NR_osf_setsysinfo:
10579 ret = -TARGET_EOPNOTSUPP;
10580 switch (arg1) {
10581 case TARGET_SSI_IEEE_FP_CONTROL:
10583 uint64_t swcr, fpcr, orig_fpcr;
10585 if (get_user_u64 (swcr, arg2)) {
10586 goto efault;
10588 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10589 fpcr = orig_fpcr & FPCR_DYN_MASK;
10591 /* Copied from linux ieee_swcr_to_fpcr. */
10592 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
10593 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
10594 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
10595 | SWCR_TRAP_ENABLE_DZE
10596 | SWCR_TRAP_ENABLE_OVF)) << 48;
10597 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
10598 | SWCR_TRAP_ENABLE_INE)) << 57;
10599 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
10600 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
10602 cpu_alpha_store_fpcr(cpu_env, fpcr);
10603 ret = 0;
10605 break;
10607 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10609 uint64_t exc, fpcr, orig_fpcr;
10610 int si_code;
10612 if (get_user_u64(exc, arg2)) {
10613 goto efault;
10616 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10618 /* We only add to the exception status here. */
10619 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
10621 cpu_alpha_store_fpcr(cpu_env, fpcr);
10622 ret = 0;
10624 /* Old exceptions are not signaled. */
10625 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
10627 /* If any exceptions set by this call,
10628 and are unmasked, send a signal. */
10629 si_code = 0;
10630 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
10631 si_code = TARGET_FPE_FLTRES;
10633 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
10634 si_code = TARGET_FPE_FLTUND;
10636 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
10637 si_code = TARGET_FPE_FLTOVF;
10639 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
10640 si_code = TARGET_FPE_FLTDIV;
10642 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
10643 si_code = TARGET_FPE_FLTINV;
10645 if (si_code != 0) {
10646 target_siginfo_t info;
10647 info.si_signo = SIGFPE;
10648 info.si_errno = 0;
10649 info.si_code = si_code;
10650 info._sifields._sigfault._addr
10651 = ((CPUArchState *)cpu_env)->pc;
10652 queue_signal((CPUArchState *)cpu_env, info.si_signo,
10653 QEMU_SI_FAULT, &info);
10656 break;
10658 /* case SSI_NVPAIRS:
10659 -- Used with SSIN_UACPROC to enable unaligned accesses.
10660 case SSI_IEEE_STATE_AT_SIGNAL:
10661 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10662 -- Not implemented in linux kernel
10665 break;
10666 #endif
10667 #ifdef TARGET_NR_osf_sigprocmask
10668 /* Alpha specific. */
10669 case TARGET_NR_osf_sigprocmask:
10671 abi_ulong mask;
10672 int how;
10673 sigset_t set, oldset;
10675 switch(arg1) {
10676 case TARGET_SIG_BLOCK:
10677 how = SIG_BLOCK;
10678 break;
10679 case TARGET_SIG_UNBLOCK:
10680 how = SIG_UNBLOCK;
10681 break;
10682 case TARGET_SIG_SETMASK:
10683 how = SIG_SETMASK;
10684 break;
10685 default:
10686 ret = -TARGET_EINVAL;
10687 goto fail;
10689 mask = arg2;
10690 target_to_host_old_sigset(&set, &mask);
10691 ret = do_sigprocmask(how, &set, &oldset);
10692 if (!ret) {
10693 host_to_target_old_sigset(&mask, &oldset);
10694 ret = mask;
10697 break;
10698 #endif
#ifdef TARGET_NR_getgid32
    case TARGET_NR_getgid32:
        /* 32-bit ID getters/setters: no 16-bit narrowing or widening. */
        ret = get_errno(getgid());
        break;
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        ret = get_errno(geteuid());
        break;
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        ret = get_errno(getegid());
        break;
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        ret = get_errno(setreuid(arg1, arg2));
        break;
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        ret = get_errno(setregid(arg1, arg2));
        break;
#endif
10725 #ifdef TARGET_NR_getgroups32
10726 case TARGET_NR_getgroups32:
10728 int gidsetsize = arg1;
10729 uint32_t *target_grouplist;
10730 gid_t *grouplist;
10731 int i;
10733 grouplist = alloca(gidsetsize * sizeof(gid_t));
10734 ret = get_errno(getgroups(gidsetsize, grouplist));
10735 if (gidsetsize == 0)
10736 break;
10737 if (!is_error(ret)) {
10738 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10739 if (!target_grouplist) {
10740 ret = -TARGET_EFAULT;
10741 goto fail;
10743 for(i = 0;i < ret; i++)
10744 target_grouplist[i] = tswap32(grouplist[i]);
10745 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10748 break;
10749 #endif
10750 #ifdef TARGET_NR_setgroups32
10751 case TARGET_NR_setgroups32:
10753 int gidsetsize = arg1;
10754 uint32_t *target_grouplist;
10755 gid_t *grouplist;
10756 int i;
10758 grouplist = alloca(gidsetsize * sizeof(gid_t));
10759 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10760 if (!target_grouplist) {
10761 ret = -TARGET_EFAULT;
10762 goto fail;
10764 for(i = 0;i < gidsetsize; i++)
10765 grouplist[i] = tswap32(target_grouplist[i]);
10766 unlock_user(target_grouplist, arg2, 0);
10767 ret = get_errno(setgroups(gidsetsize, grouplist));
10769 break;
10770 #endif
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        /* 32-bit ID variant of fchown(2): IDs passed through unchanged. */
        ret = get_errno(fchown(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        /* 32-bit setresuid(2): real/effective/saved UIDs, no widening. */
        ret = get_errno(sys_setresuid(arg1, arg2, arg3));
        break;
#endif
10781 #ifdef TARGET_NR_getresuid32
10782 case TARGET_NR_getresuid32:
10784 uid_t ruid, euid, suid;
10785 ret = get_errno(getresuid(&ruid, &euid, &suid));
10786 if (!is_error(ret)) {
10787 if (put_user_u32(ruid, arg1)
10788 || put_user_u32(euid, arg2)
10789 || put_user_u32(suid, arg3))
10790 goto efault;
10793 break;
10794 #endif
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        /* 32-bit setresgid(2): real/effective/saved GIDs, no widening. */
        ret = get_errno(sys_setresgid(arg1, arg2, arg3));
        break;
#endif
10800 #ifdef TARGET_NR_getresgid32
10801 case TARGET_NR_getresgid32:
10803 gid_t rgid, egid, sgid;
10804 ret = get_errno(getresgid(&rgid, &egid, &sgid));
10805 if (!is_error(ret)) {
10806 if (put_user_u32(rgid, arg1)
10807 || put_user_u32(egid, arg2)
10808 || put_user_u32(sgid, arg3))
10809 goto efault;
10812 break;
10813 #endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        /* 32-bit chown(2): IDs passed through unchanged. */
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        break;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        /* 32-bit setuid/setgid/setfsuid/setfsgid: no ID conversion. */
        ret = get_errno(sys_setuid(arg1));
        break;
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        ret = get_errno(sys_setgid(arg1));
        break;
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        ret = get_errno(setfsuid(arg1));
        break;
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        ret = get_errno(setfsgid(arg1));
        break;
#endif
    case TARGET_NR_pivot_root:
        /* pivot_root(2) is not emulated; falls through to the common
         * "unimplemented" handler. */
        goto unimplemented;
10845 #ifdef TARGET_NR_mincore
10846 case TARGET_NR_mincore:
10848 void *a;
10849 ret = -TARGET_EFAULT;
10850 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
10851 goto efault;
10852 if (!(p = lock_user_string(arg3)))
10853 goto mincore_fail;
10854 ret = get_errno(mincore(a, arg2, p));
10855 unlock_user(p, arg3, ret);
10856 mincore_fail:
10857 unlock_user(a, arg1, 0);
10859 break;
10860 #endif
10861 #ifdef TARGET_NR_arm_fadvise64_64
10862 case TARGET_NR_arm_fadvise64_64:
10863 /* arm_fadvise64_64 looks like fadvise64_64 but
10864 * with different argument order: fd, advice, offset, len
10865 * rather than the usual fd, offset, len, advice.
10866 * Note that offset and len are both 64-bit so appear as
10867 * pairs of 32-bit registers.
10869 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10870 target_offset64(arg5, arg6), arg2);
10871 ret = -host_to_target_errno(ret);
10872 break;
10873 #endif
10875 #if TARGET_ABI_BITS == 32
10877 #ifdef TARGET_NR_fadvise64_64
10878 case TARGET_NR_fadvise64_64:
10879 /* 6 args: fd, offset (high, low), len (high, low), advice */
10880 if (regpairs_aligned(cpu_env)) {
10881 /* offset is in (3,4), len in (5,6) and advice in 7 */
10882 arg2 = arg3;
10883 arg3 = arg4;
10884 arg4 = arg5;
10885 arg5 = arg6;
10886 arg6 = arg7;
10888 ret = -host_to_target_errno(posix_fadvise(arg1,
10889 target_offset64(arg2, arg3),
10890 target_offset64(arg4, arg5),
10891 arg6));
10892 break;
10893 #endif
10895 #ifdef TARGET_NR_fadvise64
10896 case TARGET_NR_fadvise64:
10897 /* 5 args: fd, offset (high, low), len, advice */
10898 if (regpairs_aligned(cpu_env)) {
10899 /* offset is in (3,4), len in 5 and advice in 6 */
10900 arg2 = arg3;
10901 arg3 = arg4;
10902 arg4 = arg5;
10903 arg5 = arg6;
10905 ret = -host_to_target_errno(posix_fadvise(arg1,
10906 target_offset64(arg2, arg3),
10907 arg4, arg5));
10908 break;
10909 #endif
10911 #else /* not a 32-bit ABI */
10912 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10913 #ifdef TARGET_NR_fadvise64_64
10914 case TARGET_NR_fadvise64_64:
10915 #endif
10916 #ifdef TARGET_NR_fadvise64
10917 case TARGET_NR_fadvise64:
10918 #endif
10919 #ifdef TARGET_S390X
10920 switch (arg4) {
10921 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
10922 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
10923 case 6: arg4 = POSIX_FADV_DONTNEED; break;
10924 case 7: arg4 = POSIX_FADV_NOREUSE; break;
10925 default: break;
10927 #endif
10928 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10929 break;
10930 #endif
10931 #endif /* end of 64-bit ABI fadvise handling */
#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
           This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok. */
        ret = get_errno(0);   /* unconditionally report success */
        break;
#endif
10942 #if TARGET_ABI_BITS == 32
10943 case TARGET_NR_fcntl64:
10945 int cmd;
10946 struct flock64 fl;
10947 from_flock64_fn *copyfrom = copy_from_user_flock64;
10948 to_flock64_fn *copyto = copy_to_user_flock64;
10950 #ifdef TARGET_ARM
10951 if (((CPUARMState *)cpu_env)->eabi) {
10952 copyfrom = copy_from_user_eabi_flock64;
10953 copyto = copy_to_user_eabi_flock64;
10955 #endif
10957 cmd = target_to_host_fcntl_cmd(arg2);
10958 if (cmd == -TARGET_EINVAL) {
10959 ret = cmd;
10960 break;
10963 switch(arg2) {
10964 case TARGET_F_GETLK64:
10965 ret = copyfrom(&fl, arg3);
10966 if (ret) {
10967 break;
10969 ret = get_errno(fcntl(arg1, cmd, &fl));
10970 if (ret == 0) {
10971 ret = copyto(arg3, &fl);
10973 break;
10975 case TARGET_F_SETLK64:
10976 case TARGET_F_SETLKW64:
10977 ret = copyfrom(&fl, arg3);
10978 if (ret) {
10979 break;
10981 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10982 break;
10983 default:
10984 ret = do_fcntl(arg1, arg2, arg3);
10985 break;
10987 break;
10989 #endif
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        ret = 0;
        break;
#endif
#ifdef TARGET_NR_security
    case TARGET_NR_security:
        /* Obsolete placeholder syscall; not emulated. */
        goto unimplemented;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        /* Report the TARGET page size, not the host's. */
        ret = TARGET_PAGE_SIZE;
        break;
#endif
    case TARGET_NR_gettid:
        /* gettid(2): return the host thread id for this guest thread. */
        ret = get_errno(gettid());
        break;
11008 #ifdef TARGET_NR_readahead
11009 case TARGET_NR_readahead:
11010 #if TARGET_ABI_BITS == 32
11011 if (regpairs_aligned(cpu_env)) {
11012 arg2 = arg3;
11013 arg3 = arg4;
11014 arg4 = arg5;
11016 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
11017 #else
11018 ret = get_errno(readahead(arg1, arg2, arg3));
11019 #endif
11020 break;
11021 #endif
11022 #ifdef CONFIG_ATTR
11023 #ifdef TARGET_NR_setxattr
11024 case TARGET_NR_listxattr:
11025 case TARGET_NR_llistxattr:
11027 void *p, *b = 0;
11028 if (arg2) {
11029 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11030 if (!b) {
11031 ret = -TARGET_EFAULT;
11032 break;
11035 p = lock_user_string(arg1);
11036 if (p) {
11037 if (num == TARGET_NR_listxattr) {
11038 ret = get_errno(listxattr(p, b, arg3));
11039 } else {
11040 ret = get_errno(llistxattr(p, b, arg3));
11042 } else {
11043 ret = -TARGET_EFAULT;
11045 unlock_user(p, arg1, 0);
11046 unlock_user(b, arg2, arg3);
11047 break;
11049 case TARGET_NR_flistxattr:
11051 void *b = 0;
11052 if (arg2) {
11053 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11054 if (!b) {
11055 ret = -TARGET_EFAULT;
11056 break;
11059 ret = get_errno(flistxattr(arg1, b, arg3));
11060 unlock_user(b, arg2, arg3);
11061 break;
11063 case TARGET_NR_setxattr:
11064 case TARGET_NR_lsetxattr:
11066 void *p, *n, *v = 0;
11067 if (arg3) {
11068 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11069 if (!v) {
11070 ret = -TARGET_EFAULT;
11071 break;
11074 p = lock_user_string(arg1);
11075 n = lock_user_string(arg2);
11076 if (p && n) {
11077 if (num == TARGET_NR_setxattr) {
11078 ret = get_errno(setxattr(p, n, v, arg4, arg5));
11079 } else {
11080 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11082 } else {
11083 ret = -TARGET_EFAULT;
11085 unlock_user(p, arg1, 0);
11086 unlock_user(n, arg2, 0);
11087 unlock_user(v, arg3, 0);
11089 break;
11090 case TARGET_NR_fsetxattr:
11092 void *n, *v = 0;
11093 if (arg3) {
11094 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11095 if (!v) {
11096 ret = -TARGET_EFAULT;
11097 break;
11100 n = lock_user_string(arg2);
11101 if (n) {
11102 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11103 } else {
11104 ret = -TARGET_EFAULT;
11106 unlock_user(n, arg2, 0);
11107 unlock_user(v, arg3, 0);
11109 break;
11110 case TARGET_NR_getxattr:
11111 case TARGET_NR_lgetxattr:
11113 void *p, *n, *v = 0;
11114 if (arg3) {
11115 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11116 if (!v) {
11117 ret = -TARGET_EFAULT;
11118 break;
11121 p = lock_user_string(arg1);
11122 n = lock_user_string(arg2);
11123 if (p && n) {
11124 if (num == TARGET_NR_getxattr) {
11125 ret = get_errno(getxattr(p, n, v, arg4));
11126 } else {
11127 ret = get_errno(lgetxattr(p, n, v, arg4));
11129 } else {
11130 ret = -TARGET_EFAULT;
11132 unlock_user(p, arg1, 0);
11133 unlock_user(n, arg2, 0);
11134 unlock_user(v, arg3, arg4);
11136 break;
11137 case TARGET_NR_fgetxattr:
11139 void *n, *v = 0;
11140 if (arg3) {
11141 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11142 if (!v) {
11143 ret = -TARGET_EFAULT;
11144 break;
11147 n = lock_user_string(arg2);
11148 if (n) {
11149 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11150 } else {
11151 ret = -TARGET_EFAULT;
11153 unlock_user(n, arg2, 0);
11154 unlock_user(v, arg3, arg4);
11156 break;
11157 case TARGET_NR_removexattr:
11158 case TARGET_NR_lremovexattr:
11160 void *p, *n;
11161 p = lock_user_string(arg1);
11162 n = lock_user_string(arg2);
11163 if (p && n) {
11164 if (num == TARGET_NR_removexattr) {
11165 ret = get_errno(removexattr(p, n));
11166 } else {
11167 ret = get_errno(lremovexattr(p, n));
11169 } else {
11170 ret = -TARGET_EFAULT;
11172 unlock_user(p, arg1, 0);
11173 unlock_user(n, arg2, 0);
11175 break;
11176 case TARGET_NR_fremovexattr:
11178 void *n;
11179 n = lock_user_string(arg2);
11180 if (n) {
11181 ret = get_errno(fremovexattr(arg1, n));
11182 } else {
11183 ret = -TARGET_EFAULT;
11185 unlock_user(n, arg2, 0);
11187 break;
11188 #endif
11189 #endif /* CONFIG_ATTR */
11190 #ifdef TARGET_NR_set_thread_area
11191 case TARGET_NR_set_thread_area:
11192 #if defined(TARGET_MIPS)
11193 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11194 ret = 0;
11195 break;
11196 #elif defined(TARGET_CRIS)
11197 if (arg1 & 0xff)
11198 ret = -TARGET_EINVAL;
11199 else {
11200 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11201 ret = 0;
11203 break;
11204 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11205 ret = do_set_thread_area(cpu_env, arg1);
11206 break;
11207 #elif defined(TARGET_M68K)
11209 TaskState *ts = cpu->opaque;
11210 ts->tp_value = arg1;
11211 ret = 0;
11212 break;
11214 #else
11215 goto unimplemented_nowarn;
11216 #endif
11217 #endif
11218 #ifdef TARGET_NR_get_thread_area
11219 case TARGET_NR_get_thread_area:
11220 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11221 ret = do_get_thread_area(cpu_env, arg1);
11222 break;
11223 #elif defined(TARGET_M68K)
11225 TaskState *ts = cpu->opaque;
11226 ret = ts->tp_value;
11227 break;
11229 #else
11230 goto unimplemented_nowarn;
11231 #endif
11232 #endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        /* Not emulated; ENOSYS without logging a warning. */
        goto unimplemented_nowarn;
#endif
11238 #ifdef TARGET_NR_clock_gettime
11239 case TARGET_NR_clock_gettime:
11241 struct timespec ts;
11242 ret = get_errno(clock_gettime(arg1, &ts));
11243 if (!is_error(ret)) {
11244 host_to_target_timespec(arg2, &ts);
11246 break;
11248 #endif
11249 #ifdef TARGET_NR_clock_getres
11250 case TARGET_NR_clock_getres:
11252 struct timespec ts;
11253 ret = get_errno(clock_getres(arg1, &ts));
11254 if (!is_error(ret)) {
11255 host_to_target_timespec(arg2, &ts);
11257 break;
11259 #endif
11260 #ifdef TARGET_NR_clock_nanosleep
11261 case TARGET_NR_clock_nanosleep:
11263 struct timespec ts;
11264 target_to_host_timespec(&ts, arg3);
11265 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11266 &ts, arg4 ? &ts : NULL));
11267 if (arg4)
11268 host_to_target_timespec(arg4, &ts);
11270 #if defined(TARGET_PPC)
11271 /* clock_nanosleep is odd in that it returns positive errno values.
11272 * On PPC, CR0 bit 3 should be set in such a situation. */
11273 if (ret && ret != -TARGET_ERESTARTSYS) {
11274 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11276 #endif
11277 break;
11279 #endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        /* set_tid_address(2): hand the kernel the host address backing the
         * guest pointer (g2h) so the clear-child-tid write lands in guest
         * memory. */
        ret = get_errno(set_tid_address((int *)g2h(arg1)));
        break;
#endif
    case TARGET_NR_tkill:
        /* tkill(2): translate the guest signal number to the host's
         * before delivery; safe_* wrapper handles restart semantics. */
        ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
        break;
    case TARGET_NR_tgkill:
        /* tgkill(2): thread-group-safe tkill; signal number translated
         * from guest to host numbering. */
        ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
                        target_to_host_signal(arg3)));
        break;
11296 #ifdef TARGET_NR_set_robust_list
11297 case TARGET_NR_set_robust_list:
11298 case TARGET_NR_get_robust_list:
11299 /* The ABI for supporting robust futexes has userspace pass
11300 * the kernel a pointer to a linked list which is updated by
11301 * userspace after the syscall; the list is walked by the kernel
11302 * when the thread exits. Since the linked list in QEMU guest
11303 * memory isn't a valid linked list for the host and we have
11304 * no way to reliably intercept the thread-death event, we can't
11305 * support these. Silently return ENOSYS so that guest userspace
11306 * falls back to a non-robust futex implementation (which should
11307 * be OK except in the corner case of the guest crashing while
11308 * holding a mutex that is shared with another process via
11309 * shared memory).
11311 goto unimplemented_nowarn;
11312 #endif
11314 #if defined(TARGET_NR_utimensat)
11315 case TARGET_NR_utimensat:
11317 struct timespec *tsp, ts[2];
11318 if (!arg3) {
11319 tsp = NULL;
11320 } else {
11321 target_to_host_timespec(ts, arg3);
11322 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11323 tsp = ts;
11325 if (!arg2)
11326 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11327 else {
11328 if (!(p = lock_user_string(arg2))) {
11329 ret = -TARGET_EFAULT;
11330 goto fail;
11332 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11333 unlock_user(p, arg2, 0);
11336 break;
11337 #endif
    case TARGET_NR_futex:
        /* futex(2): all op decoding and timeout conversion is delegated
         * to do_futex(). */
        ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
        break;
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        /* inotify_init(2): returns a new inotify fd; no argument
         * conversion required. */
        ret = get_errno(sys_inotify_init());
        break;
#endif
11346 #ifdef CONFIG_INOTIFY1
11347 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11348 case TARGET_NR_inotify_init1:
11349 ret = get_errno(sys_inotify_init1(arg1));
11350 break;
11351 #endif
11352 #endif
11353 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11354 case TARGET_NR_inotify_add_watch:
11355 p = lock_user_string(arg2);
11356 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11357 unlock_user(p, arg2, 0);
11358 break;
11359 #endif
11360 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11361 case TARGET_NR_inotify_rm_watch:
11362 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
11363 break;
11364 #endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr, *attrp;

            /* NOTE(review): locking the string from arg1 - 1 (one byte
             * before the guest pointer) looks suspicious — confirm this
             * offset is intentional.  p is also not NULL-checked here,
             * although the mq_unlink case below does check it. */
            p = lock_user_string(arg1 - 1);
            if (arg4 != 0) {
                copy_from_user_mq_attr (&posix_mq_attr, arg4);
                attrp = &posix_mq_attr;
            } else {
                attrp = 0;
            }
            ret = get_errno(mq_open(p, arg2, arg3, attrp));
            unlock_user (p, arg1, 0);
        }
        break;

    case TARGET_NR_mq_unlink:
        /* NOTE(review): same arg1 - 1 offset as mq_open — verify. */
        p = lock_user_string(arg1 - 1);
        if (!p) {
            ret = -TARGET_EFAULT;
            break;
        }
        ret = get_errno(mq_unlink(p));
        unlock_user (p, arg1, 0);
        break;

    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            /* NOTE(review): p is not NULL-checked before use. */
            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                /* Copy the (possibly unchanged) timeout back to the guest. */
                host_to_target_timespec(arg5, &ts);
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user (p, arg2, arg3);
        }
        break;

    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            /* NOTE(review): buffer is locked VERIFY_READ although the
             * receive writes into it — confirm this is correct. */
            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                target_to_host_timespec(&ts, arg5);
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                host_to_target_timespec(arg5, &ts);
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user (p, arg2, arg3);
            /* Optional out-parameter: message priority. */
            if (arg4 != 0)
                put_user_u32(prio, arg4);
        }
        break;

    /* Not implemented for now... */
    /* case TARGET_NR_mq_notify: */
    /* break; */

    case TARGET_NR_mq_getsetattr:
        {
            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
            ret = 0;
            if (arg3 != 0) {
                /* Read current attributes into the guest's out struct. */
                ret = mq_getattr(arg1, &posix_mq_attr_out);
                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
            }
            if (arg2 != 0) {
                copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
                /* NOTE(review): OR-ing raw mq_getattr/mq_setattr returns
                 * (-1 on error, not a -errno) into ret bypasses the usual
                 * get_errno() conversion — verify error reporting here. */
                ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
            }
        }
        break;
#endif
#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            /* fd-to-fd duplication; no guest memory to marshal. */
            ret = get_errno(tee(arg1,arg2,arg3,arg4));
        }
        break;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            /* Each offset pointer is optional; copy in only what the
             * guest supplied. */
            if (arg2) {
                if (get_user_u64(loff_in, arg2)) {
                    goto efault;
                }
                ploff_in = &loff_in;
            }
            if (arg4) {
                if (get_user_u64(loff_out, arg4)) {
                    goto efault;
                }
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
            /* The kernel updates the offsets; write them back. */
            if (arg2) {
                if (put_user_u64(loff_in, arg2)) {
                    goto efault;
                }
            }
            if (arg4) {
                if (put_user_u64(loff_out, arg4)) {
                    goto efault;
                }
            }
        }
        break;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                /* lock_iovec failed; report its errno to the guest. */
                ret = -host_to_target_errno(errno);
            }
        }
        break;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        /* Drop any stale fd translation state for this fd number. */
        fd_trans_unregister(ret);
        break;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        /* Only O_NONBLOCK and O_CLOEXEC are translated from target to
         * host values; any other bits are passed through unchanged. */
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        fd_trans_unregister(ret);
        break;
    }
#endif
#endif /* CONFIG_EVENTFD */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32
        /* 32-bit ABIs split each 64-bit offset/length across two
         * registers; reassemble with target_offset64(). */
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        /* MIPS o32 inserts an alignment pad register, shifting the
         * 64-bit halves one slot later than other 32-bit targets. */
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        break;
#endif
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        break;
#endif
#endif
#if defined(TARGET_NR_signalfd4)
    case TARGET_NR_signalfd4:
        /* Sigset conversion and flag handling live in do_signalfd4(). */
        ret = do_signalfd4(arg1, arg2, arg4);
        break;
#endif
#if defined(TARGET_NR_signalfd)
    case TARGET_NR_signalfd:
        /* Legacy signalfd == signalfd4 with flags fixed to 0. */
        ret = do_signalfd4(arg1, arg2, 0);
        break;
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        ret = get_errno(epoll_create(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        ret = get_errno(epoll_create1(arg1));
        break;
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        /* The event struct is only read for ADD/MOD; DEL passes NULL. */
        if (arg4) {
            struct target_epoll_event *target_ep;
            if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                goto efault;
            }
            ep.events = tswap32(target_ep->events);
            /* The epoll_data_t union is just opaque data to the kernel,
             * so we transfer all 64 bits across and need not worry what
             * actual data type it is.
             */
            ep.data.u64 = tswap64(target_ep->data.u64);
            unlock_user_struct(target_ep, arg4, 0);
            epp = &ep;
        }
        ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
        break;
    }
#endif

#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(TARGET_NR_epoll_pwait)
    case TARGET_NR_epoll_pwait:
#endif
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        /* Bound maxevents so the alloca() below stays sane. */
        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
            ret = -TARGET_EINVAL;
            break;
        }

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            goto efault;
        }

        /* Host-format event buffer; converted back after the wait. */
        ep = alloca(maxevents * sizeof(struct epoll_event));

        /* Both syscalls share one body; dispatch on the syscall number
         * for the pwait-only sigset handling. */
        switch (num) {
#if defined(TARGET_NR_epoll_pwait)
        case TARGET_NR_epoll_pwait:
        {
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                if (arg6 != sizeof(target_sigset_t)) {
                    ret = -TARGET_EINVAL;
                    break;
                }

                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    unlock_user(target_ep, arg2, 0);
                    goto efault;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }

            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             set, SIGSET_T_SIZE));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            /* epoll_wait == epoll_pwait with no signal mask. */
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             NULL, 0));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            int i;
            /* ret is the number of ready events; byte-swap each back
             * into the guest's buffer. */
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
        }
        /* NOTE(review): on the error path ret is a negative errno, so
         * ret * sizeof(...) is a bogus length here — confirm unlock_user
         * tolerates this or that the length should be 0 on error. */
        unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
        break;
    }
#endif
#endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);
        /* new limit is optional: NULL means "query only". */
        if (arg3) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                goto efault;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        /* Copy the previous limit back only on success and if requested. */
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                goto efault;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        break;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        break;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            /* Bad guest address: synthesize a SIGSEGV for the guest. */
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo,
                         QEMU_SI_FAULT, &info);
            ret = 0xdeadbeef;

        }
        /* Non-atomic compare-and-swap: store arg1 if *arg6 == arg2,
         * returning the value previously read. */
        if (mem_value == arg2)
            put_user_u32(arg1, arg6);
        ret = mem_value;
        break;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
    {
        /* Like the kernel implementation and the qemu arm barrier, no-op this? */
        ret = 0;
        break;
    }
#endif
#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        /* Host timer handles live in the g_posix_timers table; the guest
         * receives an index tagged with TIMER_MAGIC, not the raw handle. */
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers  + timer_index;

            /* sigevent is optional; NULL selects the kernel default. */
            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    break;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                /* Hand the tagged table index back as the guest timer id. */
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    goto efault;
                }
            }
        }
        break;
    }
#endif
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            /* new_value is mandatory for timer_settime. */
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            target_to_host_itimerspec(&hspec_new, arg3);
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            /* Fix: the previous timer setting must be copied back to the
             * guest's old_value pointer (arg4), not to arg2, which is the
             * flags argument.  old_value is optional, so only copy when
             * the guest passed a non-NULL pointer, and report EFAULT if
             * the copy-out fails. */
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                goto efault;
            }
        }
        break;
    }
#endif
#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            /* Look up the host timer behind the guest's tagged id. */
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        break;
    }
#endif

#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        /* NOTE(review): ret here is an overrun count, not a file
         * descriptor; calling fd_trans_unregister(ret) looks like a
         * copy-paste from an fd-returning case — verify this is safe. */
        fd_trans_unregister(ret);
        break;
    }
#endif

#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            /* Free the table slot so next_free_host_timer can reuse it. */
            g_posix_timers[timerid] = 0;
        }
        break;
    }
#endif
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        /* Flags go through fcntl_flags_tbl to map target O_* bits to
         * host values (TFD_NONBLOCK/TFD_CLOEXEC alias O_* flags). */
        ret = get_errno(timerfd_create(arg1,
                        target_to_host_bitmask(arg2, fcntl_flags_tbl)));
        break;
#endif

#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
                goto efault;
            }
        }
        break;
#endif

#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
        {
            struct itimerspec its_new, its_old, *p_new;

            /* new_value is optional here (unlike timer_settime). */
            if (arg3) {
                if (target_to_host_itimerspec(&its_new, arg3)) {
                    goto efault;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            /* old_value copy-out is optional as well. */
            if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
                goto efault;
            }
        }
        break;
#endif

#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        ret = get_errno(ioprio_get(arg1, arg2));
        break;
#endif

#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        ret = get_errno(ioprio_set(arg1, arg2, arg3));
        break;
#endif

#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        ret = get_errno(setns(arg1, arg2));
        break;
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        ret = get_errno(unshare(arg1));
        break;
#endif
    default:
    unimplemented:
        /* Unknown/unhandled syscall number: log it and fail ENOSYS. */
        gemu_log("qemu: Unsupported syscall: %d\n", num);
#if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
    /* Target for cases that deliberately fail ENOSYS without logging. */
    unimplemented_nowarn:
#endif
        ret = -TARGET_ENOSYS;
        break;
    }
/* Common exit path: trace/strace the result and return it. */
fail:
#ifdef DEBUG
    gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
#endif
    if(do_strace)
        print_syscall_ret(num, ret);
    trace_guest_user_syscall_ret(cpu, num, ret);
    return ret;
/* Shared bad-guest-address exit used by the cases above. */
efault:
    ret = -TARGET_EFAULT;
    goto fail;