Merge remote-tracking branch 'remotes/kraxel/tags/pull-input-20160928-1' into staging
[qemu/kevin.git] / linux-user / syscall.c
blob0815f3096549e81cfc3b67b2de61d9cfe58db00e
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #ifdef __ia64__
39 int __clone2(int (*fn)(void *), void *child_stack_base,
40 size_t stack_size, int flags, void *arg, ...);
41 #endif
42 #include <sys/socket.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <sys/poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/ip.h>
55 #include <netinet/tcp.h>
56 #include <linux/wireless.h>
57 #include <linux/icmp.h>
58 #include "qemu-common.h"
59 #ifdef CONFIG_TIMERFD
60 #include <sys/timerfd.h>
61 #endif
62 #ifdef TARGET_GPROF
63 #include <sys/gmon.h>
64 #endif
65 #ifdef CONFIG_EVENTFD
66 #include <sys/eventfd.h>
67 #endif
68 #ifdef CONFIG_EPOLL
69 #include <sys/epoll.h>
70 #endif
71 #ifdef CONFIG_ATTR
72 #include "qemu/xattr.h"
73 #endif
74 #ifdef CONFIG_SENDFILE
75 #include <sys/sendfile.h>
76 #endif
78 #define termios host_termios
79 #define winsize host_winsize
80 #define termio host_termio
81 #define sgttyb host_sgttyb /* same as target */
82 #define tchars host_tchars /* same as target */
83 #define ltchars host_ltchars /* same as target */
85 #include <linux/termios.h>
86 #include <linux/unistd.h>
87 #include <linux/cdrom.h>
88 #include <linux/hdreg.h>
89 #include <linux/soundcard.h>
90 #include <linux/kd.h>
91 #include <linux/mtio.h>
92 #include <linux/fs.h>
93 #if defined(CONFIG_FIEMAP)
94 #include <linux/fiemap.h>
95 #endif
96 #include <linux/fb.h>
97 #include <linux/vt.h>
98 #include <linux/dm-ioctl.h>
99 #include <linux/reboot.h>
100 #include <linux/route.h>
101 #include <linux/filter.h>
102 #include <linux/blkpg.h>
103 #include <netpacket/packet.h>
104 #include <linux/netlink.h>
105 #ifdef CONFIG_RTNETLINK
106 #include <linux/rtnetlink.h>
107 #include <linux/if_bridge.h>
108 #endif
109 #include <linux/audit.h>
110 #include "linux_loop.h"
111 #include "uname.h"
113 #include "qemu.h"
115 #ifndef CLONE_IO
116 #define CLONE_IO 0x80000000 /* Clone io context */
117 #endif
119 /* We can't directly call the host clone syscall, because this will
120 * badly confuse libc (breaking mutexes, for example). So we must
121 * divide clone flags into:
122 * * flag combinations that look like pthread_create()
123 * * flag combinations that look like fork()
124 * * flags we can implement within QEMU itself
125 * * flags we can't support and will return an error for
127 /* For thread creation, all these flags must be present; for
128 * fork, none must be present.
130 #define CLONE_THREAD_FLAGS \
131 (CLONE_VM | CLONE_FS | CLONE_FILES | \
132 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
134 /* These flags are ignored:
135 * CLONE_DETACHED is now ignored by the kernel;
136 * CLONE_IO is just an optimisation hint to the I/O scheduler
138 #define CLONE_IGNORED_FLAGS \
139 (CLONE_DETACHED | CLONE_IO)
141 /* Flags for fork which we can implement within QEMU itself */
142 #define CLONE_OPTIONAL_FORK_FLAGS \
143 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
144 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
146 /* Flags for thread creation which we can implement within QEMU itself */
147 #define CLONE_OPTIONAL_THREAD_FLAGS \
148 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
149 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
151 #define CLONE_INVALID_FORK_FLAGS \
152 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
154 #define CLONE_INVALID_THREAD_FLAGS \
155 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
156 CLONE_IGNORED_FLAGS))
158 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
159 * have almost all been allocated. We cannot support any of
160 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
161 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
162 * The checks against the invalid thread masks above will catch these.
163 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
166 //#define DEBUG
167 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
168 * once. This exercises the codepaths for restart.
170 //#define DEBUG_ERESTARTSYS
172 //#include <linux/msdos_fs.h>
173 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
174 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
176 #undef _syscall0
177 #undef _syscall1
178 #undef _syscall2
179 #undef _syscall3
180 #undef _syscall4
181 #undef _syscall5
182 #undef _syscall6
/* Local replacements for the libc _syscallN() macros: each expands to a
 * static function that enters the numbered host syscall directly via
 * syscall(2), bypassing any libc wrapper (and libc errno caching quirks).
 */
#define _syscall0(type, name)                   \
static type name(void)                          \
{                                               \
    return syscall(__NR_##name);                \
}

#define _syscall1(type, name, type1, arg1)      \
static type name(type1 arg1)                    \
{                                               \
    return syscall(__NR_##name, arg1);          \
}

#define _syscall2(type, name, type1, arg1, type2, arg2) \
static type name(type1 arg1, type2 arg2)        \
{                                               \
    return syscall(__NR_##name, arg1, arg2);    \
}

#define _syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type name(type1 arg1, type2 arg2, type3 arg3) \
{                                               \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
                  type4, arg4)                  \
static type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{                                               \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
                  type4, arg4, type5, arg5)     \
static type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
                 type5 arg5)                    \
{                                               \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define _syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
                  type4, arg4, type5, arg5, type6, arg6) \
static type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
                 type5 arg5, type6 arg6)        \
{                                               \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
231 #define __NR_sys_uname __NR_uname
232 #define __NR_sys_getcwd1 __NR_getcwd
233 #define __NR_sys_getdents __NR_getdents
234 #define __NR_sys_getdents64 __NR_getdents64
235 #define __NR_sys_getpriority __NR_getpriority
236 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
237 #define __NR_sys_syslog __NR_syslog
238 #define __NR_sys_futex __NR_futex
239 #define __NR_sys_inotify_init __NR_inotify_init
240 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
241 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
243 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
244 defined(__s390x__)
245 #define __NR__llseek __NR_lseek
246 #endif
248 /* Newer kernel ports have llseek() instead of _llseek() */
249 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
250 #define TARGET_NR__llseek TARGET_NR_llseek
251 #endif
#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* Host libc provides no gettid() wrapper; fall back to a stub.  This
 * replacement must return a host errno, not a target one.
 */
static int gettid(void)
{
    return -ENOSYS;
}
#endif
262 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
263 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
264 #endif
265 #if !defined(__NR_getdents) || \
266 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
267 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
268 #endif
269 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
270 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
271 loff_t *, res, uint, wh);
272 #endif
273 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
274 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
275 #ifdef __NR_exit_group
276 _syscall1(int,exit_group,int,error_code)
277 #endif
278 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
279 _syscall1(int,set_tid_address,int *,tidptr)
280 #endif
281 #if defined(TARGET_NR_futex) && defined(__NR_futex)
282 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
283 const struct timespec *,timeout,int *,uaddr2,int,val3)
284 #endif
285 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
286 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
287 unsigned long *, user_mask_ptr);
288 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
289 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
290 unsigned long *, user_mask_ptr);
291 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
292 void *, arg);
293 _syscall2(int, capget, struct __user_cap_header_struct *, header,
294 struct __user_cap_data_struct *, data);
295 _syscall2(int, capset, struct __user_cap_header_struct *, header,
296 struct __user_cap_data_struct *, data);
297 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
298 _syscall2(int, ioprio_get, int, which, int, who)
299 #endif
300 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
301 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
302 #endif
303 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
304 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
305 #endif
307 static bitmask_transtbl fcntl_flags_tbl[] = {
308 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
309 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
310 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
311 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
312 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
313 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
314 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
315 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
316 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
317 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
318 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
319 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
320 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
321 #if defined(O_DIRECT)
322 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
323 #endif
324 #if defined(O_NOATIME)
325 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
326 #endif
327 #if defined(O_CLOEXEC)
328 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
329 #endif
330 #if defined(O_PATH)
331 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
332 #endif
333 /* Don't terminate the list prematurely on 64-bit host+guest. */
334 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
335 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
336 #endif
337 { 0, 0, 0, 0 }
340 enum {
341 QEMU_IFLA_BR_UNSPEC,
342 QEMU_IFLA_BR_FORWARD_DELAY,
343 QEMU_IFLA_BR_HELLO_TIME,
344 QEMU_IFLA_BR_MAX_AGE,
345 QEMU_IFLA_BR_AGEING_TIME,
346 QEMU_IFLA_BR_STP_STATE,
347 QEMU_IFLA_BR_PRIORITY,
348 QEMU_IFLA_BR_VLAN_FILTERING,
349 QEMU_IFLA_BR_VLAN_PROTOCOL,
350 QEMU_IFLA_BR_GROUP_FWD_MASK,
351 QEMU_IFLA_BR_ROOT_ID,
352 QEMU_IFLA_BR_BRIDGE_ID,
353 QEMU_IFLA_BR_ROOT_PORT,
354 QEMU_IFLA_BR_ROOT_PATH_COST,
355 QEMU_IFLA_BR_TOPOLOGY_CHANGE,
356 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
357 QEMU_IFLA_BR_HELLO_TIMER,
358 QEMU_IFLA_BR_TCN_TIMER,
359 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER,
360 QEMU_IFLA_BR_GC_TIMER,
361 QEMU_IFLA_BR_GROUP_ADDR,
362 QEMU_IFLA_BR_FDB_FLUSH,
363 QEMU_IFLA_BR_MCAST_ROUTER,
364 QEMU_IFLA_BR_MCAST_SNOOPING,
365 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR,
366 QEMU_IFLA_BR_MCAST_QUERIER,
367 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY,
368 QEMU_IFLA_BR_MCAST_HASH_MAX,
369 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT,
370 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT,
371 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL,
372 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL,
373 QEMU_IFLA_BR_MCAST_QUERIER_INTVL,
374 QEMU_IFLA_BR_MCAST_QUERY_INTVL,
375 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
376 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
377 QEMU_IFLA_BR_NF_CALL_IPTABLES,
378 QEMU_IFLA_BR_NF_CALL_IP6TABLES,
379 QEMU_IFLA_BR_NF_CALL_ARPTABLES,
380 QEMU_IFLA_BR_VLAN_DEFAULT_PVID,
381 QEMU_IFLA_BR_PAD,
382 QEMU_IFLA_BR_VLAN_STATS_ENABLED,
383 QEMU_IFLA_BR_MCAST_STATS_ENABLED,
384 QEMU___IFLA_BR_MAX,
387 enum {
388 QEMU_IFLA_UNSPEC,
389 QEMU_IFLA_ADDRESS,
390 QEMU_IFLA_BROADCAST,
391 QEMU_IFLA_IFNAME,
392 QEMU_IFLA_MTU,
393 QEMU_IFLA_LINK,
394 QEMU_IFLA_QDISC,
395 QEMU_IFLA_STATS,
396 QEMU_IFLA_COST,
397 QEMU_IFLA_PRIORITY,
398 QEMU_IFLA_MASTER,
399 QEMU_IFLA_WIRELESS,
400 QEMU_IFLA_PROTINFO,
401 QEMU_IFLA_TXQLEN,
402 QEMU_IFLA_MAP,
403 QEMU_IFLA_WEIGHT,
404 QEMU_IFLA_OPERSTATE,
405 QEMU_IFLA_LINKMODE,
406 QEMU_IFLA_LINKINFO,
407 QEMU_IFLA_NET_NS_PID,
408 QEMU_IFLA_IFALIAS,
409 QEMU_IFLA_NUM_VF,
410 QEMU_IFLA_VFINFO_LIST,
411 QEMU_IFLA_STATS64,
412 QEMU_IFLA_VF_PORTS,
413 QEMU_IFLA_PORT_SELF,
414 QEMU_IFLA_AF_SPEC,
415 QEMU_IFLA_GROUP,
416 QEMU_IFLA_NET_NS_FD,
417 QEMU_IFLA_EXT_MASK,
418 QEMU_IFLA_PROMISCUITY,
419 QEMU_IFLA_NUM_TX_QUEUES,
420 QEMU_IFLA_NUM_RX_QUEUES,
421 QEMU_IFLA_CARRIER,
422 QEMU_IFLA_PHYS_PORT_ID,
423 QEMU_IFLA_CARRIER_CHANGES,
424 QEMU_IFLA_PHYS_SWITCH_ID,
425 QEMU_IFLA_LINK_NETNSID,
426 QEMU_IFLA_PHYS_PORT_NAME,
427 QEMU_IFLA_PROTO_DOWN,
428 QEMU_IFLA_GSO_MAX_SEGS,
429 QEMU_IFLA_GSO_MAX_SIZE,
430 QEMU_IFLA_PAD,
431 QEMU_IFLA_XDP,
432 QEMU___IFLA_MAX
435 enum {
436 QEMU_IFLA_BRPORT_UNSPEC,
437 QEMU_IFLA_BRPORT_STATE,
438 QEMU_IFLA_BRPORT_PRIORITY,
439 QEMU_IFLA_BRPORT_COST,
440 QEMU_IFLA_BRPORT_MODE,
441 QEMU_IFLA_BRPORT_GUARD,
442 QEMU_IFLA_BRPORT_PROTECT,
443 QEMU_IFLA_BRPORT_FAST_LEAVE,
444 QEMU_IFLA_BRPORT_LEARNING,
445 QEMU_IFLA_BRPORT_UNICAST_FLOOD,
446 QEMU_IFLA_BRPORT_PROXYARP,
447 QEMU_IFLA_BRPORT_LEARNING_SYNC,
448 QEMU_IFLA_BRPORT_PROXYARP_WIFI,
449 QEMU_IFLA_BRPORT_ROOT_ID,
450 QEMU_IFLA_BRPORT_BRIDGE_ID,
451 QEMU_IFLA_BRPORT_DESIGNATED_PORT,
452 QEMU_IFLA_BRPORT_DESIGNATED_COST,
453 QEMU_IFLA_BRPORT_ID,
454 QEMU_IFLA_BRPORT_NO,
455 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
456 QEMU_IFLA_BRPORT_CONFIG_PENDING,
457 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER,
458 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER,
459 QEMU_IFLA_BRPORT_HOLD_TIMER,
460 QEMU_IFLA_BRPORT_FLUSH,
461 QEMU_IFLA_BRPORT_MULTICAST_ROUTER,
462 QEMU_IFLA_BRPORT_PAD,
463 QEMU___IFLA_BRPORT_MAX
466 enum {
467 QEMU_IFLA_INFO_UNSPEC,
468 QEMU_IFLA_INFO_KIND,
469 QEMU_IFLA_INFO_DATA,
470 QEMU_IFLA_INFO_XSTATS,
471 QEMU_IFLA_INFO_SLAVE_KIND,
472 QEMU_IFLA_INFO_SLAVE_DATA,
473 QEMU___IFLA_INFO_MAX,
476 enum {
477 QEMU_IFLA_INET_UNSPEC,
478 QEMU_IFLA_INET_CONF,
479 QEMU___IFLA_INET_MAX,
482 enum {
483 QEMU_IFLA_INET6_UNSPEC,
484 QEMU_IFLA_INET6_FLAGS,
485 QEMU_IFLA_INET6_CONF,
486 QEMU_IFLA_INET6_STATS,
487 QEMU_IFLA_INET6_MCAST,
488 QEMU_IFLA_INET6_CACHEINFO,
489 QEMU_IFLA_INET6_ICMP6STATS,
490 QEMU_IFLA_INET6_TOKEN,
491 QEMU_IFLA_INET6_ADDR_GEN_MODE,
492 QEMU___IFLA_INET6_MAX
495 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
496 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
497 typedef struct TargetFdTrans {
498 TargetFdDataFunc host_to_target_data;
499 TargetFdDataFunc target_to_host_data;
500 TargetFdAddrFunc target_to_host_addr;
501 } TargetFdTrans;
503 static TargetFdTrans **target_fd_trans;
505 static unsigned int target_fd_max;
507 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
509 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
510 return target_fd_trans[fd]->target_to_host_data;
512 return NULL;
515 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
517 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
518 return target_fd_trans[fd]->host_to_target_data;
520 return NULL;
523 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
525 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
526 return target_fd_trans[fd]->target_to_host_addr;
528 return NULL;
531 static void fd_trans_register(int fd, TargetFdTrans *trans)
533 unsigned int oldmax;
535 if (fd >= target_fd_max) {
536 oldmax = target_fd_max;
537 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
538 target_fd_trans = g_renew(TargetFdTrans *,
539 target_fd_trans, target_fd_max);
540 memset((void *)(target_fd_trans + oldmax), 0,
541 (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
543 target_fd_trans[fd] = trans;
546 static void fd_trans_unregister(int fd)
548 if (fd >= 0 && fd < target_fd_max) {
549 target_fd_trans[fd] = NULL;
553 static void fd_trans_dup(int oldfd, int newfd)
555 fd_trans_unregister(newfd);
556 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
557 fd_trans_register(newfd, target_fd_trans[oldfd]);
/* getcwd() variant matching the syscall convention: on success return the
 * length of the path string *including* the terminating NUL; on failure
 * return -1 with errno already set by getcwd().
 */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        return -1;
    }
    return strlen(buf) + 1;
}
#ifdef TARGET_NR_utimensat
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int, sys_utimensat, int, dirfd, const char *, pathname,
          const struct timespec *, tsp, int, flags)
#else
/* Host kernel predates utimensat: fail the way the syscall would. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

/* Thin pass-through wrappers so the syscall dispatch code can use a
 * uniform sys_* naming scheme for the inotify family.
 */
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
    return inotify_init();
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return inotify_add_watch(fd, pathname, mask);
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return inotify_rm_watch(fd, wd);
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
    return inotify_init1(flags);
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying
 * syscall, so declare the kernel layout (two 64-bit fields) explicitly.
 */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};

_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, };

/* Claim and return the index of a free slot in g_posix_timers, or -1 if
 * all slots are in use.
 */
static inline int next_free_host_timer(void)
{
    int slot;
    /* FIXME: Does finding the next free slot require a lock? */
    for (slot = 0; slot < ARRAY_SIZE(g_posix_timers); slot++) {
        if (g_posix_timers[slot] == 0) {
            /* Mark the slot taken with a non-zero placeholder. */
            g_posix_timers[slot] = (timer_t) 1;
            return slot;
        }
    }
    return -1;
}
#endif
/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env)
{
    return ((CPUARMState *)cpu_env)->eabi == 1;
}
#elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
672 #define ERRNO_TABLE_SIZE 1200
674 /* target_to_host_errno_table[] is initialized from
675 * host_to_target_errno_table[] in syscall_init(). */
676 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
680 * This list is the union of errno values overridden in asm-<arch>/errno.h
681 * minus the errnos that are not actually generic to all archs.
683 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
684 [EAGAIN] = TARGET_EAGAIN,
685 [EIDRM] = TARGET_EIDRM,
686 [ECHRNG] = TARGET_ECHRNG,
687 [EL2NSYNC] = TARGET_EL2NSYNC,
688 [EL3HLT] = TARGET_EL3HLT,
689 [EL3RST] = TARGET_EL3RST,
690 [ELNRNG] = TARGET_ELNRNG,
691 [EUNATCH] = TARGET_EUNATCH,
692 [ENOCSI] = TARGET_ENOCSI,
693 [EL2HLT] = TARGET_EL2HLT,
694 [EDEADLK] = TARGET_EDEADLK,
695 [ENOLCK] = TARGET_ENOLCK,
696 [EBADE] = TARGET_EBADE,
697 [EBADR] = TARGET_EBADR,
698 [EXFULL] = TARGET_EXFULL,
699 [ENOANO] = TARGET_ENOANO,
700 [EBADRQC] = TARGET_EBADRQC,
701 [EBADSLT] = TARGET_EBADSLT,
702 [EBFONT] = TARGET_EBFONT,
703 [ENOSTR] = TARGET_ENOSTR,
704 [ENODATA] = TARGET_ENODATA,
705 [ETIME] = TARGET_ETIME,
706 [ENOSR] = TARGET_ENOSR,
707 [ENONET] = TARGET_ENONET,
708 [ENOPKG] = TARGET_ENOPKG,
709 [EREMOTE] = TARGET_EREMOTE,
710 [ENOLINK] = TARGET_ENOLINK,
711 [EADV] = TARGET_EADV,
712 [ESRMNT] = TARGET_ESRMNT,
713 [ECOMM] = TARGET_ECOMM,
714 [EPROTO] = TARGET_EPROTO,
715 [EDOTDOT] = TARGET_EDOTDOT,
716 [EMULTIHOP] = TARGET_EMULTIHOP,
717 [EBADMSG] = TARGET_EBADMSG,
718 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
719 [EOVERFLOW] = TARGET_EOVERFLOW,
720 [ENOTUNIQ] = TARGET_ENOTUNIQ,
721 [EBADFD] = TARGET_EBADFD,
722 [EREMCHG] = TARGET_EREMCHG,
723 [ELIBACC] = TARGET_ELIBACC,
724 [ELIBBAD] = TARGET_ELIBBAD,
725 [ELIBSCN] = TARGET_ELIBSCN,
726 [ELIBMAX] = TARGET_ELIBMAX,
727 [ELIBEXEC] = TARGET_ELIBEXEC,
728 [EILSEQ] = TARGET_EILSEQ,
729 [ENOSYS] = TARGET_ENOSYS,
730 [ELOOP] = TARGET_ELOOP,
731 [ERESTART] = TARGET_ERESTART,
732 [ESTRPIPE] = TARGET_ESTRPIPE,
733 [ENOTEMPTY] = TARGET_ENOTEMPTY,
734 [EUSERS] = TARGET_EUSERS,
735 [ENOTSOCK] = TARGET_ENOTSOCK,
736 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
737 [EMSGSIZE] = TARGET_EMSGSIZE,
738 [EPROTOTYPE] = TARGET_EPROTOTYPE,
739 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
740 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
741 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
742 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
743 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
744 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
745 [EADDRINUSE] = TARGET_EADDRINUSE,
746 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
747 [ENETDOWN] = TARGET_ENETDOWN,
748 [ENETUNREACH] = TARGET_ENETUNREACH,
749 [ENETRESET] = TARGET_ENETRESET,
750 [ECONNABORTED] = TARGET_ECONNABORTED,
751 [ECONNRESET] = TARGET_ECONNRESET,
752 [ENOBUFS] = TARGET_ENOBUFS,
753 [EISCONN] = TARGET_EISCONN,
754 [ENOTCONN] = TARGET_ENOTCONN,
755 [EUCLEAN] = TARGET_EUCLEAN,
756 [ENOTNAM] = TARGET_ENOTNAM,
757 [ENAVAIL] = TARGET_ENAVAIL,
758 [EISNAM] = TARGET_EISNAM,
759 [EREMOTEIO] = TARGET_EREMOTEIO,
760 [EDQUOT] = TARGET_EDQUOT,
761 [ESHUTDOWN] = TARGET_ESHUTDOWN,
762 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
763 [ETIMEDOUT] = TARGET_ETIMEDOUT,
764 [ECONNREFUSED] = TARGET_ECONNREFUSED,
765 [EHOSTDOWN] = TARGET_EHOSTDOWN,
766 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
767 [EALREADY] = TARGET_EALREADY,
768 [EINPROGRESS] = TARGET_EINPROGRESS,
769 [ESTALE] = TARGET_ESTALE,
770 [ECANCELED] = TARGET_ECANCELED,
771 [ENOMEDIUM] = TARGET_ENOMEDIUM,
772 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
773 #ifdef ENOKEY
774 [ENOKEY] = TARGET_ENOKEY,
775 #endif
776 #ifdef EKEYEXPIRED
777 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
778 #endif
779 #ifdef EKEYREVOKED
780 [EKEYREVOKED] = TARGET_EKEYREVOKED,
781 #endif
782 #ifdef EKEYREJECTED
783 [EKEYREJECTED] = TARGET_EKEYREJECTED,
784 #endif
785 #ifdef EOWNERDEAD
786 [EOWNERDEAD] = TARGET_EOWNERDEAD,
787 #endif
788 #ifdef ENOTRECOVERABLE
789 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
790 #endif
793 static inline int host_to_target_errno(int err)
795 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
796 host_to_target_errno_table[err]) {
797 return host_to_target_errno_table[err];
799 return err;
802 static inline int target_to_host_errno(int err)
804 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
805 target_to_host_errno_table[err]) {
806 return target_to_host_errno_table[err];
808 return err;
811 static inline abi_long get_errno(abi_long ret)
813 if (ret == -1)
814 return -host_to_target_errno(errno);
815 else
816 return ret;
819 static inline int is_error(abi_long ret)
821 return (abi_ulong)ret >= (abi_ulong)(-4096);
824 const char *target_strerror(int err)
826 if (err == TARGET_ERESTARTSYS) {
827 return "To be restarted";
829 if (err == TARGET_QEMU_ESIGRETURN) {
830 return "Successful exit from sigreturn";
833 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
834 return NULL;
836 return strerror(target_to_host_errno(err));
/* safe_syscallN() macros: like _syscallN() but each generated function
 * forwards through safe_syscall(), the assembly helper that makes the
 * syscall restartable with respect to guest signal delivery.
 */
#define safe_syscall0(type, name)               \
static type safe_##name(void)                   \
{                                               \
    return safe_syscall(__NR_##name);           \
}

#define safe_syscall1(type, name, type1, arg1)  \
static type safe_##name(type1 arg1)             \
{                                               \
    return safe_syscall(__NR_##name, arg1);     \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{                                               \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{                                               \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
                      type4, arg4)              \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{                                               \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
                      type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
                        type5 arg5)             \
{                                               \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
                      type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
                        type5 arg5, type6 arg6) \
{                                               \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
886 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
887 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
888 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
889 int, flags, mode_t, mode)
890 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
891 struct rusage *, rusage)
892 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
893 int, options, struct rusage *, rusage)
894 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
895 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
896 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
897 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
898 struct timespec *, tsp, const sigset_t *, sigmask,
899 size_t, sigsetsize)
900 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
901 int, maxevents, int, timeout, const sigset_t *, sigmask,
902 size_t, sigsetsize)
903 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
904 const struct timespec *,timeout,int *,uaddr2,int,val3)
905 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
906 safe_syscall2(int, kill, pid_t, pid, int, sig)
907 safe_syscall2(int, tkill, int, tid, int, sig)
908 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
909 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
910 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
911 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
912 socklen_t, addrlen)
913 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
914 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
915 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
916 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
917 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
918 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
919 safe_syscall2(int, flock, int, fd, int, operation)
920 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
921 const struct timespec *, uts, size_t, sigsetsize)
922 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
923 int, flags)
924 safe_syscall2(int, nanosleep, const struct timespec *, req,
925 struct timespec *, rem)
926 #ifdef TARGET_NR_clock_nanosleep
927 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
928 const struct timespec *, req, struct timespec *, rem)
929 #endif
930 #ifdef __NR_msgsnd
931 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
932 int, flags)
933 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
934 long, msgtype, int, flags)
935 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
936 unsigned, nsops, const struct timespec *, timeout)
937 #else
938 /* This host kernel architecture uses a single ipc syscall; fake up
939 * wrappers for the sub-operations to hide this implementation detail.
940 * Annoyingly we can't include linux/ipc.h to get the constant definitions
941 * for the call parameter because some structs in there conflict with the
942 * sys/ipc.h ones. So we just define them here, and rely on them being
943 * the same for all host architectures.
945 #define Q_SEMTIMEDOP 4
946 #define Q_MSGSND 11
947 #define Q_MSGRCV 12
948 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
950 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
951 void *, ptr, long, fifth)
952 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
954 return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
956 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
958 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
960 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
961 const struct timespec *timeout)
963 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
964 (long)timeout);
966 #endif
967 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
968 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
969 size_t, len, unsigned, prio, const struct timespec *, timeout)
970 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
971 size_t, len, unsigned *, prio, const struct timespec *, timeout)
972 #endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  - pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  - use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit
 * hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
989 static inline int host_to_target_sock_type(int host_type)
991 int target_type;
993 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
994 case SOCK_DGRAM:
995 target_type = TARGET_SOCK_DGRAM;
996 break;
997 case SOCK_STREAM:
998 target_type = TARGET_SOCK_STREAM;
999 break;
1000 default:
1001 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
1002 break;
1005 #if defined(SOCK_CLOEXEC)
1006 if (host_type & SOCK_CLOEXEC) {
1007 target_type |= TARGET_SOCK_CLOEXEC;
1009 #endif
1011 #if defined(SOCK_NONBLOCK)
1012 if (host_type & SOCK_NONBLOCK) {
1013 target_type |= TARGET_SOCK_NONBLOCK;
1015 #endif
1017 return target_type;
/* Emulated program-break state for the guest heap managed by do_brk(). */
static abi_ulong target_brk;          /* current guest brk value */
static abi_ulong target_original_brk; /* initial brk; never shrink below this */
static abi_ulong brk_page;            /* first host-page boundary past the heap */

/* Record the initial program break (called once by the loader). */
void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

/* Uncomment the first definition to trace brk handling to stderr. */
//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)
/* Emulation of the brk(2) syscall for the guest heap.
 * do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    /* brk(0) queries the current break without changing it. */
    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    /* Refuse to shrink below the initial break. */
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done... */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages. */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contains garbage data due to a previous heap usage (grown
         * then shrunken). */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    } else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value. */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
/* Convert a guest fd_set (a packed array of abi_ulong bitmap words at
 * target_fds_addr, covering n descriptors) into the host fd_set *fds.
 * Returns 0 on success or -TARGET_EFAULT if the guest memory is bad.
 */
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    /* number of abi_ulong words needed for n bits */
    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}
/* Like copy_from_user_fdset(), but treats a NULL guest address as a
 * NULL fd_set: on success *fds_ptr points at fds or is NULL, matching
 * select(2)'s optional-set semantics.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
                                                 abi_ulong target_fds_addr,
                                                 int n)
{
    if (target_fds_addr) {
        if (copy_from_user_fdset(fds, target_fds_addr, n))
            return -TARGET_EFAULT;
        *fds_ptr = fds;
    } else {
        *fds_ptr = NULL;
    }
    return 0;
}
/* Convert a host fd_set back into the guest's packed abi_ulong bitmap
 * representation at target_fds_addr (n descriptors).  Returns 0 or
 * -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        /* rebuild one abi_ulong worth of bits */
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
/* Host clock tick rate (USER_HZ): Alpha Linux uses 1024, all other
 * host architectures use 100.
 */
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

/* Rescale a clock_t tick count from host HZ to target HZ. */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    /* widen before multiplying to avoid overflow for large tick counts */
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
/* Copy a host struct rusage out to guest memory at target_addr,
 * byte-swapping every field to the target's order.  Returns 0 or
 * -TARGET_EFAULT.
 */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
/* Convert a guest rlimit value to the host's rlim_t.  The target's
 * "infinity" sentinel, or any value that does not survive the round
 * trip through rlim_t (i.e. would be truncated), maps to RLIM_INFINITY.
 */
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong target_rlim_swap;
    rlim_t result;

    target_rlim_swap = tswapal(target_rlim);
    if (target_rlim_swap == TARGET_RLIM_INFINITY)
        return RLIM_INFINITY;

    result = target_rlim_swap;
    /* overflow check: did assignment to rlim_t lose bits? */
    if (target_rlim_swap != (rlim_t)result)
        return RLIM_INFINITY;

    return result;
}
/* Convert a host rlim_t to the guest's (byte-swapped) representation.
 * RLIM_INFINITY, or any value too large for the target's signed range,
 * becomes TARGET_RLIM_INFINITY.
 */
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);

    return result;
}
1262 static inline int target_to_host_resource(int code)
1264 switch (code) {
1265 case TARGET_RLIMIT_AS:
1266 return RLIMIT_AS;
1267 case TARGET_RLIMIT_CORE:
1268 return RLIMIT_CORE;
1269 case TARGET_RLIMIT_CPU:
1270 return RLIMIT_CPU;
1271 case TARGET_RLIMIT_DATA:
1272 return RLIMIT_DATA;
1273 case TARGET_RLIMIT_FSIZE:
1274 return RLIMIT_FSIZE;
1275 case TARGET_RLIMIT_LOCKS:
1276 return RLIMIT_LOCKS;
1277 case TARGET_RLIMIT_MEMLOCK:
1278 return RLIMIT_MEMLOCK;
1279 case TARGET_RLIMIT_MSGQUEUE:
1280 return RLIMIT_MSGQUEUE;
1281 case TARGET_RLIMIT_NICE:
1282 return RLIMIT_NICE;
1283 case TARGET_RLIMIT_NOFILE:
1284 return RLIMIT_NOFILE;
1285 case TARGET_RLIMIT_NPROC:
1286 return RLIMIT_NPROC;
1287 case TARGET_RLIMIT_RSS:
1288 return RLIMIT_RSS;
1289 case TARGET_RLIMIT_RTPRIO:
1290 return RLIMIT_RTPRIO;
1291 case TARGET_RLIMIT_SIGPENDING:
1292 return RLIMIT_SIGPENDING;
1293 case TARGET_RLIMIT_STACK:
1294 return RLIMIT_STACK;
1295 default:
1296 return code;
/* Read a struct timeval from guest memory into *tv, swapping fields to
 * host byte order.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
/* Write a host struct timeval to guest memory, swapping fields to
 * target byte order.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
/* Read a struct timezone from guest memory into *tz.  Returns 0 or
 * -TARGET_EFAULT.
 */
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

/* Read a POSIX mqueue attribute struct from guest memory into *attr.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

/* Write a POSIX mqueue attribute struct out to guest memory.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* Emulate select(2) by converting the guest fd sets and timeout,
 * calling the host pselect6, and copying the (possibly updated) sets
 * and remaining timeout back.
 * do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /* select takes a timeval; pselect wants a timespec, so convert. */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        /* write back the remaining timeout, as Linux select does */
        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
#if defined(TARGET_WANT_OLD_SYS_SELECT)
/* Old-style select(2): the guest passes a single pointer to a struct
 * holding all five arguments.  Unpack it and forward to do_select().
 */
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#endif
/* Create a pipe with flags via host pipe2(); returns -ENOSYS when the
 * host libc doesn't provide pipe2 (the caller reports that to the guest).
 */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}
/* Common implementation of the pipe and pipe2 syscalls.  Some targets
 * return the second fd in a CPU register instead of writing both fds
 * to guest memory; that quirk applies only to the original pipe call.
 */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    /* Default convention: both fds go into the guest array. */
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
/* Convert a guest ip_mreq/ip_mreqn multicast request to the host's
 * struct ip_mreqn.  The imr_ifindex field is only present (and only
 * converted) when the guest passed the longer ip_mreqn form.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    /* IP addresses are kept in network byte order: no swap needed. */
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
/* Convert a guest struct sockaddr at target_addr into the host *addr.
 * fd-specific translators (registered via fd_trans) take precedence.
 * AF_NETLINK and AF_PACKET need extra field byte-swapping; AF_UNIX
 * paths may need NUL-termination fixups.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
/* Copy a host struct sockaddr out to guest memory, byte-swapping the
 * family field and the AF_NETLINK / AF_PACKET specific fields.
 * A zero length is a no-op.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    /* only swap sa_family if the buffer is long enough to contain it */
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
        struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
/* Convert the ancillary-data (control message) chain of a guest msghdr
 * into the pre-allocated host msgh control buffer.  Understands
 * SCM_RIGHTS and SCM_CREDENTIALS; other types are copied verbatim with
 * a warning.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* payload length = total cmsg length minus the (aligned) header */
        int len = tswapal(target_cmsg->cmsg_len)
            - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            gemu_log("Host cmsg overflow\n");
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* passed file descriptors: swap each int individually */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
/* Convert the host msgh ancillary-data chain back into the guest's
 * control buffer, handling truncation the way the kernel's put_cmsg()
 * does (set MSG_CTRUNC, copy what fits).  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        tgt_len = TARGET_CMSG_LEN(len);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            /* fallthrough */
        default:
            break;
        }

        if (msg_controllen < tgt_len) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen;
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(tgt_len);
        tgt_space = TARGET_CMSG_SPACE(len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
/* Byte-swap every field of a netlink message header in place
 * (works in either direction: host->target or target->host).
 */
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
    nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
    nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
}
/* Walk a buffer of host-order netlink messages, converting each payload
 * with the supplied callback and then byte-swapping the header.  Stops
 * at NLMSG_DONE / NLMSG_ERROR or on a malformed length.  Returns 0 or
 * the callback's negative error.
 */
static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*host_to_target_nlmsg)
                                              (struct nlmsghdr *))
{
    uint32_t nlmsg_len;
    abi_long ret;

    while (len > sizeof(struct nlmsghdr)) {

        nlmsg_len = nlh->nlmsg_len;
        if (nlmsg_len < sizeof(struct nlmsghdr) ||
            nlmsg_len > len) {
            break;
        }

        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            tswap_nlmsghdr(nlh);
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            tswap_nlmsghdr(&e->msg);
            tswap_nlmsghdr(nlh);
            return 0;
        }
        default:
            ret = host_to_target_nlmsg(nlh);
            if (ret < 0) {
                tswap_nlmsghdr(nlh);
                return ret;
            }
            break;
        }
        tswap_nlmsghdr(nlh);
        len -= NLMSG_ALIGN(nlmsg_len);
        nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
    }
    return 0;
}
/* Walk a buffer of target-order netlink messages: byte-swap each header
 * to host order, then convert the payload via the callback.  Note the
 * length sanity check swaps on the fly because the header is still in
 * target order at that point.  Returns 0 or the callback's error.
 */
static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*target_to_host_nlmsg)
                                              (struct nlmsghdr *))
{
    int ret;

    while (len > sizeof(struct nlmsghdr)) {
        if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
            tswap32(nlh->nlmsg_len) > len) {
            break;
        }
        tswap_nlmsghdr(nlh);
        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            tswap_nlmsghdr(&e->msg);
            return 0;
        }
        default:
            ret = target_to_host_nlmsg(nlh);
            if (ret < 0) {
                return ret;
            }
        }
        len -= NLMSG_ALIGN(nlh->nlmsg_len);
        nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
    }
    return 0;
}
#ifdef CONFIG_RTNETLINK
/* Iterate a host-order netlink attribute (nlattr) list: let the
 * callback convert each attribute's payload (passing through the
 * caller-supplied context), then byte-swap the attribute header.
 * Stops on malformed lengths.  Returns 0 or the callback's error.
 */
static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
                                               size_t len, void *context,
                                               abi_long (*host_to_target_nlattr)
                                               (struct nlattr *,
                                                void *context))
{
    unsigned short nla_len;
    abi_long ret;

    while (len > sizeof(struct nlattr)) {
        nla_len = nlattr->nla_len;
        if (nla_len < sizeof(struct nlattr) ||
            nla_len > len) {
            break;
        }
        ret = host_to_target_nlattr(nlattr, context);
        /* swap the header only after the payload has been converted */
        nlattr->nla_len = tswap16(nlattr->nla_len);
        nlattr->nla_type = tswap16(nlattr->nla_type);
        if (ret < 0) {
            return ret;
        }
        len -= NLA_ALIGN(nla_len);
        nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
    }
    return 0;
}
/* Same walk as host_to_target_for_each_nlattr() but over rtnetlink
 * route attributes (struct rtattr), which have no context argument.
 * Returns 0 or the callback's error.
 */
static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
                                               size_t len,
                                               abi_long (*host_to_target_rtattr)
                                               (struct rtattr *))
{
    unsigned short rta_len;
    abi_long ret;

    while (len > sizeof(struct rtattr)) {
        rta_len = rtattr->rta_len;
        if (rta_len < sizeof(struct rtattr) ||
            rta_len > len) {
            break;
        }
        ret = host_to_target_rtattr(rtattr);
        /* swap the header only after the payload has been converted */
        rtattr->rta_len = tswap16(rtattr->rta_len);
        rtattr->rta_type = tswap16(rtattr->rta_type);
        if (ret < 0) {
            return ret;
        }
        len -= RTA_ALIGN(rta_len);
        rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
    }
    return 0;
}
/* Pointer to a netlink attribute's payload (just past the header). */
#define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)

/* Byte-swap the payload of one IFLA_BR_* (bridge) attribute according
 * to its declared scalar width; byte arrays and flags need no swap.
 * Unknown types are logged and left untouched.  Always returns 0.
 */
static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
                                                  void *context)
{
    uint16_t *u16;
    uint32_t *u32;
    uint64_t *u64;

    switch (nlattr->nla_type) {
    /* no data */
    case QEMU_IFLA_BR_FDB_FLUSH:
        break;
    /* binary */
    case QEMU_IFLA_BR_GROUP_ADDR:
        break;
    /* uint8_t */
    case QEMU_IFLA_BR_VLAN_FILTERING:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
    case QEMU_IFLA_BR_MCAST_ROUTER:
    case QEMU_IFLA_BR_MCAST_SNOOPING:
    case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
    case QEMU_IFLA_BR_MCAST_QUERIER:
    case QEMU_IFLA_BR_NF_CALL_IPTABLES:
    case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
    case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
        break;
    /* uint16_t */
    case QEMU_IFLA_BR_PRIORITY:
    case QEMU_IFLA_BR_VLAN_PROTOCOL:
    case QEMU_IFLA_BR_GROUP_FWD_MASK:
    case QEMU_IFLA_BR_ROOT_PORT:
    case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
        u16 = NLA_DATA(nlattr);
        *u16 = tswap16(*u16);
        break;
    /* uint32_t */
    case QEMU_IFLA_BR_FORWARD_DELAY:
    case QEMU_IFLA_BR_HELLO_TIME:
    case QEMU_IFLA_BR_MAX_AGE:
    case QEMU_IFLA_BR_AGEING_TIME:
    case QEMU_IFLA_BR_STP_STATE:
    case QEMU_IFLA_BR_ROOT_PATH_COST:
    case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
    case QEMU_IFLA_BR_MCAST_HASH_MAX:
    case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
    case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint64_t */
    case QEMU_IFLA_BR_HELLO_TIMER:
    case QEMU_IFLA_BR_TCN_TIMER:
    case QEMU_IFLA_BR_GC_TIMER:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
    case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
    case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
    case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
        u64 = NLA_DATA(nlattr);
        *u64 = tswap64(*u64);
        break;
    /* ifla_bridge_id: uin8_t[] */
    case QEMU_IFLA_BR_ROOT_ID:
    case QEMU_IFLA_BR_BRIDGE_ID:
        break;
    default:
        gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}
/* Byte-swap the payload of one IFLA_BRPORT_* (bridge port / slave)
 * attribute according to its declared scalar width; unknown types are
 * logged and left untouched.  Always returns 0.
 */
static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
                                                        void *context)
{
    uint16_t *u16;
    uint32_t *u32;
    uint64_t *u64;

    switch (nlattr->nla_type) {
    /* uint8_t */
    case QEMU_IFLA_BRPORT_STATE:
    case QEMU_IFLA_BRPORT_MODE:
    case QEMU_IFLA_BRPORT_GUARD:
    case QEMU_IFLA_BRPORT_PROTECT:
    case QEMU_IFLA_BRPORT_FAST_LEAVE:
    case QEMU_IFLA_BRPORT_LEARNING:
    case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
    case QEMU_IFLA_BRPORT_PROXYARP:
    case QEMU_IFLA_BRPORT_LEARNING_SYNC:
    case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
    case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
    case QEMU_IFLA_BRPORT_CONFIG_PENDING:
    case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
        break;
    /* uint16_t */
    case QEMU_IFLA_BRPORT_PRIORITY:
    case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
    case QEMU_IFLA_BRPORT_DESIGNATED_COST:
    case QEMU_IFLA_BRPORT_ID:
    case QEMU_IFLA_BRPORT_NO:
        u16 = NLA_DATA(nlattr);
        *u16 = tswap16(*u16);
        break;
    /* uin32_t */
    case QEMU_IFLA_BRPORT_COST:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint64_t */
    case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
    case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
    case QEMU_IFLA_BRPORT_HOLD_TIMER:
        u64 = NLA_DATA(nlattr);
        *u64 = tswap64(*u64);
        break;
    /* ifla_bridge_id: uint8_t[] */
    case QEMU_IFLA_BRPORT_ROOT_ID:
    case QEMU_IFLA_BRPORT_BRIDGE_ID:
        break;
    default:
        gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}
/* Carries the IFLA_INFO_KIND / IFLA_INFO_SLAVE_KIND strings across the
 * attributes of one IFLA_LINKINFO nest, so that the nested
 * IFLA_INFO_DATA / IFLA_INFO_SLAVE_DATA payloads can be decoded
 * according to the link type (currently only "bridge").
 */
struct linkinfo_context {
    int len;            /* length of name (nla_len minus NLA_HDRLEN) */
    char *name;         /* IFLA_INFO_KIND string, points into the message */
    int slave_len;      /* length of slave_name */
    char *slave_name;   /* IFLA_INFO_SLAVE_KIND string */
};
2132 static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
2133 void *context)
2135 struct linkinfo_context *li_context = context;
2137 switch (nlattr->nla_type) {
2138 /* string */
2139 case QEMU_IFLA_INFO_KIND:
2140 li_context->name = NLA_DATA(nlattr);
2141 li_context->len = nlattr->nla_len - NLA_HDRLEN;
2142 break;
2143 case QEMU_IFLA_INFO_SLAVE_KIND:
2144 li_context->slave_name = NLA_DATA(nlattr);
2145 li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
2146 break;
2147 /* stats */
2148 case QEMU_IFLA_INFO_XSTATS:
2149 /* FIXME: only used by CAN */
2150 break;
2151 /* nested */
2152 case QEMU_IFLA_INFO_DATA:
2153 if (strncmp(li_context->name, "bridge",
2154 li_context->len) == 0) {
2155 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2156 nlattr->nla_len,
2157 NULL,
2158 host_to_target_data_bridge_nlattr);
2159 } else {
2160 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
2162 break;
2163 case QEMU_IFLA_INFO_SLAVE_DATA:
2164 if (strncmp(li_context->slave_name, "bridge",
2165 li_context->slave_len) == 0) {
2166 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2167 nlattr->nla_len,
2168 NULL,
2169 host_to_target_slave_data_bridge_nlattr);
2170 } else {
2171 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2172 li_context->slave_name);
2174 break;
2175 default:
2176 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
2177 break;
2180 return 0;
2183 static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
2184 void *context)
2186 uint32_t *u32;
2187 int i;
2189 switch (nlattr->nla_type) {
2190 case QEMU_IFLA_INET_CONF:
2191 u32 = NLA_DATA(nlattr);
2192 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2193 i++) {
2194 u32[i] = tswap32(u32[i]);
2196 break;
2197 default:
2198 gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
2200 return 0;
/* Byte-swap one AF_INET6 (IFLA_INET6_*) attribute in place from host to
 * target endianness.  @context is unused; always returns 0.
 */
static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
                                                 void *context)
{
    uint32_t *u32;
    uint64_t *u64;
    struct ifla_cacheinfo *ci;
    int i;

    switch (nlattr->nla_type) {
    /* binaries: opaque byte strings, no swapping needed */
    case QEMU_IFLA_INET6_TOKEN:
        break;
    /* uint8_t */
    case QEMU_IFLA_INET6_ADDR_GEN_MODE:
        break;
    /* uint32_t */
    case QEMU_IFLA_INET6_FLAGS:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint32_t[] */
    case QEMU_IFLA_INET6_CONF:
        u32 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
             i++) {
            u32[i] = tswap32(u32[i]);
        }
        break;
    /* ifla_cacheinfo: four 32-bit fields */
    case QEMU_IFLA_INET6_CACHEINFO:
        ci = NLA_DATA(nlattr);
        ci->max_reasm_len = tswap32(ci->max_reasm_len);
        ci->tstamp = tswap32(ci->tstamp);
        ci->reachable_time = tswap32(ci->reachable_time);
        ci->retrans_time = tswap32(ci->retrans_time);
        break;
    /* uint64_t[] */
    case QEMU_IFLA_INET6_STATS:
    case QEMU_IFLA_INET6_ICMP6STATS:
        u64 = NLA_DATA(nlattr);
        for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
             i++) {
            u64[i] = tswap64(u64[i]);
        }
        break;
    default:
        gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
    }
    return 0;
}
2254 static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
2255 void *context)
2257 switch (nlattr->nla_type) {
2258 case AF_INET:
2259 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2260 NULL,
2261 host_to_target_data_inet_nlattr);
2262 case AF_INET6:
2263 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2264 NULL,
2265 host_to_target_data_inet6_nlattr);
2266 default:
2267 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
2268 break;
2270 return 0;
/* Convert one IFLA_* attribute of an RTM_*LINK message from host to
 * target byte order in place.  Nested attribute sets (IFLA_LINKINFO,
 * IFLA_AF_SPEC) recurse through host_to_target_for_each_nlattr().
 * Unknown attributes are logged but left untouched; always returns the
 * nested walker's result or 0.
 */
static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
{
    uint32_t *u32;
    struct rtnl_link_stats *st;
    struct rtnl_link_stats64 *st64;
    struct rtnl_link_ifmap *map;
    struct linkinfo_context li_context;

    switch (rtattr->rta_type) {
    /* binary stream */
    case QEMU_IFLA_ADDRESS:
    case QEMU_IFLA_BROADCAST:
    /* string */
    case QEMU_IFLA_IFNAME:
    case QEMU_IFLA_QDISC:
        break;
    /* uint8_t: endian-neutral */
    case QEMU_IFLA_OPERSTATE:
    case QEMU_IFLA_LINKMODE:
    case QEMU_IFLA_CARRIER:
    case QEMU_IFLA_PROTO_DOWN:
        break;
    /* uint32_t */
    case QEMU_IFLA_MTU:
    case QEMU_IFLA_LINK:
    case QEMU_IFLA_WEIGHT:
    case QEMU_IFLA_TXQLEN:
    case QEMU_IFLA_CARRIER_CHANGES:
    case QEMU_IFLA_NUM_RX_QUEUES:
    case QEMU_IFLA_NUM_TX_QUEUES:
    case QEMU_IFLA_PROMISCUITY:
    case QEMU_IFLA_EXT_MASK:
    case QEMU_IFLA_LINK_NETNSID:
    case QEMU_IFLA_GROUP:
    case QEMU_IFLA_MASTER:
    case QEMU_IFLA_NUM_VF:
        u32 = RTA_DATA(rtattr);
        *u32 = tswap32(*u32);
        break;
    /* struct rtnl_link_stats: every field is 32-bit */
    case QEMU_IFLA_STATS:
        st = RTA_DATA(rtattr);
        st->rx_packets = tswap32(st->rx_packets);
        st->tx_packets = tswap32(st->tx_packets);
        st->rx_bytes = tswap32(st->rx_bytes);
        st->tx_bytes = tswap32(st->tx_bytes);
        st->rx_errors = tswap32(st->rx_errors);
        st->tx_errors = tswap32(st->tx_errors);
        st->rx_dropped = tswap32(st->rx_dropped);
        st->tx_dropped = tswap32(st->tx_dropped);
        st->multicast = tswap32(st->multicast);
        st->collisions = tswap32(st->collisions);

        /* detailed rx_errors: */
        st->rx_length_errors = tswap32(st->rx_length_errors);
        st->rx_over_errors = tswap32(st->rx_over_errors);
        st->rx_crc_errors = tswap32(st->rx_crc_errors);
        st->rx_frame_errors = tswap32(st->rx_frame_errors);
        st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
        st->rx_missed_errors = tswap32(st->rx_missed_errors);

        /* detailed tx_errors */
        st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
        st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
        st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
        st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
        st->tx_window_errors = tswap32(st->tx_window_errors);

        /* for cslip etc */
        st->rx_compressed = tswap32(st->rx_compressed);
        st->tx_compressed = tswap32(st->tx_compressed);
        break;
    /* struct rtnl_link_stats64: same layout, 64-bit fields */
    case QEMU_IFLA_STATS64:
        st64 = RTA_DATA(rtattr);
        st64->rx_packets = tswap64(st64->rx_packets);
        st64->tx_packets = tswap64(st64->tx_packets);
        st64->rx_bytes = tswap64(st64->rx_bytes);
        st64->tx_bytes = tswap64(st64->tx_bytes);
        st64->rx_errors = tswap64(st64->rx_errors);
        st64->tx_errors = tswap64(st64->tx_errors);
        st64->rx_dropped = tswap64(st64->rx_dropped);
        st64->tx_dropped = tswap64(st64->tx_dropped);
        st64->multicast = tswap64(st64->multicast);
        st64->collisions = tswap64(st64->collisions);

        /* detailed rx_errors: */
        st64->rx_length_errors = tswap64(st64->rx_length_errors);
        st64->rx_over_errors = tswap64(st64->rx_over_errors);
        st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
        st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
        st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
        st64->rx_missed_errors = tswap64(st64->rx_missed_errors);

        /* detailed tx_errors */
        st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
        st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
        st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
        st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
        st64->tx_window_errors = tswap64(st64->tx_window_errors);

        /* for cslip etc */
        st64->rx_compressed = tswap64(st64->rx_compressed);
        st64->tx_compressed = tswap64(st64->tx_compressed);
        break;
    /* struct rtnl_link_ifmap: mixed widths (dma/port are single bytes) */
    case QEMU_IFLA_MAP:
        map = RTA_DATA(rtattr);
        map->mem_start = tswap64(map->mem_start);
        map->mem_end = tswap64(map->mem_end);
        map->base_addr = tswap64(map->base_addr);
        map->irq = tswap16(map->irq);
        break;
    /* nested */
    case QEMU_IFLA_LINKINFO:
        /* li_context accumulates the KIND strings across the nest so
         * that the DATA payloads can be decoded by link type */
        memset(&li_context, 0, sizeof(li_context));
        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
                                              &li_context,
                                           host_to_target_data_linkinfo_nlattr);
    case QEMU_IFLA_AF_SPEC:
        return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
                                              NULL,
                                             host_to_target_data_spec_nlattr);
    default:
        gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
        break;
    }
    return 0;
}
2403 static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
2405 uint32_t *u32;
2406 struct ifa_cacheinfo *ci;
2408 switch (rtattr->rta_type) {
2409 /* binary: depends on family type */
2410 case IFA_ADDRESS:
2411 case IFA_LOCAL:
2412 break;
2413 /* string */
2414 case IFA_LABEL:
2415 break;
2416 /* u32 */
2417 case IFA_FLAGS:
2418 case IFA_BROADCAST:
2419 u32 = RTA_DATA(rtattr);
2420 *u32 = tswap32(*u32);
2421 break;
2422 /* struct ifa_cacheinfo */
2423 case IFA_CACHEINFO:
2424 ci = RTA_DATA(rtattr);
2425 ci->ifa_prefered = tswap32(ci->ifa_prefered);
2426 ci->ifa_valid = tswap32(ci->ifa_valid);
2427 ci->cstamp = tswap32(ci->cstamp);
2428 ci->tstamp = tswap32(ci->tstamp);
2429 break;
2430 default:
2431 gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
2432 break;
2434 return 0;
2437 static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
2439 uint32_t *u32;
2440 switch (rtattr->rta_type) {
2441 /* binary: depends on family type */
2442 case RTA_GATEWAY:
2443 case RTA_DST:
2444 case RTA_PREFSRC:
2445 break;
2446 /* u32 */
2447 case RTA_PRIORITY:
2448 case RTA_TABLE:
2449 case RTA_OIF:
2450 u32 = RTA_DATA(rtattr);
2451 *u32 = tswap32(*u32);
2452 break;
2453 default:
2454 gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
2455 break;
2457 return 0;
/* Walk all IFLA_* attributes of a link message, converting each to
 * target byte order. */
static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
                                           uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_link_rtattr);
}
/* Walk all IFA_* attributes of an address message, converting each to
 * target byte order. */
static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
                                           uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_addr_rtattr);
}
/* Walk all RTA_* attributes of a route message, converting each to
 * target byte order. */
static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
                                            uint32_t rtattr_len)
{
    return host_to_target_for_each_rtattr(rtattr, rtattr_len,
                                          host_to_target_data_route_rtattr);
}
/* Convert the payload of one RTM_* routing netlink message from host to
 * target byte order: the fixed family header (ifinfomsg/ifaddrmsg/rtmsg)
 * plus the attribute chain that follows it.  Messages too short for
 * their fixed header are left untouched.  Returns -TARGET_EINVAL for
 * unrecognised message types.
 *
 * nlmsg_len is captured up front before the header fields are touched;
 * presumably the generic nlmsg walker swaps the nlmsghdr itself
 * separately — TODO confirm against host_to_target_for_each_nlmsg().
 */
static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
{
    uint32_t nlmsg_len;
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    nlmsg_len = nlh->nlmsg_len;
    switch (nlh->nlmsg_type) {
    case RTM_NEWLINK:
    case RTM_DELLINK:
    case RTM_GETLINK:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
            ifi = NLMSG_DATA(nlh);
            ifi->ifi_type = tswap16(ifi->ifi_type);
            ifi->ifi_index = tswap32(ifi->ifi_index);
            ifi->ifi_flags = tswap32(ifi->ifi_flags);
            ifi->ifi_change = tswap32(ifi->ifi_change);
            host_to_target_link_rtattr(IFLA_RTA(ifi),
                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
        }
        break;
    case RTM_NEWADDR:
    case RTM_DELADDR:
    case RTM_GETADDR:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
            ifa = NLMSG_DATA(nlh);
            ifa->ifa_index = tswap32(ifa->ifa_index);
            host_to_target_addr_rtattr(IFA_RTA(ifa),
                                       nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
        }
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
    case RTM_GETROUTE:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
            rtm = NLMSG_DATA(nlh);
            rtm->rtm_flags = tswap32(rtm->rtm_flags);
            host_to_target_route_rtattr(RTM_RTA(rtm),
                                        nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
        }
        break;
    default:
        return -TARGET_EINVAL;
    }
    return 0;
}
/* Convert a buffer of routing-netlink messages received from the host
 * into target byte order. */
static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
                                                  size_t len)
{
    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
}
/* Iterate over a target-endian rtattr chain: validate and byte-swap
 * each attribute header, then invoke @target_to_host_rtattr on it.
 * Stops silently at the first truncated or undersized attribute;
 * propagates the first negative return from the callback.
 */
static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
                                               size_t len,
                                               abi_long (*target_to_host_rtattr)
                                               (struct rtattr *))
{
    abi_long ret;

    while (len >= sizeof(struct rtattr)) {
        /* rta_len is still target-endian here: validate the swapped
         * value before trusting it for pointer arithmetic */
        if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
            tswap16(rtattr->rta_len) > len) {
            break;
        }
        rtattr->rta_len = tswap16(rtattr->rta_len);
        rtattr->rta_type = tswap16(rtattr->rta_type);
        ret = target_to_host_rtattr(rtattr);
        if (ret < 0) {
            return ret;
        }
        /* advance past the attribute including alignment padding */
        len -= RTA_ALIGN(rtattr->rta_len);
        rtattr = (struct rtattr *)(((char *)rtattr) +
                 RTA_ALIGN(rtattr->rta_len));
    }
    return 0;
}
2560 static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
2562 switch (rtattr->rta_type) {
2563 default:
2564 gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
2565 break;
2567 return 0;
2570 static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
2572 switch (rtattr->rta_type) {
2573 /* binary: depends on family type */
2574 case IFA_LOCAL:
2575 case IFA_ADDRESS:
2576 break;
2577 default:
2578 gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
2579 break;
2581 return 0;
2584 static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
2586 uint32_t *u32;
2587 switch (rtattr->rta_type) {
2588 /* binary: depends on family type */
2589 case RTA_DST:
2590 case RTA_SRC:
2591 case RTA_GATEWAY:
2592 break;
2593 /* u32 */
2594 case RTA_OIF:
2595 u32 = RTA_DATA(rtattr);
2596 *u32 = tswap32(*u32);
2597 break;
2598 default:
2599 gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
2600 break;
2602 return 0;
/* Walk all IFLA_* attributes of an outgoing link message, converting
 * each to host byte order. */
static void target_to_host_link_rtattr(struct rtattr *rtattr,
                                       uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_link_rtattr);
}
/* Walk all IFA_* attributes of an outgoing address message, converting
 * each to host byte order. */
static void target_to_host_addr_rtattr(struct rtattr *rtattr,
                                       uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_addr_rtattr);
}
/* Walk all RTA_* attributes of an outgoing route message, converting
 * each to host byte order. */
static void target_to_host_route_rtattr(struct rtattr *rtattr,
                                        uint32_t rtattr_len)
{
    target_to_host_for_each_rtattr(rtattr, rtattr_len,
                                   target_to_host_data_route_rtattr);
}
/* Convert the payload of one outgoing RTM_* routing message from target
 * to host byte order.  GET requests carry no fixed header payload worth
 * converting; NEW/DEL messages have their family header and attribute
 * chain swapped in place.  Returns -TARGET_EOPNOTSUPP for unrecognised
 * message types.
 */
static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
{
    struct ifinfomsg *ifi;
    struct ifaddrmsg *ifa;
    struct rtmsg *rtm;

    switch (nlh->nlmsg_type) {
    case RTM_GETLINK:
        break;
    case RTM_NEWLINK:
    case RTM_DELLINK:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
            ifi = NLMSG_DATA(nlh);
            ifi->ifi_type = tswap16(ifi->ifi_type);
            ifi->ifi_index = tswap32(ifi->ifi_index);
            ifi->ifi_flags = tswap32(ifi->ifi_flags);
            ifi->ifi_change = tswap32(ifi->ifi_change);
            target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
                                       NLMSG_LENGTH(sizeof(*ifi)));
        }
        break;
    case RTM_GETADDR:
    case RTM_NEWADDR:
    case RTM_DELADDR:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
            ifa = NLMSG_DATA(nlh);
            ifa->ifa_index = tswap32(ifa->ifa_index);
            target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
                                       NLMSG_LENGTH(sizeof(*ifa)));
        }
        break;
    case RTM_GETROUTE:
        break;
    case RTM_NEWROUTE:
    case RTM_DELROUTE:
        if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
            rtm = NLMSG_DATA(nlh);
            rtm->rtm_flags = tswap32(rtm->rtm_flags);
            target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
                                        NLMSG_LENGTH(sizeof(*rtm)));
        }
        break;
    default:
        return -TARGET_EOPNOTSUPP;
    }
    return 0;
}
/* Convert a buffer of routing-netlink messages about to be sent to the
 * host into host byte order. */
static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
{
    return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
}
2678 #endif /* CONFIG_RTNETLINK */
2680 static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
2682 switch (nlh->nlmsg_type) {
2683 default:
2684 gemu_log("Unknown host audit message type %d\n",
2685 nlh->nlmsg_type);
2686 return -TARGET_EINVAL;
2688 return 0;
/* Convert a buffer of audit-netlink messages received from the host
 * into target representation. */
static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
                                                  size_t len)
{
    return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
}
2697 static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
2699 switch (nlh->nlmsg_type) {
2700 case AUDIT_USER:
2701 case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
2702 case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
2703 break;
2704 default:
2705 gemu_log("Unknown target audit message type %d\n",
2706 nlh->nlmsg_type);
2707 return -TARGET_EINVAL;
2710 return 0;
/* Convert a buffer of audit-netlink messages about to be sent to the
 * host into host representation. */
static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
{
    return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
}
2718 /* do_setsockopt() Must return target values and target errnos. */
2719 static abi_long do_setsockopt(int sockfd, int level, int optname,
2720 abi_ulong optval_addr, socklen_t optlen)
2722 abi_long ret;
2723 int val;
2724 struct ip_mreqn *ip_mreq;
2725 struct ip_mreq_source *ip_mreq_source;
2727 switch(level) {
2728 case SOL_TCP:
2729 /* TCP options all take an 'int' value. */
2730 if (optlen < sizeof(uint32_t))
2731 return -TARGET_EINVAL;
2733 if (get_user_u32(val, optval_addr))
2734 return -TARGET_EFAULT;
2735 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2736 break;
2737 case SOL_IP:
2738 switch(optname) {
2739 case IP_TOS:
2740 case IP_TTL:
2741 case IP_HDRINCL:
2742 case IP_ROUTER_ALERT:
2743 case IP_RECVOPTS:
2744 case IP_RETOPTS:
2745 case IP_PKTINFO:
2746 case IP_MTU_DISCOVER:
2747 case IP_RECVERR:
2748 case IP_RECVTOS:
2749 #ifdef IP_FREEBIND
2750 case IP_FREEBIND:
2751 #endif
2752 case IP_MULTICAST_TTL:
2753 case IP_MULTICAST_LOOP:
2754 val = 0;
2755 if (optlen >= sizeof(uint32_t)) {
2756 if (get_user_u32(val, optval_addr))
2757 return -TARGET_EFAULT;
2758 } else if (optlen >= 1) {
2759 if (get_user_u8(val, optval_addr))
2760 return -TARGET_EFAULT;
2762 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2763 break;
2764 case IP_ADD_MEMBERSHIP:
2765 case IP_DROP_MEMBERSHIP:
2766 if (optlen < sizeof (struct target_ip_mreq) ||
2767 optlen > sizeof (struct target_ip_mreqn))
2768 return -TARGET_EINVAL;
2770 ip_mreq = (struct ip_mreqn *) alloca(optlen);
2771 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2772 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2773 break;
2775 case IP_BLOCK_SOURCE:
2776 case IP_UNBLOCK_SOURCE:
2777 case IP_ADD_SOURCE_MEMBERSHIP:
2778 case IP_DROP_SOURCE_MEMBERSHIP:
2779 if (optlen != sizeof (struct target_ip_mreq_source))
2780 return -TARGET_EINVAL;
2782 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2783 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2784 unlock_user (ip_mreq_source, optval_addr, 0);
2785 break;
2787 default:
2788 goto unimplemented;
2790 break;
2791 case SOL_IPV6:
2792 switch (optname) {
2793 case IPV6_MTU_DISCOVER:
2794 case IPV6_MTU:
2795 case IPV6_V6ONLY:
2796 case IPV6_RECVPKTINFO:
2797 val = 0;
2798 if (optlen < sizeof(uint32_t)) {
2799 return -TARGET_EINVAL;
2801 if (get_user_u32(val, optval_addr)) {
2802 return -TARGET_EFAULT;
2804 ret = get_errno(setsockopt(sockfd, level, optname,
2805 &val, sizeof(val)));
2806 break;
2807 default:
2808 goto unimplemented;
2810 break;
2811 case SOL_RAW:
2812 switch (optname) {
2813 case ICMP_FILTER:
2814 /* struct icmp_filter takes an u32 value */
2815 if (optlen < sizeof(uint32_t)) {
2816 return -TARGET_EINVAL;
2819 if (get_user_u32(val, optval_addr)) {
2820 return -TARGET_EFAULT;
2822 ret = get_errno(setsockopt(sockfd, level, optname,
2823 &val, sizeof(val)));
2824 break;
2826 default:
2827 goto unimplemented;
2829 break;
2830 case TARGET_SOL_SOCKET:
2831 switch (optname) {
2832 case TARGET_SO_RCVTIMEO:
2834 struct timeval tv;
2836 optname = SO_RCVTIMEO;
2838 set_timeout:
2839 if (optlen != sizeof(struct target_timeval)) {
2840 return -TARGET_EINVAL;
2843 if (copy_from_user_timeval(&tv, optval_addr)) {
2844 return -TARGET_EFAULT;
2847 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2848 &tv, sizeof(tv)));
2849 return ret;
2851 case TARGET_SO_SNDTIMEO:
2852 optname = SO_SNDTIMEO;
2853 goto set_timeout;
2854 case TARGET_SO_ATTACH_FILTER:
2856 struct target_sock_fprog *tfprog;
2857 struct target_sock_filter *tfilter;
2858 struct sock_fprog fprog;
2859 struct sock_filter *filter;
2860 int i;
2862 if (optlen != sizeof(*tfprog)) {
2863 return -TARGET_EINVAL;
2865 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2866 return -TARGET_EFAULT;
2868 if (!lock_user_struct(VERIFY_READ, tfilter,
2869 tswapal(tfprog->filter), 0)) {
2870 unlock_user_struct(tfprog, optval_addr, 1);
2871 return -TARGET_EFAULT;
2874 fprog.len = tswap16(tfprog->len);
2875 filter = g_try_new(struct sock_filter, fprog.len);
2876 if (filter == NULL) {
2877 unlock_user_struct(tfilter, tfprog->filter, 1);
2878 unlock_user_struct(tfprog, optval_addr, 1);
2879 return -TARGET_ENOMEM;
2881 for (i = 0; i < fprog.len; i++) {
2882 filter[i].code = tswap16(tfilter[i].code);
2883 filter[i].jt = tfilter[i].jt;
2884 filter[i].jf = tfilter[i].jf;
2885 filter[i].k = tswap32(tfilter[i].k);
2887 fprog.filter = filter;
2889 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2890 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2891 g_free(filter);
2893 unlock_user_struct(tfilter, tfprog->filter, 1);
2894 unlock_user_struct(tfprog, optval_addr, 1);
2895 return ret;
2897 case TARGET_SO_BINDTODEVICE:
2899 char *dev_ifname, *addr_ifname;
2901 if (optlen > IFNAMSIZ - 1) {
2902 optlen = IFNAMSIZ - 1;
2904 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2905 if (!dev_ifname) {
2906 return -TARGET_EFAULT;
2908 optname = SO_BINDTODEVICE;
2909 addr_ifname = alloca(IFNAMSIZ);
2910 memcpy(addr_ifname, dev_ifname, optlen);
2911 addr_ifname[optlen] = 0;
2912 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2913 addr_ifname, optlen));
2914 unlock_user (dev_ifname, optval_addr, 0);
2915 return ret;
2917 /* Options with 'int' argument. */
2918 case TARGET_SO_DEBUG:
2919 optname = SO_DEBUG;
2920 break;
2921 case TARGET_SO_REUSEADDR:
2922 optname = SO_REUSEADDR;
2923 break;
2924 case TARGET_SO_TYPE:
2925 optname = SO_TYPE;
2926 break;
2927 case TARGET_SO_ERROR:
2928 optname = SO_ERROR;
2929 break;
2930 case TARGET_SO_DONTROUTE:
2931 optname = SO_DONTROUTE;
2932 break;
2933 case TARGET_SO_BROADCAST:
2934 optname = SO_BROADCAST;
2935 break;
2936 case TARGET_SO_SNDBUF:
2937 optname = SO_SNDBUF;
2938 break;
2939 case TARGET_SO_SNDBUFFORCE:
2940 optname = SO_SNDBUFFORCE;
2941 break;
2942 case TARGET_SO_RCVBUF:
2943 optname = SO_RCVBUF;
2944 break;
2945 case TARGET_SO_RCVBUFFORCE:
2946 optname = SO_RCVBUFFORCE;
2947 break;
2948 case TARGET_SO_KEEPALIVE:
2949 optname = SO_KEEPALIVE;
2950 break;
2951 case TARGET_SO_OOBINLINE:
2952 optname = SO_OOBINLINE;
2953 break;
2954 case TARGET_SO_NO_CHECK:
2955 optname = SO_NO_CHECK;
2956 break;
2957 case TARGET_SO_PRIORITY:
2958 optname = SO_PRIORITY;
2959 break;
2960 #ifdef SO_BSDCOMPAT
2961 case TARGET_SO_BSDCOMPAT:
2962 optname = SO_BSDCOMPAT;
2963 break;
2964 #endif
2965 case TARGET_SO_PASSCRED:
2966 optname = SO_PASSCRED;
2967 break;
2968 case TARGET_SO_PASSSEC:
2969 optname = SO_PASSSEC;
2970 break;
2971 case TARGET_SO_TIMESTAMP:
2972 optname = SO_TIMESTAMP;
2973 break;
2974 case TARGET_SO_RCVLOWAT:
2975 optname = SO_RCVLOWAT;
2976 break;
2977 break;
2978 default:
2979 goto unimplemented;
2981 if (optlen < sizeof(uint32_t))
2982 return -TARGET_EINVAL;
2984 if (get_user_u32(val, optval_addr))
2985 return -TARGET_EFAULT;
2986 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2987 break;
2988 default:
2989 unimplemented:
2990 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
2991 ret = -TARGET_ENOPROTOOPT;
2993 return ret;
/* do_getsockopt() Must return target values and target errnos.
 *
 * Translate the target's (level, optname) pair into host values, run
 * the host getsockopt() and copy the result back to target memory,
 * honouring the target-supplied buffer length at *optlen.  Options that
 * do not return a single integer (SO_LINGER, the timeouts, SO_PEERNAME)
 * are unimplemented.
 */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (len > crlen) {
                len = crlen;
            }
            /* NOTE(review): all three fields are written regardless of
             * the (possibly shorter) length requested by the target;
             * only the reported length is clamped. */
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        /* NOTE(review): sizeof(lv) was presumably meant to be
         * sizeof(val); both are 4 bytes here so the behavior is the
         * same — confirm before changing. */
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                /* small buffer and a byte-sized value: write one byte,
                 * as the kernel does for these options */
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
/* Build a host iovec array from a target iovec array at @target_addr,
 * locking each buffer into host memory with the given access @type.
 * Returns the array (to be released with unlock_iovec()), or NULL with
 * errno set (0 for an empty vector, EINVAL/ENOMEM/EFAULT on error).
 *
 * A bad buffer address in the FIRST entry is a hard EFAULT; a bad
 * address in a later entry produces a partial vector: that entry and
 * all following ones get NULL base and zero length, matching the
 * kernel's partial-write semantics.  Total length is clamped to
 * max_len, again mirroring kernel behavior.
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* undo the locks taken so far (entries with zero length were never
     * locked, so skip them) */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
/* Release a host iovec created by lock_iovec(): unlock every buffer
 * (copying data back to the target when @copy is set) and free the
 * array.  Entries past a bad address hold NULL base / zero length, for
 * which unlock_user() is harmless.
 */
static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                         abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec) {
        for (i = 0; i < count; i++) {
            abi_ulong base = tswapal(target_vec[i].iov_base);
            abi_long len = tswapal(target_vec[i].iov_len);
            if (len < 0) {
                /* lock_iovec() stopped here too; nothing beyond was locked */
                break;
            }
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
        unlock_user(target_vec, target_addr, 0);
    }

    g_free(vec);
}
/* Translate a target socket type (SOCK_* plus SOCK_CLOEXEC/NONBLOCK
 * flag bits) into the host's encoding.  On success *type is replaced by
 * the host value and 0 is returned; -TARGET_EINVAL if a requested flag
 * has no host equivalent.  TARGET_SOCK_NONBLOCK without a host
 * SOCK_NONBLOCK is tolerated when O_NONBLOCK exists, because
 * sock_flags_fixup() can apply it with fcntl() after socket creation.
 */
static inline int target_to_host_sock_type(int *type)
{
    int host_type = 0;
    int target_type = *type;

    switch (target_type & TARGET_SOCK_TYPE_MASK) {
    case TARGET_SOCK_DGRAM:
        host_type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        host_type = SOCK_STREAM;
        break;
    default:
        /* remaining base types share the host's numbering */
        host_type = target_type & TARGET_SOCK_TYPE_MASK;
        break;
    }
    if (target_type & TARGET_SOCK_CLOEXEC) {
#if defined(SOCK_CLOEXEC)
        host_type |= SOCK_CLOEXEC;
#else
        return -TARGET_EINVAL;
#endif
    }
    if (target_type & TARGET_SOCK_NONBLOCK) {
#if defined(SOCK_NONBLOCK)
        host_type |= SOCK_NONBLOCK;
#elif !defined(O_NONBLOCK)
        return -TARGET_EINVAL;
#endif
    }
    *type = host_type;
    return 0;
}
3329 /* Try to emulate socket type flags after socket creation. */
3330 static int sock_flags_fixup(int fd, int target_type)
3332 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3333 if (target_type & TARGET_SOCK_NONBLOCK) {
3334 int flags = fcntl(fd, F_GETFL);
3335 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3336 close(fd);
3337 return -TARGET_EINVAL;
3340 #endif
3341 return fd;
3344 static abi_long packet_target_to_host_sockaddr(void *host_addr,
3345 abi_ulong target_addr,
3346 socklen_t len)
3348 struct sockaddr *addr = host_addr;
3349 struct target_sockaddr *target_saddr;
3351 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
3352 if (!target_saddr) {
3353 return -TARGET_EFAULT;
3356 memcpy(addr, target_saddr, len);
3357 addr->sa_family = tswap16(target_saddr->sa_family);
3358 /* spkt_protocol is big-endian */
3360 unlock_user(target_saddr, target_addr, 0);
3361 return 0;
/* fd translator for SOCK_PACKET sockets: only inbound sockaddr
 * conversion is needed (see packet_target_to_host_sockaddr). */
3364 static TargetFdTrans target_packet_trans = {
3365 .target_to_host_addr = packet_target_to_host_sockaddr,
3368 #ifdef CONFIG_RTNETLINK
3369 static abi_long netlink_route_target_to_host(void *buf, size_t len)
3371 abi_long ret;
3373 ret = target_to_host_nlmsg_route(buf, len);
3374 if (ret < 0) {
3375 return ret;
3378 return len;
3381 static abi_long netlink_route_host_to_target(void *buf, size_t len)
3383 abi_long ret;
3385 ret = host_to_target_nlmsg_route(buf, len);
3386 if (ret < 0) {
3387 return ret;
3390 return len;
/* fd translator for NETLINK_ROUTE sockets: payloads are byteswapped
 * in both directions. */
3393 static TargetFdTrans target_netlink_route_trans = {
3394 .target_to_host_data = netlink_route_target_to_host,
3395 .host_to_target_data = netlink_route_host_to_target,
3397 #endif /* CONFIG_RTNETLINK */
3399 static abi_long netlink_audit_target_to_host(void *buf, size_t len)
3401 abi_long ret;
3403 ret = target_to_host_nlmsg_audit(buf, len);
3404 if (ret < 0) {
3405 return ret;
3408 return len;
3411 static abi_long netlink_audit_host_to_target(void *buf, size_t len)
3413 abi_long ret;
3415 ret = host_to_target_nlmsg_audit(buf, len);
3416 if (ret < 0) {
3417 return ret;
3420 return len;
/* fd translator for NETLINK_AUDIT sockets: payloads are byteswapped
 * in both directions. */
3423 static TargetFdTrans target_netlink_audit_trans = {
3424 .target_to_host_data = netlink_audit_target_to_host,
3425 .host_to_target_data = netlink_audit_host_to_target,
3428 /* do_socket() Must return target values and target errnos. */
3429 static abi_long do_socket(int domain, int type, int protocol)
3431 int target_type = type;
3432 int ret;
3434 ret = target_to_host_sock_type(&type);
3435 if (ret) {
3436 return ret;
3439 if (domain == PF_NETLINK && !(
3440 #ifdef CONFIG_RTNETLINK
3441 protocol == NETLINK_ROUTE ||
3442 #endif
3443 protocol == NETLINK_KOBJECT_UEVENT ||
3444 protocol == NETLINK_AUDIT)) {
3445 return -EPFNOSUPPORT;
3448 if (domain == AF_PACKET ||
3449 (domain == AF_INET && type == SOCK_PACKET)) {
3450 protocol = tswap16(protocol);
3453 ret = get_errno(socket(domain, type, protocol));
3454 if (ret >= 0) {
3455 ret = sock_flags_fixup(ret, target_type);
3456 if (type == SOCK_PACKET) {
3457 /* Manage an obsolete case :
3458 * if socket type is SOCK_PACKET, bind by name
3460 fd_trans_register(ret, &target_packet_trans);
3461 } else if (domain == PF_NETLINK) {
3462 switch (protocol) {
3463 #ifdef CONFIG_RTNETLINK
3464 case NETLINK_ROUTE:
3465 fd_trans_register(ret, &target_netlink_route_trans);
3466 break;
3467 #endif
3468 case NETLINK_KOBJECT_UEVENT:
3469 /* nothing to do: messages are strings */
3470 break;
3471 case NETLINK_AUDIT:
3472 fd_trans_register(ret, &target_netlink_audit_trans);
3473 break;
3474 default:
3475 g_assert_not_reached();
3479 return ret;
3482 /* do_bind() Must return target values and target errnos. */
3483 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3484 socklen_t addrlen)
3486 void *addr;
3487 abi_long ret;
3489 if ((int)addrlen < 0) {
3490 return -TARGET_EINVAL;
3493 addr = alloca(addrlen+1);
3495 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3496 if (ret)
3497 return ret;
3499 return get_errno(bind(sockfd, addr, addrlen));
3502 /* do_connect() Must return target values and target errnos. */
3503 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3504 socklen_t addrlen)
3506 void *addr;
3507 abi_long ret;
3509 if ((int)addrlen < 0) {
3510 return -TARGET_EINVAL;
3513 addr = alloca(addrlen+1);
3515 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3516 if (ret)
3517 return ret;
3519 return get_errno(safe_connect(sockfd, addr, addrlen));
3522 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
/* Common worker for sendmsg/recvmsg emulation; @msgp is the guest
 * msghdr already locked by the caller, @send selects the direction. */
3523 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3524 int flags, int send)
3526 abi_long ret, len;
3527 struct msghdr msg;
3528 abi_ulong count;
3529 struct iovec *vec;
3530 abi_ulong target_vec;
3532 if (msgp->msg_name) {
3533 msg.msg_namelen = tswap32(msgp->msg_namelen);
3534 msg.msg_name = alloca(msg.msg_namelen+1);
3535 ret = target_to_host_sockaddr(fd, msg.msg_name,
3536 tswapal(msgp->msg_name),
3537 msg.msg_namelen);
3538 if (ret == -TARGET_EFAULT) {
3539 /* For connected sockets msg_name and msg_namelen must
3540 * be ignored, so returning EFAULT immediately is wrong.
3541 * Instead, pass a bad msg_name to the host kernel, and
3542 * let it decide whether to return EFAULT or not.
3544 msg.msg_name = (void *)-1;
3545 } else if (ret) {
3546 goto out2;
3548 } else {
3549 msg.msg_name = NULL;
3550 msg.msg_namelen = 0;
/* NOTE(review): controllen is doubled, presumably to leave room for
 * host cmsg headers larger than the target's — confirm. */
3552 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3553 msg.msg_control = alloca(msg.msg_controllen);
3554 msg.msg_flags = tswap32(msgp->msg_flags);
3556 count = tswapal(msgp->msg_iovlen);
3557 target_vec = tswapal(msgp->msg_iov);
3559 if (count > IOV_MAX) {
3560 /* sendrcvmsg returns a different errno for this condition than
3561 * readv/writev, so we must catch it here before lock_iovec() does.
3563 ret = -TARGET_EMSGSIZE;
3564 goto out2;
3567 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3568 target_vec, count, send);
3569 if (vec == NULL) {
3570 ret = -host_to_target_errno(errno);
3571 goto out2;
3573 msg.msg_iovlen = count;
3574 msg.msg_iov = vec;
3576 if (send) {
/* Send path: the per-fd data translator, when present, is applied to
 * the first iovec entry only, via a heap bounce buffer. */
3577 if (fd_trans_target_to_host_data(fd)) {
3578 void *host_msg;
3580 host_msg = g_malloc(msg.msg_iov->iov_len);
3581 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3582 ret = fd_trans_target_to_host_data(fd)(host_msg,
3583 msg.msg_iov->iov_len);
3584 if (ret >= 0) {
3585 msg.msg_iov->iov_base = host_msg;
3586 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3588 g_free(host_msg);
3589 } else {
3590 ret = target_to_host_cmsg(&msg, msgp);
3591 if (ret == 0) {
3592 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3595 } else {
/* Receive path: convert payload (or control messages) and the peer
 * address back to guest format, then report the received length. */
3596 ret = get_errno(safe_recvmsg(fd, &msg, flags));
3597 if (!is_error(ret)) {
3598 len = ret;
3599 if (fd_trans_host_to_target_data(fd)) {
3600 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3601 len);
3602 } else {
3603 ret = host_to_target_cmsg(msgp, &msg);
3605 if (!is_error(ret)) {
3606 msgp->msg_namelen = tswap32(msg.msg_namelen);
3607 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3608 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3609 msg.msg_name, msg.msg_namelen);
3610 if (ret) {
3611 goto out;
3615 ret = len;
3620 out:
3621 unlock_iovec(vec, target_vec, count, !send);
3622 out2:
3623 return ret;
3626 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3627 int flags, int send)
3629 abi_long ret;
3630 struct target_msghdr *msgp;
3632 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3633 msgp,
3634 target_msg,
3635 send ? 1 : 0)) {
3636 return -TARGET_EFAULT;
3638 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3639 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3640 return ret;
3643 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3644 * so it might not have this *mmsg-specific flag either.
3646 #ifndef MSG_WAITFORONE
3647 #define MSG_WAITFORONE 0x10000
3648 #endif
3650 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3651 unsigned int vlen, unsigned int flags,
3652 int send)
3654 struct target_mmsghdr *mmsgp;
3655 abi_long ret = 0;
3656 int i;
3658 if (vlen > UIO_MAXIOV) {
3659 vlen = UIO_MAXIOV;
3662 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3663 if (!mmsgp) {
3664 return -TARGET_EFAULT;
3667 for (i = 0; i < vlen; i++) {
3668 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3669 if (is_error(ret)) {
3670 break;
3672 mmsgp[i].msg_len = tswap32(ret);
3673 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3674 if (flags & MSG_WAITFORONE) {
3675 flags |= MSG_DONTWAIT;
3679 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3681 /* Return number of datagrams sent if we sent any at all;
3682 * otherwise return the error.
3684 if (i) {
3685 return i;
3687 return ret;
3690 /* do_accept4() Must return target values and target errnos. */
3691 static abi_long do_accept4(int fd, abi_ulong target_addr,
3692 abi_ulong target_addrlen_addr, int flags)
3694 socklen_t addrlen;
3695 void *addr;
3696 abi_long ret;
3697 int host_flags;
3699 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3701 if (target_addr == 0) {
3702 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3705 /* linux returns EINVAL if addrlen pointer is invalid */
3706 if (get_user_u32(addrlen, target_addrlen_addr))
3707 return -TARGET_EINVAL;
3709 if ((int)addrlen < 0) {
3710 return -TARGET_EINVAL;
3713 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3714 return -TARGET_EINVAL;
3716 addr = alloca(addrlen);
3718 ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
3719 if (!is_error(ret)) {
3720 host_to_target_sockaddr(target_addr, addr, addrlen);
3721 if (put_user_u32(addrlen, target_addrlen_addr))
3722 ret = -TARGET_EFAULT;
3724 return ret;
3727 /* do_getpeername() Must return target values and target errnos. */
3728 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3729 abi_ulong target_addrlen_addr)
3731 socklen_t addrlen;
3732 void *addr;
3733 abi_long ret;
3735 if (get_user_u32(addrlen, target_addrlen_addr))
3736 return -TARGET_EFAULT;
3738 if ((int)addrlen < 0) {
3739 return -TARGET_EINVAL;
3742 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3743 return -TARGET_EFAULT;
3745 addr = alloca(addrlen);
3747 ret = get_errno(getpeername(fd, addr, &addrlen));
3748 if (!is_error(ret)) {
3749 host_to_target_sockaddr(target_addr, addr, addrlen);
3750 if (put_user_u32(addrlen, target_addrlen_addr))
3751 ret = -TARGET_EFAULT;
3753 return ret;
3756 /* do_getsockname() Must return target values and target errnos. */
3757 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3758 abi_ulong target_addrlen_addr)
3760 socklen_t addrlen;
3761 void *addr;
3762 abi_long ret;
3764 if (get_user_u32(addrlen, target_addrlen_addr))
3765 return -TARGET_EFAULT;
3767 if ((int)addrlen < 0) {
3768 return -TARGET_EINVAL;
3771 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3772 return -TARGET_EFAULT;
3774 addr = alloca(addrlen);
3776 ret = get_errno(getsockname(fd, addr, &addrlen));
3777 if (!is_error(ret)) {
3778 host_to_target_sockaddr(target_addr, addr, addrlen);
3779 if (put_user_u32(addrlen, target_addrlen_addr))
3780 ret = -TARGET_EFAULT;
3782 return ret;
3785 /* do_socketpair() Must return target values and target errnos. */
3786 static abi_long do_socketpair(int domain, int type, int protocol,
3787 abi_ulong target_tab_addr)
3789 int tab[2];
3790 abi_long ret;
3792 target_to_host_sock_type(&type);
3794 ret = get_errno(socketpair(domain, type, protocol, tab));
3795 if (!is_error(ret)) {
3796 if (put_user_s32(tab[0], target_tab_addr)
3797 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3798 ret = -TARGET_EFAULT;
3800 return ret;
3803 /* do_sendto() Must return target values and target errnos. */
/* Emulates both send(2) (target_addr == 0) and sendto(2).  When the
 * fd has a data translator, the payload is copied to a heap bounce
 * buffer and converted before sending. */
3804 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3805 abi_ulong target_addr, socklen_t addrlen)
3807 void *addr;
3808 void *host_msg;
3809 void *copy_msg = NULL;
3810 abi_long ret;
3812 if ((int)addrlen < 0) {
3813 return -TARGET_EINVAL;
3816 host_msg = lock_user(VERIFY_READ, msg, len, 1);
3817 if (!host_msg)
3818 return -TARGET_EFAULT;
/* copy_msg keeps the locked guest pointer while host_msg becomes the
 * translated bounce buffer; they are swapped back before unlock. */
3819 if (fd_trans_target_to_host_data(fd)) {
3820 copy_msg = host_msg;
3821 host_msg = g_malloc(len);
3822 memcpy(host_msg, copy_msg, len);
3823 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3824 if (ret < 0) {
3825 goto fail;
3828 if (target_addr) {
3829 addr = alloca(addrlen+1);
3830 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3831 if (ret) {
3832 goto fail;
3834 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3835 } else {
3836 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
/* Shared exit: free the bounce buffer (if any), restore the locked
 * guest pointer, and release it without copy-back. */
3838 fail:
3839 if (copy_msg) {
3840 g_free(host_msg);
3841 host_msg = copy_msg;
3843 unlock_user(host_msg, msg, 0);
3844 return ret;
3847 /* do_recvfrom() Must return target values and target errnos. */
/* Emulates both recv(2) (target_addr == 0) and recvfrom(2). */
3848 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3849 abi_ulong target_addr,
3850 abi_ulong target_addrlen)
3852 socklen_t addrlen;
3853 void *addr;
3854 void *host_msg;
3855 abi_long ret;
3857 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3858 if (!host_msg)
3859 return -TARGET_EFAULT;
3860 if (target_addr) {
3861 if (get_user_u32(addrlen, target_addrlen)) {
3862 ret = -TARGET_EFAULT;
3863 goto fail;
3865 if ((int)addrlen < 0) {
3866 ret = -TARGET_EINVAL;
3867 goto fail;
3869 addr = alloca(addrlen);
3870 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3871 addr, &addrlen));
3872 } else {
3873 addr = NULL; /* To keep compiler quiet. */
3874 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
/* NOTE(review): on success the full 'len' is copied back to the guest
 * even when fewer bytes were received — confirm this is intended. */
3876 if (!is_error(ret)) {
3877 if (fd_trans_host_to_target_data(fd)) {
3878 ret = fd_trans_host_to_target_data(fd)(host_msg, ret);
3880 if (target_addr) {
3881 host_to_target_sockaddr(target_addr, addr, addrlen);
3882 if (put_user_u32(addrlen, target_addrlen)) {
3883 ret = -TARGET_EFAULT;
3884 goto fail;
3887 unlock_user(host_msg, msg, len);
3888 } else {
/* Error path: release the buffer without copying anything back. */
3889 fail:
3890 unlock_user(host_msg, msg, 0);
3892 return ret;
3895 #ifdef TARGET_NR_socketcall
3896 /* do_socketcall() Must return target values and target errnos. */
/* Demultiplex the old-style socketcall(2) syscall: fetch the per-call
 * argument vector from guest memory, then dispatch to the individual
 * do_* handlers.  Unknown subcalls return -TARGET_ENOSYS. */
3897 static abi_long do_socketcall(int num, abi_ulong vptr)
3899 static const unsigned ac[] = { /* number of arguments per call */
3900 [SOCKOP_socket] = 3, /* domain, type, protocol */
3901 [SOCKOP_bind] = 3, /* sockfd, addr, addrlen */
3902 [SOCKOP_connect] = 3, /* sockfd, addr, addrlen */
3903 [SOCKOP_listen] = 2, /* sockfd, backlog */
3904 [SOCKOP_accept] = 3, /* sockfd, addr, addrlen */
3905 [SOCKOP_accept4] = 4, /* sockfd, addr, addrlen, flags */
3906 [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
3907 [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
3908 [SOCKOP_socketpair] = 4, /* domain, type, protocol, tab */
3909 [SOCKOP_send] = 4, /* sockfd, msg, len, flags */
3910 [SOCKOP_recv] = 4, /* sockfd, msg, len, flags */
3911 [SOCKOP_sendto] = 6, /* sockfd, msg, len, flags, addr, addrlen */
3912 [SOCKOP_recvfrom] = 6, /* sockfd, msg, len, flags, addr, addrlen */
3913 [SOCKOP_shutdown] = 2, /* sockfd, how */
3914 [SOCKOP_sendmsg] = 3, /* sockfd, msg, flags */
3915 [SOCKOP_recvmsg] = 3, /* sockfd, msg, flags */
3916 [SOCKOP_sendmmsg] = 4, /* sockfd, msgvec, vlen, flags */
3917 [SOCKOP_recvmmsg] = 4, /* sockfd, msgvec, vlen, flags */
3918 [SOCKOP_setsockopt] = 5, /* sockfd, level, optname, optval, optlen */
3919 [SOCKOP_getsockopt] = 5, /* sockfd, level, optname, optval, optlen */
3921 abi_long a[6]; /* max 6 args */
3923 /* first, collect the arguments in a[] according to ac[] */
3924 if (num >= 0 && num < ARRAY_SIZE(ac)) {
3925 unsigned i;
3926 assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
3927 for (i = 0; i < ac[num]; ++i) {
3928 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3929 return -TARGET_EFAULT;
3934 /* now when we have the args, actually handle the call */
3935 switch (num) {
3936 case SOCKOP_socket: /* domain, type, protocol */
3937 return do_socket(a[0], a[1], a[2]);
3938 case SOCKOP_bind: /* sockfd, addr, addrlen */
3939 return do_bind(a[0], a[1], a[2]);
3940 case SOCKOP_connect: /* sockfd, addr, addrlen */
3941 return do_connect(a[0], a[1], a[2]);
3942 case SOCKOP_listen: /* sockfd, backlog */
3943 return get_errno(listen(a[0], a[1]));
3944 case SOCKOP_accept: /* sockfd, addr, addrlen */
3945 return do_accept4(a[0], a[1], a[2], 0);
3946 case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
3947 return do_accept4(a[0], a[1], a[2], a[3]);
3948 case SOCKOP_getsockname: /* sockfd, addr, addrlen */
3949 return do_getsockname(a[0], a[1], a[2]);
3950 case SOCKOP_getpeername: /* sockfd, addr, addrlen */
3951 return do_getpeername(a[0], a[1], a[2]);
3952 case SOCKOP_socketpair: /* domain, type, protocol, tab */
3953 return do_socketpair(a[0], a[1], a[2], a[3]);
3954 case SOCKOP_send: /* sockfd, msg, len, flags */
3955 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3956 case SOCKOP_recv: /* sockfd, msg, len, flags */
3957 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3958 case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
3959 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3960 case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
3961 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3962 case SOCKOP_shutdown: /* sockfd, how */
3963 return get_errno(shutdown(a[0], a[1]));
3964 case SOCKOP_sendmsg: /* sockfd, msg, flags */
3965 return do_sendrecvmsg(a[0], a[1], a[2], 1);
3966 case SOCKOP_recvmsg: /* sockfd, msg, flags */
3967 return do_sendrecvmsg(a[0], a[1], a[2], 0);
3968 case SOCKOP_sendmmsg: /* sockfd, msgvec, vlen, flags */
3969 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3970 case SOCKOP_recvmmsg: /* sockfd, msgvec, vlen, flags */
3971 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3972 case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
3973 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3974 case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
3975 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3976 default:
3977 gemu_log("Unsupported socketcall: %d\n", num);
3978 return -TARGET_ENOSYS;
3981 #endif
3983 #define N_SHM_REGIONS 32
/* Tracks guest shmat() mappings (start address and size) so they can
 * be found again later. */
3985 static struct shm_region {
3986 abi_ulong start;
3987 abi_ulong size;
3988 bool in_use;
3989 } shm_regions[N_SHM_REGIONS];
3991 #ifndef TARGET_SEMID64_DS
3992 /* asm-generic version of this struct */
/* Target-layout semid64_ds; on 32-bit ABIs the time fields are
 * followed by padding words. */
3993 struct target_semid64_ds
3995 struct target_ipc_perm sem_perm;
3996 abi_ulong sem_otime;
3997 #if TARGET_ABI_BITS == 32
3998 abi_ulong __unused1;
3999 #endif
4000 abi_ulong sem_ctime;
4001 #if TARGET_ABI_BITS == 32
4002 abi_ulong __unused2;
4003 #endif
4004 abi_ulong sem_nsems;
4005 abi_ulong __unused3;
4006 abi_ulong __unused4;
4008 #endif
4010 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
4011 abi_ulong target_addr)
4013 struct target_ipc_perm *target_ip;
4014 struct target_semid64_ds *target_sd;
4016 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4017 return -TARGET_EFAULT;
4018 target_ip = &(target_sd->sem_perm);
4019 host_ip->__key = tswap32(target_ip->__key);
4020 host_ip->uid = tswap32(target_ip->uid);
4021 host_ip->gid = tswap32(target_ip->gid);
4022 host_ip->cuid = tswap32(target_ip->cuid);
4023 host_ip->cgid = tswap32(target_ip->cgid);
4024 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4025 host_ip->mode = tswap32(target_ip->mode);
4026 #else
4027 host_ip->mode = tswap16(target_ip->mode);
4028 #endif
4029 #if defined(TARGET_PPC)
4030 host_ip->__seq = tswap32(target_ip->__seq);
4031 #else
4032 host_ip->__seq = tswap16(target_ip->__seq);
4033 #endif
4034 unlock_user_struct(target_sd, target_addr, 0);
4035 return 0;
4038 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
4039 struct ipc_perm *host_ip)
4041 struct target_ipc_perm *target_ip;
4042 struct target_semid64_ds *target_sd;
4044 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4045 return -TARGET_EFAULT;
4046 target_ip = &(target_sd->sem_perm);
4047 target_ip->__key = tswap32(host_ip->__key);
4048 target_ip->uid = tswap32(host_ip->uid);
4049 target_ip->gid = tswap32(host_ip->gid);
4050 target_ip->cuid = tswap32(host_ip->cuid);
4051 target_ip->cgid = tswap32(host_ip->cgid);
4052 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4053 target_ip->mode = tswap32(host_ip->mode);
4054 #else
4055 target_ip->mode = tswap16(host_ip->mode);
4056 #endif
4057 #if defined(TARGET_PPC)
4058 target_ip->__seq = tswap32(host_ip->__seq);
4059 #else
4060 target_ip->__seq = tswap16(host_ip->__seq);
4061 #endif
4062 unlock_user_struct(target_sd, target_addr, 1);
4063 return 0;
4066 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
4067 abi_ulong target_addr)
4069 struct target_semid64_ds *target_sd;
4071 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4072 return -TARGET_EFAULT;
4073 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
4074 return -TARGET_EFAULT;
4075 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
4076 host_sd->sem_otime = tswapal(target_sd->sem_otime);
4077 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
4078 unlock_user_struct(target_sd, target_addr, 0);
4079 return 0;
4082 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
4083 struct semid_ds *host_sd)
4085 struct target_semid64_ds *target_sd;
4087 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4088 return -TARGET_EFAULT;
4089 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
4090 return -TARGET_EFAULT;
4091 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
4092 target_sd->sem_otime = tswapal(host_sd->sem_otime);
4093 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
4094 unlock_user_struct(target_sd, target_addr, 1);
4095 return 0;
/* Target layout of struct seminfo (IPC_INFO/SEM_INFO results). */
4098 struct target_seminfo {
4099 int semmap;
4100 int semmni;
4101 int semmns;
4102 int semmnu;
4103 int semmsl;
4104 int semopm;
4105 int semume;
4106 int semusz;
4107 int semvmx;
4108 int semaem;
4111 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
4112 struct seminfo *host_seminfo)
4114 struct target_seminfo *target_seminfo;
4115 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
4116 return -TARGET_EFAULT;
4117 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
4118 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
4119 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
4120 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
4121 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
4122 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
4123 __put_user(host_seminfo->semume, &target_seminfo->semume);
4124 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
4125 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
4126 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
4127 unlock_user_struct(target_seminfo, target_addr, 1);
4128 return 0;
/* Host-side argument union for semctl(2). */
4131 union semun {
4132 int val;
4133 struct semid_ds *buf;
4134 unsigned short *array;
4135 struct seminfo *__buf;
/* Guest view of the same union: the pointer members are guest
 * addresses. */
4138 union target_semun {
4139 int val;
4140 abi_ulong buf;
4141 abi_ulong array;
4142 abi_ulong __buf;
4145 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
4146 abi_ulong target_addr)
4148 int nsems;
4149 unsigned short *array;
4150 union semun semun;
4151 struct semid_ds semid_ds;
4152 int i, ret;
4154 semun.buf = &semid_ds;
4156 ret = semctl(semid, 0, IPC_STAT, semun);
4157 if (ret == -1)
4158 return get_errno(ret);
4160 nsems = semid_ds.sem_nsems;
4162 *host_array = g_try_new(unsigned short, nsems);
4163 if (!*host_array) {
4164 return -TARGET_ENOMEM;
4166 array = lock_user(VERIFY_READ, target_addr,
4167 nsems*sizeof(unsigned short), 1);
4168 if (!array) {
4169 g_free(*host_array);
4170 return -TARGET_EFAULT;
4173 for(i=0; i<nsems; i++) {
4174 __get_user((*host_array)[i], &array[i]);
4176 unlock_user(array, target_addr, 0);
4178 return 0;
4181 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4182 unsigned short **host_array)
4184 int nsems;
4185 unsigned short *array;
4186 union semun semun;
4187 struct semid_ds semid_ds;
4188 int i, ret;
4190 semun.buf = &semid_ds;
4192 ret = semctl(semid, 0, IPC_STAT, semun);
4193 if (ret == -1)
4194 return get_errno(ret);
4196 nsems = semid_ds.sem_nsems;
4198 array = lock_user(VERIFY_WRITE, target_addr,
4199 nsems*sizeof(unsigned short), 0);
4200 if (!array)
4201 return -TARGET_EFAULT;
4203 for(i=0; i<nsems; i++) {
4204 __put_user((*host_array)[i], &array[i]);
4206 g_free(*host_array);
4207 unlock_user(array, target_addr, 1);
4209 return 0;
/* Emulate semctl(2): marshal the per-command argument union between
 * guest and host representations around the host call.
 * NOTE(review): the semarray helpers own and free 'array'; verify all
 * of their error paths release it. */
4212 static inline abi_long do_semctl(int semid, int semnum, int cmd,
4213 abi_ulong target_arg)
4215 union target_semun target_su = { .buf = target_arg };
4216 union semun arg;
4217 struct semid_ds dsarg;
4218 unsigned short *array = NULL;
4219 struct seminfo seminfo;
4220 abi_long ret = -TARGET_EINVAL;
4221 abi_long err;
4222 cmd &= 0xff;
4224 switch( cmd ) {
4225 case GETVAL:
4226 case SETVAL:
4227 /* In 64 bit cross-endian situations, we will erroneously pick up
4228 * the wrong half of the union for the "val" element. To rectify
4229 * this, the entire 8-byte structure is byteswapped, followed by
4230 * a swap of the 4 byte val field. In other cases, the data is
4231 * already in proper host byte order. */
4232 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4233 target_su.buf = tswapal(target_su.buf);
4234 arg.val = tswap32(target_su.val);
4235 } else {
4236 arg.val = target_su.val;
4238 ret = get_errno(semctl(semid, semnum, cmd, arg));
4239 break;
4240 case GETALL:
4241 case SETALL:
4242 err = target_to_host_semarray(semid, &array, target_su.array);
4243 if (err)
4244 return err;
4245 arg.array = array;
4246 ret = get_errno(semctl(semid, semnum, cmd, arg));
4247 err = host_to_target_semarray(semid, target_su.array, &array);
4248 if (err)
4249 return err;
4250 break;
4251 case IPC_STAT:
4252 case IPC_SET:
4253 case SEM_STAT:
4254 err = target_to_host_semid_ds(&dsarg, target_su.buf);
4255 if (err)
4256 return err;
4257 arg.buf = &dsarg;
4258 ret = get_errno(semctl(semid, semnum, cmd, arg));
4259 err = host_to_target_semid_ds(target_su.buf, &dsarg);
4260 if (err)
4261 return err;
4262 break;
4263 case IPC_INFO:
4264 case SEM_INFO:
4265 arg.__buf = &seminfo;
4266 ret = get_errno(semctl(semid, semnum, cmd, arg));
4267 err = host_to_target_seminfo(target_su.__buf, &seminfo);
4268 if (err)
4269 return err;
4270 break;
/* These commands take no argument union at all. */
4271 case IPC_RMID:
4272 case GETPID:
4273 case GETNCNT:
4274 case GETZCNT:
4275 ret = get_errno(semctl(semid, semnum, cmd, NULL));
4276 break;
4279 return ret;
/* Target layout of struct sembuf (semop operations). */
4282 struct target_sembuf {
4283 unsigned short sem_num;
4284 short sem_op;
4285 short sem_flg;
4288 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4289 abi_ulong target_addr,
4290 unsigned nsops)
4292 struct target_sembuf *target_sembuf;
4293 int i;
4295 target_sembuf = lock_user(VERIFY_READ, target_addr,
4296 nsops*sizeof(struct target_sembuf), 1);
4297 if (!target_sembuf)
4298 return -TARGET_EFAULT;
4300 for(i=0; i<nsops; i++) {
4301 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4302 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4303 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4306 unlock_user(target_sembuf, target_addr, 0);
4308 return 0;
4311 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
4313 struct sembuf sops[nsops];
4315 if (target_to_host_sembuf(sops, ptr, nsops))
4316 return -TARGET_EFAULT;
4318 return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
/* Target layout of msqid_ds; 32-bit ABIs carry padding words after
 * each time field. */
4321 struct target_msqid_ds
4323 struct target_ipc_perm msg_perm;
4324 abi_ulong msg_stime;
4325 #if TARGET_ABI_BITS == 32
4326 abi_ulong __unused1;
4327 #endif
4328 abi_ulong msg_rtime;
4329 #if TARGET_ABI_BITS == 32
4330 abi_ulong __unused2;
4331 #endif
4332 abi_ulong msg_ctime;
4333 #if TARGET_ABI_BITS == 32
4334 abi_ulong __unused3;
4335 #endif
4336 abi_ulong __msg_cbytes;
4337 abi_ulong msg_qnum;
4338 abi_ulong msg_qbytes;
4339 abi_ulong msg_lspid;
4340 abi_ulong msg_lrpid;
4341 abi_ulong __unused4;
4342 abi_ulong __unused5;
4345 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4346 abi_ulong target_addr)
4348 struct target_msqid_ds *target_md;
4350 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4351 return -TARGET_EFAULT;
4352 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4353 return -TARGET_EFAULT;
4354 host_md->msg_stime = tswapal(target_md->msg_stime);
4355 host_md->msg_rtime = tswapal(target_md->msg_rtime);
4356 host_md->msg_ctime = tswapal(target_md->msg_ctime);
4357 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4358 host_md->msg_qnum = tswapal(target_md->msg_qnum);
4359 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4360 host_md->msg_lspid = tswapal(target_md->msg_lspid);
4361 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4362 unlock_user_struct(target_md, target_addr, 0);
4363 return 0;
4366 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4367 struct msqid_ds *host_md)
4369 struct target_msqid_ds *target_md;
4371 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4372 return -TARGET_EFAULT;
4373 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4374 return -TARGET_EFAULT;
4375 target_md->msg_stime = tswapal(host_md->msg_stime);
4376 target_md->msg_rtime = tswapal(host_md->msg_rtime);
4377 target_md->msg_ctime = tswapal(host_md->msg_ctime);
4378 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4379 target_md->msg_qnum = tswapal(host_md->msg_qnum);
4380 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4381 target_md->msg_lspid = tswapal(host_md->msg_lspid);
4382 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4383 unlock_user_struct(target_md, target_addr, 1);
4384 return 0;
/* Target layout of struct msginfo (IPC_INFO/MSG_INFO results). */
4387 struct target_msginfo {
4388 int msgpool;
4389 int msgmap;
4390 int msgmax;
4391 int msgmnb;
4392 int msgmni;
4393 int msgssz;
4394 int msgtql;
4395 unsigned short int msgseg;
4398 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4399 struct msginfo *host_msginfo)
4401 struct target_msginfo *target_msginfo;
4402 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4403 return -TARGET_EFAULT;
4404 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4405 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4406 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4407 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4408 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4409 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4410 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4411 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4412 unlock_user_struct(target_msginfo, target_addr, 1);
4413 return 0;
4416 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4418 struct msqid_ds dsarg;
4419 struct msginfo msginfo;
4420 abi_long ret = -TARGET_EINVAL;
4422 cmd &= 0xff;
4424 switch (cmd) {
4425 case IPC_STAT:
4426 case IPC_SET:
4427 case MSG_STAT:
4428 if (target_to_host_msqid_ds(&dsarg,ptr))
4429 return -TARGET_EFAULT;
4430 ret = get_errno(msgctl(msgid, cmd, &dsarg));
4431 if (host_to_target_msqid_ds(ptr,&dsarg))
4432 return -TARGET_EFAULT;
4433 break;
4434 case IPC_RMID:
4435 ret = get_errno(msgctl(msgid, cmd, NULL));
4436 break;
4437 case IPC_INFO:
4438 case MSG_INFO:
4439 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4440 if (host_to_target_msginfo(ptr, &msginfo))
4441 return -TARGET_EFAULT;
4442 break;
4445 return ret;
4448 struct target_msgbuf {
4449 abi_long mtype;
4450 char mtext[1];
4453 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4454 ssize_t msgsz, int msgflg)
4456 struct target_msgbuf *target_mb;
4457 struct msgbuf *host_mb;
4458 abi_long ret = 0;
4460 if (msgsz < 0) {
4461 return -TARGET_EINVAL;
4464 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4465 return -TARGET_EFAULT;
4466 host_mb = g_try_malloc(msgsz + sizeof(long));
4467 if (!host_mb) {
4468 unlock_user_struct(target_mb, msgp, 0);
4469 return -TARGET_ENOMEM;
4471 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4472 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4473 ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4474 g_free(host_mb);
4475 unlock_user_struct(target_mb, msgp, 0);
4477 return ret;
4480 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4481 ssize_t msgsz, abi_long msgtyp,
4482 int msgflg)
4484 struct target_msgbuf *target_mb;
4485 char *target_mtext;
4486 struct msgbuf *host_mb;
4487 abi_long ret = 0;
4489 if (msgsz < 0) {
4490 return -TARGET_EINVAL;
4493 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4494 return -TARGET_EFAULT;
4496 host_mb = g_try_malloc(msgsz + sizeof(long));
4497 if (!host_mb) {
4498 ret = -TARGET_ENOMEM;
4499 goto end;
4501 ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4503 if (ret > 0) {
4504 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4505 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4506 if (!target_mtext) {
4507 ret = -TARGET_EFAULT;
4508 goto end;
4510 memcpy(target_mb->mtext, host_mb->mtext, ret);
4511 unlock_user(target_mtext, target_mtext_addr, ret);
4514 target_mb->mtype = tswapal(host_mb->mtype);
4516 end:
4517 if (target_mb)
4518 unlock_user_struct(target_mb, msgp, 1);
4519 g_free(host_mb);
4520 return ret;
4523 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4524 abi_ulong target_addr)
4526 struct target_shmid_ds *target_sd;
4528 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4529 return -TARGET_EFAULT;
4530 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4531 return -TARGET_EFAULT;
4532 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4533 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4534 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4535 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4536 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4537 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4538 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4539 unlock_user_struct(target_sd, target_addr, 0);
4540 return 0;
4543 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4544 struct shmid_ds *host_sd)
4546 struct target_shmid_ds *target_sd;
4548 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4549 return -TARGET_EFAULT;
4550 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4551 return -TARGET_EFAULT;
4552 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4553 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4554 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4555 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4556 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4557 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4558 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4559 unlock_user_struct(target_sd, target_addr, 1);
4560 return 0;
4563 struct target_shminfo {
4564 abi_ulong shmmax;
4565 abi_ulong shmmin;
4566 abi_ulong shmmni;
4567 abi_ulong shmseg;
4568 abi_ulong shmall;
4571 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4572 struct shminfo *host_shminfo)
4574 struct target_shminfo *target_shminfo;
4575 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4576 return -TARGET_EFAULT;
4577 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4578 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4579 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4580 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4581 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4582 unlock_user_struct(target_shminfo, target_addr, 1);
4583 return 0;
4586 struct target_shm_info {
4587 int used_ids;
4588 abi_ulong shm_tot;
4589 abi_ulong shm_rss;
4590 abi_ulong shm_swp;
4591 abi_ulong swap_attempts;
4592 abi_ulong swap_successes;
4595 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4596 struct shm_info *host_shm_info)
4598 struct target_shm_info *target_shm_info;
4599 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4600 return -TARGET_EFAULT;
4601 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4602 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4603 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4604 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4605 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4606 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4607 unlock_user_struct(target_shm_info, target_addr, 1);
4608 return 0;
4611 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4613 struct shmid_ds dsarg;
4614 struct shminfo shminfo;
4615 struct shm_info shm_info;
4616 abi_long ret = -TARGET_EINVAL;
4618 cmd &= 0xff;
4620 switch(cmd) {
4621 case IPC_STAT:
4622 case IPC_SET:
4623 case SHM_STAT:
4624 if (target_to_host_shmid_ds(&dsarg, buf))
4625 return -TARGET_EFAULT;
4626 ret = get_errno(shmctl(shmid, cmd, &dsarg));
4627 if (host_to_target_shmid_ds(buf, &dsarg))
4628 return -TARGET_EFAULT;
4629 break;
4630 case IPC_INFO:
4631 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4632 if (host_to_target_shminfo(buf, &shminfo))
4633 return -TARGET_EFAULT;
4634 break;
4635 case SHM_INFO:
4636 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4637 if (host_to_target_shm_info(buf, &shm_info))
4638 return -TARGET_EFAULT;
4639 break;
4640 case IPC_RMID:
4641 case SHM_LOCK:
4642 case SHM_UNLOCK:
4643 ret = get_errno(shmctl(shmid, cmd, NULL));
4644 break;
4647 return ret;
4650 #ifndef TARGET_FORCE_SHMLBA
4651 /* For most architectures, SHMLBA is the same as the page size;
4652 * some architectures have larger values, in which case they should
4653 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4654 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4655 * and defining its own value for SHMLBA.
4657 * The kernel also permits SHMLBA to be set by the architecture to a
4658 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4659 * this means that addresses are rounded to the large size if
4660 * SHM_RND is set but addresses not aligned to that size are not rejected
4661 * as long as they are at least page-aligned. Since the only architecture
4662 * which uses this is ia64 this code doesn't provide for that oddity.
4664 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4666 return TARGET_PAGE_SIZE;
4668 #endif
4670 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4671 int shmid, abi_ulong shmaddr, int shmflg)
4673 abi_long raddr;
4674 void *host_raddr;
4675 struct shmid_ds shm_info;
4676 int i,ret;
4677 abi_ulong shmlba;
4679 /* find out the length of the shared memory segment */
4680 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4681 if (is_error(ret)) {
4682 /* can't get length, bail out */
4683 return ret;
4686 shmlba = target_shmlba(cpu_env);
4688 if (shmaddr & (shmlba - 1)) {
4689 if (shmflg & SHM_RND) {
4690 shmaddr &= ~(shmlba - 1);
4691 } else {
4692 return -TARGET_EINVAL;
4696 mmap_lock();
4698 if (shmaddr)
4699 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4700 else {
4701 abi_ulong mmap_start;
4703 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
4705 if (mmap_start == -1) {
4706 errno = ENOMEM;
4707 host_raddr = (void *)-1;
4708 } else
4709 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4712 if (host_raddr == (void *)-1) {
4713 mmap_unlock();
4714 return get_errno((long)host_raddr);
4716 raddr=h2g((unsigned long)host_raddr);
4718 page_set_flags(raddr, raddr + shm_info.shm_segsz,
4719 PAGE_VALID | PAGE_READ |
4720 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
4722 for (i = 0; i < N_SHM_REGIONS; i++) {
4723 if (!shm_regions[i].in_use) {
4724 shm_regions[i].in_use = true;
4725 shm_regions[i].start = raddr;
4726 shm_regions[i].size = shm_info.shm_segsz;
4727 break;
4731 mmap_unlock();
4732 return raddr;
4736 static inline abi_long do_shmdt(abi_ulong shmaddr)
4738 int i;
4740 for (i = 0; i < N_SHM_REGIONS; ++i) {
4741 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4742 shm_regions[i].in_use = false;
4743 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4744 break;
4748 return get_errno(shmdt(g2h(shmaddr)));
4751 #ifdef TARGET_NR_ipc
4752 /* ??? This only works with linear mappings. */
4753 /* do_ipc() must return target values and target errnos. */
4754 static abi_long do_ipc(CPUArchState *cpu_env,
4755 unsigned int call, abi_long first,
4756 abi_long second, abi_long third,
4757 abi_long ptr, abi_long fifth)
4759 int version;
4760 abi_long ret = 0;
4762 version = call >> 16;
4763 call &= 0xffff;
4765 switch (call) {
4766 case IPCOP_semop:
4767 ret = do_semop(first, ptr, second);
4768 break;
4770 case IPCOP_semget:
4771 ret = get_errno(semget(first, second, third));
4772 break;
4774 case IPCOP_semctl: {
4775 /* The semun argument to semctl is passed by value, so dereference the
4776 * ptr argument. */
4777 abi_ulong atptr;
4778 get_user_ual(atptr, ptr);
4779 ret = do_semctl(first, second, third, atptr);
4780 break;
4783 case IPCOP_msgget:
4784 ret = get_errno(msgget(first, second));
4785 break;
4787 case IPCOP_msgsnd:
4788 ret = do_msgsnd(first, ptr, second, third);
4789 break;
4791 case IPCOP_msgctl:
4792 ret = do_msgctl(first, second, ptr);
4793 break;
4795 case IPCOP_msgrcv:
4796 switch (version) {
4797 case 0:
4799 struct target_ipc_kludge {
4800 abi_long msgp;
4801 abi_long msgtyp;
4802 } *tmp;
4804 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4805 ret = -TARGET_EFAULT;
4806 break;
4809 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4811 unlock_user_struct(tmp, ptr, 0);
4812 break;
4814 default:
4815 ret = do_msgrcv(first, ptr, second, fifth, third);
4817 break;
4819 case IPCOP_shmat:
4820 switch (version) {
4821 default:
4823 abi_ulong raddr;
4824 raddr = do_shmat(cpu_env, first, ptr, second);
4825 if (is_error(raddr))
4826 return get_errno(raddr);
4827 if (put_user_ual(raddr, third))
4828 return -TARGET_EFAULT;
4829 break;
4831 case 1:
4832 ret = -TARGET_EINVAL;
4833 break;
4835 break;
4836 case IPCOP_shmdt:
4837 ret = do_shmdt(ptr);
4838 break;
4840 case IPCOP_shmget:
4841 /* IPC_* flag values are the same on all linux platforms */
4842 ret = get_errno(shmget(first, second, third));
4843 break;
4845 /* IPC_* and SHM_* command values are the same on all linux platforms */
4846 case IPCOP_shmctl:
4847 ret = do_shmctl(first, second, ptr);
4848 break;
4849 default:
4850 gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
4851 ret = -TARGET_ENOSYS;
4852 break;
4854 return ret;
4856 #endif
4858 /* kernel structure types definitions */
4860 #define STRUCT(name, ...) STRUCT_ ## name,
4861 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4862 enum {
4863 #include "syscall_types.h"
4864 STRUCT_MAX
4866 #undef STRUCT
4867 #undef STRUCT_SPECIAL
4869 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4870 #define STRUCT_SPECIAL(name)
4871 #include "syscall_types.h"
4872 #undef STRUCT
4873 #undef STRUCT_SPECIAL
4875 typedef struct IOCTLEntry IOCTLEntry;
4877 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4878 int fd, int cmd, abi_long arg);
4880 struct IOCTLEntry {
4881 int target_cmd;
4882 unsigned int host_cmd;
4883 const char *name;
4884 int access;
4885 do_ioctl_fn *do_ioctl;
4886 const argtype arg_type[5];
4889 #define IOC_R 0x0001
4890 #define IOC_W 0x0002
4891 #define IOC_RW (IOC_R | IOC_W)
4893 #define MAX_STRUCT_SIZE 4096
4895 #ifdef CONFIG_FIEMAP
4896 /* So fiemap access checks don't overflow on 32 bit systems.
4897 * This is very slightly smaller than the limit imposed by
4898 * the underlying kernel.
4900 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4901 / sizeof(struct fiemap_extent))
4903 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4904 int fd, int cmd, abi_long arg)
4906 /* The parameter for this ioctl is a struct fiemap followed
4907 * by an array of struct fiemap_extent whose size is set
4908 * in fiemap->fm_extent_count. The array is filled in by the
4909 * ioctl.
4911 int target_size_in, target_size_out;
4912 struct fiemap *fm;
4913 const argtype *arg_type = ie->arg_type;
4914 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4915 void *argptr, *p;
4916 abi_long ret;
4917 int i, extent_size = thunk_type_size(extent_arg_type, 0);
4918 uint32_t outbufsz;
4919 int free_fm = 0;
4921 assert(arg_type[0] == TYPE_PTR);
4922 assert(ie->access == IOC_RW);
4923 arg_type++;
4924 target_size_in = thunk_type_size(arg_type, 0);
4925 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4926 if (!argptr) {
4927 return -TARGET_EFAULT;
4929 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4930 unlock_user(argptr, arg, 0);
4931 fm = (struct fiemap *)buf_temp;
4932 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4933 return -TARGET_EINVAL;
4936 outbufsz = sizeof (*fm) +
4937 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4939 if (outbufsz > MAX_STRUCT_SIZE) {
4940 /* We can't fit all the extents into the fixed size buffer.
4941 * Allocate one that is large enough and use it instead.
4943 fm = g_try_malloc(outbufsz);
4944 if (!fm) {
4945 return -TARGET_ENOMEM;
4947 memcpy(fm, buf_temp, sizeof(struct fiemap));
4948 free_fm = 1;
4950 ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4951 if (!is_error(ret)) {
4952 target_size_out = target_size_in;
4953 /* An extent_count of 0 means we were only counting the extents
4954 * so there are no structs to copy
4956 if (fm->fm_extent_count != 0) {
4957 target_size_out += fm->fm_mapped_extents * extent_size;
4959 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4960 if (!argptr) {
4961 ret = -TARGET_EFAULT;
4962 } else {
4963 /* Convert the struct fiemap */
4964 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4965 if (fm->fm_extent_count != 0) {
4966 p = argptr + target_size_in;
4967 /* ...and then all the struct fiemap_extents */
4968 for (i = 0; i < fm->fm_mapped_extents; i++) {
4969 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4970 THUNK_TARGET);
4971 p += extent_size;
4974 unlock_user(argptr, arg, target_size_out);
4977 if (free_fm) {
4978 g_free(fm);
4980 return ret;
4982 #endif
4984 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4985 int fd, int cmd, abi_long arg)
4987 const argtype *arg_type = ie->arg_type;
4988 int target_size;
4989 void *argptr;
4990 int ret;
4991 struct ifconf *host_ifconf;
4992 uint32_t outbufsz;
4993 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4994 int target_ifreq_size;
4995 int nb_ifreq;
4996 int free_buf = 0;
4997 int i;
4998 int target_ifc_len;
4999 abi_long target_ifc_buf;
5000 int host_ifc_len;
5001 char *host_ifc_buf;
5003 assert(arg_type[0] == TYPE_PTR);
5004 assert(ie->access == IOC_RW);
5006 arg_type++;
5007 target_size = thunk_type_size(arg_type, 0);
5009 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5010 if (!argptr)
5011 return -TARGET_EFAULT;
5012 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5013 unlock_user(argptr, arg, 0);
5015 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
5016 target_ifc_len = host_ifconf->ifc_len;
5017 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
5019 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
5020 nb_ifreq = target_ifc_len / target_ifreq_size;
5021 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
5023 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
5024 if (outbufsz > MAX_STRUCT_SIZE) {
5025 /* We can't fit all the extents into the fixed size buffer.
5026 * Allocate one that is large enough and use it instead.
5028 host_ifconf = malloc(outbufsz);
5029 if (!host_ifconf) {
5030 return -TARGET_ENOMEM;
5032 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
5033 free_buf = 1;
5035 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
5037 host_ifconf->ifc_len = host_ifc_len;
5038 host_ifconf->ifc_buf = host_ifc_buf;
5040 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
5041 if (!is_error(ret)) {
5042 /* convert host ifc_len to target ifc_len */
5044 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
5045 target_ifc_len = nb_ifreq * target_ifreq_size;
5046 host_ifconf->ifc_len = target_ifc_len;
5048 /* restore target ifc_buf */
5050 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
5052 /* copy struct ifconf to target user */
5054 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5055 if (!argptr)
5056 return -TARGET_EFAULT;
5057 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
5058 unlock_user(argptr, arg, target_size);
5060 /* copy ifreq[] to target user */
5062 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
5063 for (i = 0; i < nb_ifreq ; i++) {
5064 thunk_convert(argptr + i * target_ifreq_size,
5065 host_ifc_buf + i * sizeof(struct ifreq),
5066 ifreq_arg_type, THUNK_TARGET);
5068 unlock_user(argptr, target_ifc_buf, target_ifc_len);
5071 if (free_buf) {
5072 free(host_ifconf);
5075 return ret;
5078 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5079 int cmd, abi_long arg)
5081 void *argptr;
5082 struct dm_ioctl *host_dm;
5083 abi_long guest_data;
5084 uint32_t guest_data_size;
5085 int target_size;
5086 const argtype *arg_type = ie->arg_type;
5087 abi_long ret;
5088 void *big_buf = NULL;
5089 char *host_data;
5091 arg_type++;
5092 target_size = thunk_type_size(arg_type, 0);
5093 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5094 if (!argptr) {
5095 ret = -TARGET_EFAULT;
5096 goto out;
5098 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5099 unlock_user(argptr, arg, 0);
5101 /* buf_temp is too small, so fetch things into a bigger buffer */
5102 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5103 memcpy(big_buf, buf_temp, target_size);
5104 buf_temp = big_buf;
5105 host_dm = big_buf;
5107 guest_data = arg + host_dm->data_start;
5108 if ((guest_data - arg) < 0) {
5109 ret = -TARGET_EINVAL;
5110 goto out;
5112 guest_data_size = host_dm->data_size - host_dm->data_start;
5113 host_data = (char*)host_dm + host_dm->data_start;
5115 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5116 if (!argptr) {
5117 ret = -TARGET_EFAULT;
5118 goto out;
5121 switch (ie->host_cmd) {
5122 case DM_REMOVE_ALL:
5123 case DM_LIST_DEVICES:
5124 case DM_DEV_CREATE:
5125 case DM_DEV_REMOVE:
5126 case DM_DEV_SUSPEND:
5127 case DM_DEV_STATUS:
5128 case DM_DEV_WAIT:
5129 case DM_TABLE_STATUS:
5130 case DM_TABLE_CLEAR:
5131 case DM_TABLE_DEPS:
5132 case DM_LIST_VERSIONS:
5133 /* no input data */
5134 break;
5135 case DM_DEV_RENAME:
5136 case DM_DEV_SET_GEOMETRY:
5137 /* data contains only strings */
5138 memcpy(host_data, argptr, guest_data_size);
5139 break;
5140 case DM_TARGET_MSG:
5141 memcpy(host_data, argptr, guest_data_size);
5142 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5143 break;
5144 case DM_TABLE_LOAD:
5146 void *gspec = argptr;
5147 void *cur_data = host_data;
5148 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5149 int spec_size = thunk_type_size(arg_type, 0);
5150 int i;
5152 for (i = 0; i < host_dm->target_count; i++) {
5153 struct dm_target_spec *spec = cur_data;
5154 uint32_t next;
5155 int slen;
5157 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5158 slen = strlen((char*)gspec + spec_size) + 1;
5159 next = spec->next;
5160 spec->next = sizeof(*spec) + slen;
5161 strcpy((char*)&spec[1], gspec + spec_size);
5162 gspec += next;
5163 cur_data += spec->next;
5165 break;
5167 default:
5168 ret = -TARGET_EINVAL;
5169 unlock_user(argptr, guest_data, 0);
5170 goto out;
5172 unlock_user(argptr, guest_data, 0);
5174 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5175 if (!is_error(ret)) {
5176 guest_data = arg + host_dm->data_start;
5177 guest_data_size = host_dm->data_size - host_dm->data_start;
5178 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5179 switch (ie->host_cmd) {
5180 case DM_REMOVE_ALL:
5181 case DM_DEV_CREATE:
5182 case DM_DEV_REMOVE:
5183 case DM_DEV_RENAME:
5184 case DM_DEV_SUSPEND:
5185 case DM_DEV_STATUS:
5186 case DM_TABLE_LOAD:
5187 case DM_TABLE_CLEAR:
5188 case DM_TARGET_MSG:
5189 case DM_DEV_SET_GEOMETRY:
5190 /* no return data */
5191 break;
5192 case DM_LIST_DEVICES:
5194 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5195 uint32_t remaining_data = guest_data_size;
5196 void *cur_data = argptr;
5197 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5198 int nl_size = 12; /* can't use thunk_size due to alignment */
5200 while (1) {
5201 uint32_t next = nl->next;
5202 if (next) {
5203 nl->next = nl_size + (strlen(nl->name) + 1);
5205 if (remaining_data < nl->next) {
5206 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5207 break;
5209 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5210 strcpy(cur_data + nl_size, nl->name);
5211 cur_data += nl->next;
5212 remaining_data -= nl->next;
5213 if (!next) {
5214 break;
5216 nl = (void*)nl + next;
5218 break;
5220 case DM_DEV_WAIT:
5221 case DM_TABLE_STATUS:
5223 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5224 void *cur_data = argptr;
5225 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5226 int spec_size = thunk_type_size(arg_type, 0);
5227 int i;
5229 for (i = 0; i < host_dm->target_count; i++) {
5230 uint32_t next = spec->next;
5231 int slen = strlen((char*)&spec[1]) + 1;
5232 spec->next = (cur_data - argptr) + spec_size + slen;
5233 if (guest_data_size < spec->next) {
5234 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5235 break;
5237 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5238 strcpy(cur_data + spec_size, (char*)&spec[1]);
5239 cur_data = argptr + spec->next;
5240 spec = (void*)host_dm + host_dm->data_start + next;
5242 break;
5244 case DM_TABLE_DEPS:
5246 void *hdata = (void*)host_dm + host_dm->data_start;
5247 int count = *(uint32_t*)hdata;
5248 uint64_t *hdev = hdata + 8;
5249 uint64_t *gdev = argptr + 8;
5250 int i;
5252 *(uint32_t*)argptr = tswap32(count);
5253 for (i = 0; i < count; i++) {
5254 *gdev = tswap64(*hdev);
5255 gdev++;
5256 hdev++;
5258 break;
5260 case DM_LIST_VERSIONS:
5262 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5263 uint32_t remaining_data = guest_data_size;
5264 void *cur_data = argptr;
5265 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5266 int vers_size = thunk_type_size(arg_type, 0);
5268 while (1) {
5269 uint32_t next = vers->next;
5270 if (next) {
5271 vers->next = vers_size + (strlen(vers->name) + 1);
5273 if (remaining_data < vers->next) {
5274 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5275 break;
5277 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5278 strcpy(cur_data + vers_size, vers->name);
5279 cur_data += vers->next;
5280 remaining_data -= vers->next;
5281 if (!next) {
5282 break;
5284 vers = (void*)vers + next;
5286 break;
5288 default:
5289 unlock_user(argptr, guest_data, 0);
5290 ret = -TARGET_EINVAL;
5291 goto out;
5293 unlock_user(argptr, guest_data, guest_data_size);
5295 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5296 if (!argptr) {
5297 ret = -TARGET_EFAULT;
5298 goto out;
5300 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5301 unlock_user(argptr, arg, target_size);
5303 out:
5304 g_free(big_buf);
5305 return ret;
5308 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5309 int cmd, abi_long arg)
5311 void *argptr;
5312 int target_size;
5313 const argtype *arg_type = ie->arg_type;
5314 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5315 abi_long ret;
5317 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5318 struct blkpg_partition host_part;
5320 /* Read and convert blkpg */
5321 arg_type++;
5322 target_size = thunk_type_size(arg_type, 0);
5323 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5324 if (!argptr) {
5325 ret = -TARGET_EFAULT;
5326 goto out;
5328 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5329 unlock_user(argptr, arg, 0);
5331 switch (host_blkpg->op) {
5332 case BLKPG_ADD_PARTITION:
5333 case BLKPG_DEL_PARTITION:
5334 /* payload is struct blkpg_partition */
5335 break;
5336 default:
5337 /* Unknown opcode */
5338 ret = -TARGET_EINVAL;
5339 goto out;
5342 /* Read and convert blkpg->data */
5343 arg = (abi_long)(uintptr_t)host_blkpg->data;
5344 target_size = thunk_type_size(part_arg_type, 0);
5345 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5346 if (!argptr) {
5347 ret = -TARGET_EFAULT;
5348 goto out;
5350 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5351 unlock_user(argptr, arg, 0);
5353 /* Swizzle the data pointer to our local copy and call! */
5354 host_blkpg->data = &host_part;
5355 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5357 out:
5358 return ret;
5361 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5362 int fd, int cmd, abi_long arg)
5364 const argtype *arg_type = ie->arg_type;
5365 const StructEntry *se;
5366 const argtype *field_types;
5367 const int *dst_offsets, *src_offsets;
5368 int target_size;
5369 void *argptr;
5370 abi_ulong *target_rt_dev_ptr;
5371 unsigned long *host_rt_dev_ptr;
5372 abi_long ret;
5373 int i;
5375 assert(ie->access == IOC_W);
5376 assert(*arg_type == TYPE_PTR);
5377 arg_type++;
5378 assert(*arg_type == TYPE_STRUCT);
5379 target_size = thunk_type_size(arg_type, 0);
5380 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5381 if (!argptr) {
5382 return -TARGET_EFAULT;
5384 arg_type++;
5385 assert(*arg_type == (int)STRUCT_rtentry);
5386 se = struct_entries + *arg_type++;
5387 assert(se->convert[0] == NULL);
5388 /* convert struct here to be able to catch rt_dev string */
5389 field_types = se->field_types;
5390 dst_offsets = se->field_offsets[THUNK_HOST];
5391 src_offsets = se->field_offsets[THUNK_TARGET];
5392 for (i = 0; i < se->nb_fields; i++) {
5393 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5394 assert(*field_types == TYPE_PTRVOID);
5395 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5396 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5397 if (*target_rt_dev_ptr != 0) {
5398 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5399 tswapal(*target_rt_dev_ptr));
5400 if (!*host_rt_dev_ptr) {
5401 unlock_user(argptr, arg, 0);
5402 return -TARGET_EFAULT;
5404 } else {
5405 *host_rt_dev_ptr = 0;
5407 field_types++;
5408 continue;
5410 field_types = thunk_convert(buf_temp + dst_offsets[i],
5411 argptr + src_offsets[i],
5412 field_types, THUNK_HOST);
5414 unlock_user(argptr, arg, 0);
5416 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5417 if (*host_rt_dev_ptr != 0) {
5418 unlock_user((void *)*host_rt_dev_ptr,
5419 *target_rt_dev_ptr, 0);
5421 return ret;
5424 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5425 int fd, int cmd, abi_long arg)
5427 int sig = target_to_host_signal(arg);
5428 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5431 static IOCTLEntry ioctl_entries[] = {
5432 #define IOCTL(cmd, access, ...) \
5433 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5434 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5435 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5436 #include "ioctls.h"
5437 { 0, 0, },
5440 /* ??? Implement proper locking for ioctls. */
5441 /* do_ioctl() Must return target values and target errnos. */
5442 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5444 const IOCTLEntry *ie;
5445 const argtype *arg_type;
5446 abi_long ret;
5447 uint8_t buf_temp[MAX_STRUCT_SIZE];
5448 int target_size;
5449 void *argptr;
5451 ie = ioctl_entries;
5452 for(;;) {
5453 if (ie->target_cmd == 0) {
5454 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5455 return -TARGET_ENOSYS;
5457 if (ie->target_cmd == cmd)
5458 break;
5459 ie++;
5461 arg_type = ie->arg_type;
5462 #if defined(DEBUG)
5463 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
5464 #endif
5465 if (ie->do_ioctl) {
5466 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5469 switch(arg_type[0]) {
5470 case TYPE_NULL:
5471 /* no argument */
5472 ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5473 break;
5474 case TYPE_PTRVOID:
5475 case TYPE_INT:
5476 ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5477 break;
5478 case TYPE_PTR:
5479 arg_type++;
5480 target_size = thunk_type_size(arg_type, 0);
5481 switch(ie->access) {
5482 case IOC_R:
5483 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5484 if (!is_error(ret)) {
5485 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5486 if (!argptr)
5487 return -TARGET_EFAULT;
5488 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5489 unlock_user(argptr, arg, target_size);
5491 break;
5492 case IOC_W:
5493 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5494 if (!argptr)
5495 return -TARGET_EFAULT;
5496 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5497 unlock_user(argptr, arg, 0);
5498 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5499 break;
5500 default:
5501 case IOC_RW:
5502 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5503 if (!argptr)
5504 return -TARGET_EFAULT;
5505 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5506 unlock_user(argptr, arg, 0);
5507 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5508 if (!is_error(ret)) {
5509 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5510 if (!argptr)
5511 return -TARGET_EFAULT;
5512 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5513 unlock_user(argptr, arg, target_size);
5515 break;
5517 break;
5518 default:
5519 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5520 (long)cmd, arg_type[0]);
5521 ret = -TARGET_ENOSYS;
5522 break;
5524 return ret;
5527 static const bitmask_transtbl iflag_tbl[] = {
5528 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5529 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5530 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5531 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5532 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5533 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5534 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5535 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5536 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5537 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5538 { TARGET_IXON, TARGET_IXON, IXON, IXON },
5539 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5540 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5541 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5542 { 0, 0, 0, 0 }
/*
 * termios c_oflag translation table.  Single-bit flags map 1:1; the
 * multi-bit delay fields (NLDLY, CRDLY, TABDLY, BSDLY, VTDLY, FFDLY)
 * get one entry per possible value within the field's mask.
 */
static const bitmask_transtbl oflag_tbl[] = {
	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
	{ 0, 0, 0, 0 }
};
/*
 * termios c_cflag translation table.  The multi-bit fields CBAUD
 * (baud rate) and CSIZE (character size) get one entry per value;
 * the remaining flags are single bits mapped 1:1.
 */
static const bitmask_transtbl cflag_tbl[] = {
	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
	{ 0, 0, 0, 0 }
};
/*
 * termios c_lflag translation table: local-mode flags, all single
 * bits mapped 1:1 between target and host.
 */
static const bitmask_transtbl lflag_tbl[] = {
	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
	{ 0, 0, 0, 0 }
};
5627 static void target_to_host_termios (void *dst, const void *src)
5629 struct host_termios *host = dst;
5630 const struct target_termios *target = src;
5632 host->c_iflag =
5633 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5634 host->c_oflag =
5635 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5636 host->c_cflag =
5637 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5638 host->c_lflag =
5639 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5640 host->c_line = target->c_line;
5642 memset(host->c_cc, 0, sizeof(host->c_cc));
5643 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5644 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5645 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5646 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5647 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5648 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5649 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5650 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5651 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5652 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5653 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5654 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5655 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5656 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5657 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5658 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5659 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5662 static void host_to_target_termios (void *dst, const void *src)
5664 struct target_termios *target = dst;
5665 const struct host_termios *host = src;
5667 target->c_iflag =
5668 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5669 target->c_oflag =
5670 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5671 target->c_cflag =
5672 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5673 target->c_lflag =
5674 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5675 target->c_line = host->c_line;
5677 memset(target->c_cc, 0, sizeof(target->c_cc));
5678 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5679 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5680 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5681 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5682 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5683 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5684 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5685 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5686 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5687 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5688 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5689 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5690 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5691 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5692 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5693 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5694 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
/* Thunk descriptor wiring the two termios converters above into the
   generic ioctl argument-conversion machinery. */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};
5703 static bitmask_transtbl mmap_flags_tbl[] = {
5704 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5705 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5706 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5707 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
5708 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
5709 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
5710 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
5711 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5712 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
5713 MAP_NORESERVE },
5714 { 0, 0, 0, 0 }
5717 #if defined(TARGET_I386)
5719 /* NOTE: there is really one LDT for all the threads */
5720 static uint8_t *ldt_table;
5722 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5724 int size;
5725 void *p;
5727 if (!ldt_table)
5728 return 0;
5729 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5730 if (size > bytecount)
5731 size = bytecount;
5732 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5733 if (!p)
5734 return -TARGET_EFAULT;
5735 /* ??? Should this by byteswapped? */
5736 memcpy(p, ldt_table, size);
5737 unlock_user(p, ptr, size);
5738 return size;
/* XXX: add locking support */
/*
 * Shared implementation of modify_ldt(2) func 1/0x11: validate the
 * target_modify_ldt_ldt_s descriptor at guest address @ptr and install
 * it into the emulated LDT, allocating the LDT lazily on first use.
 *
 * @oldmode selects the legacy semantics (func 1): "contents == 3"
 * descriptors are rejected and the "useable" flag bit is ignored.
 * Returns 0 on success or a -TARGET_E* error code.
 */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the individual flag bits of the descriptor request.  */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the fields into the two 32-bit halves of an x86 descriptor.  */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
5833 /* specific and weird i386 syscalls */
5834 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5835 unsigned long bytecount)
5837 abi_long ret;
5839 switch (func) {
5840 case 0:
5841 ret = read_ldt(ptr, bytecount);
5842 break;
5843 case 1:
5844 ret = write_ldt(env, ptr, bytecount, 1);
5845 break;
5846 case 0x11:
5847 ret = write_ldt(env, ptr, bytecount, 0);
5848 break;
5849 default:
5850 ret = -TARGET_ENOSYS;
5851 break;
5853 return ret;
5856 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/*
 * Emulate set_thread_area(2): install a TLS descriptor into the guest
 * GDT.  If the guest passes entry_number == -1, the first free slot in
 * the TLS range is chosen and written back into the guest struct.
 * Returns 0 on success or a -TARGET_E* error code.
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        /* Scan the TLS range of the GDT for an unused (all-zero) slot
           and report the chosen index back to the guest.  */
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    /* Unpack the individual flag bits of the descriptor request.  */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the fields into the two 32-bit halves of an x86 descriptor.  */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
/*
 * Emulate get_thread_area(2): read the TLS descriptor selected by
 * entry_number in the guest struct back out of the guest GDT,
 * unpacking the descriptor words into base/limit/flags form.
 * Returns 0 on success or -TARGET_E* on error.
 */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Decode the descriptor bits (inverse of the packing done in
       do_set_thread_area above).  */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
5988 #endif /* TARGET_I386 && TARGET_ABI32 */
5990 #ifndef TARGET_ABI32
5991 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
5993 abi_long ret = 0;
5994 abi_ulong val;
5995 int idx;
5997 switch(code) {
5998 case TARGET_ARCH_SET_GS:
5999 case TARGET_ARCH_SET_FS:
6000 if (code == TARGET_ARCH_SET_GS)
6001 idx = R_GS;
6002 else
6003 idx = R_FS;
6004 cpu_x86_load_seg(env, idx, 0);
6005 env->segs[idx].base = addr;
6006 break;
6007 case TARGET_ARCH_GET_GS:
6008 case TARGET_ARCH_GET_FS:
6009 if (code == TARGET_ARCH_GET_GS)
6010 idx = R_GS;
6011 else
6012 idx = R_FS;
6013 val = env->segs[idx].base;
6014 if (put_user(val, addr, abi_ulong))
6015 ret = -TARGET_EFAULT;
6016 break;
6017 default:
6018 ret = -TARGET_EINVAL;
6019 break;
6021 return ret;
6023 #endif
6025 #endif /* defined(TARGET_I386) */
/* Host stack size for threads created via clone(CLONE_VM) below.  */
#define NEW_STACK_SIZE 0x40000

/* Held by do_fork() for the duration of thread setup so that the
   child's clone_func() cannot enter the CPU loop before the parent has
   finished initializing (e.g. the TLS state).  */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Startup handshake data passed from do_fork() to clone_func().  */
typedef struct {
    CPUArchState *env;        /* CPU state for the new thread */
    pthread_mutex_t mutex;    /* protects the ready handshake below */
    pthread_cond_t cond;      /* signalled once the child is ready */
    pthread_t thread;
    uint32_t tid;             /* host tid, filled in by the child */
    abi_ulong child_tidptr;   /* guest addr for CLONE_CHILD_SETTID */
    abi_ulong parent_tidptr;  /* guest addr for CLONE_PARENT_SETTID */
    sigset_t sigmask;         /* parent's signal mask, restored by child */
} new_thread_info;
/*
 * Entry point of a new guest thread created with clone(CLONE_VM).
 * Publishes the host thread id to the guest addresses requested by
 * clone(), unblocks signals, signals readiness to the parent, waits
 * (by briefly taking clone_lock) for the parent to finish setup, then
 * enters the CPU emulation loop.  Never returns.
 */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    env = info->env;
    cpu = ENV_GET_CPU(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = gettid();
    cpu->host_tid = info->tid;
    task_settid(ts);
    /* Export the new tid for CLONE_CHILD_SETTID / CLONE_PARENT_SETTID.  */
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
/*
 * Emulate clone(2)/fork(2)/vfork(2).  With CLONE_VM the new task is a
 * host pthread sharing this CPU's address space, started via
 * clone_func() with a careful signal/lock handshake; without CLONE_VM
 * a plain host fork() is performed.  vfork() is emulated with fork().
 *
 * NOTE(review): the pthread_attr_init/setstacksize/setdetachstate
 * return values assigned to ret are immediately overwritten; only the
 * pthread_create() result is actually checked.
 */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);
        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs(new_env, newsp);
        new_cpu = ENV_GET_CPU(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);
        }

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            rcu_after_fork();
            cpu_clone_regs(env, newsp);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            fork_end(0);
        }
    }
    return ret;
}
/* warning : doesn't handle linux specific flags... */
/*
 * Map a target fcntl(2) command number to the host command number.
 * Record-lock commands are mapped to the host 64-bit variants so that
 * a single struct flock64 path (see do_fcntl) handles all ABIs.
 * Returns -TARGET_EINVAL for commands that are not supported.
 */
static int target_to_host_fcntl_cmd(int cmd)
{
    switch(cmd) {
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
        return cmd;
    case TARGET_F_GETLK:
        return F_GETLK64;
    case TARGET_F_SETLK:
        return F_SETLK64;
    case TARGET_F_SETLKW:
        return F_SETLKW64;
    case TARGET_F_GETOWN:
        return F_GETOWN;
    case TARGET_F_SETOWN:
        return F_SETOWN;
    case TARGET_F_GETSIG:
        return F_GETSIG;
    case TARGET_F_SETSIG:
        return F_SETSIG;
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
        return F_GETLK64;
    case TARGET_F_SETLK64:
        return F_SETLK64;
    case TARGET_F_SETLKW64:
        return F_SETLKW64;
#endif
    case TARGET_F_SETLEASE:
        return F_SETLEASE;
    case TARGET_F_GETLEASE:
        return F_GETLEASE;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        return F_DUPFD_CLOEXEC;
#endif
    case TARGET_F_NOTIFY:
        return F_NOTIFY;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        return F_GETOWN_EX;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        return F_SETOWN_EX;
#endif
#ifdef F_SETPIPE_SZ
    case TARGET_F_SETPIPE_SZ:
        return F_SETPIPE_SZ;
    case TARGET_F_GETPIPE_SZ:
        return F_GETPIPE_SZ;
#endif
    default:
        return -TARGET_EINVAL;
    }
    /* Not reached: every path returns from inside the switch above.  */
    return -TARGET_EINVAL;
}
/* Build one flock l_type translation entry; the all-ones (-1) masks
   select every bit, so the entire l_type value is matched/translated
   rather than a single flag bit. */
#define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
static const bitmask_transtbl flock_tbl[] = {
    TRANSTBL_CONVERT(F_RDLCK),
    TRANSTBL_CONVERT(F_WRLCK),
    TRANSTBL_CONVERT(F_UNLCK),
    TRANSTBL_CONVERT(F_EXLCK),
    TRANSTBL_CONVERT(F_SHLCK),
    { 0, 0, 0, 0 }
};
6279 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6280 abi_ulong target_flock_addr)
6282 struct target_flock *target_fl;
6283 short l_type;
6285 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6286 return -TARGET_EFAULT;
6289 __get_user(l_type, &target_fl->l_type);
6290 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6291 __get_user(fl->l_whence, &target_fl->l_whence);
6292 __get_user(fl->l_start, &target_fl->l_start);
6293 __get_user(fl->l_len, &target_fl->l_len);
6294 __get_user(fl->l_pid, &target_fl->l_pid);
6295 unlock_user_struct(target_fl, target_flock_addr, 0);
6296 return 0;
6299 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6300 const struct flock64 *fl)
6302 struct target_flock *target_fl;
6303 short l_type;
6305 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6306 return -TARGET_EFAULT;
6309 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6310 __put_user(l_type, &target_fl->l_type);
6311 __put_user(fl->l_whence, &target_fl->l_whence);
6312 __put_user(fl->l_start, &target_fl->l_start);
6313 __put_user(fl->l_len, &target_fl->l_len);
6314 __put_user(fl->l_pid, &target_fl->l_pid);
6315 unlock_user_struct(target_fl, target_flock_addr, 1);
6316 return 0;
/* Common signatures matching the copy_{from,to}_user_*flock64 helpers,
   so the generic and ARM-EABI variants can be selected uniformly. */
typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6322 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/*
 * ARM-EABI variant of copy_from_user_flock64: reads the guest data via
 * struct target_eabi_flock64 (the EABI layout of struct flock64); the
 * field-by-field conversion is otherwise identical to the generic one.
 */
static inline abi_long copy_from_user_eabi_flock64(struct flock64 *fl,
                                                   abi_ulong target_flock_addr)
{
    struct target_eabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}
/*
 * ARM-EABI variant of copy_to_user_flock64: writes the guest data via
 * struct target_eabi_flock64 (the EABI layout of struct flock64); the
 * field-by-field conversion is otherwise identical to the generic one.
 */
static inline abi_long copy_to_user_eabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_eabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
6362 #endif
6364 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6365 abi_ulong target_flock_addr)
6367 struct target_flock64 *target_fl;
6368 short l_type;
6370 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6371 return -TARGET_EFAULT;
6374 __get_user(l_type, &target_fl->l_type);
6375 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6376 __get_user(fl->l_whence, &target_fl->l_whence);
6377 __get_user(fl->l_start, &target_fl->l_start);
6378 __get_user(fl->l_len, &target_fl->l_len);
6379 __get_user(fl->l_pid, &target_fl->l_pid);
6380 unlock_user_struct(target_fl, target_flock_addr, 0);
6381 return 0;
6384 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6385 const struct flock64 *fl)
6387 struct target_flock64 *target_fl;
6388 short l_type;
6390 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6391 return -TARGET_EFAULT;
6394 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6395 __put_user(l_type, &target_fl->l_type);
6396 __put_user(fl->l_whence, &target_fl->l_whence);
6397 __put_user(fl->l_start, &target_fl->l_start);
6398 __put_user(fl->l_len, &target_fl->l_len);
6399 __put_user(fl->l_pid, &target_fl->l_pid);
6400 unlock_user_struct(target_fl, target_flock_addr, 1);
6401 return 0;
/*
 * Emulate fcntl(2).  The command is first mapped to the host command
 * number by target_to_host_fcntl_cmd(); lock arguments are converted
 * between the target flock layouts and a host struct flock64, status
 * flags through fcntl_flags_tbl, and F_*OWN_EX through
 * struct f_owner_ex.  Unknown commands are passed through unchanged.
 */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            /* Report the lock found (if any) back to the guest.  */
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETLK64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            /* Translate the returned status flags to target values.  */
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    /* Commands whose integer argument needs no conversion.  */
    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        /* Unknown command: pass it to the host unchanged.  */
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
6512 #ifdef USE_UID16
/* Clamp a 32-bit uid into the 16-bit range: anything that does not fit
   becomes the overflow uid 65534. */
static inline int high2lowuid(int uid)
{
    return (uid > 65535) ? 65534 : uid;
}
/* Clamp a 32-bit gid into the 16-bit range: anything that does not fit
   becomes the overflow gid 65534. */
static inline int high2lowgid(int gid)
{
    return (gid > 65535) ? 65534 : gid;
}
/* Widen a 16-bit uid: the 16-bit "no change" sentinel (all ones) maps
   to the 32-bit -1 sentinel; every other value passes through. */
static inline int low2highuid(int uid)
{
    return ((int16_t)uid == -1) ? -1 : uid;
}
/* Widen a 16-bit gid: the 16-bit "no change" sentinel (all ones) maps
   to the 32-bit -1 sentinel; every other value passes through. */
static inline int low2highgid(int gid)
{
    return ((int16_t)gid == -1) ? -1 : gid;
}
/* Byte-swap a 16-bit uid/gid between target and host order.  */
static inline int tswapid(int id)
{
    return tswap16(id);
}
6550 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6552 #else /* !USE_UID16 */
/* Without USE_UID16 the target already uses full 32-bit IDs, so no
   narrowing or sentinel translation is needed: all four conversions
   are identity functions. */
static inline int high2lowuid(int uid)
{
    return uid;
}

static inline int high2lowgid(int gid)
{
    return gid;
}

static inline int low2highuid(int uid)
{
    return uid;
}

static inline int low2highgid(int gid)
{
    return gid;
}

/* IDs are 32 bits wide here, so swap as a 32-bit value.  */
static inline int tswapid(int id)
{
    return tswap32(id);
}
6574 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6576 #endif /* USE_UID16 */
/* We must do direct syscalls for setting UID/GID, because we want to
 * implement the Linux system call semantics of "change only for this thread",
 * not the libc/POSIX semantics of "change for all threads in process".
 * (See http://ewontfix.com/17/ for more details.)
 * We use the 32-bit version of the syscalls if present; if it is not
 * then either the host architecture supports 32-bit UIDs natively with
 * the standard syscall, or the 16-bit UID is the best we can do.
 */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

/* Raw per-thread syscall wrappers; these deliberately bypass glibc's
 * process-wide setxid broadcast (see comment above). */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
/* One-time initialization for the syscall layer: registers the struct
 * marshalling descriptors, builds the target->host errno table, and
 * patches ioctl command sizes that are target-dependent. */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);

/* Register every struct layout listed in syscall_types.h with the thunk
 * marshalling machinery. */
#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            /* All-ones size field is the "compute at runtime" marker; the
             * real size comes from the thunk description of the pointed-to
             * struct, so the argument must be a pointer type. */
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
#if TARGET_ABI_BITS == 32
/* Combine the two 32-bit register halves of a 64-bit file offset.
 * Which argument carries the high word depends on target endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABIs pass the whole offset in one register; word1 is unused. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* truncate64: the 64-bit length arrives split across two registers.
 * ABIs that require aligned register pairs shift the halves up by one
 * argument slot (arg3/arg4 instead of arg2/arg3). */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    abi_long word0 = arg2;
    abi_long word1 = arg3;

    if (regpairs_aligned(cpu_env)) {
        word0 = arg3;
        word1 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(word0, word1)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair shuffling as target_truncate64, but
 * operating on an already-open file descriptor. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    abi_long word0 = arg2;
    abi_long word1 = arg3;

    if (regpairs_aligned(cpu_env)) {
        word0 = arg3;
        word1 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(word0, word1)));
}
#endif
6708 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
6709 abi_ulong target_addr)
6711 struct target_timespec *target_ts;
6713 if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
6714 return -TARGET_EFAULT;
6715 __get_user(host_ts->tv_sec, &target_ts->tv_sec);
6716 __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6717 unlock_user_struct(target_ts, target_addr, 0);
6718 return 0;
6721 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
6722 struct timespec *host_ts)
6724 struct target_timespec *target_ts;
6726 if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
6727 return -TARGET_EFAULT;
6728 __put_user(host_ts->tv_sec, &target_ts->tv_sec);
6729 __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
6730 unlock_user_struct(target_ts, target_addr, 1);
6731 return 0;
6734 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6735 abi_ulong target_addr)
6737 struct target_itimerspec *target_itspec;
6739 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6740 return -TARGET_EFAULT;
6743 host_itspec->it_interval.tv_sec =
6744 tswapal(target_itspec->it_interval.tv_sec);
6745 host_itspec->it_interval.tv_nsec =
6746 tswapal(target_itspec->it_interval.tv_nsec);
6747 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6748 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6750 unlock_user_struct(target_itspec, target_addr, 1);
6751 return 0;
6754 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6755 struct itimerspec *host_its)
6757 struct target_itimerspec *target_itspec;
6759 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6760 return -TARGET_EFAULT;
6763 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6764 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6766 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6767 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6769 unlock_user_struct(target_itspec, target_addr, 0);
6770 return 0;
6773 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6774 abi_ulong target_addr)
6776 struct target_sigevent *target_sevp;
6778 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6779 return -TARGET_EFAULT;
6782 /* This union is awkward on 64 bit systems because it has a 32 bit
6783 * integer and a pointer in it; we follow the conversion approach
6784 * used for handling sigval types in signal.c so the guest should get
6785 * the correct value back even if we did a 64 bit byteswap and it's
6786 * using the 32 bit integer.
6788 host_sevp->sigev_value.sival_ptr =
6789 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6790 host_sevp->sigev_signo =
6791 target_to_host_signal(tswap32(target_sevp->sigev_signo));
6792 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6793 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6795 unlock_user_struct(target_sevp, target_addr, 1);
6796 return 0;
#if defined(TARGET_NR_mlockall)
/* Translate the guest's mlockall() flag bits into host MCL_* bits. */
static inline int target_to_host_mlockall_arg(int arg)
{
    return ((arg & TARGET_MLOCKALL_MCL_CURRENT) ? MCL_CURRENT : 0) |
           ((arg & TARGET_MLOCKALL_MCL_FUTURE) ? MCL_FUTURE : 0);
}
#endif
/* Write a host struct stat out to the guest as its stat64 layout.
 * On 32-bit ARM the layout differs between OABI and EABI, so the EABI
 * variant is handled separately. Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        /* Zero first: the target struct has padding/fields we never fill. */
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        /* Some targets carry the inode in a second, differently-sized field. */
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
6877 /* ??? Using host futex calls even when target atomic operations
6878 are not really atomic probably breaks things. However implementing
6879 futexes locally would make futexes shared between multiple processes
6880 tricky. However they're probably useless because guest atomic
6881 operations won't work either. */
6882 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6883 target_ulong uaddr2, int val3)
6885 struct timespec ts, *pts;
6886 int base_op;
6888 /* ??? We assume FUTEX_* constants are the same on both host
6889 and target. */
6890 #ifdef FUTEX_CMD_MASK
6891 base_op = op & FUTEX_CMD_MASK;
6892 #else
6893 base_op = op;
6894 #endif
6895 switch (base_op) {
6896 case FUTEX_WAIT:
6897 case FUTEX_WAIT_BITSET:
6898 if (timeout) {
6899 pts = &ts;
6900 target_to_host_timespec(pts, timeout);
6901 } else {
6902 pts = NULL;
6904 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6905 pts, NULL, val3));
6906 case FUTEX_WAKE:
6907 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6908 case FUTEX_FD:
6909 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6910 case FUTEX_REQUEUE:
6911 case FUTEX_CMP_REQUEUE:
6912 case FUTEX_WAKE_OP:
6913 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
6914 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
6915 But the prototype takes a `struct timespec *'; insert casts
6916 to satisfy the compiler. We do not need to tswap TIMEOUT
6917 since it's not compared to guest memory. */
6918 pts = (struct timespec *)(uintptr_t) timeout;
6919 return get_errno(safe_futex(g2h(uaddr), op, val, pts,
6920 g2h(uaddr2),
6921 (base_op == FUTEX_CMP_REQUEUE
6922 ? tswap32(val3)
6923 : val3)));
6924 default:
6925 return -TARGET_ENOSYS;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate name_to_handle_at(2): resolve `pathname` to an opaque file
 * handle, writing the handle and the mount id back to guest memory.
 * Returns the host syscall result or -TARGET_EFAULT. */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* First field of the guest file_handle is handle_bytes: the size of
     * the buffer the guest provided for the opaque handle data. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }
    /* NOTE(review): `size` is guest-controlled and unbounded here;
     * total_size below could wrap — confirm the kernel rejects oversized
     * handle_bytes before this matters. */

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    /* Copy the raw handle bytes, then byte-swap only the two header
     * fields the guest is allowed to interpret. */
    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Emulate open_by_handle_at(2): open the file identified by the opaque
 * handle the guest passes in. Returns a host fd (as errno-wrapped
 * result) or -TARGET_EFAULT. */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* handle_bytes is the first field of the guest struct. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    /* Duplicate the guest handle, then fix up the two header fields for
     * host byte order; the payload stays as opaque bytes. */
    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
7016 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7018 /* signalfd siginfo conversion */
/* Convert one host signalfd_siginfo record to the guest's byte order
 * and signal numbering, writing the result to *tinfo.
 *
 * Fix: the old code read ssi_errno, and the SIGBUS/BUS_MCEERR_* test
 * fields, from `tinfo` (the destination) rather than `info` (the
 * source). That only worked because the sole caller passes the same
 * buffer for both; read from `info` so the function is correct for
 * non-aliased buffers too.
 *
 * Note: conversion is safe in-place because every field is read from
 * `info` before the corresponding (or any later-read) field of `tinfo`
 * is written.
 */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    if (info->ssi_signo == SIGBUS &&
        (info->ssi_code == BUS_MCEERR_AR ||
         info->ssi_code == BUS_MCEERR_AO)) {
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    tinfo->ssi_errno = tswap32(info->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
/* fd_trans hook: convert a buffer of signalfd_siginfo records read from
 * a host signalfd into guest format, in place. Returns len unchanged. */
static abi_long host_to_target_data_signalfd(void *buf, size_t len)
{
    int i;

    for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
        /* In-place conversion: source and destination alias. */
        host_to_target_signalfd_siginfo(buf + i, buf + i);
    }

    return len;
}

/* Translation table attached to fds returned by signalfd(). */
static TargetFdTrans target_signalfd_trans = {
    .host_to_target_data = host_to_target_data_signalfd,
};
/* Emulate signalfd4(2): convert the guest sigset and flag bits, create
 * the host signalfd, and register the data translator so reads from the
 * new fd are converted back to guest format. */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    /* Only NONBLOCK and CLOEXEC are valid signalfd4 flags. */
    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
#endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same. */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        int tsig = host_to_target_signal(WTERMSIG(status));
        return tsig | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        int tsig = host_to_target_signal(WSTOPSIG(status));
        return (tsig << 8) | (status & 0xff);
    }
    return status;
}
/* Fill `fd` with the guest's view of /proc/self/cmdline: copy the host
 * file but drop the first NUL-terminated string, which is the path to
 * the qemu binary rather than the guest program. Returns 0 on success,
 * -1 with errno set on read/write failure. */
static int open_self_cmdline(void *cpu_env, int fd)
{
    int fd_orig = -1;
    bool word_skipped = false;

    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;
    }

    while (true) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            /* Preserve the read error's errno across close(). */
            int e = errno;
            fd_orig = close(fd_orig);
            errno = e;
            return -1;
        } else if (nb_read == 0) {
            break;
        }

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command. */
            cp_buf = memchr(buf, 0, nb_read);
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;
            }
            /* If no NUL is in this chunk the whole chunk belongs to the
             * first string and is dropped; scanning resumes next chunk. */
        }

        if (word_skipped) {
            if (write(fd, cp_buf, nb_read) != nb_read) {
                int e = errno;
                close(fd_orig);
                errno = e;
                return -1;
            }
        }
    }

    return close(fd_orig);
}
7166 static int open_self_maps(void *cpu_env, int fd)
7168 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7169 TaskState *ts = cpu->opaque;
7170 FILE *fp;
7171 char *line = NULL;
7172 size_t len = 0;
7173 ssize_t read;
7175 fp = fopen("/proc/self/maps", "r");
7176 if (fp == NULL) {
7177 return -1;
7180 while ((read = getline(&line, &len, fp)) != -1) {
7181 int fields, dev_maj, dev_min, inode;
7182 uint64_t min, max, offset;
7183 char flag_r, flag_w, flag_x, flag_p;
7184 char path[512] = "";
7185 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
7186 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
7187 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
7189 if ((fields < 10) || (fields > 11)) {
7190 continue;
7192 if (h2g_valid(min)) {
7193 int flags = page_get_flags(h2g(min));
7194 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
7195 if (page_check_range(h2g(min), max - min, flags) == -1) {
7196 continue;
7198 if (h2g(min) == ts->info->stack_limit) {
7199 pstrcpy(path, sizeof(path), " [stack]");
7201 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
7202 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7203 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7204 flag_x, flag_p, offset, dev_maj, dev_min, inode,
7205 path[0] ? " " : "", path);
7209 free(line);
7210 fclose(fp);
7212 return 0;
/* Fill `fd` with a minimal guest view of /proc/self/stat: only the pid
 * (field 0), command name (field 1) and start-of-stack (field 27) carry
 * real values; all other fields are emitted as 0. Returns 0 on success,
 * -1 on short write. */
static int open_self_stat(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong start_stack = ts->info->start_stack;
    int i;

    /* /proc/pid/stat has 44 space-separated fields on the kernels this
     * was written against. */
    for (i = 0; i < 44; i++) {
        char buf[128];
        int len;
        uint64_t val = 0;

        if (i == 0) {
            /* pid */
            val = getpid();
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else if (i == 1) {
            /* app name */
            snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
        } else if (i == 27) {
            /* stack bottom */
            val = start_stack;
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else {
            /* for the rest, there is MasterCard */
            snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
        }

        len = strlen(buf);
        if (write(fd, buf, len) != len) {
            return -1;
        }
    }

    return 0;
}
/* Fill `fd` with the guest's ELF auxiliary vector, which was saved on
 * the guest stack at exec time. Always returns 0 (write errors and an
 * unreadable auxv are silently ignored). */
static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        /* NOTE(review): ptr/len have been advanced by the loop, so this
         * unlocks with a moved pointer and residual length — apparently
         * relying on zero-copy lock_user; confirm under DEBUG_REMAP. */
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
/* Return 1 if `filename` names `entry` inside this process's own /proc
 * directory, i.e. "/proc/self/<entry>" or "/proc/<ourpid>/<entry>";
 * 0 otherwise. */
static int is_proc_myself(const char *filename, const char *entry)
{
    const char *rest;
    char pid_prefix[80];

    if (strncmp(filename, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    rest = filename + strlen("/proc/");

    if (strncmp(rest, "self/", strlen("self/")) == 0) {
        rest += strlen("self/");
    } else if (*rest >= '1' && *rest <= '9') {
        /* Numeric form: only our own pid counts as "myself". */
        snprintf(pid_prefix, sizeof(pid_prefix), "%d/", getpid());
        if (strncmp(rest, pid_prefix, strlen(pid_prefix)) != 0) {
            return 0;
        }
        rest += strlen(pid_prefix);
    } else {
        return 0;
    }

    return strcmp(rest, entry) == 0;
}
7306 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-match comparator for faked /proc files that are not
 * per-process (e.g. "/proc/net/route"). Returns 1 on match, 0 not. */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
7312 static int open_net_route(void *cpu_env, int fd)
7314 FILE *fp;
7315 char *line = NULL;
7316 size_t len = 0;
7317 ssize_t read;
7319 fp = fopen("/proc/net/route", "r");
7320 if (fp == NULL) {
7321 return -1;
7324 /* read header */
7326 read = getline(&line, &len, fp);
7327 dprintf(fd, "%s", line);
7329 /* read routes */
7331 while ((read = getline(&line, &len, fp)) != -1) {
7332 char iface[16];
7333 uint32_t dest, gw, mask;
7334 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7335 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7336 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7337 &mask, &mtu, &window, &irtt);
7338 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7339 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7340 metric, tswap32(mask), mtu, window, irtt);
7343 free(line);
7344 fclose(fp);
7346 return 0;
7348 #endif
/* openat() with /proc interception: certain /proc/self files (and, for
 * cross-endian builds, /proc/net/route) are faked by generating guest-
 * appropriate content into an anonymous temp file; everything else goes
 * to the host via safe_openat(). Returns an fd or a negative errno. */
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;                       /* entry (or full path) to match */
        int (*fill)(void *cpu_env, int fd);         /* writes faked content into fd */
        int (*cmp)(const char *s1, const char *s2); /* matcher: is_proc_myself/is_proc */
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    if (is_proc_myself(pathname, "exe")) {
        /* Opening our own "exe": hand back the fd the kernel gave us at
         * exec time if there is one, else open the recorded exec path. */
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* Unlink immediately: the fd keeps the anonymous file alive. */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
/* POSIX timer ids handed to the guest are a 16-bit table index tagged
 * with a magic value in the upper bits, so stale/garbage ids can be
 * detected. */
#define TIMER_MAGIC 0x0caf0000
#define TIMER_MAGIC_MASK 0xffff0000

/* Convert QEMU provided timer ID back to internal 16bit index format */
static target_timer_t get_timer_id(abi_long arg)
{
    target_timer_t timerid = arg;

    /* Reject ids that don't carry our magic tag. */
    if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
        return -TARGET_EINVAL;
    }

    timerid &= 0xffff;

    /* Reject indexes beyond the timer table. */
    if (timerid >= ARRAY_SIZE(g_posix_timers)) {
        return -TARGET_EINVAL;
    }

    return timerid;
}
7431 /* do_syscall() should always have a single exit point at the end so
7432 that actions, such as logging of syscall results, can be performed.
7433 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7434 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
7435 abi_long arg2, abi_long arg3, abi_long arg4,
7436 abi_long arg5, abi_long arg6, abi_long arg7,
7437 abi_long arg8)
7439 CPUState *cpu = ENV_GET_CPU(cpu_env);
7440 abi_long ret;
7441 struct stat st;
7442 struct statfs stfs;
7443 void *p;
7445 #if defined(DEBUG_ERESTARTSYS)
7446 /* Debug-only code for exercising the syscall-restart code paths
7447 * in the per-architecture cpu main loops: restart every syscall
7448 * the guest makes once before letting it through.
7451 static int flag;
7453 flag = !flag;
7454 if (flag) {
7455 return -TARGET_ERESTARTSYS;
7458 #endif
7460 #ifdef DEBUG
7461 gemu_log("syscall %d", num);
7462 #endif
7463 trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
7464 if(do_strace)
7465 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
7467 switch(num) {
7468 case TARGET_NR_exit:
7469 /* In old applications this may be used to implement _exit(2).
7470 However in threaded applictions it is used for thread termination,
7471 and _exit_group is used for application termination.
7472 Do thread termination if we have more then one thread. */
7474 if (block_signals()) {
7475 ret = -TARGET_ERESTARTSYS;
7476 break;
7479 if (CPU_NEXT(first_cpu)) {
7480 TaskState *ts;
7482 cpu_list_lock();
7483 /* Remove the CPU from the list. */
7484 QTAILQ_REMOVE(&cpus, cpu, node);
7485 cpu_list_unlock();
7486 ts = cpu->opaque;
7487 if (ts->child_tidptr) {
7488 put_user_u32(0, ts->child_tidptr);
7489 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7490 NULL, NULL, 0);
7492 thread_cpu = NULL;
7493 object_unref(OBJECT(cpu));
7494 g_free(ts);
7495 rcu_unregister_thread();
7496 pthread_exit(NULL);
7498 #ifdef TARGET_GPROF
7499 _mcleanup();
7500 #endif
7501 gdb_exit(cpu_env, arg1);
7502 _exit(arg1);
7503 ret = 0; /* avoid warning */
7504 break;
7505 case TARGET_NR_read:
7506 if (arg3 == 0)
7507 ret = 0;
7508 else {
7509 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7510 goto efault;
7511 ret = get_errno(safe_read(arg1, p, arg3));
7512 if (ret >= 0 &&
7513 fd_trans_host_to_target_data(arg1)) {
7514 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7516 unlock_user(p, arg2, ret);
7518 break;
7519 case TARGET_NR_write:
7520 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7521 goto efault;
7522 ret = get_errno(safe_write(arg1, p, arg3));
7523 unlock_user(p, arg2, 0);
7524 break;
7525 #ifdef TARGET_NR_open
7526 case TARGET_NR_open:
7527 if (!(p = lock_user_string(arg1)))
7528 goto efault;
7529 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7530 target_to_host_bitmask(arg2, fcntl_flags_tbl),
7531 arg3));
7532 fd_trans_unregister(ret);
7533 unlock_user(p, arg1, 0);
7534 break;
7535 #endif
7536 case TARGET_NR_openat:
7537 if (!(p = lock_user_string(arg2)))
7538 goto efault;
7539 ret = get_errno(do_openat(cpu_env, arg1, p,
7540 target_to_host_bitmask(arg3, fcntl_flags_tbl),
7541 arg4));
7542 fd_trans_unregister(ret);
7543 unlock_user(p, arg2, 0);
7544 break;
7545 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7546 case TARGET_NR_name_to_handle_at:
7547 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7548 break;
7549 #endif
7550 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7551 case TARGET_NR_open_by_handle_at:
7552 ret = do_open_by_handle_at(arg1, arg2, arg3);
7553 fd_trans_unregister(ret);
7554 break;
7555 #endif
7556 case TARGET_NR_close:
7557 fd_trans_unregister(arg1);
7558 ret = get_errno(close(arg1));
7559 break;
7560 case TARGET_NR_brk:
7561 ret = do_brk(arg1);
7562 break;
7563 #ifdef TARGET_NR_fork
7564 case TARGET_NR_fork:
7565 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
7566 break;
7567 #endif
7568 #ifdef TARGET_NR_waitpid
7569 case TARGET_NR_waitpid:
7571 int status;
7572 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7573 if (!is_error(ret) && arg2 && ret
7574 && put_user_s32(host_to_target_waitstatus(status), arg2))
7575 goto efault;
7577 break;
7578 #endif
7579 #ifdef TARGET_NR_waitid
7580 case TARGET_NR_waitid:
7582 siginfo_t info;
7583 info.si_pid = 0;
7584 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7585 if (!is_error(ret) && arg3 && info.si_pid != 0) {
7586 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7587 goto efault;
7588 host_to_target_siginfo(p, &info);
7589 unlock_user(p, arg3, sizeof(target_siginfo_t));
7592 break;
7593 #endif
7594 #ifdef TARGET_NR_creat /* not on alpha */
7595 case TARGET_NR_creat:
7596 if (!(p = lock_user_string(arg1)))
7597 goto efault;
7598 ret = get_errno(creat(p, arg2));
7599 fd_trans_unregister(ret);
7600 unlock_user(p, arg1, 0);
7601 break;
7602 #endif
7603 #ifdef TARGET_NR_link
7604 case TARGET_NR_link:
7606 void * p2;
7607 p = lock_user_string(arg1);
7608 p2 = lock_user_string(arg2);
7609 if (!p || !p2)
7610 ret = -TARGET_EFAULT;
7611 else
7612 ret = get_errno(link(p, p2));
7613 unlock_user(p2, arg2, 0);
7614 unlock_user(p, arg1, 0);
7616 break;
7617 #endif
7618 #if defined(TARGET_NR_linkat)
7619 case TARGET_NR_linkat:
7621 void * p2 = NULL;
7622 if (!arg2 || !arg4)
7623 goto efault;
7624 p = lock_user_string(arg2);
7625 p2 = lock_user_string(arg4);
7626 if (!p || !p2)
7627 ret = -TARGET_EFAULT;
7628 else
7629 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7630 unlock_user(p, arg2, 0);
7631 unlock_user(p2, arg4, 0);
7633 break;
7634 #endif
7635 #ifdef TARGET_NR_unlink
7636 case TARGET_NR_unlink:
7637 if (!(p = lock_user_string(arg1)))
7638 goto efault;
7639 ret = get_errno(unlink(p));
7640 unlock_user(p, arg1, 0);
7641 break;
7642 #endif
7643 #if defined(TARGET_NR_unlinkat)
7644 case TARGET_NR_unlinkat:
7645 if (!(p = lock_user_string(arg2)))
7646 goto efault;
7647 ret = get_errno(unlinkat(arg1, p, arg3));
7648 unlock_user(p, arg2, 0);
7649 break;
7650 #endif
7651 case TARGET_NR_execve:
7653 char **argp, **envp;
7654 int argc, envc;
7655 abi_ulong gp;
7656 abi_ulong guest_argp;
7657 abi_ulong guest_envp;
7658 abi_ulong addr;
7659 char **q;
7660 int total_size = 0;
7662 argc = 0;
7663 guest_argp = arg2;
7664 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7665 if (get_user_ual(addr, gp))
7666 goto efault;
7667 if (!addr)
7668 break;
7669 argc++;
7671 envc = 0;
7672 guest_envp = arg3;
7673 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7674 if (get_user_ual(addr, gp))
7675 goto efault;
7676 if (!addr)
7677 break;
7678 envc++;
7681 argp = alloca((argc + 1) * sizeof(void *));
7682 envp = alloca((envc + 1) * sizeof(void *));
7684 for (gp = guest_argp, q = argp; gp;
7685 gp += sizeof(abi_ulong), q++) {
7686 if (get_user_ual(addr, gp))
7687 goto execve_efault;
7688 if (!addr)
7689 break;
7690 if (!(*q = lock_user_string(addr)))
7691 goto execve_efault;
7692 total_size += strlen(*q) + 1;
7694 *q = NULL;
7696 for (gp = guest_envp, q = envp; gp;
7697 gp += sizeof(abi_ulong), q++) {
7698 if (get_user_ual(addr, gp))
7699 goto execve_efault;
7700 if (!addr)
7701 break;
7702 if (!(*q = lock_user_string(addr)))
7703 goto execve_efault;
7704 total_size += strlen(*q) + 1;
7706 *q = NULL;
7708 if (!(p = lock_user_string(arg1)))
7709 goto execve_efault;
7710 /* Although execve() is not an interruptible syscall it is
7711 * a special case where we must use the safe_syscall wrapper:
7712 * if we allow a signal to happen before we make the host
7713 * syscall then we will 'lose' it, because at the point of
7714 * execve the process leaves QEMU's control. So we use the
7715 * safe syscall wrapper to ensure that we either take the
7716 * signal as a guest signal, or else it does not happen
7717 * before the execve completes and makes it the other
7718 * program's problem.
7720 ret = get_errno(safe_execve(p, argp, envp));
7721 unlock_user(p, arg1, 0);
7723 goto execve_end;
7725 execve_efault:
7726 ret = -TARGET_EFAULT;
7728 execve_end:
7729 for (gp = guest_argp, q = argp; *q;
7730 gp += sizeof(abi_ulong), q++) {
7731 if (get_user_ual(addr, gp)
7732 || !addr)
7733 break;
7734 unlock_user(*q, addr, 0);
7736 for (gp = guest_envp, q = envp; *q;
7737 gp += sizeof(abi_ulong), q++) {
7738 if (get_user_ual(addr, gp)
7739 || !addr)
7740 break;
7741 unlock_user(*q, addr, 0);
7744 break;
7745 case TARGET_NR_chdir:
7746 if (!(p = lock_user_string(arg1)))
7747 goto efault;
7748 ret = get_errno(chdir(p));
7749 unlock_user(p, arg1, 0);
7750 break;
7751 #ifdef TARGET_NR_time
7752 case TARGET_NR_time:
7754 time_t host_time;
7755 ret = get_errno(time(&host_time));
7756 if (!is_error(ret)
7757 && arg1
7758 && put_user_sal(host_time, arg1))
7759 goto efault;
7761 break;
7762 #endif
7763 #ifdef TARGET_NR_mknod
7764 case TARGET_NR_mknod:
7765 if (!(p = lock_user_string(arg1)))
7766 goto efault;
7767 ret = get_errno(mknod(p, arg2, arg3));
7768 unlock_user(p, arg1, 0);
7769 break;
7770 #endif
7771 #if defined(TARGET_NR_mknodat)
7772 case TARGET_NR_mknodat:
7773 if (!(p = lock_user_string(arg2)))
7774 goto efault;
7775 ret = get_errno(mknodat(arg1, p, arg3, arg4));
7776 unlock_user(p, arg2, 0);
7777 break;
7778 #endif
7779 #ifdef TARGET_NR_chmod
7780 case TARGET_NR_chmod:
7781 if (!(p = lock_user_string(arg1)))
7782 goto efault;
7783 ret = get_errno(chmod(p, arg2));
7784 unlock_user(p, arg1, 0);
7785 break;
7786 #endif
7787 #ifdef TARGET_NR_break
7788 case TARGET_NR_break:
7789 goto unimplemented;
7790 #endif
7791 #ifdef TARGET_NR_oldstat
7792 case TARGET_NR_oldstat:
7793 goto unimplemented;
7794 #endif
7795 case TARGET_NR_lseek:
7796 ret = get_errno(lseek(arg1, arg2, arg3));
7797 break;
7798 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7799 /* Alpha specific */
7800 case TARGET_NR_getxpid:
7801 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7802 ret = get_errno(getpid());
7803 break;
7804 #endif
7805 #ifdef TARGET_NR_getpid
7806 case TARGET_NR_getpid:
7807 ret = get_errno(getpid());
7808 break;
7809 #endif
7810 case TARGET_NR_mount:
7812 /* need to look at the data field */
7813 void *p2, *p3;
7815 if (arg1) {
7816 p = lock_user_string(arg1);
7817 if (!p) {
7818 goto efault;
7820 } else {
7821 p = NULL;
7824 p2 = lock_user_string(arg2);
7825 if (!p2) {
7826 if (arg1) {
7827 unlock_user(p, arg1, 0);
7829 goto efault;
7832 if (arg3) {
7833 p3 = lock_user_string(arg3);
7834 if (!p3) {
7835 if (arg1) {
7836 unlock_user(p, arg1, 0);
7838 unlock_user(p2, arg2, 0);
7839 goto efault;
7841 } else {
7842 p3 = NULL;
7845 /* FIXME - arg5 should be locked, but it isn't clear how to
7846 * do that since it's not guaranteed to be a NULL-terminated
7847 * string.
7849 if (!arg5) {
7850 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7851 } else {
7852 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7854 ret = get_errno(ret);
7856 if (arg1) {
7857 unlock_user(p, arg1, 0);
7859 unlock_user(p2, arg2, 0);
7860 if (arg3) {
7861 unlock_user(p3, arg3, 0);
7864 break;
7865 #ifdef TARGET_NR_umount
7866 case TARGET_NR_umount:
7867 if (!(p = lock_user_string(arg1)))
7868 goto efault;
7869 ret = get_errno(umount(p));
7870 unlock_user(p, arg1, 0);
7871 break;
7872 #endif
7873 #ifdef TARGET_NR_stime /* not on alpha */
7874 case TARGET_NR_stime:
7876 time_t host_time;
7877 if (get_user_sal(host_time, arg1))
7878 goto efault;
7879 ret = get_errno(stime(&host_time));
7881 break;
7882 #endif
7883 case TARGET_NR_ptrace:
7884 goto unimplemented;
7885 #ifdef TARGET_NR_alarm /* not on alpha */
7886 case TARGET_NR_alarm:
7887 ret = alarm(arg1);
7888 break;
7889 #endif
7890 #ifdef TARGET_NR_oldfstat
7891 case TARGET_NR_oldfstat:
7892 goto unimplemented;
7893 #endif
7894 #ifdef TARGET_NR_pause /* not on alpha */
7895 case TARGET_NR_pause:
7896 if (!block_signals()) {
7897 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7899 ret = -TARGET_EINTR;
7900 break;
7901 #endif
7902 #ifdef TARGET_NR_utime
7903 case TARGET_NR_utime:
7905 struct utimbuf tbuf, *host_tbuf;
7906 struct target_utimbuf *target_tbuf;
7907 if (arg2) {
7908 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
7909 goto efault;
7910 tbuf.actime = tswapal(target_tbuf->actime);
7911 tbuf.modtime = tswapal(target_tbuf->modtime);
7912 unlock_user_struct(target_tbuf, arg2, 0);
7913 host_tbuf = &tbuf;
7914 } else {
7915 host_tbuf = NULL;
7917 if (!(p = lock_user_string(arg1)))
7918 goto efault;
7919 ret = get_errno(utime(p, host_tbuf));
7920 unlock_user(p, arg1, 0);
7922 break;
7923 #endif
7924 #ifdef TARGET_NR_utimes
7925 case TARGET_NR_utimes:
7927 struct timeval *tvp, tv[2];
7928 if (arg2) {
7929 if (copy_from_user_timeval(&tv[0], arg2)
7930 || copy_from_user_timeval(&tv[1],
7931 arg2 + sizeof(struct target_timeval)))
7932 goto efault;
7933 tvp = tv;
7934 } else {
7935 tvp = NULL;
7937 if (!(p = lock_user_string(arg1)))
7938 goto efault;
7939 ret = get_errno(utimes(p, tvp));
7940 unlock_user(p, arg1, 0);
7942 break;
7943 #endif
7944 #if defined(TARGET_NR_futimesat)
7945 case TARGET_NR_futimesat:
7947 struct timeval *tvp, tv[2];
7948 if (arg3) {
7949 if (copy_from_user_timeval(&tv[0], arg3)
7950 || copy_from_user_timeval(&tv[1],
7951 arg3 + sizeof(struct target_timeval)))
7952 goto efault;
7953 tvp = tv;
7954 } else {
7955 tvp = NULL;
7957 if (!(p = lock_user_string(arg2)))
7958 goto efault;
7959 ret = get_errno(futimesat(arg1, path(p), tvp));
7960 unlock_user(p, arg2, 0);
7962 break;
7963 #endif
7964 #ifdef TARGET_NR_stty
7965 case TARGET_NR_stty:
7966 goto unimplemented;
7967 #endif
7968 #ifdef TARGET_NR_gtty
7969 case TARGET_NR_gtty:
7970 goto unimplemented;
7971 #endif
7972 #ifdef TARGET_NR_access
7973 case TARGET_NR_access:
7974 if (!(p = lock_user_string(arg1)))
7975 goto efault;
7976 ret = get_errno(access(path(p), arg2));
7977 unlock_user(p, arg1, 0);
7978 break;
7979 #endif
7980 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
7981 case TARGET_NR_faccessat:
7982 if (!(p = lock_user_string(arg2)))
7983 goto efault;
7984 ret = get_errno(faccessat(arg1, p, arg3, 0));
7985 unlock_user(p, arg2, 0);
7986 break;
7987 #endif
7988 #ifdef TARGET_NR_nice /* not on alpha */
7989 case TARGET_NR_nice:
7990 ret = get_errno(nice(arg1));
7991 break;
7992 #endif
7993 #ifdef TARGET_NR_ftime
7994 case TARGET_NR_ftime:
7995 goto unimplemented;
7996 #endif
7997 case TARGET_NR_sync:
7998 sync();
7999 ret = 0;
8000 break;
8001 case TARGET_NR_kill:
8002 ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8003 break;
8004 #ifdef TARGET_NR_rename
8005 case TARGET_NR_rename:
8007 void *p2;
8008 p = lock_user_string(arg1);
8009 p2 = lock_user_string(arg2);
8010 if (!p || !p2)
8011 ret = -TARGET_EFAULT;
8012 else
8013 ret = get_errno(rename(p, p2));
8014 unlock_user(p2, arg2, 0);
8015 unlock_user(p, arg1, 0);
8017 break;
8018 #endif
8019 #if defined(TARGET_NR_renameat)
8020 case TARGET_NR_renameat:
8022 void *p2;
8023 p = lock_user_string(arg2);
8024 p2 = lock_user_string(arg4);
8025 if (!p || !p2)
8026 ret = -TARGET_EFAULT;
8027 else
8028 ret = get_errno(renameat(arg1, p, arg3, p2));
8029 unlock_user(p2, arg4, 0);
8030 unlock_user(p, arg2, 0);
8032 break;
8033 #endif
8034 #ifdef TARGET_NR_mkdir
8035 case TARGET_NR_mkdir:
8036 if (!(p = lock_user_string(arg1)))
8037 goto efault;
8038 ret = get_errno(mkdir(p, arg2));
8039 unlock_user(p, arg1, 0);
8040 break;
8041 #endif
8042 #if defined(TARGET_NR_mkdirat)
8043 case TARGET_NR_mkdirat:
8044 if (!(p = lock_user_string(arg2)))
8045 goto efault;
8046 ret = get_errno(mkdirat(arg1, p, arg3));
8047 unlock_user(p, arg2, 0);
8048 break;
8049 #endif
8050 #ifdef TARGET_NR_rmdir
8051 case TARGET_NR_rmdir:
8052 if (!(p = lock_user_string(arg1)))
8053 goto efault;
8054 ret = get_errno(rmdir(p));
8055 unlock_user(p, arg1, 0);
8056 break;
8057 #endif
8058 case TARGET_NR_dup:
8059 ret = get_errno(dup(arg1));
8060 if (ret >= 0) {
8061 fd_trans_dup(arg1, ret);
8063 break;
8064 #ifdef TARGET_NR_pipe
8065 case TARGET_NR_pipe:
8066 ret = do_pipe(cpu_env, arg1, 0, 0);
8067 break;
8068 #endif
8069 #ifdef TARGET_NR_pipe2
8070 case TARGET_NR_pipe2:
8071 ret = do_pipe(cpu_env, arg1,
8072 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8073 break;
8074 #endif
8075 case TARGET_NR_times:
8077 struct target_tms *tmsp;
8078 struct tms tms;
8079 ret = get_errno(times(&tms));
8080 if (arg1) {
8081 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8082 if (!tmsp)
8083 goto efault;
8084 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8085 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8086 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8087 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8089 if (!is_error(ret))
8090 ret = host_to_target_clock_t(ret);
8092 break;
8093 #ifdef TARGET_NR_prof
8094 case TARGET_NR_prof:
8095 goto unimplemented;
8096 #endif
8097 #ifdef TARGET_NR_signal
8098 case TARGET_NR_signal:
8099 goto unimplemented;
8100 #endif
8101 case TARGET_NR_acct:
8102 if (arg1 == 0) {
8103 ret = get_errno(acct(NULL));
8104 } else {
8105 if (!(p = lock_user_string(arg1)))
8106 goto efault;
8107 ret = get_errno(acct(path(p)));
8108 unlock_user(p, arg1, 0);
8110 break;
8111 #ifdef TARGET_NR_umount2
8112 case TARGET_NR_umount2:
8113 if (!(p = lock_user_string(arg1)))
8114 goto efault;
8115 ret = get_errno(umount2(p, arg2));
8116 unlock_user(p, arg1, 0);
8117 break;
8118 #endif
8119 #ifdef TARGET_NR_lock
8120 case TARGET_NR_lock:
8121 goto unimplemented;
8122 #endif
8123 case TARGET_NR_ioctl:
8124 ret = do_ioctl(arg1, arg2, arg3);
8125 break;
8126 case TARGET_NR_fcntl:
8127 ret = do_fcntl(arg1, arg2, arg3);
8128 break;
8129 #ifdef TARGET_NR_mpx
8130 case TARGET_NR_mpx:
8131 goto unimplemented;
8132 #endif
8133 case TARGET_NR_setpgid:
8134 ret = get_errno(setpgid(arg1, arg2));
8135 break;
8136 #ifdef TARGET_NR_ulimit
8137 case TARGET_NR_ulimit:
8138 goto unimplemented;
8139 #endif
8140 #ifdef TARGET_NR_oldolduname
8141 case TARGET_NR_oldolduname:
8142 goto unimplemented;
8143 #endif
8144 case TARGET_NR_umask:
8145 ret = get_errno(umask(arg1));
8146 break;
8147 case TARGET_NR_chroot:
8148 if (!(p = lock_user_string(arg1)))
8149 goto efault;
8150 ret = get_errno(chroot(p));
8151 unlock_user(p, arg1, 0);
8152 break;
8153 #ifdef TARGET_NR_ustat
8154 case TARGET_NR_ustat:
8155 goto unimplemented;
8156 #endif
8157 #ifdef TARGET_NR_dup2
8158 case TARGET_NR_dup2:
8159 ret = get_errno(dup2(arg1, arg2));
8160 if (ret >= 0) {
8161 fd_trans_dup(arg1, arg2);
8163 break;
8164 #endif
8165 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8166 case TARGET_NR_dup3:
8167 ret = get_errno(dup3(arg1, arg2, arg3));
8168 if (ret >= 0) {
8169 fd_trans_dup(arg1, arg2);
8171 break;
8172 #endif
8173 #ifdef TARGET_NR_getppid /* not on alpha */
8174 case TARGET_NR_getppid:
8175 ret = get_errno(getppid());
8176 break;
8177 #endif
8178 #ifdef TARGET_NR_getpgrp
8179 case TARGET_NR_getpgrp:
8180 ret = get_errno(getpgrp());
8181 break;
8182 #endif
8183 case TARGET_NR_setsid:
8184 ret = get_errno(setsid());
8185 break;
8186 #ifdef TARGET_NR_sigaction
8187 case TARGET_NR_sigaction:
8189 #if defined(TARGET_ALPHA)
8190 struct target_sigaction act, oact, *pact = 0;
8191 struct target_old_sigaction *old_act;
8192 if (arg2) {
8193 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8194 goto efault;
8195 act._sa_handler = old_act->_sa_handler;
8196 target_siginitset(&act.sa_mask, old_act->sa_mask);
8197 act.sa_flags = old_act->sa_flags;
8198 act.sa_restorer = 0;
8199 unlock_user_struct(old_act, arg2, 0);
8200 pact = &act;
8202 ret = get_errno(do_sigaction(arg1, pact, &oact));
8203 if (!is_error(ret) && arg3) {
8204 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8205 goto efault;
8206 old_act->_sa_handler = oact._sa_handler;
8207 old_act->sa_mask = oact.sa_mask.sig[0];
8208 old_act->sa_flags = oact.sa_flags;
8209 unlock_user_struct(old_act, arg3, 1);
8211 #elif defined(TARGET_MIPS)
8212 struct target_sigaction act, oact, *pact, *old_act;
8214 if (arg2) {
8215 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8216 goto efault;
8217 act._sa_handler = old_act->_sa_handler;
8218 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8219 act.sa_flags = old_act->sa_flags;
8220 unlock_user_struct(old_act, arg2, 0);
8221 pact = &act;
8222 } else {
8223 pact = NULL;
8226 ret = get_errno(do_sigaction(arg1, pact, &oact));
8228 if (!is_error(ret) && arg3) {
8229 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8230 goto efault;
8231 old_act->_sa_handler = oact._sa_handler;
8232 old_act->sa_flags = oact.sa_flags;
8233 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8234 old_act->sa_mask.sig[1] = 0;
8235 old_act->sa_mask.sig[2] = 0;
8236 old_act->sa_mask.sig[3] = 0;
8237 unlock_user_struct(old_act, arg3, 1);
8239 #else
8240 struct target_old_sigaction *old_act;
8241 struct target_sigaction act, oact, *pact;
8242 if (arg2) {
8243 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8244 goto efault;
8245 act._sa_handler = old_act->_sa_handler;
8246 target_siginitset(&act.sa_mask, old_act->sa_mask);
8247 act.sa_flags = old_act->sa_flags;
8248 act.sa_restorer = old_act->sa_restorer;
8249 unlock_user_struct(old_act, arg2, 0);
8250 pact = &act;
8251 } else {
8252 pact = NULL;
8254 ret = get_errno(do_sigaction(arg1, pact, &oact));
8255 if (!is_error(ret) && arg3) {
8256 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8257 goto efault;
8258 old_act->_sa_handler = oact._sa_handler;
8259 old_act->sa_mask = oact.sa_mask.sig[0];
8260 old_act->sa_flags = oact.sa_flags;
8261 old_act->sa_restorer = oact.sa_restorer;
8262 unlock_user_struct(old_act, arg3, 1);
8264 #endif
8266 break;
8267 #endif
8268 case TARGET_NR_rt_sigaction:
8270 #if defined(TARGET_ALPHA)
8271 struct target_sigaction act, oact, *pact = 0;
8272 struct target_rt_sigaction *rt_act;
8274 if (arg4 != sizeof(target_sigset_t)) {
8275 ret = -TARGET_EINVAL;
8276 break;
8278 if (arg2) {
8279 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8280 goto efault;
8281 act._sa_handler = rt_act->_sa_handler;
8282 act.sa_mask = rt_act->sa_mask;
8283 act.sa_flags = rt_act->sa_flags;
8284 act.sa_restorer = arg5;
8285 unlock_user_struct(rt_act, arg2, 0);
8286 pact = &act;
8288 ret = get_errno(do_sigaction(arg1, pact, &oact));
8289 if (!is_error(ret) && arg3) {
8290 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8291 goto efault;
8292 rt_act->_sa_handler = oact._sa_handler;
8293 rt_act->sa_mask = oact.sa_mask;
8294 rt_act->sa_flags = oact.sa_flags;
8295 unlock_user_struct(rt_act, arg3, 1);
8297 #else
8298 struct target_sigaction *act;
8299 struct target_sigaction *oact;
8301 if (arg4 != sizeof(target_sigset_t)) {
8302 ret = -TARGET_EINVAL;
8303 break;
8305 if (arg2) {
8306 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
8307 goto efault;
8308 } else
8309 act = NULL;
8310 if (arg3) {
8311 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8312 ret = -TARGET_EFAULT;
8313 goto rt_sigaction_fail;
8315 } else
8316 oact = NULL;
8317 ret = get_errno(do_sigaction(arg1, act, oact));
8318 rt_sigaction_fail:
8319 if (act)
8320 unlock_user_struct(act, arg2, 0);
8321 if (oact)
8322 unlock_user_struct(oact, arg3, 1);
8323 #endif
8325 break;
8326 #ifdef TARGET_NR_sgetmask /* not on alpha */
8327 case TARGET_NR_sgetmask:
8329 sigset_t cur_set;
8330 abi_ulong target_set;
8331 ret = do_sigprocmask(0, NULL, &cur_set);
8332 if (!ret) {
8333 host_to_target_old_sigset(&target_set, &cur_set);
8334 ret = target_set;
8337 break;
8338 #endif
8339 #ifdef TARGET_NR_ssetmask /* not on alpha */
8340 case TARGET_NR_ssetmask:
8342 sigset_t set, oset, cur_set;
8343 abi_ulong target_set = arg1;
8344 /* We only have one word of the new mask so we must read
8345 * the rest of it with do_sigprocmask() and OR in this word.
8346 * We are guaranteed that a do_sigprocmask() that only queries
8347 * the signal mask will not fail.
8349 ret = do_sigprocmask(0, NULL, &cur_set);
8350 assert(!ret);
8351 target_to_host_old_sigset(&set, &target_set);
8352 sigorset(&set, &set, &cur_set);
8353 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8354 if (!ret) {
8355 host_to_target_old_sigset(&target_set, &oset);
8356 ret = target_set;
8359 break;
8360 #endif
8361 #ifdef TARGET_NR_sigprocmask
8362 case TARGET_NR_sigprocmask:
8364 #if defined(TARGET_ALPHA)
8365 sigset_t set, oldset;
8366 abi_ulong mask;
8367 int how;
8369 switch (arg1) {
8370 case TARGET_SIG_BLOCK:
8371 how = SIG_BLOCK;
8372 break;
8373 case TARGET_SIG_UNBLOCK:
8374 how = SIG_UNBLOCK;
8375 break;
8376 case TARGET_SIG_SETMASK:
8377 how = SIG_SETMASK;
8378 break;
8379 default:
8380 ret = -TARGET_EINVAL;
8381 goto fail;
8383 mask = arg2;
8384 target_to_host_old_sigset(&set, &mask);
8386 ret = do_sigprocmask(how, &set, &oldset);
8387 if (!is_error(ret)) {
8388 host_to_target_old_sigset(&mask, &oldset);
8389 ret = mask;
8390 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8392 #else
8393 sigset_t set, oldset, *set_ptr;
8394 int how;
8396 if (arg2) {
8397 switch (arg1) {
8398 case TARGET_SIG_BLOCK:
8399 how = SIG_BLOCK;
8400 break;
8401 case TARGET_SIG_UNBLOCK:
8402 how = SIG_UNBLOCK;
8403 break;
8404 case TARGET_SIG_SETMASK:
8405 how = SIG_SETMASK;
8406 break;
8407 default:
8408 ret = -TARGET_EINVAL;
8409 goto fail;
8411 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8412 goto efault;
8413 target_to_host_old_sigset(&set, p);
8414 unlock_user(p, arg2, 0);
8415 set_ptr = &set;
8416 } else {
8417 how = 0;
8418 set_ptr = NULL;
8420 ret = do_sigprocmask(how, set_ptr, &oldset);
8421 if (!is_error(ret) && arg3) {
8422 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8423 goto efault;
8424 host_to_target_old_sigset(p, &oldset);
8425 unlock_user(p, arg3, sizeof(target_sigset_t));
8427 #endif
8429 break;
8430 #endif
8431 case TARGET_NR_rt_sigprocmask:
8433 int how = arg1;
8434 sigset_t set, oldset, *set_ptr;
8436 if (arg4 != sizeof(target_sigset_t)) {
8437 ret = -TARGET_EINVAL;
8438 break;
8441 if (arg2) {
8442 switch(how) {
8443 case TARGET_SIG_BLOCK:
8444 how = SIG_BLOCK;
8445 break;
8446 case TARGET_SIG_UNBLOCK:
8447 how = SIG_UNBLOCK;
8448 break;
8449 case TARGET_SIG_SETMASK:
8450 how = SIG_SETMASK;
8451 break;
8452 default:
8453 ret = -TARGET_EINVAL;
8454 goto fail;
8456 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8457 goto efault;
8458 target_to_host_sigset(&set, p);
8459 unlock_user(p, arg2, 0);
8460 set_ptr = &set;
8461 } else {
8462 how = 0;
8463 set_ptr = NULL;
8465 ret = do_sigprocmask(how, set_ptr, &oldset);
8466 if (!is_error(ret) && arg3) {
8467 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8468 goto efault;
8469 host_to_target_sigset(p, &oldset);
8470 unlock_user(p, arg3, sizeof(target_sigset_t));
8473 break;
8474 #ifdef TARGET_NR_sigpending
8475 case TARGET_NR_sigpending:
8477 sigset_t set;
8478 ret = get_errno(sigpending(&set));
8479 if (!is_error(ret)) {
8480 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8481 goto efault;
8482 host_to_target_old_sigset(p, &set);
8483 unlock_user(p, arg1, sizeof(target_sigset_t));
8486 break;
8487 #endif
8488 case TARGET_NR_rt_sigpending:
8490 sigset_t set;
8492 /* Yes, this check is >, not != like most. We follow the kernel's
8493 * logic and it does it like this because it implements
8494 * NR_sigpending through the same code path, and in that case
8495 * the old_sigset_t is smaller in size.
8497 if (arg2 > sizeof(target_sigset_t)) {
8498 ret = -TARGET_EINVAL;
8499 break;
8502 ret = get_errno(sigpending(&set));
8503 if (!is_error(ret)) {
8504 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8505 goto efault;
8506 host_to_target_sigset(p, &set);
8507 unlock_user(p, arg1, sizeof(target_sigset_t));
8510 break;
8511 #ifdef TARGET_NR_sigsuspend
8512 case TARGET_NR_sigsuspend:
8514 TaskState *ts = cpu->opaque;
8515 #if defined(TARGET_ALPHA)
8516 abi_ulong mask = arg1;
8517 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8518 #else
8519 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8520 goto efault;
8521 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8522 unlock_user(p, arg1, 0);
8523 #endif
8524 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8525 SIGSET_T_SIZE));
8526 if (ret != -TARGET_ERESTARTSYS) {
8527 ts->in_sigsuspend = 1;
8530 break;
8531 #endif
8532 case TARGET_NR_rt_sigsuspend:
8534 TaskState *ts = cpu->opaque;
8536 if (arg2 != sizeof(target_sigset_t)) {
8537 ret = -TARGET_EINVAL;
8538 break;
8540 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8541 goto efault;
8542 target_to_host_sigset(&ts->sigsuspend_mask, p);
8543 unlock_user(p, arg1, 0);
8544 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8545 SIGSET_T_SIZE));
8546 if (ret != -TARGET_ERESTARTSYS) {
8547 ts->in_sigsuspend = 1;
8550 break;
8551 case TARGET_NR_rt_sigtimedwait:
8553 sigset_t set;
8554 struct timespec uts, *puts;
8555 siginfo_t uinfo;
8557 if (arg4 != sizeof(target_sigset_t)) {
8558 ret = -TARGET_EINVAL;
8559 break;
8562 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8563 goto efault;
8564 target_to_host_sigset(&set, p);
8565 unlock_user(p, arg1, 0);
8566 if (arg3) {
8567 puts = &uts;
8568 target_to_host_timespec(puts, arg3);
8569 } else {
8570 puts = NULL;
8572 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8573 SIGSET_T_SIZE));
8574 if (!is_error(ret)) {
8575 if (arg2) {
8576 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8578 if (!p) {
8579 goto efault;
8581 host_to_target_siginfo(p, &uinfo);
8582 unlock_user(p, arg2, sizeof(target_siginfo_t));
8584 ret = host_to_target_signal(ret);
8587 break;
8588 case TARGET_NR_rt_sigqueueinfo:
8590 siginfo_t uinfo;
8592 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8593 if (!p) {
8594 goto efault;
8596 target_to_host_siginfo(&uinfo, p);
8597 unlock_user(p, arg1, 0);
8598 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8600 break;
8601 #ifdef TARGET_NR_sigreturn
8602 case TARGET_NR_sigreturn:
8603 if (block_signals()) {
8604 ret = -TARGET_ERESTARTSYS;
8605 } else {
8606 ret = do_sigreturn(cpu_env);
8608 break;
8609 #endif
8610 case TARGET_NR_rt_sigreturn:
8611 if (block_signals()) {
8612 ret = -TARGET_ERESTARTSYS;
8613 } else {
8614 ret = do_rt_sigreturn(cpu_env);
8616 break;
8617 case TARGET_NR_sethostname:
8618 if (!(p = lock_user_string(arg1)))
8619 goto efault;
8620 ret = get_errno(sethostname(p, arg2));
8621 unlock_user(p, arg1, 0);
8622 break;
8623 case TARGET_NR_setrlimit:
8625 int resource = target_to_host_resource(arg1);
8626 struct target_rlimit *target_rlim;
8627 struct rlimit rlim;
8628 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8629 goto efault;
8630 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8631 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8632 unlock_user_struct(target_rlim, arg2, 0);
8633 ret = get_errno(setrlimit(resource, &rlim));
8635 break;
8636 case TARGET_NR_getrlimit:
8638 int resource = target_to_host_resource(arg1);
8639 struct target_rlimit *target_rlim;
8640 struct rlimit rlim;
8642 ret = get_errno(getrlimit(resource, &rlim));
8643 if (!is_error(ret)) {
8644 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8645 goto efault;
8646 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8647 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8648 unlock_user_struct(target_rlim, arg2, 1);
8651 break;
8652 case TARGET_NR_getrusage:
8654 struct rusage rusage;
8655 ret = get_errno(getrusage(arg1, &rusage));
8656 if (!is_error(ret)) {
8657 ret = host_to_target_rusage(arg2, &rusage);
8660 break;
8661 case TARGET_NR_gettimeofday:
8663 struct timeval tv;
8664 ret = get_errno(gettimeofday(&tv, NULL));
8665 if (!is_error(ret)) {
8666 if (copy_to_user_timeval(arg1, &tv))
8667 goto efault;
8670 break;
8671 case TARGET_NR_settimeofday:
8673 struct timeval tv, *ptv = NULL;
8674 struct timezone tz, *ptz = NULL;
8676 if (arg1) {
8677 if (copy_from_user_timeval(&tv, arg1)) {
8678 goto efault;
8680 ptv = &tv;
8683 if (arg2) {
8684 if (copy_from_user_timezone(&tz, arg2)) {
8685 goto efault;
8687 ptz = &tz;
8690 ret = get_errno(settimeofday(ptv, ptz));
8692 break;
8693 #if defined(TARGET_NR_select)
8694 case TARGET_NR_select:
8695 #if defined(TARGET_WANT_NI_OLD_SELECT)
8696 /* some architectures used to have old_select here
8697 * but now ENOSYS it.
8699 ret = -TARGET_ENOSYS;
8700 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8701 ret = do_old_select(arg1);
8702 #else
8703 ret = do_select(arg1, arg2, arg3, arg4, arg5);
8704 #endif
8705 break;
8706 #endif
8707 #ifdef TARGET_NR_pselect6
8708 case TARGET_NR_pselect6:
8710 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8711 fd_set rfds, wfds, efds;
8712 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8713 struct timespec ts, *ts_ptr;
8716 * The 6th arg is actually two args smashed together,
8717 * so we cannot use the C library.
8719 sigset_t set;
8720 struct {
8721 sigset_t *set;
8722 size_t size;
8723 } sig, *sig_ptr;
8725 abi_ulong arg_sigset, arg_sigsize, *arg7;
8726 target_sigset_t *target_sigset;
8728 n = arg1;
8729 rfd_addr = arg2;
8730 wfd_addr = arg3;
8731 efd_addr = arg4;
8732 ts_addr = arg5;
8734 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8735 if (ret) {
8736 goto fail;
8738 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8739 if (ret) {
8740 goto fail;
8742 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8743 if (ret) {
8744 goto fail;
8748 * This takes a timespec, and not a timeval, so we cannot
8749 * use the do_select() helper ...
8751 if (ts_addr) {
8752 if (target_to_host_timespec(&ts, ts_addr)) {
8753 goto efault;
8755 ts_ptr = &ts;
8756 } else {
8757 ts_ptr = NULL;
8760 /* Extract the two packed args for the sigset */
8761 if (arg6) {
8762 sig_ptr = &sig;
8763 sig.size = SIGSET_T_SIZE;
8765 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8766 if (!arg7) {
8767 goto efault;
8769 arg_sigset = tswapal(arg7[0]);
8770 arg_sigsize = tswapal(arg7[1]);
8771 unlock_user(arg7, arg6, 0);
8773 if (arg_sigset) {
8774 sig.set = &set;
8775 if (arg_sigsize != sizeof(*target_sigset)) {
8776 /* Like the kernel, we enforce correct size sigsets */
8777 ret = -TARGET_EINVAL;
8778 goto fail;
8780 target_sigset = lock_user(VERIFY_READ, arg_sigset,
8781 sizeof(*target_sigset), 1);
8782 if (!target_sigset) {
8783 goto efault;
8785 target_to_host_sigset(&set, target_sigset);
8786 unlock_user(target_sigset, arg_sigset, 0);
8787 } else {
8788 sig.set = NULL;
8790 } else {
8791 sig_ptr = NULL;
8794 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8795 ts_ptr, sig_ptr));
8797 if (!is_error(ret)) {
8798 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8799 goto efault;
8800 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8801 goto efault;
8802 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8803 goto efault;
8805 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8806 goto efault;
8809 break;
8810 #endif
8811 #ifdef TARGET_NR_symlink
8812 case TARGET_NR_symlink:
8814 void *p2;
8815 p = lock_user_string(arg1);
8816 p2 = lock_user_string(arg2);
8817 if (!p || !p2)
8818 ret = -TARGET_EFAULT;
8819 else
8820 ret = get_errno(symlink(p, p2));
8821 unlock_user(p2, arg2, 0);
8822 unlock_user(p, arg1, 0);
8824 break;
8825 #endif
8826 #if defined(TARGET_NR_symlinkat)
8827 case TARGET_NR_symlinkat:
8829 void *p2;
8830 p = lock_user_string(arg1);
8831 p2 = lock_user_string(arg3);
8832 if (!p || !p2)
8833 ret = -TARGET_EFAULT;
8834 else
8835 ret = get_errno(symlinkat(p, arg2, p2));
8836 unlock_user(p2, arg3, 0);
8837 unlock_user(p, arg1, 0);
8839 break;
8840 #endif
8841 #ifdef TARGET_NR_oldlstat
8842 case TARGET_NR_oldlstat:
8843 goto unimplemented;
8844 #endif
8845 #ifdef TARGET_NR_readlink
8846 case TARGET_NR_readlink:
8848 void *p2;
8849 p = lock_user_string(arg1);
8850 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8851 if (!p || !p2) {
8852 ret = -TARGET_EFAULT;
8853 } else if (!arg3) {
8854 /* Short circuit this for the magic exe check. */
8855 ret = -TARGET_EINVAL;
8856 } else if (is_proc_myself((const char *)p, "exe")) {
8857 char real[PATH_MAX], *temp;
8858 temp = realpath(exec_path, real);
8859 /* Return value is # of bytes that we wrote to the buffer. */
8860 if (temp == NULL) {
8861 ret = get_errno(-1);
8862 } else {
8863 /* Don't worry about sign mismatch as earlier mapping
8864 * logic would have thrown a bad address error. */
8865 ret = MIN(strlen(real), arg3);
8866 /* We cannot NUL terminate the string. */
8867 memcpy(p2, real, ret);
8869 } else {
8870 ret = get_errno(readlink(path(p), p2, arg3));
8872 unlock_user(p2, arg2, ret);
8873 unlock_user(p, arg1, 0);
8875 break;
8876 #endif
8877 #if defined(TARGET_NR_readlinkat)
8878 case TARGET_NR_readlinkat:
8880 void *p2;
8881 p = lock_user_string(arg2);
8882 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8883 if (!p || !p2) {
8884 ret = -TARGET_EFAULT;
8885 } else if (is_proc_myself((const char *)p, "exe")) {
8886 char real[PATH_MAX], *temp;
8887 temp = realpath(exec_path, real);
8888 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
8889 snprintf((char *)p2, arg4, "%s", real);
8890 } else {
8891 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8893 unlock_user(p2, arg3, ret);
8894 unlock_user(p, arg2, 0);
8896 break;
8897 #endif
8898 #ifdef TARGET_NR_uselib
8899 case TARGET_NR_uselib:
8900 goto unimplemented;
8901 #endif
8902 #ifdef TARGET_NR_swapon
8903 case TARGET_NR_swapon:
8904 if (!(p = lock_user_string(arg1)))
8905 goto efault;
8906 ret = get_errno(swapon(p, arg2));
8907 unlock_user(p, arg1, 0);
8908 break;
8909 #endif
8910 case TARGET_NR_reboot:
8911 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
8912 /* arg4 must be ignored in all other cases */
8913 p = lock_user_string(arg4);
8914 if (!p) {
8915 goto efault;
8917 ret = get_errno(reboot(arg1, arg2, arg3, p));
8918 unlock_user(p, arg4, 0);
8919 } else {
8920 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
8922 break;
8923 #ifdef TARGET_NR_readdir
8924 case TARGET_NR_readdir:
8925 goto unimplemented;
8926 #endif
8927 #ifdef TARGET_NR_mmap
8928 case TARGET_NR_mmap:
8929 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
8930 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
8931 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
8932 || defined(TARGET_S390X)
8934 abi_ulong *v;
8935 abi_ulong v1, v2, v3, v4, v5, v6;
8936 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
8937 goto efault;
8938 v1 = tswapal(v[0]);
8939 v2 = tswapal(v[1]);
8940 v3 = tswapal(v[2]);
8941 v4 = tswapal(v[3]);
8942 v5 = tswapal(v[4]);
8943 v6 = tswapal(v[5]);
8944 unlock_user(v, arg1, 0);
8945 ret = get_errno(target_mmap(v1, v2, v3,
8946 target_to_host_bitmask(v4, mmap_flags_tbl),
8947 v5, v6));
8949 #else
8950 ret = get_errno(target_mmap(arg1, arg2, arg3,
8951 target_to_host_bitmask(arg4, mmap_flags_tbl),
8952 arg5,
8953 arg6));
8954 #endif
8955 break;
8956 #endif
8957 #ifdef TARGET_NR_mmap2
8958 case TARGET_NR_mmap2:
8959 #ifndef MMAP_SHIFT
8960 #define MMAP_SHIFT 12
8961 #endif
8962 ret = get_errno(target_mmap(arg1, arg2, arg3,
8963 target_to_host_bitmask(arg4, mmap_flags_tbl),
8964 arg5,
8965 arg6 << MMAP_SHIFT));
8966 break;
8967 #endif
8968 case TARGET_NR_munmap:
8969 ret = get_errno(target_munmap(arg1, arg2));
8970 break;
8971 case TARGET_NR_mprotect:
8973 TaskState *ts = cpu->opaque;
8974 /* Special hack to detect libc making the stack executable. */
8975 if ((arg3 & PROT_GROWSDOWN)
8976 && arg1 >= ts->info->stack_limit
8977 && arg1 <= ts->info->start_stack) {
8978 arg3 &= ~PROT_GROWSDOWN;
8979 arg2 = arg2 + arg1 - ts->info->stack_limit;
8980 arg1 = ts->info->stack_limit;
8983 ret = get_errno(target_mprotect(arg1, arg2, arg3));
8984 break;
#ifdef TARGET_NR_mremap
    case TARGET_NR_mremap:
        ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
        break;
#endif
    /* ??? msync/mlock/munlock are broken for softmmu. */
    /* The following arms pass the guest address through g2h() —
     * guest-to-host address translation — and call the host syscall
     * directly rather than going through a target_* wrapper. */
#ifdef TARGET_NR_msync
    case TARGET_NR_msync:
        ret = get_errno(msync(g2h(arg1), arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_mlock
    case TARGET_NR_mlock:
        ret = get_errno(mlock(g2h(arg1), arg2));
        break;
#endif
#ifdef TARGET_NR_munlock
    case TARGET_NR_munlock:
        ret = get_errno(munlock(g2h(arg1), arg2));
        break;
#endif
#ifdef TARGET_NR_mlockall
    case TARGET_NR_mlockall:
        /* MCL_* flag values differ between targets, hence the
         * explicit flag conversion helper. */
        ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
        break;
#endif
#ifdef TARGET_NR_munlockall
    case TARGET_NR_munlockall:
        ret = get_errno(munlockall());
        break;
#endif
    case TARGET_NR_truncate:
        /* Lock the guest pathname into host-addressable memory before
         * handing it to the host syscall; unlock with length 0 since
         * the string is not written to. */
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(truncate(p, arg2));
        unlock_user(p, arg1, 0);
        break;
    case TARGET_NR_ftruncate:
        ret = get_errno(ftruncate(arg1, arg2));
        break;
    case TARGET_NR_fchmod:
        ret = get_errno(fchmod(arg1, arg2));
        break;
#if defined(TARGET_NR_fchmodat)
    case TARGET_NR_fchmodat:
        if (!(p = lock_user_string(arg2)))
            goto efault;
        /* flags argument is passed as 0: the guest's arg4 flags are
         * not forwarded here. */
        ret = get_errno(fchmodat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        break;
#endif
9036 case TARGET_NR_getpriority:
9037 /* Note that negative values are valid for getpriority, so we must
9038 differentiate based on errno settings. */
9039 errno = 0;
9040 ret = getpriority(arg1, arg2);
9041 if (ret == -1 && errno != 0) {
9042 ret = -host_to_target_errno(errno);
9043 break;
9045 #ifdef TARGET_ALPHA
9046 /* Return value is the unbiased priority. Signal no error. */
9047 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9048 #else
9049 /* Return value is a biased priority to avoid negative numbers. */
9050 ret = 20 - ret;
9051 #endif
9052 break;
9053 case TARGET_NR_setpriority:
9054 ret = get_errno(setpriority(arg1, arg2, arg3));
9055 break;
9056 #ifdef TARGET_NR_profil
9057 case TARGET_NR_profil:
9058 goto unimplemented;
9059 #endif
9060 case TARGET_NR_statfs:
9061 if (!(p = lock_user_string(arg1)))
9062 goto efault;
9063 ret = get_errno(statfs(path(p), &stfs));
9064 unlock_user(p, arg1, 0);
9065 convert_statfs:
9066 if (!is_error(ret)) {
9067 struct target_statfs *target_stfs;
9069 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9070 goto efault;
9071 __put_user(stfs.f_type, &target_stfs->f_type);
9072 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9073 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9074 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9075 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9076 __put_user(stfs.f_files, &target_stfs->f_files);
9077 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9078 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9079 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9080 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9081 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9082 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9083 unlock_user_struct(target_stfs, arg2, 1);
9085 break;
9086 case TARGET_NR_fstatfs:
9087 ret = get_errno(fstatfs(arg1, &stfs));
9088 goto convert_statfs;
9089 #ifdef TARGET_NR_statfs64
9090 case TARGET_NR_statfs64:
9091 if (!(p = lock_user_string(arg1)))
9092 goto efault;
9093 ret = get_errno(statfs(path(p), &stfs));
9094 unlock_user(p, arg1, 0);
9095 convert_statfs64:
9096 if (!is_error(ret)) {
9097 struct target_statfs64 *target_stfs;
9099 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9100 goto efault;
9101 __put_user(stfs.f_type, &target_stfs->f_type);
9102 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9103 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9104 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9105 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9106 __put_user(stfs.f_files, &target_stfs->f_files);
9107 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9108 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9109 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9110 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9111 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9112 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9113 unlock_user_struct(target_stfs, arg3, 1);
9115 break;
9116 case TARGET_NR_fstatfs64:
9117 ret = get_errno(fstatfs(arg1, &stfs));
9118 goto convert_statfs64;
9119 #endif
#ifdef TARGET_NR_ioperm
    case TARGET_NR_ioperm:
        goto unimplemented;
#endif
    /* Socket-family syscalls.  These are thin dispatchers: all guest
     * sockaddr/msghdr/option translation lives in the do_* helpers.
     * Targets with a multiplexed socketcall entry point route through
     * do_socketcall instead of the individual numbers. */
#ifdef TARGET_NR_socketcall
    case TARGET_NR_socketcall:
        ret = do_socketcall(arg1, arg2);
        break;
#endif
#ifdef TARGET_NR_accept
    case TARGET_NR_accept:
        /* Plain accept == accept4 with flags 0. */
        ret = do_accept4(arg1, arg2, arg3, 0);
        break;
#endif
#ifdef TARGET_NR_accept4
    case TARGET_NR_accept4:
        ret = do_accept4(arg1, arg2, arg3, arg4);
        break;
#endif
#ifdef TARGET_NR_bind
    case TARGET_NR_bind:
        ret = do_bind(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_connect
    case TARGET_NR_connect:
        ret = do_connect(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_getpeername
    case TARGET_NR_getpeername:
        ret = do_getpeername(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_getsockname
    case TARGET_NR_getsockname:
        ret = do_getsockname(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_getsockopt
    case TARGET_NR_getsockopt:
        ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
        break;
#endif
#ifdef TARGET_NR_listen
    case TARGET_NR_listen:
        /* No pointer arguments, so the host syscall is called directly. */
        ret = get_errno(listen(arg1, arg2));
        break;
#endif
#ifdef TARGET_NR_recv
    case TARGET_NR_recv:
        /* recv == recvfrom with a NULL source address. */
        ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
        break;
#endif
#ifdef TARGET_NR_recvfrom
    case TARGET_NR_recvfrom:
        ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
        break;
#endif
#ifdef TARGET_NR_recvmsg
    case TARGET_NR_recvmsg:
        /* Last argument selects direction: 0 = receive, 1 = send. */
        ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
        break;
#endif
#ifdef TARGET_NR_send
    case TARGET_NR_send:
        /* send == sendto with a NULL destination address. */
        ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
        break;
#endif
#ifdef TARGET_NR_sendmsg
    case TARGET_NR_sendmsg:
        ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
        break;
#endif
#ifdef TARGET_NR_sendmmsg
    case TARGET_NR_sendmmsg:
        ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
        break;
    /* NOTE: recvmmsg is guarded by TARGET_NR_sendmmsg here; this
     * assumes both numbers are always defined together. */
    case TARGET_NR_recvmmsg:
        ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
        break;
#endif
#ifdef TARGET_NR_sendto
    case TARGET_NR_sendto:
        ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
        break;
#endif
#ifdef TARGET_NR_shutdown
    case TARGET_NR_shutdown:
        ret = get_errno(shutdown(arg1, arg2));
        break;
#endif
9212 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9213 case TARGET_NR_getrandom:
9214 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9215 if (!p) {
9216 goto efault;
9218 ret = get_errno(getrandom(p, arg2, arg3));
9219 unlock_user(p, arg1, ret);
9220 break;
9221 #endif
9222 #ifdef TARGET_NR_socket
9223 case TARGET_NR_socket:
9224 ret = do_socket(arg1, arg2, arg3);
9225 fd_trans_unregister(ret);
9226 break;
9227 #endif
9228 #ifdef TARGET_NR_socketpair
9229 case TARGET_NR_socketpair:
9230 ret = do_socketpair(arg1, arg2, arg3, arg4);
9231 break;
9232 #endif
9233 #ifdef TARGET_NR_setsockopt
9234 case TARGET_NR_setsockopt:
9235 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9236 break;
9237 #endif
9239 case TARGET_NR_syslog:
9240 if (!(p = lock_user_string(arg2)))
9241 goto efault;
9242 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9243 unlock_user(p, arg2, 0);
9244 break;
9246 case TARGET_NR_setitimer:
9248 struct itimerval value, ovalue, *pvalue;
9250 if (arg2) {
9251 pvalue = &value;
9252 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9253 || copy_from_user_timeval(&pvalue->it_value,
9254 arg2 + sizeof(struct target_timeval)))
9255 goto efault;
9256 } else {
9257 pvalue = NULL;
9259 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9260 if (!is_error(ret) && arg3) {
9261 if (copy_to_user_timeval(arg3,
9262 &ovalue.it_interval)
9263 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9264 &ovalue.it_value))
9265 goto efault;
9268 break;
9269 case TARGET_NR_getitimer:
9271 struct itimerval value;
9273 ret = get_errno(getitimer(arg1, &value));
9274 if (!is_error(ret) && arg2) {
9275 if (copy_to_user_timeval(arg2,
9276 &value.it_interval)
9277 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9278 &value.it_value))
9279 goto efault;
9282 break;
9283 #ifdef TARGET_NR_stat
9284 case TARGET_NR_stat:
9285 if (!(p = lock_user_string(arg1)))
9286 goto efault;
9287 ret = get_errno(stat(path(p), &st));
9288 unlock_user(p, arg1, 0);
9289 goto do_stat;
9290 #endif
9291 #ifdef TARGET_NR_lstat
9292 case TARGET_NR_lstat:
9293 if (!(p = lock_user_string(arg1)))
9294 goto efault;
9295 ret = get_errno(lstat(path(p), &st));
9296 unlock_user(p, arg1, 0);
9297 goto do_stat;
9298 #endif
9299 case TARGET_NR_fstat:
9301 ret = get_errno(fstat(arg1, &st));
9302 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9303 do_stat:
9304 #endif
9305 if (!is_error(ret)) {
9306 struct target_stat *target_st;
9308 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9309 goto efault;
9310 memset(target_st, 0, sizeof(*target_st));
9311 __put_user(st.st_dev, &target_st->st_dev);
9312 __put_user(st.st_ino, &target_st->st_ino);
9313 __put_user(st.st_mode, &target_st->st_mode);
9314 __put_user(st.st_uid, &target_st->st_uid);
9315 __put_user(st.st_gid, &target_st->st_gid);
9316 __put_user(st.st_nlink, &target_st->st_nlink);
9317 __put_user(st.st_rdev, &target_st->st_rdev);
9318 __put_user(st.st_size, &target_st->st_size);
9319 __put_user(st.st_blksize, &target_st->st_blksize);
9320 __put_user(st.st_blocks, &target_st->st_blocks);
9321 __put_user(st.st_atime, &target_st->target_st_atime);
9322 __put_user(st.st_mtime, &target_st->target_st_mtime);
9323 __put_user(st.st_ctime, &target_st->target_st_ctime);
9324 unlock_user_struct(target_st, arg2, 1);
9327 break;
9328 #ifdef TARGET_NR_olduname
9329 case TARGET_NR_olduname:
9330 goto unimplemented;
9331 #endif
9332 #ifdef TARGET_NR_iopl
9333 case TARGET_NR_iopl:
9334 goto unimplemented;
9335 #endif
9336 case TARGET_NR_vhangup:
9337 ret = get_errno(vhangup());
9338 break;
9339 #ifdef TARGET_NR_idle
9340 case TARGET_NR_idle:
9341 goto unimplemented;
9342 #endif
9343 #ifdef TARGET_NR_syscall
9344 case TARGET_NR_syscall:
9345 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9346 arg6, arg7, arg8, 0);
9347 break;
9348 #endif
9349 case TARGET_NR_wait4:
9351 int status;
9352 abi_long status_ptr = arg2;
9353 struct rusage rusage, *rusage_ptr;
9354 abi_ulong target_rusage = arg4;
9355 abi_long rusage_err;
9356 if (target_rusage)
9357 rusage_ptr = &rusage;
9358 else
9359 rusage_ptr = NULL;
9360 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9361 if (!is_error(ret)) {
9362 if (status_ptr && ret) {
9363 status = host_to_target_waitstatus(status);
9364 if (put_user_s32(status, status_ptr))
9365 goto efault;
9367 if (target_rusage) {
9368 rusage_err = host_to_target_rusage(target_rusage, &rusage);
9369 if (rusage_err) {
9370 ret = rusage_err;
9375 break;
9376 #ifdef TARGET_NR_swapoff
9377 case TARGET_NR_swapoff:
9378 if (!(p = lock_user_string(arg1)))
9379 goto efault;
9380 ret = get_errno(swapoff(p));
9381 unlock_user(p, arg1, 0);
9382 break;
9383 #endif
9384 case TARGET_NR_sysinfo:
9386 struct target_sysinfo *target_value;
9387 struct sysinfo value;
9388 ret = get_errno(sysinfo(&value));
9389 if (!is_error(ret) && arg1)
9391 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9392 goto efault;
9393 __put_user(value.uptime, &target_value->uptime);
9394 __put_user(value.loads[0], &target_value->loads[0]);
9395 __put_user(value.loads[1], &target_value->loads[1]);
9396 __put_user(value.loads[2], &target_value->loads[2]);
9397 __put_user(value.totalram, &target_value->totalram);
9398 __put_user(value.freeram, &target_value->freeram);
9399 __put_user(value.sharedram, &target_value->sharedram);
9400 __put_user(value.bufferram, &target_value->bufferram);
9401 __put_user(value.totalswap, &target_value->totalswap);
9402 __put_user(value.freeswap, &target_value->freeswap);
9403 __put_user(value.procs, &target_value->procs);
9404 __put_user(value.totalhigh, &target_value->totalhigh);
9405 __put_user(value.freehigh, &target_value->freehigh);
9406 __put_user(value.mem_unit, &target_value->mem_unit);
9407 unlock_user_struct(target_value, arg1, 1);
9410 break;
    /* System V IPC.  Some targets expose the multiplexed ipc() entry
     * point, others the individual sem*/msg*/shm* numbers; argument
     * translation lives in the do_* helpers. */
#ifdef TARGET_NR_ipc
    case TARGET_NR_ipc:
        ret = do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
        break;
#endif
#ifdef TARGET_NR_semget
    case TARGET_NR_semget:
        /* key/nsems/flags are plain integers, no translation needed. */
        ret = get_errno(semget(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_semop
    case TARGET_NR_semop:
        ret = do_semop(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_semctl
    case TARGET_NR_semctl:
        ret = do_semctl(arg1, arg2, arg3, arg4);
        break;
#endif
#ifdef TARGET_NR_msgctl
    case TARGET_NR_msgctl:
        ret = do_msgctl(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_msgget
    case TARGET_NR_msgget:
        ret = get_errno(msgget(arg1, arg2));
        break;
#endif
#ifdef TARGET_NR_msgrcv
    case TARGET_NR_msgrcv:
        ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
        break;
#endif
#ifdef TARGET_NR_msgsnd
    case TARGET_NR_msgsnd:
        ret = do_msgsnd(arg1, arg2, arg3, arg4);
        break;
#endif
#ifdef TARGET_NR_shmget
    case TARGET_NR_shmget:
        ret = get_errno(shmget(arg1, arg2, arg3));
        break;
#endif
#ifdef TARGET_NR_shmctl
    case TARGET_NR_shmctl:
        ret = do_shmctl(arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_shmat
    case TARGET_NR_shmat:
        /* Needs cpu_env so the helper can pick a suitable guest
         * address for the mapping. */
        ret = do_shmat(cpu_env, arg1, arg2, arg3);
        break;
#endif
#ifdef TARGET_NR_shmdt
    case TARGET_NR_shmdt:
        ret = do_shmdt(arg1);
        break;
#endif
    case TARGET_NR_fsync:
        ret = get_errno(fsync(arg1));
        break;
9474 case TARGET_NR_clone:
9475 /* Linux manages to have three different orderings for its
9476 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9477 * match the kernel's CONFIG_CLONE_* settings.
9478 * Microblaze is further special in that it uses a sixth
9479 * implicit argument to clone for the TLS pointer.
9481 #if defined(TARGET_MICROBLAZE)
9482 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9483 #elif defined(TARGET_CLONE_BACKWARDS)
9484 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9485 #elif defined(TARGET_CLONE_BACKWARDS2)
9486 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9487 #else
9488 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9489 #endif
9490 break;
#ifdef __NR_exit_group
    /* new thread calls */
    case TARGET_NR_exit_group:
#ifdef TARGET_GPROF
        /* Flush gprof profiling data before the process disappears. */
        _mcleanup();
#endif
        /* Notify an attached gdb of the exit code before actually
         * terminating every thread in the process. */
        gdb_exit(cpu_env, arg1);
        ret = get_errno(exit_group(arg1));
        break;
#endif
    case TARGET_NR_setdomainname:
        if (!(p = lock_user_string(arg1)))
            goto efault;
        ret = get_errno(setdomainname(p, arg2));
        unlock_user(p, arg1, 0);
        break;
9507 case TARGET_NR_uname:
9508 /* no need to transcode because we use the linux syscall */
9510 struct new_utsname * buf;
9512 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9513 goto efault;
9514 ret = get_errno(sys_uname(buf));
9515 if (!is_error(ret)) {
9516 /* Overwrite the native machine name with whatever is being
9517 emulated. */
9518 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
9519 /* Allow the user to override the reported release. */
9520 if (qemu_uname_release && *qemu_uname_release) {
9521 g_strlcpy(buf->release, qemu_uname_release,
9522 sizeof(buf->release));
9525 unlock_user_struct(buf, arg1, 1);
9527 break;
#ifdef TARGET_I386
    case TARGET_NR_modify_ldt:
        ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
        break;
#if !defined(TARGET_X86_64)
    case TARGET_NR_vm86old:
        goto unimplemented;
    case TARGET_NR_vm86:
        /* 32-bit x86 only: enter virtual-8086 mode emulation. */
        ret = do_vm86(cpu_env, arg1, arg2);
        break;
#endif
#endif
    case TARGET_NR_adjtimex:
        goto unimplemented;
    /* Module loading syscalls are deliberately unimplemented: a guest
     * cannot load kernel modules into the host kernel. */
#ifdef TARGET_NR_create_module
    case TARGET_NR_create_module:
#endif
    case TARGET_NR_init_module:
    case TARGET_NR_delete_module:
#ifdef TARGET_NR_get_kernel_syms
    case TARGET_NR_get_kernel_syms:
#endif
        goto unimplemented;
    case TARGET_NR_quotactl:
        goto unimplemented;
    case TARGET_NR_getpgid:
        ret = get_errno(getpgid(arg1));
        break;
    case TARGET_NR_fchdir:
        ret = get_errno(fchdir(arg1));
        break;
#ifdef TARGET_NR_bdflush /* not on x86_64 */
    case TARGET_NR_bdflush:
        goto unimplemented;
#endif
#ifdef TARGET_NR_sysfs
    case TARGET_NR_sysfs:
        goto unimplemented;
#endif
    case TARGET_NR_personality:
        /* NOTE(review): passes the guest personality value straight to
         * the host syscall with no translation — confirm the flag
         * values are identical across targets. */
        ret = get_errno(personality(arg1));
        break;
#ifdef TARGET_NR_afs_syscall
    case TARGET_NR_afs_syscall:
        goto unimplemented;
#endif
9574 #ifdef TARGET_NR__llseek /* Not on alpha */
9575 case TARGET_NR__llseek:
9577 int64_t res;
9578 #if !defined(__NR_llseek)
9579 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9580 if (res == -1) {
9581 ret = get_errno(res);
9582 } else {
9583 ret = 0;
9585 #else
9586 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9587 #endif
9588 if ((ret == 0) && put_user_s64(res, arg4)) {
9589 goto efault;
9592 break;
9593 #endif
9594 #ifdef TARGET_NR_getdents
9595 case TARGET_NR_getdents:
9596 #ifdef __NR_getdents
9597 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9599 struct target_dirent *target_dirp;
9600 struct linux_dirent *dirp;
9601 abi_long count = arg3;
9603 dirp = g_try_malloc(count);
9604 if (!dirp) {
9605 ret = -TARGET_ENOMEM;
9606 goto fail;
9609 ret = get_errno(sys_getdents(arg1, dirp, count));
9610 if (!is_error(ret)) {
9611 struct linux_dirent *de;
9612 struct target_dirent *tde;
9613 int len = ret;
9614 int reclen, treclen;
9615 int count1, tnamelen;
9617 count1 = 0;
9618 de = dirp;
9619 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9620 goto efault;
9621 tde = target_dirp;
9622 while (len > 0) {
9623 reclen = de->d_reclen;
9624 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9625 assert(tnamelen >= 0);
9626 treclen = tnamelen + offsetof(struct target_dirent, d_name);
9627 assert(count1 + treclen <= count);
9628 tde->d_reclen = tswap16(treclen);
9629 tde->d_ino = tswapal(de->d_ino);
9630 tde->d_off = tswapal(de->d_off);
9631 memcpy(tde->d_name, de->d_name, tnamelen);
9632 de = (struct linux_dirent *)((char *)de + reclen);
9633 len -= reclen;
9634 tde = (struct target_dirent *)((char *)tde + treclen);
9635 count1 += treclen;
9637 ret = count1;
9638 unlock_user(target_dirp, arg2, ret);
9640 g_free(dirp);
9642 #else
9644 struct linux_dirent *dirp;
9645 abi_long count = arg3;
9647 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9648 goto efault;
9649 ret = get_errno(sys_getdents(arg1, dirp, count));
9650 if (!is_error(ret)) {
9651 struct linux_dirent *de;
9652 int len = ret;
9653 int reclen;
9654 de = dirp;
9655 while (len > 0) {
9656 reclen = de->d_reclen;
9657 if (reclen > len)
9658 break;
9659 de->d_reclen = tswap16(reclen);
9660 tswapls(&de->d_ino);
9661 tswapls(&de->d_off);
9662 de = (struct linux_dirent *)((char *)de + reclen);
9663 len -= reclen;
9666 unlock_user(dirp, arg2, ret);
9668 #endif
9669 #else
9670 /* Implement getdents in terms of getdents64 */
9672 struct linux_dirent64 *dirp;
9673 abi_long count = arg3;
9675 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9676 if (!dirp) {
9677 goto efault;
9679 ret = get_errno(sys_getdents64(arg1, dirp, count));
9680 if (!is_error(ret)) {
9681 /* Convert the dirent64 structs to target dirent. We do this
9682 * in-place, since we can guarantee that a target_dirent is no
9683 * larger than a dirent64; however this means we have to be
9684 * careful to read everything before writing in the new format.
9686 struct linux_dirent64 *de;
9687 struct target_dirent *tde;
9688 int len = ret;
9689 int tlen = 0;
9691 de = dirp;
9692 tde = (struct target_dirent *)dirp;
9693 while (len > 0) {
9694 int namelen, treclen;
9695 int reclen = de->d_reclen;
9696 uint64_t ino = de->d_ino;
9697 int64_t off = de->d_off;
9698 uint8_t type = de->d_type;
9700 namelen = strlen(de->d_name);
9701 treclen = offsetof(struct target_dirent, d_name)
9702 + namelen + 2;
9703 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9705 memmove(tde->d_name, de->d_name, namelen + 1);
9706 tde->d_ino = tswapal(ino);
9707 tde->d_off = tswapal(off);
9708 tde->d_reclen = tswap16(treclen);
9709 /* The target_dirent type is in what was formerly a padding
9710 * byte at the end of the structure:
9712 *(((char *)tde) + treclen - 1) = type;
9714 de = (struct linux_dirent64 *)((char *)de + reclen);
9715 tde = (struct target_dirent *)((char *)tde + treclen);
9716 len -= reclen;
9717 tlen += treclen;
9719 ret = tlen;
9721 unlock_user(dirp, arg2, ret);
9723 #endif
9724 break;
9725 #endif /* TARGET_NR_getdents */
9726 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9727 case TARGET_NR_getdents64:
9729 struct linux_dirent64 *dirp;
9730 abi_long count = arg3;
9731 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9732 goto efault;
9733 ret = get_errno(sys_getdents64(arg1, dirp, count));
9734 if (!is_error(ret)) {
9735 struct linux_dirent64 *de;
9736 int len = ret;
9737 int reclen;
9738 de = dirp;
9739 while (len > 0) {
9740 reclen = de->d_reclen;
9741 if (reclen > len)
9742 break;
9743 de->d_reclen = tswap16(reclen);
9744 tswap64s((uint64_t *)&de->d_ino);
9745 tswap64s((uint64_t *)&de->d_off);
9746 de = (struct linux_dirent64 *)((char *)de + reclen);
9747 len -= reclen;
9750 unlock_user(dirp, arg2, ret);
9752 break;
9753 #endif /* TARGET_NR_getdents64 */
9754 #if defined(TARGET_NR__newselect)
9755 case TARGET_NR__newselect:
9756 ret = do_select(arg1, arg2, arg3, arg4, arg5);
9757 break;
9758 #endif
9759 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9760 # ifdef TARGET_NR_poll
9761 case TARGET_NR_poll:
9762 # endif
9763 # ifdef TARGET_NR_ppoll
9764 case TARGET_NR_ppoll:
9765 # endif
9767 struct target_pollfd *target_pfd;
9768 unsigned int nfds = arg2;
9769 struct pollfd *pfd;
9770 unsigned int i;
9772 pfd = NULL;
9773 target_pfd = NULL;
9774 if (nfds) {
9775 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9776 ret = -TARGET_EINVAL;
9777 break;
9780 target_pfd = lock_user(VERIFY_WRITE, arg1,
9781 sizeof(struct target_pollfd) * nfds, 1);
9782 if (!target_pfd) {
9783 goto efault;
9786 pfd = alloca(sizeof(struct pollfd) * nfds);
9787 for (i = 0; i < nfds; i++) {
9788 pfd[i].fd = tswap32(target_pfd[i].fd);
9789 pfd[i].events = tswap16(target_pfd[i].events);
9793 switch (num) {
9794 # ifdef TARGET_NR_ppoll
9795 case TARGET_NR_ppoll:
9797 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9798 target_sigset_t *target_set;
9799 sigset_t _set, *set = &_set;
9801 if (arg3) {
9802 if (target_to_host_timespec(timeout_ts, arg3)) {
9803 unlock_user(target_pfd, arg1, 0);
9804 goto efault;
9806 } else {
9807 timeout_ts = NULL;
9810 if (arg4) {
9811 if (arg5 != sizeof(target_sigset_t)) {
9812 unlock_user(target_pfd, arg1, 0);
9813 ret = -TARGET_EINVAL;
9814 break;
9817 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9818 if (!target_set) {
9819 unlock_user(target_pfd, arg1, 0);
9820 goto efault;
9822 target_to_host_sigset(set, target_set);
9823 } else {
9824 set = NULL;
9827 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9828 set, SIGSET_T_SIZE));
9830 if (!is_error(ret) && arg3) {
9831 host_to_target_timespec(arg3, timeout_ts);
9833 if (arg4) {
9834 unlock_user(target_set, arg4, 0);
9836 break;
9838 # endif
9839 # ifdef TARGET_NR_poll
9840 case TARGET_NR_poll:
9842 struct timespec ts, *pts;
9844 if (arg3 >= 0) {
9845 /* Convert ms to secs, ns */
9846 ts.tv_sec = arg3 / 1000;
9847 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
9848 pts = &ts;
9849 } else {
9850 /* -ve poll() timeout means "infinite" */
9851 pts = NULL;
9853 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
9854 break;
9856 # endif
9857 default:
9858 g_assert_not_reached();
9861 if (!is_error(ret)) {
9862 for(i = 0; i < nfds; i++) {
9863 target_pfd[i].revents = tswap16(pfd[i].revents);
9866 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
9868 break;
9869 #endif
9870 case TARGET_NR_flock:
9871 /* NOTE: the flock constant seems to be the same for every
9872 Linux platform */
9873 ret = get_errno(safe_flock(arg1, arg2));
9874 break;
9875 case TARGET_NR_readv:
9877 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
9878 if (vec != NULL) {
9879 ret = get_errno(safe_readv(arg1, vec, arg3));
9880 unlock_iovec(vec, arg2, arg3, 1);
9881 } else {
9882 ret = -host_to_target_errno(errno);
9885 break;
9886 case TARGET_NR_writev:
9888 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
9889 if (vec != NULL) {
9890 ret = get_errno(safe_writev(arg1, vec, arg3));
9891 unlock_iovec(vec, arg2, arg3, 0);
9892 } else {
9893 ret = -host_to_target_errno(errno);
9896 break;
    case TARGET_NR_getsid:
        ret = get_errno(getsid(arg1));
        break;
#if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
    case TARGET_NR_fdatasync:
        ret = get_errno(fdatasync(arg1));
        break;
#endif
#ifdef TARGET_NR__sysctl
    case TARGET_NR__sysctl:
        /* We don't implement this, but ENOTDIR is always a safe
           return value. */
        ret = -TARGET_ENOTDIR;
        break;
#endif
9912 case TARGET_NR_sched_getaffinity:
9914 unsigned int mask_size;
9915 unsigned long *mask;
9918 * sched_getaffinity needs multiples of ulong, so need to take
9919 * care of mismatches between target ulong and host ulong sizes.
9921 if (arg2 & (sizeof(abi_ulong) - 1)) {
9922 ret = -TARGET_EINVAL;
9923 break;
9925 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9927 mask = alloca(mask_size);
9928 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
9930 if (!is_error(ret)) {
9931 if (ret > arg2) {
9932 /* More data returned than the caller's buffer will fit.
9933 * This only happens if sizeof(abi_long) < sizeof(long)
9934 * and the caller passed us a buffer holding an odd number
9935 * of abi_longs. If the host kernel is actually using the
9936 * extra 4 bytes then fail EINVAL; otherwise we can just
9937 * ignore them and only copy the interesting part.
9939 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
9940 if (numcpus > arg2 * 8) {
9941 ret = -TARGET_EINVAL;
9942 break;
9944 ret = arg2;
9947 if (copy_to_user(arg3, mask, ret)) {
9948 goto efault;
9952 break;
9953 case TARGET_NR_sched_setaffinity:
9955 unsigned int mask_size;
9956 unsigned long *mask;
9959 * sched_setaffinity needs multiples of ulong, so need to take
9960 * care of mismatches between target ulong and host ulong sizes.
9962 if (arg2 & (sizeof(abi_ulong) - 1)) {
9963 ret = -TARGET_EINVAL;
9964 break;
9966 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
9968 mask = alloca(mask_size);
9969 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
9970 goto efault;
9972 memcpy(mask, p, arg2);
9973 unlock_user_struct(p, arg2, 0);
9975 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
9977 break;
9978 case TARGET_NR_sched_setparam:
9980 struct sched_param *target_schp;
9981 struct sched_param schp;
9983 if (arg2 == 0) {
9984 return -TARGET_EINVAL;
9986 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
9987 goto efault;
9988 schp.sched_priority = tswap32(target_schp->sched_priority);
9989 unlock_user_struct(target_schp, arg2, 0);
9990 ret = get_errno(sched_setparam(arg1, &schp));
9992 break;
9993 case TARGET_NR_sched_getparam:
9995 struct sched_param *target_schp;
9996 struct sched_param schp;
9998 if (arg2 == 0) {
9999 return -TARGET_EINVAL;
10001 ret = get_errno(sched_getparam(arg1, &schp));
10002 if (!is_error(ret)) {
10003 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10004 goto efault;
10005 target_schp->sched_priority = tswap32(schp.sched_priority);
10006 unlock_user_struct(target_schp, arg2, 1);
10009 break;
10010 case TARGET_NR_sched_setscheduler:
10012 struct sched_param *target_schp;
10013 struct sched_param schp;
10014 if (arg3 == 0) {
10015 return -TARGET_EINVAL;
10017 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10018 goto efault;
10019 schp.sched_priority = tswap32(target_schp->sched_priority);
10020 unlock_user_struct(target_schp, arg3, 0);
10021 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
10023 break;
    /* Scheduler queries with integer-only arguments: call the host
     * functions directly, only the errno needs converting. */
    case TARGET_NR_sched_getscheduler:
        ret = get_errno(sched_getscheduler(arg1));
        break;
    case TARGET_NR_sched_yield:
        ret = get_errno(sched_yield());
        break;
    case TARGET_NR_sched_get_priority_max:
        ret = get_errno(sched_get_priority_max(arg1));
        break;
    case TARGET_NR_sched_get_priority_min:
        ret = get_errno(sched_get_priority_min(arg1));
        break;
10036 case TARGET_NR_sched_rr_get_interval:
10038 struct timespec ts;
10039 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10040 if (!is_error(ret)) {
10041 ret = host_to_target_timespec(arg2, &ts);
10044 break;
10045 case TARGET_NR_nanosleep:
10047 struct timespec req, rem;
10048 target_to_host_timespec(&req, arg1);
10049 ret = get_errno(safe_nanosleep(&req, &rem));
10050 if (is_error(ret) && arg2) {
10051 host_to_target_timespec(arg2, &rem);
10054 break;
10055 #ifdef TARGET_NR_query_module
10056 case TARGET_NR_query_module:
10057 goto unimplemented;
10058 #endif
10059 #ifdef TARGET_NR_nfsservctl
10060 case TARGET_NR_nfsservctl:
10061 goto unimplemented;
10062 #endif
10063 case TARGET_NR_prctl:
10064 switch (arg1) {
10065 case PR_GET_PDEATHSIG:
10067 int deathsig;
10068 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10069 if (!is_error(ret) && arg2
10070 && put_user_ual(deathsig, arg2)) {
10071 goto efault;
10073 break;
10075 #ifdef PR_GET_NAME
10076 case PR_GET_NAME:
10078 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10079 if (!name) {
10080 goto efault;
10082 ret = get_errno(prctl(arg1, (unsigned long)name,
10083 arg3, arg4, arg5));
10084 unlock_user(name, arg2, 16);
10085 break;
10087 case PR_SET_NAME:
10089 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10090 if (!name) {
10091 goto efault;
10093 ret = get_errno(prctl(arg1, (unsigned long)name,
10094 arg3, arg4, arg5));
10095 unlock_user(name, arg2, 0);
10096 break;
10098 #endif
10099 default:
10100 /* Most prctl options have no pointer arguments */
10101 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10102 break;
10104 break;
#ifdef TARGET_NR_arch_prctl
    case TARGET_NR_arch_prctl:
    /* Only implemented for 64-bit x86 guests (FS/GS base management);
     * everything else falls through to unimplemented. */
#if defined(TARGET_I386) && !defined(TARGET_ABI32)
        ret = do_arch_prctl(cpu_env, arg1, arg2);
        break;
#else
        goto unimplemented;
#endif
#endif
10114 #ifdef TARGET_NR_pread64
10115 case TARGET_NR_pread64:
10116 if (regpairs_aligned(cpu_env)) {
10117 arg4 = arg5;
10118 arg5 = arg6;
10120 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
10121 goto efault;
10122 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10123 unlock_user(p, arg2, ret);
10124 break;
10125 case TARGET_NR_pwrite64:
10126 if (regpairs_aligned(cpu_env)) {
10127 arg4 = arg5;
10128 arg5 = arg6;
10130 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
10131 goto efault;
10132 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10133 unlock_user(p, arg2, 0);
10134 break;
10135 #endif
10136 case TARGET_NR_getcwd:
10137 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10138 goto efault;
10139 ret = get_errno(sys_getcwd1(p, arg2));
10140 unlock_user(p, arg1, ret);
10141 break;
10142 case TARGET_NR_capget:
10143 case TARGET_NR_capset:
10145 struct target_user_cap_header *target_header;
10146 struct target_user_cap_data *target_data = NULL;
10147 struct __user_cap_header_struct header;
10148 struct __user_cap_data_struct data[2];
10149 struct __user_cap_data_struct *dataptr = NULL;
10150 int i, target_datalen;
10151 int data_items = 1;
10153 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10154 goto efault;
10156 header.version = tswap32(target_header->version);
10157 header.pid = tswap32(target_header->pid);
10159 if (header.version != _LINUX_CAPABILITY_VERSION) {
10160 /* Version 2 and up takes pointer to two user_data structs */
10161 data_items = 2;
10164 target_datalen = sizeof(*target_data) * data_items;
10166 if (arg2) {
10167 if (num == TARGET_NR_capget) {
10168 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10169 } else {
10170 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10172 if (!target_data) {
10173 unlock_user_struct(target_header, arg1, 0);
10174 goto efault;
10177 if (num == TARGET_NR_capset) {
10178 for (i = 0; i < data_items; i++) {
10179 data[i].effective = tswap32(target_data[i].effective);
10180 data[i].permitted = tswap32(target_data[i].permitted);
10181 data[i].inheritable = tswap32(target_data[i].inheritable);
10185 dataptr = data;
10188 if (num == TARGET_NR_capget) {
10189 ret = get_errno(capget(&header, dataptr));
10190 } else {
10191 ret = get_errno(capset(&header, dataptr));
10194 /* The kernel always updates version for both capget and capset */
10195 target_header->version = tswap32(header.version);
10196 unlock_user_struct(target_header, arg1, 1);
10198 if (arg2) {
10199 if (num == TARGET_NR_capget) {
10200 for (i = 0; i < data_items; i++) {
10201 target_data[i].effective = tswap32(data[i].effective);
10202 target_data[i].permitted = tswap32(data[i].permitted);
10203 target_data[i].inheritable = tswap32(data[i].inheritable);
10205 unlock_user(target_data, arg2, target_datalen);
10206 } else {
10207 unlock_user(target_data, arg2, 0);
10210 break;
10212 case TARGET_NR_sigaltstack:
10213 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
10214 break;
10216 #ifdef CONFIG_SENDFILE
10217 case TARGET_NR_sendfile:
10219 off_t *offp = NULL;
10220 off_t off;
10221 if (arg3) {
10222 ret = get_user_sal(off, arg3);
10223 if (is_error(ret)) {
10224 break;
10226 offp = &off;
10228 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10229 if (!is_error(ret) && arg3) {
10230 abi_long ret2 = put_user_sal(off, arg3);
10231 if (is_error(ret2)) {
10232 ret = ret2;
10235 break;
10237 #ifdef TARGET_NR_sendfile64
10238 case TARGET_NR_sendfile64:
10240 off_t *offp = NULL;
10241 off_t off;
10242 if (arg3) {
10243 ret = get_user_s64(off, arg3);
10244 if (is_error(ret)) {
10245 break;
10247 offp = &off;
10249 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10250 if (!is_error(ret) && arg3) {
10251 abi_long ret2 = put_user_s64(off, arg3);
10252 if (is_error(ret2)) {
10253 ret = ret2;
10256 break;
10258 #endif
10259 #else
10260 case TARGET_NR_sendfile:
10261 #ifdef TARGET_NR_sendfile64
10262 case TARGET_NR_sendfile64:
10263 #endif
10264 goto unimplemented;
10265 #endif
10267 #ifdef TARGET_NR_getpmsg
10268 case TARGET_NR_getpmsg:
10269 goto unimplemented;
10270 #endif
10271 #ifdef TARGET_NR_putpmsg
10272 case TARGET_NR_putpmsg:
10273 goto unimplemented;
10274 #endif
10275 #ifdef TARGET_NR_vfork
10276 case TARGET_NR_vfork:
10277 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
10278 0, 0, 0, 0));
10279 break;
10280 #endif
10281 #ifdef TARGET_NR_ugetrlimit
10282 case TARGET_NR_ugetrlimit:
10284 struct rlimit rlim;
10285 int resource = target_to_host_resource(arg1);
10286 ret = get_errno(getrlimit(resource, &rlim));
10287 if (!is_error(ret)) {
10288 struct target_rlimit *target_rlim;
10289 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10290 goto efault;
10291 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10292 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10293 unlock_user_struct(target_rlim, arg2, 1);
10295 break;
10297 #endif
10298 #ifdef TARGET_NR_truncate64
10299 case TARGET_NR_truncate64:
10300 if (!(p = lock_user_string(arg1)))
10301 goto efault;
10302 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10303 unlock_user(p, arg1, 0);
10304 break;
10305 #endif
10306 #ifdef TARGET_NR_ftruncate64
10307 case TARGET_NR_ftruncate64:
10308 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10309 break;
10310 #endif
10311 #ifdef TARGET_NR_stat64
10312 case TARGET_NR_stat64:
10313 if (!(p = lock_user_string(arg1)))
10314 goto efault;
10315 ret = get_errno(stat(path(p), &st));
10316 unlock_user(p, arg1, 0);
10317 if (!is_error(ret))
10318 ret = host_to_target_stat64(cpu_env, arg2, &st);
10319 break;
10320 #endif
10321 #ifdef TARGET_NR_lstat64
10322 case TARGET_NR_lstat64:
10323 if (!(p = lock_user_string(arg1)))
10324 goto efault;
10325 ret = get_errno(lstat(path(p), &st));
10326 unlock_user(p, arg1, 0);
10327 if (!is_error(ret))
10328 ret = host_to_target_stat64(cpu_env, arg2, &st);
10329 break;
10330 #endif
10331 #ifdef TARGET_NR_fstat64
10332 case TARGET_NR_fstat64:
10333 ret = get_errno(fstat(arg1, &st));
10334 if (!is_error(ret))
10335 ret = host_to_target_stat64(cpu_env, arg2, &st);
10336 break;
10337 #endif
10338 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10339 #ifdef TARGET_NR_fstatat64
10340 case TARGET_NR_fstatat64:
10341 #endif
10342 #ifdef TARGET_NR_newfstatat
10343 case TARGET_NR_newfstatat:
10344 #endif
10345 if (!(p = lock_user_string(arg2)))
10346 goto efault;
10347 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10348 if (!is_error(ret))
10349 ret = host_to_target_stat64(cpu_env, arg3, &st);
10350 break;
10351 #endif
10352 #ifdef TARGET_NR_lchown
10353 case TARGET_NR_lchown:
10354 if (!(p = lock_user_string(arg1)))
10355 goto efault;
10356 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10357 unlock_user(p, arg1, 0);
10358 break;
10359 #endif
10360 #ifdef TARGET_NR_getuid
10361 case TARGET_NR_getuid:
10362 ret = get_errno(high2lowuid(getuid()));
10363 break;
10364 #endif
10365 #ifdef TARGET_NR_getgid
10366 case TARGET_NR_getgid:
10367 ret = get_errno(high2lowgid(getgid()));
10368 break;
10369 #endif
10370 #ifdef TARGET_NR_geteuid
10371 case TARGET_NR_geteuid:
10372 ret = get_errno(high2lowuid(geteuid()));
10373 break;
10374 #endif
10375 #ifdef TARGET_NR_getegid
10376 case TARGET_NR_getegid:
10377 ret = get_errno(high2lowgid(getegid()));
10378 break;
10379 #endif
10380 case TARGET_NR_setreuid:
10381 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10382 break;
10383 case TARGET_NR_setregid:
10384 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10385 break;
10386 case TARGET_NR_getgroups:
10388 int gidsetsize = arg1;
10389 target_id *target_grouplist;
10390 gid_t *grouplist;
10391 int i;
10393 grouplist = alloca(gidsetsize * sizeof(gid_t));
10394 ret = get_errno(getgroups(gidsetsize, grouplist));
10395 if (gidsetsize == 0)
10396 break;
10397 if (!is_error(ret)) {
10398 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10399 if (!target_grouplist)
10400 goto efault;
10401 for(i = 0;i < ret; i++)
10402 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10403 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10406 break;
10407 case TARGET_NR_setgroups:
10409 int gidsetsize = arg1;
10410 target_id *target_grouplist;
10411 gid_t *grouplist = NULL;
10412 int i;
10413 if (gidsetsize) {
10414 grouplist = alloca(gidsetsize * sizeof(gid_t));
10415 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10416 if (!target_grouplist) {
10417 ret = -TARGET_EFAULT;
10418 goto fail;
10420 for (i = 0; i < gidsetsize; i++) {
10421 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10423 unlock_user(target_grouplist, arg2, 0);
10425 ret = get_errno(setgroups(gidsetsize, grouplist));
10427 break;
10428 case TARGET_NR_fchown:
10429 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10430 break;
10431 #if defined(TARGET_NR_fchownat)
10432 case TARGET_NR_fchownat:
10433 if (!(p = lock_user_string(arg2)))
10434 goto efault;
10435 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10436 low2highgid(arg4), arg5));
10437 unlock_user(p, arg2, 0);
10438 break;
10439 #endif
10440 #ifdef TARGET_NR_setresuid
10441 case TARGET_NR_setresuid:
10442 ret = get_errno(sys_setresuid(low2highuid(arg1),
10443 low2highuid(arg2),
10444 low2highuid(arg3)));
10445 break;
10446 #endif
10447 #ifdef TARGET_NR_getresuid
10448 case TARGET_NR_getresuid:
10450 uid_t ruid, euid, suid;
10451 ret = get_errno(getresuid(&ruid, &euid, &suid));
10452 if (!is_error(ret)) {
10453 if (put_user_id(high2lowuid(ruid), arg1)
10454 || put_user_id(high2lowuid(euid), arg2)
10455 || put_user_id(high2lowuid(suid), arg3))
10456 goto efault;
10459 break;
10460 #endif
10461 #ifdef TARGET_NR_getresgid
10462 case TARGET_NR_setresgid:
10463 ret = get_errno(sys_setresgid(low2highgid(arg1),
10464 low2highgid(arg2),
10465 low2highgid(arg3)));
10466 break;
10467 #endif
10468 #ifdef TARGET_NR_getresgid
10469 case TARGET_NR_getresgid:
10471 gid_t rgid, egid, sgid;
10472 ret = get_errno(getresgid(&rgid, &egid, &sgid));
10473 if (!is_error(ret)) {
10474 if (put_user_id(high2lowgid(rgid), arg1)
10475 || put_user_id(high2lowgid(egid), arg2)
10476 || put_user_id(high2lowgid(sgid), arg3))
10477 goto efault;
10480 break;
10481 #endif
10482 #ifdef TARGET_NR_chown
10483 case TARGET_NR_chown:
10484 if (!(p = lock_user_string(arg1)))
10485 goto efault;
10486 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10487 unlock_user(p, arg1, 0);
10488 break;
10489 #endif
10490 case TARGET_NR_setuid:
10491 ret = get_errno(sys_setuid(low2highuid(arg1)));
10492 break;
10493 case TARGET_NR_setgid:
10494 ret = get_errno(sys_setgid(low2highgid(arg1)));
10495 break;
10496 case TARGET_NR_setfsuid:
10497 ret = get_errno(setfsuid(arg1));
10498 break;
10499 case TARGET_NR_setfsgid:
10500 ret = get_errno(setfsgid(arg1));
10501 break;
10503 #ifdef TARGET_NR_lchown32
10504 case TARGET_NR_lchown32:
10505 if (!(p = lock_user_string(arg1)))
10506 goto efault;
10507 ret = get_errno(lchown(p, arg2, arg3));
10508 unlock_user(p, arg1, 0);
10509 break;
10510 #endif
10511 #ifdef TARGET_NR_getuid32
10512 case TARGET_NR_getuid32:
10513 ret = get_errno(getuid());
10514 break;
10515 #endif
10517 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10518 /* Alpha specific */
10519 case TARGET_NR_getxuid:
10521 uid_t euid;
10522 euid=geteuid();
10523 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
10525 ret = get_errno(getuid());
10526 break;
10527 #endif
10528 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10529 /* Alpha specific */
10530 case TARGET_NR_getxgid:
10532 uid_t egid;
10533 egid=getegid();
10534 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
10536 ret = get_errno(getgid());
10537 break;
10538 #endif
10539 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10540 /* Alpha specific */
10541 case TARGET_NR_osf_getsysinfo:
10542 ret = -TARGET_EOPNOTSUPP;
10543 switch (arg1) {
10544 case TARGET_GSI_IEEE_FP_CONTROL:
10546 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
10548 /* Copied from linux ieee_fpcr_to_swcr. */
10549 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
10550 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
10551 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
10552 | SWCR_TRAP_ENABLE_DZE
10553 | SWCR_TRAP_ENABLE_OVF);
10554 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
10555 | SWCR_TRAP_ENABLE_INE);
10556 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
10557 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
10559 if (put_user_u64 (swcr, arg2))
10560 goto efault;
10561 ret = 0;
10563 break;
10565 /* case GSI_IEEE_STATE_AT_SIGNAL:
10566 -- Not implemented in linux kernel.
10567 case GSI_UACPROC:
10568 -- Retrieves current unaligned access state; not much used.
10569 case GSI_PROC_TYPE:
10570 -- Retrieves implver information; surely not used.
10571 case GSI_GET_HWRPB:
10572 -- Grabs a copy of the HWRPB; surely not used.
10575 break;
10576 #endif
10577 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10578 /* Alpha specific */
10579 case TARGET_NR_osf_setsysinfo:
10580 ret = -TARGET_EOPNOTSUPP;
10581 switch (arg1) {
10582 case TARGET_SSI_IEEE_FP_CONTROL:
10584 uint64_t swcr, fpcr, orig_fpcr;
10586 if (get_user_u64 (swcr, arg2)) {
10587 goto efault;
10589 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10590 fpcr = orig_fpcr & FPCR_DYN_MASK;
10592 /* Copied from linux ieee_swcr_to_fpcr. */
10593 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
10594 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
10595 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
10596 | SWCR_TRAP_ENABLE_DZE
10597 | SWCR_TRAP_ENABLE_OVF)) << 48;
10598 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
10599 | SWCR_TRAP_ENABLE_INE)) << 57;
10600 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
10601 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
10603 cpu_alpha_store_fpcr(cpu_env, fpcr);
10604 ret = 0;
10606 break;
10608 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10610 uint64_t exc, fpcr, orig_fpcr;
10611 int si_code;
10613 if (get_user_u64(exc, arg2)) {
10614 goto efault;
10617 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10619 /* We only add to the exception status here. */
10620 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
10622 cpu_alpha_store_fpcr(cpu_env, fpcr);
10623 ret = 0;
10625 /* Old exceptions are not signaled. */
10626 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
10628 /* If any exceptions set by this call,
10629 and are unmasked, send a signal. */
10630 si_code = 0;
10631 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
10632 si_code = TARGET_FPE_FLTRES;
10634 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
10635 si_code = TARGET_FPE_FLTUND;
10637 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
10638 si_code = TARGET_FPE_FLTOVF;
10640 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
10641 si_code = TARGET_FPE_FLTDIV;
10643 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
10644 si_code = TARGET_FPE_FLTINV;
10646 if (si_code != 0) {
10647 target_siginfo_t info;
10648 info.si_signo = SIGFPE;
10649 info.si_errno = 0;
10650 info.si_code = si_code;
10651 info._sifields._sigfault._addr
10652 = ((CPUArchState *)cpu_env)->pc;
10653 queue_signal((CPUArchState *)cpu_env, info.si_signo,
10654 QEMU_SI_FAULT, &info);
10657 break;
10659 /* case SSI_NVPAIRS:
10660 -- Used with SSIN_UACPROC to enable unaligned accesses.
10661 case SSI_IEEE_STATE_AT_SIGNAL:
10662 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10663 -- Not implemented in linux kernel
10666 break;
10667 #endif
10668 #ifdef TARGET_NR_osf_sigprocmask
10669 /* Alpha specific. */
10670 case TARGET_NR_osf_sigprocmask:
10672 abi_ulong mask;
10673 int how;
10674 sigset_t set, oldset;
10676 switch(arg1) {
10677 case TARGET_SIG_BLOCK:
10678 how = SIG_BLOCK;
10679 break;
10680 case TARGET_SIG_UNBLOCK:
10681 how = SIG_UNBLOCK;
10682 break;
10683 case TARGET_SIG_SETMASK:
10684 how = SIG_SETMASK;
10685 break;
10686 default:
10687 ret = -TARGET_EINVAL;
10688 goto fail;
10690 mask = arg2;
10691 target_to_host_old_sigset(&set, &mask);
10692 ret = do_sigprocmask(how, &set, &oldset);
10693 if (!ret) {
10694 host_to_target_old_sigset(&mask, &oldset);
10695 ret = mask;
10698 break;
10699 #endif
10701 #ifdef TARGET_NR_getgid32
10702 case TARGET_NR_getgid32:
10703 ret = get_errno(getgid());
10704 break;
10705 #endif
10706 #ifdef TARGET_NR_geteuid32
10707 case TARGET_NR_geteuid32:
10708 ret = get_errno(geteuid());
10709 break;
10710 #endif
10711 #ifdef TARGET_NR_getegid32
10712 case TARGET_NR_getegid32:
10713 ret = get_errno(getegid());
10714 break;
10715 #endif
10716 #ifdef TARGET_NR_setreuid32
10717 case TARGET_NR_setreuid32:
10718 ret = get_errno(setreuid(arg1, arg2));
10719 break;
10720 #endif
10721 #ifdef TARGET_NR_setregid32
10722 case TARGET_NR_setregid32:
10723 ret = get_errno(setregid(arg1, arg2));
10724 break;
10725 #endif
10726 #ifdef TARGET_NR_getgroups32
10727 case TARGET_NR_getgroups32:
10729 int gidsetsize = arg1;
10730 uint32_t *target_grouplist;
10731 gid_t *grouplist;
10732 int i;
10734 grouplist = alloca(gidsetsize * sizeof(gid_t));
10735 ret = get_errno(getgroups(gidsetsize, grouplist));
10736 if (gidsetsize == 0)
10737 break;
10738 if (!is_error(ret)) {
10739 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10740 if (!target_grouplist) {
10741 ret = -TARGET_EFAULT;
10742 goto fail;
10744 for(i = 0;i < ret; i++)
10745 target_grouplist[i] = tswap32(grouplist[i]);
10746 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10749 break;
10750 #endif
10751 #ifdef TARGET_NR_setgroups32
10752 case TARGET_NR_setgroups32:
10754 int gidsetsize = arg1;
10755 uint32_t *target_grouplist;
10756 gid_t *grouplist;
10757 int i;
10759 grouplist = alloca(gidsetsize * sizeof(gid_t));
10760 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10761 if (!target_grouplist) {
10762 ret = -TARGET_EFAULT;
10763 goto fail;
10765 for(i = 0;i < gidsetsize; i++)
10766 grouplist[i] = tswap32(target_grouplist[i]);
10767 unlock_user(target_grouplist, arg2, 0);
10768 ret = get_errno(setgroups(gidsetsize, grouplist));
10770 break;
10771 #endif
10772 #ifdef TARGET_NR_fchown32
10773 case TARGET_NR_fchown32:
10774 ret = get_errno(fchown(arg1, arg2, arg3));
10775 break;
10776 #endif
10777 #ifdef TARGET_NR_setresuid32
10778 case TARGET_NR_setresuid32:
10779 ret = get_errno(sys_setresuid(arg1, arg2, arg3));
10780 break;
10781 #endif
10782 #ifdef TARGET_NR_getresuid32
10783 case TARGET_NR_getresuid32:
10785 uid_t ruid, euid, suid;
10786 ret = get_errno(getresuid(&ruid, &euid, &suid));
10787 if (!is_error(ret)) {
10788 if (put_user_u32(ruid, arg1)
10789 || put_user_u32(euid, arg2)
10790 || put_user_u32(suid, arg3))
10791 goto efault;
10794 break;
10795 #endif
10796 #ifdef TARGET_NR_setresgid32
10797 case TARGET_NR_setresgid32:
10798 ret = get_errno(sys_setresgid(arg1, arg2, arg3));
10799 break;
10800 #endif
10801 #ifdef TARGET_NR_getresgid32
10802 case TARGET_NR_getresgid32:
10804 gid_t rgid, egid, sgid;
10805 ret = get_errno(getresgid(&rgid, &egid, &sgid));
10806 if (!is_error(ret)) {
10807 if (put_user_u32(rgid, arg1)
10808 || put_user_u32(egid, arg2)
10809 || put_user_u32(sgid, arg3))
10810 goto efault;
10813 break;
10814 #endif
10815 #ifdef TARGET_NR_chown32
10816 case TARGET_NR_chown32:
10817 if (!(p = lock_user_string(arg1)))
10818 goto efault;
10819 ret = get_errno(chown(p, arg2, arg3));
10820 unlock_user(p, arg1, 0);
10821 break;
10822 #endif
10823 #ifdef TARGET_NR_setuid32
10824 case TARGET_NR_setuid32:
10825 ret = get_errno(sys_setuid(arg1));
10826 break;
10827 #endif
10828 #ifdef TARGET_NR_setgid32
10829 case TARGET_NR_setgid32:
10830 ret = get_errno(sys_setgid(arg1));
10831 break;
10832 #endif
10833 #ifdef TARGET_NR_setfsuid32
10834 case TARGET_NR_setfsuid32:
10835 ret = get_errno(setfsuid(arg1));
10836 break;
10837 #endif
10838 #ifdef TARGET_NR_setfsgid32
10839 case TARGET_NR_setfsgid32:
10840 ret = get_errno(setfsgid(arg1));
10841 break;
10842 #endif
10844 case TARGET_NR_pivot_root:
10845 goto unimplemented;
10846 #ifdef TARGET_NR_mincore
10847 case TARGET_NR_mincore:
10849 void *a;
10850 ret = -TARGET_EFAULT;
10851 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
10852 goto efault;
10853 if (!(p = lock_user_string(arg3)))
10854 goto mincore_fail;
10855 ret = get_errno(mincore(a, arg2, p));
10856 unlock_user(p, arg3, ret);
10857 mincore_fail:
10858 unlock_user(a, arg1, 0);
10860 break;
10861 #endif
10862 #ifdef TARGET_NR_arm_fadvise64_64
10863 case TARGET_NR_arm_fadvise64_64:
10864 /* arm_fadvise64_64 looks like fadvise64_64 but
10865 * with different argument order: fd, advice, offset, len
10866 * rather than the usual fd, offset, len, advice.
10867 * Note that offset and len are both 64-bit so appear as
10868 * pairs of 32-bit registers.
10870 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
10871 target_offset64(arg5, arg6), arg2);
10872 ret = -host_to_target_errno(ret);
10873 break;
10874 #endif
10876 #if TARGET_ABI_BITS == 32
10878 #ifdef TARGET_NR_fadvise64_64
10879 case TARGET_NR_fadvise64_64:
10880 /* 6 args: fd, offset (high, low), len (high, low), advice */
10881 if (regpairs_aligned(cpu_env)) {
10882 /* offset is in (3,4), len in (5,6) and advice in 7 */
10883 arg2 = arg3;
10884 arg3 = arg4;
10885 arg4 = arg5;
10886 arg5 = arg6;
10887 arg6 = arg7;
10889 ret = -host_to_target_errno(posix_fadvise(arg1,
10890 target_offset64(arg2, arg3),
10891 target_offset64(arg4, arg5),
10892 arg6));
10893 break;
10894 #endif
10896 #ifdef TARGET_NR_fadvise64
10897 case TARGET_NR_fadvise64:
10898 /* 5 args: fd, offset (high, low), len, advice */
10899 if (regpairs_aligned(cpu_env)) {
10900 /* offset is in (3,4), len in 5 and advice in 6 */
10901 arg2 = arg3;
10902 arg3 = arg4;
10903 arg4 = arg5;
10904 arg5 = arg6;
10906 ret = -host_to_target_errno(posix_fadvise(arg1,
10907 target_offset64(arg2, arg3),
10908 arg4, arg5));
10909 break;
10910 #endif
10912 #else /* not a 32-bit ABI */
10913 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
10914 #ifdef TARGET_NR_fadvise64_64
10915 case TARGET_NR_fadvise64_64:
10916 #endif
10917 #ifdef TARGET_NR_fadvise64
10918 case TARGET_NR_fadvise64:
10919 #endif
10920 #ifdef TARGET_S390X
10921 switch (arg4) {
10922 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
10923 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
10924 case 6: arg4 = POSIX_FADV_DONTNEED; break;
10925 case 7: arg4 = POSIX_FADV_NOREUSE; break;
10926 default: break;
10928 #endif
10929 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
10930 break;
10931 #endif
10932 #endif /* end of 64-bit ABI fadvise handling */
10934 #ifdef TARGET_NR_madvise
10935 case TARGET_NR_madvise:
10936 /* A straight passthrough may not be safe because qemu sometimes
10937 turns private file-backed mappings into anonymous mappings.
10938 This will break MADV_DONTNEED.
10939 This is a hint, so ignoring and returning success is ok. */
10940 ret = get_errno(0);
10941 break;
10942 #endif
10943 #if TARGET_ABI_BITS == 32
10944 case TARGET_NR_fcntl64:
10946 int cmd;
10947 struct flock64 fl;
10948 from_flock64_fn *copyfrom = copy_from_user_flock64;
10949 to_flock64_fn *copyto = copy_to_user_flock64;
10951 #ifdef TARGET_ARM
10952 if (((CPUARMState *)cpu_env)->eabi) {
10953 copyfrom = copy_from_user_eabi_flock64;
10954 copyto = copy_to_user_eabi_flock64;
10956 #endif
10958 cmd = target_to_host_fcntl_cmd(arg2);
10959 if (cmd == -TARGET_EINVAL) {
10960 ret = cmd;
10961 break;
10964 switch(arg2) {
10965 case TARGET_F_GETLK64:
10966 ret = copyfrom(&fl, arg3);
10967 if (ret) {
10968 break;
10970 ret = get_errno(fcntl(arg1, cmd, &fl));
10971 if (ret == 0) {
10972 ret = copyto(arg3, &fl);
10974 break;
10976 case TARGET_F_SETLK64:
10977 case TARGET_F_SETLKW64:
10978 ret = copyfrom(&fl, arg3);
10979 if (ret) {
10980 break;
10982 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
10983 break;
10984 default:
10985 ret = do_fcntl(arg1, arg2, arg3);
10986 break;
10988 break;
10990 #endif
10991 #ifdef TARGET_NR_cacheflush
10992 case TARGET_NR_cacheflush:
10993 /* self-modifying code is handled automatically, so nothing needed */
10994 ret = 0;
10995 break;
10996 #endif
10997 #ifdef TARGET_NR_security
10998 case TARGET_NR_security:
10999 goto unimplemented;
11000 #endif
11001 #ifdef TARGET_NR_getpagesize
11002 case TARGET_NR_getpagesize:
11003 ret = TARGET_PAGE_SIZE;
11004 break;
11005 #endif
11006 case TARGET_NR_gettid:
11007 ret = get_errno(gettid());
11008 break;
11009 #ifdef TARGET_NR_readahead
11010 case TARGET_NR_readahead:
11011 #if TARGET_ABI_BITS == 32
11012 if (regpairs_aligned(cpu_env)) {
11013 arg2 = arg3;
11014 arg3 = arg4;
11015 arg4 = arg5;
11017 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
11018 #else
11019 ret = get_errno(readahead(arg1, arg2, arg3));
11020 #endif
11021 break;
11022 #endif
11023 #ifdef CONFIG_ATTR
11024 #ifdef TARGET_NR_setxattr
11025 case TARGET_NR_listxattr:
11026 case TARGET_NR_llistxattr:
11028 void *p, *b = 0;
11029 if (arg2) {
11030 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11031 if (!b) {
11032 ret = -TARGET_EFAULT;
11033 break;
11036 p = lock_user_string(arg1);
11037 if (p) {
11038 if (num == TARGET_NR_listxattr) {
11039 ret = get_errno(listxattr(p, b, arg3));
11040 } else {
11041 ret = get_errno(llistxattr(p, b, arg3));
11043 } else {
11044 ret = -TARGET_EFAULT;
11046 unlock_user(p, arg1, 0);
11047 unlock_user(b, arg2, arg3);
11048 break;
11050 case TARGET_NR_flistxattr:
11052 void *b = 0;
11053 if (arg2) {
11054 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11055 if (!b) {
11056 ret = -TARGET_EFAULT;
11057 break;
11060 ret = get_errno(flistxattr(arg1, b, arg3));
11061 unlock_user(b, arg2, arg3);
11062 break;
11064 case TARGET_NR_setxattr:
11065 case TARGET_NR_lsetxattr:
11067 void *p, *n, *v = 0;
11068 if (arg3) {
11069 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11070 if (!v) {
11071 ret = -TARGET_EFAULT;
11072 break;
11075 p = lock_user_string(arg1);
11076 n = lock_user_string(arg2);
11077 if (p && n) {
11078 if (num == TARGET_NR_setxattr) {
11079 ret = get_errno(setxattr(p, n, v, arg4, arg5));
11080 } else {
11081 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11083 } else {
11084 ret = -TARGET_EFAULT;
11086 unlock_user(p, arg1, 0);
11087 unlock_user(n, arg2, 0);
11088 unlock_user(v, arg3, 0);
11090 break;
11091 case TARGET_NR_fsetxattr:
11093 void *n, *v = 0;
11094 if (arg3) {
11095 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11096 if (!v) {
11097 ret = -TARGET_EFAULT;
11098 break;
11101 n = lock_user_string(arg2);
11102 if (n) {
11103 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11104 } else {
11105 ret = -TARGET_EFAULT;
11107 unlock_user(n, arg2, 0);
11108 unlock_user(v, arg3, 0);
11110 break;
11111 case TARGET_NR_getxattr:
11112 case TARGET_NR_lgetxattr:
11114 void *p, *n, *v = 0;
11115 if (arg3) {
11116 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11117 if (!v) {
11118 ret = -TARGET_EFAULT;
11119 break;
11122 p = lock_user_string(arg1);
11123 n = lock_user_string(arg2);
11124 if (p && n) {
11125 if (num == TARGET_NR_getxattr) {
11126 ret = get_errno(getxattr(p, n, v, arg4));
11127 } else {
11128 ret = get_errno(lgetxattr(p, n, v, arg4));
11130 } else {
11131 ret = -TARGET_EFAULT;
11133 unlock_user(p, arg1, 0);
11134 unlock_user(n, arg2, 0);
11135 unlock_user(v, arg3, arg4);
11137 break;
11138 case TARGET_NR_fgetxattr:
11140 void *n, *v = 0;
11141 if (arg3) {
11142 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11143 if (!v) {
11144 ret = -TARGET_EFAULT;
11145 break;
11148 n = lock_user_string(arg2);
11149 if (n) {
11150 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11151 } else {
11152 ret = -TARGET_EFAULT;
11154 unlock_user(n, arg2, 0);
11155 unlock_user(v, arg3, arg4);
11157 break;
11158 case TARGET_NR_removexattr:
11159 case TARGET_NR_lremovexattr:
11161 void *p, *n;
11162 p = lock_user_string(arg1);
11163 n = lock_user_string(arg2);
11164 if (p && n) {
11165 if (num == TARGET_NR_removexattr) {
11166 ret = get_errno(removexattr(p, n));
11167 } else {
11168 ret = get_errno(lremovexattr(p, n));
11170 } else {
11171 ret = -TARGET_EFAULT;
11173 unlock_user(p, arg1, 0);
11174 unlock_user(n, arg2, 0);
11176 break;
11177 case TARGET_NR_fremovexattr:
11179 void *n;
11180 n = lock_user_string(arg2);
11181 if (n) {
11182 ret = get_errno(fremovexattr(arg1, n));
11183 } else {
11184 ret = -TARGET_EFAULT;
11186 unlock_user(n, arg2, 0);
11188 break;
11189 #endif
11190 #endif /* CONFIG_ATTR */
11191 #ifdef TARGET_NR_set_thread_area
11192 case TARGET_NR_set_thread_area:
11193 #if defined(TARGET_MIPS)
11194 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11195 ret = 0;
11196 break;
11197 #elif defined(TARGET_CRIS)
11198 if (arg1 & 0xff)
11199 ret = -TARGET_EINVAL;
11200 else {
11201 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11202 ret = 0;
11204 break;
11205 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11206 ret = do_set_thread_area(cpu_env, arg1);
11207 break;
11208 #elif defined(TARGET_M68K)
11210 TaskState *ts = cpu->opaque;
11211 ts->tp_value = arg1;
11212 ret = 0;
11213 break;
11215 #else
11216 goto unimplemented_nowarn;
11217 #endif
11218 #endif
11219 #ifdef TARGET_NR_get_thread_area
11220 case TARGET_NR_get_thread_area:
11221 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11222 ret = do_get_thread_area(cpu_env, arg1);
11223 break;
11224 #elif defined(TARGET_M68K)
11226 TaskState *ts = cpu->opaque;
11227 ret = ts->tp_value;
11228 break;
11230 #else
11231 goto unimplemented_nowarn;
11232 #endif
11233 #endif
11234 #ifdef TARGET_NR_getdomainname
11235 case TARGET_NR_getdomainname:
11236 goto unimplemented_nowarn;
11237 #endif
11239 #ifdef TARGET_NR_clock_gettime
11240 case TARGET_NR_clock_gettime:
11242 struct timespec ts;
11243 ret = get_errno(clock_gettime(arg1, &ts));
11244 if (!is_error(ret)) {
11245 host_to_target_timespec(arg2, &ts);
11247 break;
11249 #endif
11250 #ifdef TARGET_NR_clock_getres
11251 case TARGET_NR_clock_getres:
11253 struct timespec ts;
11254 ret = get_errno(clock_getres(arg1, &ts));
11255 if (!is_error(ret)) {
11256 host_to_target_timespec(arg2, &ts);
11258 break;
11260 #endif
11261 #ifdef TARGET_NR_clock_nanosleep
11262 case TARGET_NR_clock_nanosleep:
11264 struct timespec ts;
11265 target_to_host_timespec(&ts, arg3);
11266 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11267 &ts, arg4 ? &ts : NULL));
11268 if (arg4)
11269 host_to_target_timespec(arg4, &ts);
11271 #if defined(TARGET_PPC)
11272 /* clock_nanosleep is odd in that it returns positive errno values.
11273 * On PPC, CR0 bit 3 should be set in such a situation. */
11274 if (ret && ret != -TARGET_ERESTARTSYS) {
11275 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11277 #endif
11278 break;
11280 #endif
11282 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11283 case TARGET_NR_set_tid_address:
11284 ret = get_errno(set_tid_address((int *)g2h(arg1)));
11285 break;
11286 #endif
11288 case TARGET_NR_tkill:
11289 ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11290 break;
11292 case TARGET_NR_tgkill:
11293 ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
11294 target_to_host_signal(arg3)));
11295 break;
11297 #ifdef TARGET_NR_set_robust_list
11298 case TARGET_NR_set_robust_list:
11299 case TARGET_NR_get_robust_list:
11300 /* The ABI for supporting robust futexes has userspace pass
11301 * the kernel a pointer to a linked list which is updated by
11302 * userspace after the syscall; the list is walked by the kernel
11303 * when the thread exits. Since the linked list in QEMU guest
11304 * memory isn't a valid linked list for the host and we have
11305 * no way to reliably intercept the thread-death event, we can't
11306 * support these. Silently return ENOSYS so that guest userspace
11307 * falls back to a non-robust futex implementation (which should
11308 * be OK except in the corner case of the guest crashing while
11309 * holding a mutex that is shared with another process via
11310 * shared memory).
11312 goto unimplemented_nowarn;
11313 #endif
11315 #if defined(TARGET_NR_utimensat)
11316 case TARGET_NR_utimensat:
11318 struct timespec *tsp, ts[2];
11319 if (!arg3) {
11320 tsp = NULL;
11321 } else {
11322 target_to_host_timespec(ts, arg3);
11323 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11324 tsp = ts;
11326 if (!arg2)
11327 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11328 else {
11329 if (!(p = lock_user_string(arg2))) {
11330 ret = -TARGET_EFAULT;
11331 goto fail;
11333 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11334 unlock_user(p, arg2, 0);
11337 break;
11338 #endif
11339 case TARGET_NR_futex:
11340 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11341 break;
11342 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11343 case TARGET_NR_inotify_init:
11344 ret = get_errno(sys_inotify_init());
11345 break;
11346 #endif
11347 #ifdef CONFIG_INOTIFY1
11348 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11349 case TARGET_NR_inotify_init1:
11350 ret = get_errno(sys_inotify_init1(arg1));
11351 break;
11352 #endif
11353 #endif
11354 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11355 case TARGET_NR_inotify_add_watch:
11356 p = lock_user_string(arg2);
11357 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11358 unlock_user(p, arg2, 0);
11359 break;
11360 #endif
11361 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11362 case TARGET_NR_inotify_rm_watch:
11363 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
11364 break;
11365 #endif
11367 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11368 case TARGET_NR_mq_open:
11370 struct mq_attr posix_mq_attr, *attrp;
11372 p = lock_user_string(arg1 - 1);
11373 if (arg4 != 0) {
11374 copy_from_user_mq_attr (&posix_mq_attr, arg4);
11375 attrp = &posix_mq_attr;
11376 } else {
11377 attrp = 0;
11379 ret = get_errno(mq_open(p, arg2, arg3, attrp));
11380 unlock_user (p, arg1, 0);
11382 break;
11384 case TARGET_NR_mq_unlink:
11385 p = lock_user_string(arg1 - 1);
11386 if (!p) {
11387 ret = -TARGET_EFAULT;
11388 break;
11390 ret = get_errno(mq_unlink(p));
11391 unlock_user (p, arg1, 0);
11392 break;
11394 case TARGET_NR_mq_timedsend:
11396 struct timespec ts;
11398 p = lock_user (VERIFY_READ, arg2, arg3, 1);
11399 if (arg5 != 0) {
11400 target_to_host_timespec(&ts, arg5);
11401 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11402 host_to_target_timespec(arg5, &ts);
11403 } else {
11404 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11406 unlock_user (p, arg2, arg3);
11408 break;
11410 case TARGET_NR_mq_timedreceive:
11412 struct timespec ts;
11413 unsigned int prio;
11415 p = lock_user (VERIFY_READ, arg2, arg3, 1);
11416 if (arg5 != 0) {
11417 target_to_host_timespec(&ts, arg5);
11418 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11419 &prio, &ts));
11420 host_to_target_timespec(arg5, &ts);
11421 } else {
11422 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11423 &prio, NULL));
11425 unlock_user (p, arg2, arg3);
11426 if (arg4 != 0)
11427 put_user_u32(prio, arg4);
11429 break;
11431 /* Not implemented for now... */
11432 /* case TARGET_NR_mq_notify: */
11433 /* break; */
11435 case TARGET_NR_mq_getsetattr:
11437 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11438 ret = 0;
11439 if (arg3 != 0) {
11440 ret = mq_getattr(arg1, &posix_mq_attr_out);
11441 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11443 if (arg2 != 0) {
11444 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11445 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
11449 break;
11450 #endif
11452 #ifdef CONFIG_SPLICE
11453 #ifdef TARGET_NR_tee
11454 case TARGET_NR_tee:
11456 ret = get_errno(tee(arg1,arg2,arg3,arg4));
11458 break;
11459 #endif
11460 #ifdef TARGET_NR_splice
11461 case TARGET_NR_splice:
11463 loff_t loff_in, loff_out;
11464 loff_t *ploff_in = NULL, *ploff_out = NULL;
11465 if (arg2) {
11466 if (get_user_u64(loff_in, arg2)) {
11467 goto efault;
11469 ploff_in = &loff_in;
11471 if (arg4) {
11472 if (get_user_u64(loff_out, arg4)) {
11473 goto efault;
11475 ploff_out = &loff_out;
11477 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11478 if (arg2) {
11479 if (put_user_u64(loff_in, arg2)) {
11480 goto efault;
11483 if (arg4) {
11484 if (put_user_u64(loff_out, arg4)) {
11485 goto efault;
11489 break;
11490 #endif
11491 #ifdef TARGET_NR_vmsplice
11492 case TARGET_NR_vmsplice:
11494 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11495 if (vec != NULL) {
11496 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11497 unlock_iovec(vec, arg2, arg3, 0);
11498 } else {
11499 ret = -host_to_target_errno(errno);
11502 break;
11503 #endif
11504 #endif /* CONFIG_SPLICE */
11505 #ifdef CONFIG_EVENTFD
11506 #if defined(TARGET_NR_eventfd)
11507 case TARGET_NR_eventfd:
11508 ret = get_errno(eventfd(arg1, 0));
11509 fd_trans_unregister(ret);
11510 break;
11511 #endif
11512 #if defined(TARGET_NR_eventfd2)
11513 case TARGET_NR_eventfd2:
11515 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11516 if (arg2 & TARGET_O_NONBLOCK) {
11517 host_flags |= O_NONBLOCK;
11519 if (arg2 & TARGET_O_CLOEXEC) {
11520 host_flags |= O_CLOEXEC;
11522 ret = get_errno(eventfd(arg1, host_flags));
11523 fd_trans_unregister(ret);
11524 break;
11526 #endif
11527 #endif /* CONFIG_EVENTFD */
11528 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11529 case TARGET_NR_fallocate:
11530 #if TARGET_ABI_BITS == 32
11531 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11532 target_offset64(arg5, arg6)));
11533 #else
11534 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11535 #endif
11536 break;
11537 #endif
11538 #if defined(CONFIG_SYNC_FILE_RANGE)
11539 #if defined(TARGET_NR_sync_file_range)
11540 case TARGET_NR_sync_file_range:
11541 #if TARGET_ABI_BITS == 32
11542 #if defined(TARGET_MIPS)
11543 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11544 target_offset64(arg5, arg6), arg7));
11545 #else
11546 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11547 target_offset64(arg4, arg5), arg6));
11548 #endif /* !TARGET_MIPS */
11549 #else
11550 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11551 #endif
11552 break;
11553 #endif
11554 #if defined(TARGET_NR_sync_file_range2)
11555 case TARGET_NR_sync_file_range2:
11556 /* This is like sync_file_range but the arguments are reordered */
11557 #if TARGET_ABI_BITS == 32
11558 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11559 target_offset64(arg5, arg6), arg2));
11560 #else
11561 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11562 #endif
11563 break;
11564 #endif
11565 #endif
11566 #if defined(TARGET_NR_signalfd4)
11567 case TARGET_NR_signalfd4:
11568 ret = do_signalfd4(arg1, arg2, arg4);
11569 break;
11570 #endif
11571 #if defined(TARGET_NR_signalfd)
11572 case TARGET_NR_signalfd:
11573 ret = do_signalfd4(arg1, arg2, 0);
11574 break;
11575 #endif
11576 #if defined(CONFIG_EPOLL)
11577 #if defined(TARGET_NR_epoll_create)
11578 case TARGET_NR_epoll_create:
11579 ret = get_errno(epoll_create(arg1));
11580 break;
11581 #endif
11582 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11583 case TARGET_NR_epoll_create1:
11584 ret = get_errno(epoll_create1(arg1));
11585 break;
11586 #endif
11587 #if defined(TARGET_NR_epoll_ctl)
11588 case TARGET_NR_epoll_ctl:
11590 struct epoll_event ep;
11591 struct epoll_event *epp = 0;
11592 if (arg4) {
11593 struct target_epoll_event *target_ep;
11594 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11595 goto efault;
11597 ep.events = tswap32(target_ep->events);
11598 /* The epoll_data_t union is just opaque data to the kernel,
11599 * so we transfer all 64 bits across and need not worry what
11600 * actual data type it is.
11602 ep.data.u64 = tswap64(target_ep->data.u64);
11603 unlock_user_struct(target_ep, arg4, 0);
11604 epp = &ep;
11606 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11607 break;
11609 #endif
11611 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11612 #if defined(TARGET_NR_epoll_wait)
11613 case TARGET_NR_epoll_wait:
11614 #endif
11615 #if defined(TARGET_NR_epoll_pwait)
11616 case TARGET_NR_epoll_pwait:
11617 #endif
11619 struct target_epoll_event *target_ep;
11620 struct epoll_event *ep;
11621 int epfd = arg1;
11622 int maxevents = arg3;
11623 int timeout = arg4;
11625 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11626 ret = -TARGET_EINVAL;
11627 break;
11630 target_ep = lock_user(VERIFY_WRITE, arg2,
11631 maxevents * sizeof(struct target_epoll_event), 1);
11632 if (!target_ep) {
11633 goto efault;
11636 ep = alloca(maxevents * sizeof(struct epoll_event));
11638 switch (num) {
11639 #if defined(TARGET_NR_epoll_pwait)
11640 case TARGET_NR_epoll_pwait:
11642 target_sigset_t *target_set;
11643 sigset_t _set, *set = &_set;
11645 if (arg5) {
11646 if (arg6 != sizeof(target_sigset_t)) {
11647 ret = -TARGET_EINVAL;
11648 break;
11651 target_set = lock_user(VERIFY_READ, arg5,
11652 sizeof(target_sigset_t), 1);
11653 if (!target_set) {
11654 unlock_user(target_ep, arg2, 0);
11655 goto efault;
11657 target_to_host_sigset(set, target_set);
11658 unlock_user(target_set, arg5, 0);
11659 } else {
11660 set = NULL;
11663 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11664 set, SIGSET_T_SIZE));
11665 break;
11667 #endif
11668 #if defined(TARGET_NR_epoll_wait)
11669 case TARGET_NR_epoll_wait:
11670 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11671 NULL, 0));
11672 break;
11673 #endif
11674 default:
11675 ret = -TARGET_ENOSYS;
11677 if (!is_error(ret)) {
11678 int i;
11679 for (i = 0; i < ret; i++) {
11680 target_ep[i].events = tswap32(ep[i].events);
11681 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11684 unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
11685 break;
11687 #endif
11688 #endif
11689 #ifdef TARGET_NR_prlimit64
11690 case TARGET_NR_prlimit64:
11692 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11693 struct target_rlimit64 *target_rnew, *target_rold;
11694 struct host_rlimit64 rnew, rold, *rnewp = 0;
11695 int resource = target_to_host_resource(arg2);
11696 if (arg3) {
11697 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11698 goto efault;
11700 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11701 rnew.rlim_max = tswap64(target_rnew->rlim_max);
11702 unlock_user_struct(target_rnew, arg3, 0);
11703 rnewp = &rnew;
11706 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11707 if (!is_error(ret) && arg4) {
11708 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11709 goto efault;
11711 target_rold->rlim_cur = tswap64(rold.rlim_cur);
11712 target_rold->rlim_max = tswap64(rold.rlim_max);
11713 unlock_user_struct(target_rold, arg4, 1);
11715 break;
11717 #endif
11718 #ifdef TARGET_NR_gethostname
11719 case TARGET_NR_gethostname:
11721 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11722 if (name) {
11723 ret = get_errno(gethostname(name, arg2));
11724 unlock_user(name, arg1, arg2);
11725 } else {
11726 ret = -TARGET_EFAULT;
11728 break;
11730 #endif
11731 #ifdef TARGET_NR_atomic_cmpxchg_32
11732 case TARGET_NR_atomic_cmpxchg_32:
11734 /* should use start_exclusive from main.c */
11735 abi_ulong mem_value;
11736 if (get_user_u32(mem_value, arg6)) {
11737 target_siginfo_t info;
11738 info.si_signo = SIGSEGV;
11739 info.si_errno = 0;
11740 info.si_code = TARGET_SEGV_MAPERR;
11741 info._sifields._sigfault._addr = arg6;
11742 queue_signal((CPUArchState *)cpu_env, info.si_signo,
11743 QEMU_SI_FAULT, &info);
11744 ret = 0xdeadbeef;
11747 if (mem_value == arg2)
11748 put_user_u32(arg1, arg6);
11749 ret = mem_value;
11750 break;
11752 #endif
11753 #ifdef TARGET_NR_atomic_barrier
11754 case TARGET_NR_atomic_barrier:
11756 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
11757 ret = 0;
11758 break;
11760 #endif
11762 #ifdef TARGET_NR_timer_create
11763 case TARGET_NR_timer_create:
11765 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11767 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
11769 int clkid = arg1;
11770 int timer_index = next_free_host_timer();
11772 if (timer_index < 0) {
11773 ret = -TARGET_EAGAIN;
11774 } else {
11775 timer_t *phtimer = g_posix_timers + timer_index;
11777 if (arg2) {
11778 phost_sevp = &host_sevp;
11779 ret = target_to_host_sigevent(phost_sevp, arg2);
11780 if (ret != 0) {
11781 break;
11785 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
11786 if (ret) {
11787 phtimer = NULL;
11788 } else {
11789 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
11790 goto efault;
11794 break;
11796 #endif
11798 #ifdef TARGET_NR_timer_settime
11799 case TARGET_NR_timer_settime:
11801 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
11802 * struct itimerspec * old_value */
11803 target_timer_t timerid = get_timer_id(arg1);
11805 if (timerid < 0) {
11806 ret = timerid;
11807 } else if (arg3 == 0) {
11808 ret = -TARGET_EINVAL;
11809 } else {
11810 timer_t htimer = g_posix_timers[timerid];
11811 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
11813 target_to_host_itimerspec(&hspec_new, arg3);
11814 ret = get_errno(
11815 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
11816 host_to_target_itimerspec(arg2, &hspec_old);
11818 break;
11820 #endif
11822 #ifdef TARGET_NR_timer_gettime
11823 case TARGET_NR_timer_gettime:
11825 /* args: timer_t timerid, struct itimerspec *curr_value */
11826 target_timer_t timerid = get_timer_id(arg1);
11828 if (timerid < 0) {
11829 ret = timerid;
11830 } else if (!arg2) {
11831 ret = -TARGET_EFAULT;
11832 } else {
11833 timer_t htimer = g_posix_timers[timerid];
11834 struct itimerspec hspec;
11835 ret = get_errno(timer_gettime(htimer, &hspec));
11837 if (host_to_target_itimerspec(arg2, &hspec)) {
11838 ret = -TARGET_EFAULT;
11841 break;
11843 #endif
11845 #ifdef TARGET_NR_timer_getoverrun
11846 case TARGET_NR_timer_getoverrun:
11848 /* args: timer_t timerid */
11849 target_timer_t timerid = get_timer_id(arg1);
11851 if (timerid < 0) {
11852 ret = timerid;
11853 } else {
11854 timer_t htimer = g_posix_timers[timerid];
11855 ret = get_errno(timer_getoverrun(htimer));
11857 fd_trans_unregister(ret);
11858 break;
11860 #endif
11862 #ifdef TARGET_NR_timer_delete
11863 case TARGET_NR_timer_delete:
11865 /* args: timer_t timerid */
11866 target_timer_t timerid = get_timer_id(arg1);
11868 if (timerid < 0) {
11869 ret = timerid;
11870 } else {
11871 timer_t htimer = g_posix_timers[timerid];
11872 ret = get_errno(timer_delete(htimer));
11873 g_posix_timers[timerid] = 0;
11875 break;
11877 #endif
11879 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
11880 case TARGET_NR_timerfd_create:
11881 ret = get_errno(timerfd_create(arg1,
11882 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
11883 break;
11884 #endif
11886 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
11887 case TARGET_NR_timerfd_gettime:
11889 struct itimerspec its_curr;
11891 ret = get_errno(timerfd_gettime(arg1, &its_curr));
11893 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
11894 goto efault;
11897 break;
11898 #endif
11900 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
11901 case TARGET_NR_timerfd_settime:
11903 struct itimerspec its_new, its_old, *p_new;
11905 if (arg3) {
11906 if (target_to_host_itimerspec(&its_new, arg3)) {
11907 goto efault;
11909 p_new = &its_new;
11910 } else {
11911 p_new = NULL;
11914 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
11916 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
11917 goto efault;
11920 break;
11921 #endif
11923 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
11924 case TARGET_NR_ioprio_get:
11925 ret = get_errno(ioprio_get(arg1, arg2));
11926 break;
11927 #endif
11929 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
11930 case TARGET_NR_ioprio_set:
11931 ret = get_errno(ioprio_set(arg1, arg2, arg3));
11932 break;
11933 #endif
11935 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
11936 case TARGET_NR_setns:
11937 ret = get_errno(setns(arg1, arg2));
11938 break;
11939 #endif
11940 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
11941 case TARGET_NR_unshare:
11942 ret = get_errno(unshare(arg1));
11943 break;
11944 #endif
11946 default:
11947 unimplemented:
11948 gemu_log("qemu: Unsupported syscall: %d\n", num);
11949 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
11950 unimplemented_nowarn:
11951 #endif
11952 ret = -TARGET_ENOSYS;
11953 break;
11955 fail:
11956 #ifdef DEBUG
11957 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
11958 #endif
11959 if(do_strace)
11960 print_syscall_ret(num, ret);
11961 trace_guest_user_syscall_ret(cpu, num, ret);
11962 return ret;
11963 efault:
11964 ret = -TARGET_EFAULT;
11965 goto fail;