net: rtl8139: limit processing of ring descriptors
[qemu/ar7.git] / linux-user / syscall.c
blobdb697c0bf38a2a4942a07fe8c3296987350f24be
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #ifdef __ia64__
40 int __clone2(int (*fn)(void *), void *child_stack_base,
41 size_t stack_size, int flags, void *arg, ...);
42 #endif
43 #include <sys/socket.h>
44 #include <sys/un.h>
45 #include <sys/uio.h>
46 #include <poll.h>
47 #include <sys/times.h>
48 #include <sys/shm.h>
49 #include <sys/sem.h>
50 #include <sys/statfs.h>
51 #include <time.h>
52 #include <utime.h>
53 #include <sys/sysinfo.h>
54 #include <sys/signalfd.h>
55 //#include <sys/user.h>
56 #include <netinet/ip.h>
57 #include <netinet/tcp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include "qemu-common.h"
61 #ifdef CONFIG_TIMERFD
62 #include <sys/timerfd.h>
63 #endif
64 #ifdef TARGET_GPROF
65 #include <sys/gmon.h>
66 #endif
67 #ifdef CONFIG_EVENTFD
68 #include <sys/eventfd.h>
69 #endif
70 #ifdef CONFIG_EPOLL
71 #include <sys/epoll.h>
72 #endif
73 #ifdef CONFIG_ATTR
74 #include "qemu/xattr.h"
75 #endif
76 #ifdef CONFIG_SENDFILE
77 #include <sys/sendfile.h>
78 #endif
80 #define termios host_termios
81 #define winsize host_winsize
82 #define termio host_termio
83 #define sgttyb host_sgttyb /* same as target */
84 #define tchars host_tchars /* same as target */
85 #define ltchars host_ltchars /* same as target */
87 #include <linux/termios.h>
88 #include <linux/unistd.h>
89 #include <linux/cdrom.h>
90 #include <linux/hdreg.h>
91 #include <linux/soundcard.h>
92 #include <linux/kd.h>
93 #include <linux/mtio.h>
94 #include <linux/fs.h>
95 #if defined(CONFIG_FIEMAP)
96 #include <linux/fiemap.h>
97 #endif
98 #include <linux/fb.h>
99 #include <linux/vt.h>
100 #include <linux/dm-ioctl.h>
101 #include <linux/reboot.h>
102 #include <linux/route.h>
103 #include <linux/filter.h>
104 #include <linux/blkpg.h>
105 #include <netpacket/packet.h>
106 #include <linux/netlink.h>
107 #ifdef CONFIG_RTNETLINK
108 #include <linux/rtnetlink.h>
109 #include <linux/if_bridge.h>
110 #endif
111 #include <linux/audit.h>
112 #include "linux_loop.h"
113 #include "uname.h"
115 #include "qemu.h"
117 #ifndef CLONE_IO
118 #define CLONE_IO 0x80000000 /* Clone io context */
119 #endif
121 /* We can't directly call the host clone syscall, because this will
122 * badly confuse libc (breaking mutexes, for example). So we must
123 * divide clone flags into:
124 * * flag combinations that look like pthread_create()
125 * * flag combinations that look like fork()
126 * * flags we can implement within QEMU itself
127 * * flags we can't support and will return an error for
129 /* For thread creation, all these flags must be present; for
130 * fork, none must be present.
132 #define CLONE_THREAD_FLAGS \
133 (CLONE_VM | CLONE_FS | CLONE_FILES | \
134 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
136 /* These flags are ignored:
137 * CLONE_DETACHED is now ignored by the kernel;
138 * CLONE_IO is just an optimisation hint to the I/O scheduler
140 #define CLONE_IGNORED_FLAGS \
141 (CLONE_DETACHED | CLONE_IO)
143 /* Flags for fork which we can implement within QEMU itself */
144 #define CLONE_OPTIONAL_FORK_FLAGS \
145 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
146 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
148 /* Flags for thread creation which we can implement within QEMU itself */
149 #define CLONE_OPTIONAL_THREAD_FLAGS \
150 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
151 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
153 #define CLONE_INVALID_FORK_FLAGS \
154 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
156 #define CLONE_INVALID_THREAD_FLAGS \
157 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
158 CLONE_IGNORED_FLAGS))
160 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
161 * have almost all been allocated. We cannot support any of
162 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
163 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
164 * The checks against the invalid thread masks above will catch these.
165 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
168 //#define DEBUG
169 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
170 * once. This exercises the codepaths for restart.
172 //#define DEBUG_ERESTARTSYS
174 //#include <linux/msdos_fs.h>
175 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
176 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
178 #undef _syscall0
179 #undef _syscall1
180 #undef _syscall2
181 #undef _syscall3
182 #undef _syscall4
183 #undef _syscall5
184 #undef _syscall6
186 #define _syscall0(type,name) \
187 static type name (void) \
189 return syscall(__NR_##name); \
192 #define _syscall1(type,name,type1,arg1) \
193 static type name (type1 arg1) \
195 return syscall(__NR_##name, arg1); \
198 #define _syscall2(type,name,type1,arg1,type2,arg2) \
199 static type name (type1 arg1,type2 arg2) \
201 return syscall(__NR_##name, arg1, arg2); \
204 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
205 static type name (type1 arg1,type2 arg2,type3 arg3) \
207 return syscall(__NR_##name, arg1, arg2, arg3); \
210 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
211 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
213 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
216 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
217 type5,arg5) \
218 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
220 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
224 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
225 type5,arg5,type6,arg6) \
226 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
227 type6 arg6) \
229 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
233 #define __NR_sys_uname __NR_uname
234 #define __NR_sys_getcwd1 __NR_getcwd
235 #define __NR_sys_getdents __NR_getdents
236 #define __NR_sys_getdents64 __NR_getdents64
237 #define __NR_sys_getpriority __NR_getpriority
238 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
239 #define __NR_sys_syslog __NR_syslog
240 #define __NR_sys_futex __NR_futex
241 #define __NR_sys_inotify_init __NR_inotify_init
242 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
243 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
245 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
246 defined(__s390x__)
247 #define __NR__llseek __NR_lseek
248 #endif
250 /* Newer kernel ports have llseek() instead of _llseek() */
251 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
252 #define TARGET_NR__llseek TARGET_NR_llseek
253 #endif
255 #ifdef __NR_gettid
256 _syscall0(int, gettid)
257 #else
258 /* This is a replacement for the host gettid() and must return a host
259 errno. */
260 static int gettid(void) {
261 return -ENOSYS;
263 #endif
264 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
265 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
266 #endif
267 #if !defined(__NR_getdents) || \
268 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
269 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
270 #endif
271 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
272 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
273 loff_t *, res, uint, wh);
274 #endif
275 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
276 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
277 #ifdef __NR_exit_group
278 _syscall1(int,exit_group,int,error_code)
279 #endif
280 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
281 _syscall1(int,set_tid_address,int *,tidptr)
282 #endif
283 #if defined(TARGET_NR_futex) && defined(__NR_futex)
284 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
285 const struct timespec *,timeout,int *,uaddr2,int,val3)
286 #endif
287 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
288 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
289 unsigned long *, user_mask_ptr);
290 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
291 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
292 unsigned long *, user_mask_ptr);
293 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
294 void *, arg);
295 _syscall2(int, capget, struct __user_cap_header_struct *, header,
296 struct __user_cap_data_struct *, data);
297 _syscall2(int, capset, struct __user_cap_header_struct *, header,
298 struct __user_cap_data_struct *, data);
299 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
300 _syscall2(int, ioprio_get, int, which, int, who)
301 #endif
302 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
303 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
304 #endif
305 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
306 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
307 #endif
309 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
310 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
311 unsigned long, idx1, unsigned long, idx2)
312 #endif
314 static bitmask_transtbl fcntl_flags_tbl[] = {
315 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
316 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
317 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
318 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
319 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
320 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
321 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
322 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
323 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
324 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
325 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
326 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
327 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
328 #if defined(O_DIRECT)
329 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
330 #endif
331 #if defined(O_NOATIME)
332 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
333 #endif
334 #if defined(O_CLOEXEC)
335 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
336 #endif
337 #if defined(O_PATH)
338 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
339 #endif
340 /* Don't terminate the list prematurely on 64-bit host+guest. */
341 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
342 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
343 #endif
344 { 0, 0, 0, 0 }
347 enum {
348 QEMU_IFLA_BR_UNSPEC,
349 QEMU_IFLA_BR_FORWARD_DELAY,
350 QEMU_IFLA_BR_HELLO_TIME,
351 QEMU_IFLA_BR_MAX_AGE,
352 QEMU_IFLA_BR_AGEING_TIME,
353 QEMU_IFLA_BR_STP_STATE,
354 QEMU_IFLA_BR_PRIORITY,
355 QEMU_IFLA_BR_VLAN_FILTERING,
356 QEMU_IFLA_BR_VLAN_PROTOCOL,
357 QEMU_IFLA_BR_GROUP_FWD_MASK,
358 QEMU_IFLA_BR_ROOT_ID,
359 QEMU_IFLA_BR_BRIDGE_ID,
360 QEMU_IFLA_BR_ROOT_PORT,
361 QEMU_IFLA_BR_ROOT_PATH_COST,
362 QEMU_IFLA_BR_TOPOLOGY_CHANGE,
363 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
364 QEMU_IFLA_BR_HELLO_TIMER,
365 QEMU_IFLA_BR_TCN_TIMER,
366 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER,
367 QEMU_IFLA_BR_GC_TIMER,
368 QEMU_IFLA_BR_GROUP_ADDR,
369 QEMU_IFLA_BR_FDB_FLUSH,
370 QEMU_IFLA_BR_MCAST_ROUTER,
371 QEMU_IFLA_BR_MCAST_SNOOPING,
372 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR,
373 QEMU_IFLA_BR_MCAST_QUERIER,
374 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY,
375 QEMU_IFLA_BR_MCAST_HASH_MAX,
376 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT,
377 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT,
378 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL,
379 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL,
380 QEMU_IFLA_BR_MCAST_QUERIER_INTVL,
381 QEMU_IFLA_BR_MCAST_QUERY_INTVL,
382 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
383 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
384 QEMU_IFLA_BR_NF_CALL_IPTABLES,
385 QEMU_IFLA_BR_NF_CALL_IP6TABLES,
386 QEMU_IFLA_BR_NF_CALL_ARPTABLES,
387 QEMU_IFLA_BR_VLAN_DEFAULT_PVID,
388 QEMU_IFLA_BR_PAD,
389 QEMU_IFLA_BR_VLAN_STATS_ENABLED,
390 QEMU_IFLA_BR_MCAST_STATS_ENABLED,
391 QEMU___IFLA_BR_MAX,
394 enum {
395 QEMU_IFLA_UNSPEC,
396 QEMU_IFLA_ADDRESS,
397 QEMU_IFLA_BROADCAST,
398 QEMU_IFLA_IFNAME,
399 QEMU_IFLA_MTU,
400 QEMU_IFLA_LINK,
401 QEMU_IFLA_QDISC,
402 QEMU_IFLA_STATS,
403 QEMU_IFLA_COST,
404 QEMU_IFLA_PRIORITY,
405 QEMU_IFLA_MASTER,
406 QEMU_IFLA_WIRELESS,
407 QEMU_IFLA_PROTINFO,
408 QEMU_IFLA_TXQLEN,
409 QEMU_IFLA_MAP,
410 QEMU_IFLA_WEIGHT,
411 QEMU_IFLA_OPERSTATE,
412 QEMU_IFLA_LINKMODE,
413 QEMU_IFLA_LINKINFO,
414 QEMU_IFLA_NET_NS_PID,
415 QEMU_IFLA_IFALIAS,
416 QEMU_IFLA_NUM_VF,
417 QEMU_IFLA_VFINFO_LIST,
418 QEMU_IFLA_STATS64,
419 QEMU_IFLA_VF_PORTS,
420 QEMU_IFLA_PORT_SELF,
421 QEMU_IFLA_AF_SPEC,
422 QEMU_IFLA_GROUP,
423 QEMU_IFLA_NET_NS_FD,
424 QEMU_IFLA_EXT_MASK,
425 QEMU_IFLA_PROMISCUITY,
426 QEMU_IFLA_NUM_TX_QUEUES,
427 QEMU_IFLA_NUM_RX_QUEUES,
428 QEMU_IFLA_CARRIER,
429 QEMU_IFLA_PHYS_PORT_ID,
430 QEMU_IFLA_CARRIER_CHANGES,
431 QEMU_IFLA_PHYS_SWITCH_ID,
432 QEMU_IFLA_LINK_NETNSID,
433 QEMU_IFLA_PHYS_PORT_NAME,
434 QEMU_IFLA_PROTO_DOWN,
435 QEMU_IFLA_GSO_MAX_SEGS,
436 QEMU_IFLA_GSO_MAX_SIZE,
437 QEMU_IFLA_PAD,
438 QEMU_IFLA_XDP,
439 QEMU___IFLA_MAX
442 enum {
443 QEMU_IFLA_BRPORT_UNSPEC,
444 QEMU_IFLA_BRPORT_STATE,
445 QEMU_IFLA_BRPORT_PRIORITY,
446 QEMU_IFLA_BRPORT_COST,
447 QEMU_IFLA_BRPORT_MODE,
448 QEMU_IFLA_BRPORT_GUARD,
449 QEMU_IFLA_BRPORT_PROTECT,
450 QEMU_IFLA_BRPORT_FAST_LEAVE,
451 QEMU_IFLA_BRPORT_LEARNING,
452 QEMU_IFLA_BRPORT_UNICAST_FLOOD,
453 QEMU_IFLA_BRPORT_PROXYARP,
454 QEMU_IFLA_BRPORT_LEARNING_SYNC,
455 QEMU_IFLA_BRPORT_PROXYARP_WIFI,
456 QEMU_IFLA_BRPORT_ROOT_ID,
457 QEMU_IFLA_BRPORT_BRIDGE_ID,
458 QEMU_IFLA_BRPORT_DESIGNATED_PORT,
459 QEMU_IFLA_BRPORT_DESIGNATED_COST,
460 QEMU_IFLA_BRPORT_ID,
461 QEMU_IFLA_BRPORT_NO,
462 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
463 QEMU_IFLA_BRPORT_CONFIG_PENDING,
464 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER,
465 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER,
466 QEMU_IFLA_BRPORT_HOLD_TIMER,
467 QEMU_IFLA_BRPORT_FLUSH,
468 QEMU_IFLA_BRPORT_MULTICAST_ROUTER,
469 QEMU_IFLA_BRPORT_PAD,
470 QEMU___IFLA_BRPORT_MAX
473 enum {
474 QEMU_IFLA_INFO_UNSPEC,
475 QEMU_IFLA_INFO_KIND,
476 QEMU_IFLA_INFO_DATA,
477 QEMU_IFLA_INFO_XSTATS,
478 QEMU_IFLA_INFO_SLAVE_KIND,
479 QEMU_IFLA_INFO_SLAVE_DATA,
480 QEMU___IFLA_INFO_MAX,
483 enum {
484 QEMU_IFLA_INET_UNSPEC,
485 QEMU_IFLA_INET_CONF,
486 QEMU___IFLA_INET_MAX,
489 enum {
490 QEMU_IFLA_INET6_UNSPEC,
491 QEMU_IFLA_INET6_FLAGS,
492 QEMU_IFLA_INET6_CONF,
493 QEMU_IFLA_INET6_STATS,
494 QEMU_IFLA_INET6_MCAST,
495 QEMU_IFLA_INET6_CACHEINFO,
496 QEMU_IFLA_INET6_ICMP6STATS,
497 QEMU_IFLA_INET6_TOKEN,
498 QEMU_IFLA_INET6_ADDR_GEN_MODE,
499 QEMU___IFLA_INET6_MAX
502 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
503 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
504 typedef struct TargetFdTrans {
505 TargetFdDataFunc host_to_target_data;
506 TargetFdDataFunc target_to_host_data;
507 TargetFdAddrFunc target_to_host_addr;
508 } TargetFdTrans;
510 static TargetFdTrans **target_fd_trans;
512 static unsigned int target_fd_max;
514 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
516 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
517 return target_fd_trans[fd]->target_to_host_data;
519 return NULL;
522 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
524 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
525 return target_fd_trans[fd]->host_to_target_data;
527 return NULL;
530 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
532 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
533 return target_fd_trans[fd]->target_to_host_addr;
535 return NULL;
538 static void fd_trans_register(int fd, TargetFdTrans *trans)
540 unsigned int oldmax;
542 if (fd >= target_fd_max) {
543 oldmax = target_fd_max;
544 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
545 target_fd_trans = g_renew(TargetFdTrans *,
546 target_fd_trans, target_fd_max);
547 memset((void *)(target_fd_trans + oldmax), 0,
548 (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
550 target_fd_trans[fd] = trans;
553 static void fd_trans_unregister(int fd)
555 if (fd >= 0 && fd < target_fd_max) {
556 target_fd_trans[fd] = NULL;
560 static void fd_trans_dup(int oldfd, int newfd)
562 fd_trans_unregister(newfd);
563 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
564 fd_trans_register(newfd, target_fd_trans[oldfd]);
/* getcwd() wrapper: on success return the path length including the
 * terminating NUL; on failure return -1 (errno is set by getcwd()). */
static int sys_getcwd1(char *buf, size_t size)
{
    char *ret = getcwd(buf, size);

    if (ret == NULL) {
        return -1;   /* errno already set by getcwd() */
    }
    return strlen(ret) + 1;
}
#ifdef TARGET_NR_utimensat
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Host kernel lacks utimensat: the emulated call always fails ENOSYS. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

/* Thin host wrappers so the syscall dispatcher has a uniform sys_* name. */
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
    return inotify_init();
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return inotify_add_watch(fd, pathname, mask);
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return inotify_rm_watch(fd, wd);
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
    return inotify_init1(flags);
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};

_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, };

/* Claim and return the index of a free slot in g_posix_timers, or -1 if
 * all slots are busy. */
static inline int next_free_host_timer(void)
{
    int slot;
    /* FIXME: Does finding the next free slot require a lock? */
    for (slot = 0; slot < ARRAY_SIZE(g_posix_timers); slot++) {
        if (g_posix_timers[slot] == 0) {
            g_posix_timers[slot] = (timer_t) 1;  /* mark slot as taken */
            return slot;
        }
    }
    return -1;
}
#endif
/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env)
{
    return ((CPUARMState *)cpu_env)->eabi == 1;
}
#elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
679 #define ERRNO_TABLE_SIZE 1200
681 /* target_to_host_errno_table[] is initialized from
682 * host_to_target_errno_table[] in syscall_init(). */
683 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
687 * This list is the union of errno values overridden in asm-<arch>/errno.h
688 * minus the errnos that are not actually generic to all archs.
690 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
691 [EAGAIN] = TARGET_EAGAIN,
692 [EIDRM] = TARGET_EIDRM,
693 [ECHRNG] = TARGET_ECHRNG,
694 [EL2NSYNC] = TARGET_EL2NSYNC,
695 [EL3HLT] = TARGET_EL3HLT,
696 [EL3RST] = TARGET_EL3RST,
697 [ELNRNG] = TARGET_ELNRNG,
698 [EUNATCH] = TARGET_EUNATCH,
699 [ENOCSI] = TARGET_ENOCSI,
700 [EL2HLT] = TARGET_EL2HLT,
701 [EDEADLK] = TARGET_EDEADLK,
702 [ENOLCK] = TARGET_ENOLCK,
703 [EBADE] = TARGET_EBADE,
704 [EBADR] = TARGET_EBADR,
705 [EXFULL] = TARGET_EXFULL,
706 [ENOANO] = TARGET_ENOANO,
707 [EBADRQC] = TARGET_EBADRQC,
708 [EBADSLT] = TARGET_EBADSLT,
709 [EBFONT] = TARGET_EBFONT,
710 [ENOSTR] = TARGET_ENOSTR,
711 [ENODATA] = TARGET_ENODATA,
712 [ETIME] = TARGET_ETIME,
713 [ENOSR] = TARGET_ENOSR,
714 [ENONET] = TARGET_ENONET,
715 [ENOPKG] = TARGET_ENOPKG,
716 [EREMOTE] = TARGET_EREMOTE,
717 [ENOLINK] = TARGET_ENOLINK,
718 [EADV] = TARGET_EADV,
719 [ESRMNT] = TARGET_ESRMNT,
720 [ECOMM] = TARGET_ECOMM,
721 [EPROTO] = TARGET_EPROTO,
722 [EDOTDOT] = TARGET_EDOTDOT,
723 [EMULTIHOP] = TARGET_EMULTIHOP,
724 [EBADMSG] = TARGET_EBADMSG,
725 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
726 [EOVERFLOW] = TARGET_EOVERFLOW,
727 [ENOTUNIQ] = TARGET_ENOTUNIQ,
728 [EBADFD] = TARGET_EBADFD,
729 [EREMCHG] = TARGET_EREMCHG,
730 [ELIBACC] = TARGET_ELIBACC,
731 [ELIBBAD] = TARGET_ELIBBAD,
732 [ELIBSCN] = TARGET_ELIBSCN,
733 [ELIBMAX] = TARGET_ELIBMAX,
734 [ELIBEXEC] = TARGET_ELIBEXEC,
735 [EILSEQ] = TARGET_EILSEQ,
736 [ENOSYS] = TARGET_ENOSYS,
737 [ELOOP] = TARGET_ELOOP,
738 [ERESTART] = TARGET_ERESTART,
739 [ESTRPIPE] = TARGET_ESTRPIPE,
740 [ENOTEMPTY] = TARGET_ENOTEMPTY,
741 [EUSERS] = TARGET_EUSERS,
742 [ENOTSOCK] = TARGET_ENOTSOCK,
743 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
744 [EMSGSIZE] = TARGET_EMSGSIZE,
745 [EPROTOTYPE] = TARGET_EPROTOTYPE,
746 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
747 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
748 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
749 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
750 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
751 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
752 [EADDRINUSE] = TARGET_EADDRINUSE,
753 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
754 [ENETDOWN] = TARGET_ENETDOWN,
755 [ENETUNREACH] = TARGET_ENETUNREACH,
756 [ENETRESET] = TARGET_ENETRESET,
757 [ECONNABORTED] = TARGET_ECONNABORTED,
758 [ECONNRESET] = TARGET_ECONNRESET,
759 [ENOBUFS] = TARGET_ENOBUFS,
760 [EISCONN] = TARGET_EISCONN,
761 [ENOTCONN] = TARGET_ENOTCONN,
762 [EUCLEAN] = TARGET_EUCLEAN,
763 [ENOTNAM] = TARGET_ENOTNAM,
764 [ENAVAIL] = TARGET_ENAVAIL,
765 [EISNAM] = TARGET_EISNAM,
766 [EREMOTEIO] = TARGET_EREMOTEIO,
767 [EDQUOT] = TARGET_EDQUOT,
768 [ESHUTDOWN] = TARGET_ESHUTDOWN,
769 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
770 [ETIMEDOUT] = TARGET_ETIMEDOUT,
771 [ECONNREFUSED] = TARGET_ECONNREFUSED,
772 [EHOSTDOWN] = TARGET_EHOSTDOWN,
773 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
774 [EALREADY] = TARGET_EALREADY,
775 [EINPROGRESS] = TARGET_EINPROGRESS,
776 [ESTALE] = TARGET_ESTALE,
777 [ECANCELED] = TARGET_ECANCELED,
778 [ENOMEDIUM] = TARGET_ENOMEDIUM,
779 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
780 #ifdef ENOKEY
781 [ENOKEY] = TARGET_ENOKEY,
782 #endif
783 #ifdef EKEYEXPIRED
784 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
785 #endif
786 #ifdef EKEYREVOKED
787 [EKEYREVOKED] = TARGET_EKEYREVOKED,
788 #endif
789 #ifdef EKEYREJECTED
790 [EKEYREJECTED] = TARGET_EKEYREJECTED,
791 #endif
792 #ifdef EOWNERDEAD
793 [EOWNERDEAD] = TARGET_EOWNERDEAD,
794 #endif
795 #ifdef ENOTRECOVERABLE
796 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
797 #endif
798 #ifdef ENOMSG
799 [ENOMSG] = TARGET_ENOMSG,
800 #endif
803 static inline int host_to_target_errno(int err)
805 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
806 host_to_target_errno_table[err]) {
807 return host_to_target_errno_table[err];
809 return err;
812 static inline int target_to_host_errno(int err)
814 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
815 target_to_host_errno_table[err]) {
816 return target_to_host_errno_table[err];
818 return err;
821 static inline abi_long get_errno(abi_long ret)
823 if (ret == -1)
824 return -host_to_target_errno(errno);
825 else
826 return ret;
829 static inline int is_error(abi_long ret)
831 return (abi_ulong)ret >= (abi_ulong)(-4096);
834 const char *target_strerror(int err)
836 if (err == TARGET_ERESTARTSYS) {
837 return "To be restarted";
839 if (err == TARGET_QEMU_ESIGRETURN) {
840 return "Successful exit from sigreturn";
843 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
844 return NULL;
846 return strerror(target_to_host_errno(err));
849 #define safe_syscall0(type, name) \
850 static type safe_##name(void) \
852 return safe_syscall(__NR_##name); \
855 #define safe_syscall1(type, name, type1, arg1) \
856 static type safe_##name(type1 arg1) \
858 return safe_syscall(__NR_##name, arg1); \
861 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
862 static type safe_##name(type1 arg1, type2 arg2) \
864 return safe_syscall(__NR_##name, arg1, arg2); \
867 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
868 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
870 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
873 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
874 type4, arg4) \
875 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
877 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
880 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
881 type4, arg4, type5, arg5) \
882 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
883 type5 arg5) \
885 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
888 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
889 type4, arg4, type5, arg5, type6, arg6) \
890 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
891 type5 arg5, type6 arg6) \
893 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
896 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
897 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
898 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
899 int, flags, mode_t, mode)
900 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
901 struct rusage *, rusage)
902 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
903 int, options, struct rusage *, rusage)
904 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
905 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
906 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
907 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
908 struct timespec *, tsp, const sigset_t *, sigmask,
909 size_t, sigsetsize)
910 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
911 int, maxevents, int, timeout, const sigset_t *, sigmask,
912 size_t, sigsetsize)
913 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
914 const struct timespec *,timeout,int *,uaddr2,int,val3)
915 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
916 safe_syscall2(int, kill, pid_t, pid, int, sig)
917 safe_syscall2(int, tkill, int, tid, int, sig)
918 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
919 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
920 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
921 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
922 unsigned long, pos_l, unsigned long, pos_h)
923 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
924 unsigned long, pos_l, unsigned long, pos_h)
925 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
926 socklen_t, addrlen)
927 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
928 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
929 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
930 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
931 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
932 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
933 safe_syscall2(int, flock, int, fd, int, operation)
934 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
935 const struct timespec *, uts, size_t, sigsetsize)
936 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
937 int, flags)
938 safe_syscall2(int, nanosleep, const struct timespec *, req,
939 struct timespec *, rem)
940 #ifdef TARGET_NR_clock_nanosleep
941 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
942 const struct timespec *, req, struct timespec *, rem)
943 #endif
944 #ifdef __NR_msgsnd
945 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
946 int, flags)
947 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
948 long, msgtype, int, flags)
949 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
950 unsigned, nsops, const struct timespec *, timeout)
951 #else
952 /* This host kernel architecture uses a single ipc syscall; fake up
953 * wrappers for the sub-operations to hide this implementation detail.
954 * Annoyingly we can't include linux/ipc.h to get the constant definitions
955 * for the call parameter because some structs in there conflict with the
956 * sys/ipc.h ones. So we just define them here, and rely on them being
957 * the same for all host architectures.
959 #define Q_SEMTIMEDOP 4
960 #define Q_MSGSND 11
961 #define Q_MSGRCV 12
962 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
964 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
965 void *, ptr, long, fifth)
966 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
968 return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
970 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
972 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
974 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
975 const struct timespec *timeout)
977 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
978 (long)timeout);
980 #endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
/* POSIX message queue send/receive, signal-safe via safe_syscall. */
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
1003 static inline int host_to_target_sock_type(int host_type)
1005 int target_type;
1007 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
1008 case SOCK_DGRAM:
1009 target_type = TARGET_SOCK_DGRAM;
1010 break;
1011 case SOCK_STREAM:
1012 target_type = TARGET_SOCK_STREAM;
1013 break;
1014 default:
1015 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
1016 break;
1019 #if defined(SOCK_CLOEXEC)
1020 if (host_type & SOCK_CLOEXEC) {
1021 target_type |= TARGET_SOCK_CLOEXEC;
1023 #endif
1025 #if defined(SOCK_NONBLOCK)
1026 if (host_type & SOCK_NONBLOCK) {
1027 target_type |= TARGET_SOCK_NONBLOCK;
1029 #endif
1031 return target_type;
/* State for the brk() emulation in do_brk() below. */
static abi_ulong target_brk;          /* current guest program break */
static abi_ulong target_original_brk; /* initial break; brk never goes below it */
static abi_ulong brk_page;            /* page-aligned top of the reserved heap */

/* Record the guest's initial program break (called at image-load time). */
void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}

/* Enable the first definition to trace brk activity to stderr. */
//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)
/* do_brk() must return target values and target errnos.
 *
 * Emulates the guest brk() syscall: grows (or queries) the guest heap,
 * mapping additional anonymous memory when the request goes beyond the
 * pages already reserved.  The break is never allowed to move below its
 * initial value.
 */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    if (!new_brk) {
        /* brk(0) is the traditional "query the current break" form. */
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    if (new_brk < target_original_brk) {
        /* Refuse to shrink below the initial break; Linux brk() signals
         * failure by returning the unchanged break. */
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done... */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages. */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contains garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
1125 static inline abi_long copy_from_user_fdset(fd_set *fds,
1126 abi_ulong target_fds_addr,
1127 int n)
1129 int i, nw, j, k;
1130 abi_ulong b, *target_fds;
1132 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1133 if (!(target_fds = lock_user(VERIFY_READ,
1134 target_fds_addr,
1135 sizeof(abi_ulong) * nw,
1136 1)))
1137 return -TARGET_EFAULT;
1139 FD_ZERO(fds);
1140 k = 0;
1141 for (i = 0; i < nw; i++) {
1142 /* grab the abi_ulong */
1143 __get_user(b, &target_fds[i]);
1144 for (j = 0; j < TARGET_ABI_BITS; j++) {
1145 /* check the bit inside the abi_ulong */
1146 if ((b >> j) & 1)
1147 FD_SET(k, fds);
1148 k++;
1152 unlock_user(target_fds, target_fds_addr, 0);
1154 return 0;
1157 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1158 abi_ulong target_fds_addr,
1159 int n)
1161 if (target_fds_addr) {
1162 if (copy_from_user_fdset(fds, target_fds_addr, n))
1163 return -TARGET_EFAULT;
1164 *fds_ptr = fds;
1165 } else {
1166 *fds_ptr = NULL;
1168 return 0;
1171 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1172 const fd_set *fds,
1173 int n)
1175 int i, nw, j, k;
1176 abi_long v;
1177 abi_ulong *target_fds;
1179 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1180 if (!(target_fds = lock_user(VERIFY_WRITE,
1181 target_fds_addr,
1182 sizeof(abi_ulong) * nw,
1183 0)))
1184 return -TARGET_EFAULT;
1186 k = 0;
1187 for (i = 0; i < nw; i++) {
1188 v = 0;
1189 for (j = 0; j < TARGET_ABI_BITS; j++) {
1190 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1191 k++;
1193 __put_user(v, &target_fds[i]);
1196 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1198 return 0;
/* Host clock tick rate for clock_t values: Alpha hosts use 1024 Hz,
 * everything else 100 Hz. */
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

/* Rescale a host clock_t tick count to the target's HZ.  The 64-bit
 * intermediate avoids overflow in the multiplication. */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
/* Copy a host struct rusage out to guest memory, byte-swapping every
 * field with tswapal() (all rusage fields are long-sized on both sides).
 * Returns 0 on success, -TARGET_EFAULT if target_addr is unwritable.
 */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
1246 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1248 abi_ulong target_rlim_swap;
1249 rlim_t result;
1251 target_rlim_swap = tswapal(target_rlim);
1252 if (target_rlim_swap == TARGET_RLIM_INFINITY)
1253 return RLIM_INFINITY;
1255 result = target_rlim_swap;
1256 if (target_rlim_swap != (rlim_t)result)
1257 return RLIM_INFINITY;
1259 return result;
1262 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1264 abi_ulong target_rlim_swap;
1265 abi_ulong result;
1267 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1268 target_rlim_swap = TARGET_RLIM_INFINITY;
1269 else
1270 target_rlim_swap = rlim;
1271 result = tswapal(target_rlim_swap);
1273 return result;
/* Map a target RLIMIT_* resource code to the host's constant.
 * Unknown codes are passed through unchanged and left for the host
 * syscall to reject.
 */
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
/* Read a struct timeval from guest memory into *tv, byte-swapping the
 * fields.  Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
        return -TARGET_EFAULT;

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
/* Write a struct timeval out to guest memory, byte-swapping the fields.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
        return -TARGET_EFAULT;

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
/* Read a struct timezone from guest memory into *tz, byte-swapping the
 * fields.  Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

/* Read a POSIX message-queue attribute block from guest memory,
 * byte-swapping each field.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

/* Write a host mq_attr block out to guest memory, byte-swapping each
 * field.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos.
 *
 * Emulates select() on top of the signal-safe pselect6 wrapper: copies
 * the three fd sets and the optional timeout in from guest memory, makes
 * the host call, then copies the (possibly modified) sets and remaining
 * timeout back out.
 */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /* A zero guest address means "set not supplied" and yields a NULL
     * host pointer via copy_from_user_fdset_ptr(). */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        /* pselect6 takes a timespec, so convert usec to nsec */
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        /* Mirror the updated (remaining) timeout back to the guest,
         * matching Linux select() semantics. */
        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
#if defined(TARGET_WANT_OLD_SYS_SELECT)
/* The "old" select ABI passes a single guest pointer to a block holding
 * all five arguments; unpack it and forward to do_select().
 */
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#endif
/* Thin wrapper around host pipe2(); returns -ENOSYS when the host libc
 * does not provide it.  Callers convert the host-style result with
 * get_errno().
 */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}
/* Emulate pipe()/pipe2() for the guest.  'flags' selects pipe2; for the
 * plain pipe syscall some targets return the second fd in a CPU register
 * instead of writing both fds through 'pipedes'.
 */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        /* second fd is returned in a register; first fd is the result */
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    /* Common path: store both fds into the guest's int[2] array. */
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
/* Convert a guest multicast membership request to a host struct
 * ip_mreqn.  The two addresses are copied without swapping (s_addr is
 * network byte order on both sides); the ifindex is only present - and
 * only converted - when 'len' says the guest passed the larger
 * ip_mreqn form.
 */
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
/* Copy a sockaddr from guest memory to the host buffer 'addr',
 * converting the family field and the family-specific fields we know
 * about.  Some fd types install their own converter via fd_trans, which
 * takes precedence.
 */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* include the terminating NUL if it is just past 'len' */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
/* Copy a host sockaddr out to guest memory, byte-swapping the family
 * field and the family-specific fields we know about (netlink
 * pid/groups, packet ifindex/hatype).  A zero length is a no-op.
 */
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    /* Only patch the family field if it was copied in full; 'len' may
     * describe a truncated address. */
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
        struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
/* Convert the control-message (ancillary data) chain of a guest msghdr
 * into the host msgh->msg_control buffer, translating the payload types
 * we support (SCM_RIGHTS fd arrays, SCM_CREDENTIALS).  Unknown payloads
 * are copied verbatim with a warning.  On exit msg_controllen reflects
 * the space actually used.
 */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* payload length = guest cmsg_len minus the guest header size */
        int len = tswapal(target_cmsg->cmsg_len)
            - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            gemu_log("Host cmsg overflow\n");
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* array of file descriptors: swap each int */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            /* unknown payload: pass bytes through unconverted */
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
/* Convert the host control-message chain of msgh back into the guest's
 * msg_control buffer, translating known payload types (SCM_RIGHTS,
 * SO_TIMESTAMP, SCM_CREDENTIALS) and flagging truncation to the guest
 * via MSG_CTRUNC.  On exit the guest msg_controllen holds the space used.
 */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        tgt_len = TARGET_CMSG_LEN(len);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
        default:
            break;
        }

        if (msg_controllen < tgt_len) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen;
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            /* unknown payload: copy raw bytes, zero-fill any excess */
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(tgt_len);
        tgt_space = TARGET_CMSG_SPACE(len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
/* Byte-swap every field of a netlink message header in place.
 * The swap is its own inverse, so this serves both directions.
 */
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
    nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
    nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
}
/* Walk a buffer of host-endian netlink messages, converting each payload
 * via the supplied callback (while the header is still host-endian) and
 * then byte-swapping the header for the guest.  Stops at NLMSG_DONE /
 * NLMSG_ERROR, on a malformed length, or on a callback failure.
 */
static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*host_to_target_nlmsg)
                                              (struct nlmsghdr *))
{
    uint32_t nlmsg_len;
    abi_long ret;

    while (len > sizeof(struct nlmsghdr)) {

        /* save the length before the header gets swapped below */
        nlmsg_len = nlh->nlmsg_len;
        if (nlmsg_len < sizeof(struct nlmsghdr) ||
            nlmsg_len > len) {
            break;
        }

        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            tswap_nlmsghdr(nlh);
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            tswap_nlmsghdr(&e->msg);
            tswap_nlmsghdr(nlh);
            return 0;
        }
        default:
            ret = host_to_target_nlmsg(nlh);
            if (ret < 0) {
                tswap_nlmsghdr(nlh);
                return ret;
            }
            break;
        }
        tswap_nlmsghdr(nlh);
        len -= NLMSG_ALIGN(nlmsg_len);
        nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
    }
    return 0;
}
/* Walk a buffer of guest-endian netlink messages: byte-swap each header
 * to host order, then convert the payload via the supplied callback.
 * Stops at NLMSG_DONE / NLMSG_ERROR, on a malformed length, or on a
 * callback failure.
 */
static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*target_to_host_nlmsg)
                                              (struct nlmsghdr *))
{
    int ret;

    while (len > sizeof(struct nlmsghdr)) {
        /* validate the guest-endian length before swapping the header */
        if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
            tswap32(nlh->nlmsg_len) > len) {
            break;
        }
        tswap_nlmsghdr(nlh);
        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            tswap_nlmsghdr(&e->msg);
            return 0;
        }
        default:
            ret = target_to_host_nlmsg(nlh);
            if (ret < 0) {
                return ret;
            }
        }
        /* nlmsg_len is host-endian from this point on */
        len -= NLMSG_ALIGN(nlh->nlmsg_len);
        nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
    }
    return 0;
}
#ifdef CONFIG_RTNETLINK
/* Walk a buffer of host-endian netlink attributes: convert each payload
 * via the callback (with 'context' passed through), then byte-swap the
 * attribute header for the guest.  Stops on a malformed length or a
 * callback failure.
 */
static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
                                               size_t len, void *context,
                                               abi_long (*host_to_target_nlattr)
                                               (struct nlattr *,
                                                void *context))
{
    unsigned short nla_len;
    abi_long ret;

    while (len > sizeof(struct nlattr)) {
        /* save the length before the header gets swapped below */
        nla_len = nlattr->nla_len;
        if (nla_len < sizeof(struct nlattr) ||
            nla_len > len) {
            break;
        }
        ret = host_to_target_nlattr(nlattr, context);
        nlattr->nla_len = tswap16(nlattr->nla_len);
        nlattr->nla_type = tswap16(nlattr->nla_type);
        if (ret < 0) {
            return ret;
        }
        len -= NLA_ALIGN(nla_len);
        nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
    }
    return 0;
}
/* Walk a buffer of host-endian rtnetlink attributes: convert each
 * payload via the callback, then byte-swap the attribute header for the
 * guest.  Stops on a malformed length or a callback failure.
 */
static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
                                               size_t len,
                                               abi_long (*host_to_target_rtattr)
                                               (struct rtattr *))
{
    unsigned short rta_len;
    abi_long ret;

    while (len > sizeof(struct rtattr)) {
        /* save the length before the header gets swapped below */
        rta_len = rtattr->rta_len;
        if (rta_len < sizeof(struct rtattr) ||
            rta_len > len) {
            break;
        }
        ret = host_to_target_rtattr(rtattr);
        rtattr->rta_len = tswap16(rtattr->rta_len);
        rtattr->rta_type = tswap16(rtattr->rta_type);
        if (ret < 0) {
            return ret;
        }
        len -= RTA_ALIGN(rta_len);
        rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
    }
    return 0;
}
2008 #define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
/* Byte-swap the payload of one IFLA_BR_* bridge attribute in place.
 * Callback for host_to_target_for_each_nlattr(); 'context' is unused.
 * The required swap width is keyed purely off the attribute type.
 */
static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
                                                  void *context)
{
    uint16_t *u16;
    uint32_t *u32;
    uint64_t *u64;

    switch (nlattr->nla_type) {
    /* no data */
    case QEMU_IFLA_BR_FDB_FLUSH:
        break;
    /* binary */
    case QEMU_IFLA_BR_GROUP_ADDR:
        break;
    /* uint8_t */
    case QEMU_IFLA_BR_VLAN_FILTERING:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
    case QEMU_IFLA_BR_MCAST_ROUTER:
    case QEMU_IFLA_BR_MCAST_SNOOPING:
    case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
    case QEMU_IFLA_BR_MCAST_QUERIER:
    case QEMU_IFLA_BR_NF_CALL_IPTABLES:
    case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
    case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
        /* single bytes need no swapping */
        break;
    /* uint16_t */
    case QEMU_IFLA_BR_PRIORITY:
    case QEMU_IFLA_BR_VLAN_PROTOCOL:
    case QEMU_IFLA_BR_GROUP_FWD_MASK:
    case QEMU_IFLA_BR_ROOT_PORT:
    case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
        u16 = NLA_DATA(nlattr);
        *u16 = tswap16(*u16);
        break;
    /* uint32_t */
    case QEMU_IFLA_BR_FORWARD_DELAY:
    case QEMU_IFLA_BR_HELLO_TIME:
    case QEMU_IFLA_BR_MAX_AGE:
    case QEMU_IFLA_BR_AGEING_TIME:
    case QEMU_IFLA_BR_STP_STATE:
    case QEMU_IFLA_BR_ROOT_PATH_COST:
    case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
    case QEMU_IFLA_BR_MCAST_HASH_MAX:
    case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
    case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint64_t */
    case QEMU_IFLA_BR_HELLO_TIMER:
    case QEMU_IFLA_BR_TCN_TIMER:
    case QEMU_IFLA_BR_GC_TIMER:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
    case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
    case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
    case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
        u64 = NLA_DATA(nlattr);
        *u64 = tswap64(*u64);
        break;
    /* ifla_bridge_id: uin8_t[] */
    case QEMU_IFLA_BR_ROOT_ID:
    case QEMU_IFLA_BR_BRIDGE_ID:
        break;
    default:
        /* payload left untouched (guest will see host-endian data) */
        gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}
/* Byte-swap the payload of one IFLA_BRPORT_* bridge-port attribute in
 * place.  Callback for host_to_target_for_each_nlattr(); 'context' is
 * unused.  The swap width is keyed purely off the attribute type.
 */
static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
                                                        void *context)
{
    uint16_t *u16;
    uint32_t *u32;
    uint64_t *u64;

    switch (nlattr->nla_type) {
    /* uint8_t */
    case QEMU_IFLA_BRPORT_STATE:
    case QEMU_IFLA_BRPORT_MODE:
    case QEMU_IFLA_BRPORT_GUARD:
    case QEMU_IFLA_BRPORT_PROTECT:
    case QEMU_IFLA_BRPORT_FAST_LEAVE:
    case QEMU_IFLA_BRPORT_LEARNING:
    case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
    case QEMU_IFLA_BRPORT_PROXYARP:
    case QEMU_IFLA_BRPORT_LEARNING_SYNC:
    case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
    case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
    case QEMU_IFLA_BRPORT_CONFIG_PENDING:
    case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
        /* single bytes need no swapping */
        break;
    /* uint16_t */
    case QEMU_IFLA_BRPORT_PRIORITY:
    case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
    case QEMU_IFLA_BRPORT_DESIGNATED_COST:
    case QEMU_IFLA_BRPORT_ID:
    case QEMU_IFLA_BRPORT_NO:
        u16 = NLA_DATA(nlattr);
        *u16 = tswap16(*u16);
        break;
    /* uin32_t */
    case QEMU_IFLA_BRPORT_COST:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint64_t */
    case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
    case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
    case QEMU_IFLA_BRPORT_HOLD_TIMER:
        u64 = NLA_DATA(nlattr);
        *u64 = tswap64(*u64);
        break;
    /* ifla_bridge_id: uint8_t[] */
    case QEMU_IFLA_BRPORT_ROOT_ID:
    case QEMU_IFLA_BRPORT_BRIDGE_ID:
        break;
    default:
        /* payload left untouched (guest will see host-endian data) */
        gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}
/* Scratch state threaded through the QEMU_IFLA_LINKINFO nested-attribute
 * walk: remembers the INFO_KIND / INFO_SLAVE_KIND strings so that the
 * later INFO_DATA / INFO_SLAVE_DATA attributes can be dispatched to the
 * right per-kind converter. */
struct linkinfo_context {
    int len;            /* payload length of 'name' */
    char *name;         /* QEMU_IFLA_INFO_KIND string (e.g. "bridge") */
    int slave_len;      /* payload length of 'slave_name' */
    char *slave_name;   /* QEMU_IFLA_INFO_SLAVE_KIND string */
};
2146 static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
2147 void *context)
2149 struct linkinfo_context *li_context = context;
2151 switch (nlattr->nla_type) {
2152 /* string */
2153 case QEMU_IFLA_INFO_KIND:
2154 li_context->name = NLA_DATA(nlattr);
2155 li_context->len = nlattr->nla_len - NLA_HDRLEN;
2156 break;
2157 case QEMU_IFLA_INFO_SLAVE_KIND:
2158 li_context->slave_name = NLA_DATA(nlattr);
2159 li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
2160 break;
2161 /* stats */
2162 case QEMU_IFLA_INFO_XSTATS:
2163 /* FIXME: only used by CAN */
2164 break;
2165 /* nested */
2166 case QEMU_IFLA_INFO_DATA:
2167 if (strncmp(li_context->name, "bridge",
2168 li_context->len) == 0) {
2169 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2170 nlattr->nla_len,
2171 NULL,
2172 host_to_target_data_bridge_nlattr);
2173 } else {
2174 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
2176 break;
2177 case QEMU_IFLA_INFO_SLAVE_DATA:
2178 if (strncmp(li_context->slave_name, "bridge",
2179 li_context->slave_len) == 0) {
2180 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2181 nlattr->nla_len,
2182 NULL,
2183 host_to_target_slave_data_bridge_nlattr);
2184 } else {
2185 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2186 li_context->slave_name);
2188 break;
2189 default:
2190 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
2191 break;
2194 return 0;
2197 static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
2198 void *context)
2200 uint32_t *u32;
2201 int i;
2203 switch (nlattr->nla_type) {
2204 case QEMU_IFLA_INET_CONF:
2205 u32 = NLA_DATA(nlattr);
2206 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2207 i++) {
2208 u32[i] = tswap32(u32[i]);
2210 break;
2211 default:
2212 gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
2214 return 0;
2217 static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
2218 void *context)
2220 uint32_t *u32;
2221 uint64_t *u64;
2222 struct ifla_cacheinfo *ci;
2223 int i;
2225 switch (nlattr->nla_type) {
2226 /* binaries */
2227 case QEMU_IFLA_INET6_TOKEN:
2228 break;
2229 /* uint8_t */
2230 case QEMU_IFLA_INET6_ADDR_GEN_MODE:
2231 break;
2232 /* uint32_t */
2233 case QEMU_IFLA_INET6_FLAGS:
2234 u32 = NLA_DATA(nlattr);
2235 *u32 = tswap32(*u32);
2236 break;
2237 /* uint32_t[] */
2238 case QEMU_IFLA_INET6_CONF:
2239 u32 = NLA_DATA(nlattr);
2240 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2241 i++) {
2242 u32[i] = tswap32(u32[i]);
2244 break;
2245 /* ifla_cacheinfo */
2246 case QEMU_IFLA_INET6_CACHEINFO:
2247 ci = NLA_DATA(nlattr);
2248 ci->max_reasm_len = tswap32(ci->max_reasm_len);
2249 ci->tstamp = tswap32(ci->tstamp);
2250 ci->reachable_time = tswap32(ci->reachable_time);
2251 ci->retrans_time = tswap32(ci->retrans_time);
2252 break;
2253 /* uint64_t[] */
2254 case QEMU_IFLA_INET6_STATS:
2255 case QEMU_IFLA_INET6_ICMP6STATS:
2256 u64 = NLA_DATA(nlattr);
2257 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
2258 i++) {
2259 u64[i] = tswap64(u64[i]);
2261 break;
2262 default:
2263 gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
2265 return 0;
2268 static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
2269 void *context)
2271 switch (nlattr->nla_type) {
2272 case AF_INET:
2273 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2274 NULL,
2275 host_to_target_data_inet_nlattr);
2276 case AF_INET6:
2277 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2278 NULL,
2279 host_to_target_data_inet6_nlattr);
2280 default:
2281 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
2282 break;
2284 return 0;
2287 static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
2289 uint32_t *u32;
2290 struct rtnl_link_stats *st;
2291 struct rtnl_link_stats64 *st64;
2292 struct rtnl_link_ifmap *map;
2293 struct linkinfo_context li_context;
2295 switch (rtattr->rta_type) {
2296 /* binary stream */
2297 case QEMU_IFLA_ADDRESS:
2298 case QEMU_IFLA_BROADCAST:
2299 /* string */
2300 case QEMU_IFLA_IFNAME:
2301 case QEMU_IFLA_QDISC:
2302 break;
2303 /* uin8_t */
2304 case QEMU_IFLA_OPERSTATE:
2305 case QEMU_IFLA_LINKMODE:
2306 case QEMU_IFLA_CARRIER:
2307 case QEMU_IFLA_PROTO_DOWN:
2308 break;
2309 /* uint32_t */
2310 case QEMU_IFLA_MTU:
2311 case QEMU_IFLA_LINK:
2312 case QEMU_IFLA_WEIGHT:
2313 case QEMU_IFLA_TXQLEN:
2314 case QEMU_IFLA_CARRIER_CHANGES:
2315 case QEMU_IFLA_NUM_RX_QUEUES:
2316 case QEMU_IFLA_NUM_TX_QUEUES:
2317 case QEMU_IFLA_PROMISCUITY:
2318 case QEMU_IFLA_EXT_MASK:
2319 case QEMU_IFLA_LINK_NETNSID:
2320 case QEMU_IFLA_GROUP:
2321 case QEMU_IFLA_MASTER:
2322 case QEMU_IFLA_NUM_VF:
2323 u32 = RTA_DATA(rtattr);
2324 *u32 = tswap32(*u32);
2325 break;
2326 /* struct rtnl_link_stats */
2327 case QEMU_IFLA_STATS:
2328 st = RTA_DATA(rtattr);
2329 st->rx_packets = tswap32(st->rx_packets);
2330 st->tx_packets = tswap32(st->tx_packets);
2331 st->rx_bytes = tswap32(st->rx_bytes);
2332 st->tx_bytes = tswap32(st->tx_bytes);
2333 st->rx_errors = tswap32(st->rx_errors);
2334 st->tx_errors = tswap32(st->tx_errors);
2335 st->rx_dropped = tswap32(st->rx_dropped);
2336 st->tx_dropped = tswap32(st->tx_dropped);
2337 st->multicast = tswap32(st->multicast);
2338 st->collisions = tswap32(st->collisions);
2340 /* detailed rx_errors: */
2341 st->rx_length_errors = tswap32(st->rx_length_errors);
2342 st->rx_over_errors = tswap32(st->rx_over_errors);
2343 st->rx_crc_errors = tswap32(st->rx_crc_errors);
2344 st->rx_frame_errors = tswap32(st->rx_frame_errors);
2345 st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
2346 st->rx_missed_errors = tswap32(st->rx_missed_errors);
2348 /* detailed tx_errors */
2349 st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
2350 st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
2351 st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
2352 st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
2353 st->tx_window_errors = tswap32(st->tx_window_errors);
2355 /* for cslip etc */
2356 st->rx_compressed = tswap32(st->rx_compressed);
2357 st->tx_compressed = tswap32(st->tx_compressed);
2358 break;
2359 /* struct rtnl_link_stats64 */
2360 case QEMU_IFLA_STATS64:
2361 st64 = RTA_DATA(rtattr);
2362 st64->rx_packets = tswap64(st64->rx_packets);
2363 st64->tx_packets = tswap64(st64->tx_packets);
2364 st64->rx_bytes = tswap64(st64->rx_bytes);
2365 st64->tx_bytes = tswap64(st64->tx_bytes);
2366 st64->rx_errors = tswap64(st64->rx_errors);
2367 st64->tx_errors = tswap64(st64->tx_errors);
2368 st64->rx_dropped = tswap64(st64->rx_dropped);
2369 st64->tx_dropped = tswap64(st64->tx_dropped);
2370 st64->multicast = tswap64(st64->multicast);
2371 st64->collisions = tswap64(st64->collisions);
2373 /* detailed rx_errors: */
2374 st64->rx_length_errors = tswap64(st64->rx_length_errors);
2375 st64->rx_over_errors = tswap64(st64->rx_over_errors);
2376 st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
2377 st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
2378 st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
2379 st64->rx_missed_errors = tswap64(st64->rx_missed_errors);
2381 /* detailed tx_errors */
2382 st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
2383 st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
2384 st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
2385 st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
2386 st64->tx_window_errors = tswap64(st64->tx_window_errors);
2388 /* for cslip etc */
2389 st64->rx_compressed = tswap64(st64->rx_compressed);
2390 st64->tx_compressed = tswap64(st64->tx_compressed);
2391 break;
2392 /* struct rtnl_link_ifmap */
2393 case QEMU_IFLA_MAP:
2394 map = RTA_DATA(rtattr);
2395 map->mem_start = tswap64(map->mem_start);
2396 map->mem_end = tswap64(map->mem_end);
2397 map->base_addr = tswap64(map->base_addr);
2398 map->irq = tswap16(map->irq);
2399 break;
2400 /* nested */
2401 case QEMU_IFLA_LINKINFO:
2402 memset(&li_context, 0, sizeof(li_context));
2403 return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2404 &li_context,
2405 host_to_target_data_linkinfo_nlattr);
2406 case QEMU_IFLA_AF_SPEC:
2407 return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2408 NULL,
2409 host_to_target_data_spec_nlattr);
2410 default:
2411 gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
2412 break;
2414 return 0;
2417 static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
2419 uint32_t *u32;
2420 struct ifa_cacheinfo *ci;
2422 switch (rtattr->rta_type) {
2423 /* binary: depends on family type */
2424 case IFA_ADDRESS:
2425 case IFA_LOCAL:
2426 break;
2427 /* string */
2428 case IFA_LABEL:
2429 break;
2430 /* u32 */
2431 case IFA_FLAGS:
2432 case IFA_BROADCAST:
2433 u32 = RTA_DATA(rtattr);
2434 *u32 = tswap32(*u32);
2435 break;
2436 /* struct ifa_cacheinfo */
2437 case IFA_CACHEINFO:
2438 ci = RTA_DATA(rtattr);
2439 ci->ifa_prefered = tswap32(ci->ifa_prefered);
2440 ci->ifa_valid = tswap32(ci->ifa_valid);
2441 ci->cstamp = tswap32(ci->cstamp);
2442 ci->tstamp = tswap32(ci->tstamp);
2443 break;
2444 default:
2445 gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
2446 break;
2448 return 0;
2451 static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
2453 uint32_t *u32;
2454 switch (rtattr->rta_type) {
2455 /* binary: depends on family type */
2456 case RTA_GATEWAY:
2457 case RTA_DST:
2458 case RTA_PREFSRC:
2459 break;
2460 /* u32 */
2461 case RTA_PRIORITY:
2462 case RTA_TABLE:
2463 case RTA_OIF:
2464 u32 = RTA_DATA(rtattr);
2465 *u32 = tswap32(*u32);
2466 break;
2467 default:
2468 gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
2469 break;
2471 return 0;
2474 static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
2475 uint32_t rtattr_len)
2477 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2478 host_to_target_data_link_rtattr);
2481 static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
2482 uint32_t rtattr_len)
2484 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2485 host_to_target_data_addr_rtattr);
2488 static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
2489 uint32_t rtattr_len)
2491 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2492 host_to_target_data_route_rtattr);
2495 static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
2497 uint32_t nlmsg_len;
2498 struct ifinfomsg *ifi;
2499 struct ifaddrmsg *ifa;
2500 struct rtmsg *rtm;
2502 nlmsg_len = nlh->nlmsg_len;
2503 switch (nlh->nlmsg_type) {
2504 case RTM_NEWLINK:
2505 case RTM_DELLINK:
2506 case RTM_GETLINK:
2507 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2508 ifi = NLMSG_DATA(nlh);
2509 ifi->ifi_type = tswap16(ifi->ifi_type);
2510 ifi->ifi_index = tswap32(ifi->ifi_index);
2511 ifi->ifi_flags = tswap32(ifi->ifi_flags);
2512 ifi->ifi_change = tswap32(ifi->ifi_change);
2513 host_to_target_link_rtattr(IFLA_RTA(ifi),
2514 nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
2516 break;
2517 case RTM_NEWADDR:
2518 case RTM_DELADDR:
2519 case RTM_GETADDR:
2520 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2521 ifa = NLMSG_DATA(nlh);
2522 ifa->ifa_index = tswap32(ifa->ifa_index);
2523 host_to_target_addr_rtattr(IFA_RTA(ifa),
2524 nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
2526 break;
2527 case RTM_NEWROUTE:
2528 case RTM_DELROUTE:
2529 case RTM_GETROUTE:
2530 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2531 rtm = NLMSG_DATA(nlh);
2532 rtm->rtm_flags = tswap32(rtm->rtm_flags);
2533 host_to_target_route_rtattr(RTM_RTA(rtm),
2534 nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
2536 break;
2537 default:
2538 return -TARGET_EINVAL;
2540 return 0;
2543 static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
2544 size_t len)
2546 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
2549 static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
2550 size_t len,
2551 abi_long (*target_to_host_rtattr)
2552 (struct rtattr *))
2554 abi_long ret;
2556 while (len >= sizeof(struct rtattr)) {
2557 if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
2558 tswap16(rtattr->rta_len) > len) {
2559 break;
2561 rtattr->rta_len = tswap16(rtattr->rta_len);
2562 rtattr->rta_type = tswap16(rtattr->rta_type);
2563 ret = target_to_host_rtattr(rtattr);
2564 if (ret < 0) {
2565 return ret;
2567 len -= RTA_ALIGN(rtattr->rta_len);
2568 rtattr = (struct rtattr *)(((char *)rtattr) +
2569 RTA_ALIGN(rtattr->rta_len));
2571 return 0;
2574 static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
2576 switch (rtattr->rta_type) {
2577 default:
2578 gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
2579 break;
2581 return 0;
2584 static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
2586 switch (rtattr->rta_type) {
2587 /* binary: depends on family type */
2588 case IFA_LOCAL:
2589 case IFA_ADDRESS:
2590 break;
2591 default:
2592 gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
2593 break;
2595 return 0;
2598 static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
2600 uint32_t *u32;
2601 switch (rtattr->rta_type) {
2602 /* binary: depends on family type */
2603 case RTA_DST:
2604 case RTA_SRC:
2605 case RTA_GATEWAY:
2606 break;
2607 /* u32 */
2608 case RTA_PRIORITY:
2609 case RTA_OIF:
2610 u32 = RTA_DATA(rtattr);
2611 *u32 = tswap32(*u32);
2612 break;
2613 default:
2614 gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
2615 break;
2617 return 0;
2620 static void target_to_host_link_rtattr(struct rtattr *rtattr,
2621 uint32_t rtattr_len)
2623 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2624 target_to_host_data_link_rtattr);
2627 static void target_to_host_addr_rtattr(struct rtattr *rtattr,
2628 uint32_t rtattr_len)
2630 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2631 target_to_host_data_addr_rtattr);
2634 static void target_to_host_route_rtattr(struct rtattr *rtattr,
2635 uint32_t rtattr_len)
2637 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2638 target_to_host_data_route_rtattr);
2641 static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
2643 struct ifinfomsg *ifi;
2644 struct ifaddrmsg *ifa;
2645 struct rtmsg *rtm;
2647 switch (nlh->nlmsg_type) {
2648 case RTM_GETLINK:
2649 break;
2650 case RTM_NEWLINK:
2651 case RTM_DELLINK:
2652 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2653 ifi = NLMSG_DATA(nlh);
2654 ifi->ifi_type = tswap16(ifi->ifi_type);
2655 ifi->ifi_index = tswap32(ifi->ifi_index);
2656 ifi->ifi_flags = tswap32(ifi->ifi_flags);
2657 ifi->ifi_change = tswap32(ifi->ifi_change);
2658 target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
2659 NLMSG_LENGTH(sizeof(*ifi)));
2661 break;
2662 case RTM_GETADDR:
2663 case RTM_NEWADDR:
2664 case RTM_DELADDR:
2665 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2666 ifa = NLMSG_DATA(nlh);
2667 ifa->ifa_index = tswap32(ifa->ifa_index);
2668 target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
2669 NLMSG_LENGTH(sizeof(*ifa)));
2671 break;
2672 case RTM_GETROUTE:
2673 break;
2674 case RTM_NEWROUTE:
2675 case RTM_DELROUTE:
2676 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2677 rtm = NLMSG_DATA(nlh);
2678 rtm->rtm_flags = tswap32(rtm->rtm_flags);
2679 target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
2680 NLMSG_LENGTH(sizeof(*rtm)));
2682 break;
2683 default:
2684 return -TARGET_EOPNOTSUPP;
2686 return 0;
2689 static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
2691 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
2693 #endif /* CONFIG_RTNETLINK */
2695 static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
2697 switch (nlh->nlmsg_type) {
2698 default:
2699 gemu_log("Unknown host audit message type %d\n",
2700 nlh->nlmsg_type);
2701 return -TARGET_EINVAL;
2703 return 0;
2706 static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
2707 size_t len)
2709 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
2712 static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
2714 switch (nlh->nlmsg_type) {
2715 case AUDIT_USER:
2716 case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
2717 case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
2718 break;
2719 default:
2720 gemu_log("Unknown target audit message type %d\n",
2721 nlh->nlmsg_type);
2722 return -TARGET_EINVAL;
2725 return 0;
2728 static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
2730 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
/* do_setsockopt() Must return target values and target errnos.
 *
 * Emulates setsockopt(2) for the guest: reads the option value from
 * guest memory at optval_addr, converts target-level/option constants
 * and payload layouts to their host equivalents where necessary, and
 * issues the host setsockopt().  Unhandled level/option pairs log a
 * message and return -TARGET_ENOPROTOOPT.
 */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            /* These take an int, but the guest may pass a single byte
             * (historical BSD behavior for e.g. IP_MULTICAST_TTL).  */
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            /* Accept either the short ip_mreq or the full ip_mreqn form. */
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            /* Addresses inside ip_mreq_source are network byte order on
             * both sides, so the locked guest buffer is passed as-is.  */
            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
            val = 0;
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        default:
            goto unimplemented;
        }
        break;
    case SOL_RAW:
        switch (optname) {
        case ICMP_FILTER:
            /* struct icmp_filter takes an u32 value */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }

            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;

        default:
            goto unimplemented;
        }
        break;
    case TARGET_SOL_SOCKET:
        switch (optname) {
        case TARGET_SO_RCVTIMEO:
        {
                struct timeval tv;

                optname = SO_RCVTIMEO;

set_timeout:
                /* Shared with TARGET_SO_SNDTIMEO below (via goto):
                 * copies a target_timeval from guest memory and applies
                 * it with the host option selected in 'optname'.  */
                if (optlen != sizeof(struct target_timeval)) {
                    return -TARGET_EINVAL;
                }

                if (copy_from_user_timeval(&tv, optval_addr)) {
                    return -TARGET_EFAULT;
                }

                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                &tv, sizeof(tv)));
                return ret;
        }
        case TARGET_SO_SNDTIMEO:
                optname = SO_SNDTIMEO;
                goto set_timeout;
        case TARGET_SO_ATTACH_FILTER:
        {
                /* Deep-copy the BPF program: the fprog header and each
                 * sock_filter instruction are byte-swapped into host
                 * order before being handed to the kernel.  */
                struct target_sock_fprog *tfprog;
                struct target_sock_filter *tfilter;
                struct sock_fprog fprog;
                struct sock_filter *filter;
                int i;

                if (optlen != sizeof(*tfprog)) {
                    return -TARGET_EINVAL;
                }
                if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
                    return -TARGET_EFAULT;
                }
                if (!lock_user_struct(VERIFY_READ, tfilter,
                                      tswapal(tfprog->filter), 0)) {
                    unlock_user_struct(tfprog, optval_addr, 1);
                    return -TARGET_EFAULT;
                }

                fprog.len = tswap16(tfprog->len);
                filter = g_try_new(struct sock_filter, fprog.len);
                if (filter == NULL) {
                    unlock_user_struct(tfilter, tfprog->filter, 1);
                    unlock_user_struct(tfprog, optval_addr, 1);
                    return -TARGET_ENOMEM;
                }
                for (i = 0; i < fprog.len; i++) {
                    filter[i].code = tswap16(tfilter[i].code);
                    filter[i].jt = tfilter[i].jt;
                    filter[i].jf = tfilter[i].jf;
                    filter[i].k = tswap32(tfilter[i].k);
                }
                fprog.filter = filter;

                ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                                SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
                g_free(filter);

                unlock_user_struct(tfilter, tfprog->filter, 1);
                unlock_user_struct(tfprog, optval_addr, 1);
                return ret;
        }
        case TARGET_SO_BINDTODEVICE:
        {
                char *dev_ifname, *addr_ifname;

                /* Clamp to the interface-name limit; a NUL is appended
                 * below so the copy is always terminated.  */
                if (optlen > IFNAMSIZ - 1) {
                    optlen = IFNAMSIZ - 1;
                }
                dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
                if (!dev_ifname) {
                    return -TARGET_EFAULT;
                }
                optname = SO_BINDTODEVICE;
                addr_ifname = alloca(IFNAMSIZ);
                memcpy(addr_ifname, dev_ifname, optlen);
                addr_ifname[optlen] = 0;
                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                           addr_ifname, optlen));
                unlock_user (dev_ifname, optval_addr, 0);
                return ret;
        }
            /* Options with 'int' argument.  The cases below only map
             * the target constant to the host one; the shared code
             * after the switch reads the value and makes the call.  */
        case TARGET_SO_DEBUG:
                optname = SO_DEBUG;
                break;
        case TARGET_SO_REUSEADDR:
                optname = SO_REUSEADDR;
                break;
        case TARGET_SO_TYPE:
                optname = SO_TYPE;
                break;
        case TARGET_SO_ERROR:
                optname = SO_ERROR;
                break;
        case TARGET_SO_DONTROUTE:
                optname = SO_DONTROUTE;
                break;
        case TARGET_SO_BROADCAST:
                optname = SO_BROADCAST;
                break;
        case TARGET_SO_SNDBUF:
                optname = SO_SNDBUF;
                break;
        case TARGET_SO_SNDBUFFORCE:
                optname = SO_SNDBUFFORCE;
                break;
        case TARGET_SO_RCVBUF:
                optname = SO_RCVBUF;
                break;
        case TARGET_SO_RCVBUFFORCE:
                optname = SO_RCVBUFFORCE;
                break;
        case TARGET_SO_KEEPALIVE:
                optname = SO_KEEPALIVE;
                break;
        case TARGET_SO_OOBINLINE:
                optname = SO_OOBINLINE;
                break;
        case TARGET_SO_NO_CHECK:
                optname = SO_NO_CHECK;
                break;
        case TARGET_SO_PRIORITY:
                optname = SO_PRIORITY;
                break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
                optname = SO_BSDCOMPAT;
                break;
#endif
        case TARGET_SO_PASSCRED:
                optname = SO_PASSCRED;
                break;
        case TARGET_SO_PASSSEC:
                optname = SO_PASSSEC;
                break;
        case TARGET_SO_TIMESTAMP:
                optname = SO_TIMESTAMP;
                break;
        case TARGET_SO_RCVLOWAT:
                optname = SO_RCVLOWAT;
                break;
                /* NOTE(review): duplicated 'break' below is harmless
                 * dead code inherited from upstream.  */
                break;
        default:
            goto unimplemented;
        }
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
    default:
    unimplemented:
        gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}
/* do_getsockopt() Must return target values and target errnos.
 *
 * Emulates getsockopt(2) for the guest: maps target option constants to
 * host ones, performs the host call, then writes the result back to
 * guest memory at optval_addr honoring the guest-supplied length in
 * *optlen (which is updated with the actual length).
 */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_LINGER:
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_PEERCRED: {
            /* Fetch the peer's credentials and marshal them into the
             * guest's target_ucred layout field by field.  */
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.  Constant mapping only; the
         * shared int_case code below performs the call and copy-out.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
        /* TCP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        /* NOTE(review): 'lv = sizeof(lv)' relies on socklen_t and int
         * having the same size for the &val out-buffer — true on Linux,
         * but sizeof(val) would be clearer; confirm before changing.  */
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                /* Old guests may expect a single byte back for these
                 * historically byte-sized IP options.  */
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    default:
    unimplemented:
        gemu_log("getsockopt level=%d optname=%d not yet supported\n",
                 level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
/* Translate a guest iovec array at target_addr into a host struct iovec
 * array, locking each guest buffer into host memory.
 *
 * type/copy are forwarded to lock_user() (read vs write access).
 * Returns a g_new-allocated vector to be released with unlock_iovec(),
 * or NULL with errno set (0 for an empty vector, EINVAL/ENOMEM/EFAULT
 * otherwise).  Matches kernel semantics: a bad pointer in the FIRST
 * element faults, while later bad elements yield a partial transfer
 * (their entries are left as NULL/0), and the summed length is clamped
 * to the maximum transfer size.
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            /* Once one address is bad, all following entries are zeroed. */
            if (bad_address) {
                len = 0;
            }
            /* Clamp so the running total never exceeds max_len. */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Unwind: release every guest buffer locked so far (i entries). */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
3287 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3288 abi_ulong count, int copy)
3290 struct target_iovec *target_vec;
3291 int i;
3293 target_vec = lock_user(VERIFY_READ, target_addr,
3294 count * sizeof(struct target_iovec), 1);
3295 if (target_vec) {
3296 for (i = 0; i < count; i++) {
3297 abi_ulong base = tswapal(target_vec[i].iov_base);
3298 abi_long len = tswapal(target_vec[i].iov_len);
3299 if (len < 0) {
3300 break;
3302 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3304 unlock_user(target_vec, target_addr, 0);
3307 g_free(vec);
3310 static inline int target_to_host_sock_type(int *type)
3312 int host_type = 0;
3313 int target_type = *type;
3315 switch (target_type & TARGET_SOCK_TYPE_MASK) {
3316 case TARGET_SOCK_DGRAM:
3317 host_type = SOCK_DGRAM;
3318 break;
3319 case TARGET_SOCK_STREAM:
3320 host_type = SOCK_STREAM;
3321 break;
3322 default:
3323 host_type = target_type & TARGET_SOCK_TYPE_MASK;
3324 break;
3326 if (target_type & TARGET_SOCK_CLOEXEC) {
3327 #if defined(SOCK_CLOEXEC)
3328 host_type |= SOCK_CLOEXEC;
3329 #else
3330 return -TARGET_EINVAL;
3331 #endif
3333 if (target_type & TARGET_SOCK_NONBLOCK) {
3334 #if defined(SOCK_NONBLOCK)
3335 host_type |= SOCK_NONBLOCK;
3336 #elif !defined(O_NONBLOCK)
3337 return -TARGET_EINVAL;
3338 #endif
3340 *type = host_type;
3341 return 0;
3344 /* Try to emulate socket type flags after socket creation. */
3345 static int sock_flags_fixup(int fd, int target_type)
3347 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3348 if (target_type & TARGET_SOCK_NONBLOCK) {
3349 int flags = fcntl(fd, F_GETFL);
3350 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3351 close(fd);
3352 return -TARGET_EINVAL;
3355 #endif
3356 return fd;
3359 static abi_long packet_target_to_host_sockaddr(void *host_addr,
3360 abi_ulong target_addr,
3361 socklen_t len)
3363 struct sockaddr *addr = host_addr;
3364 struct target_sockaddr *target_saddr;
3366 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
3367 if (!target_saddr) {
3368 return -TARGET_EFAULT;
3371 memcpy(addr, target_saddr, len);
3372 addr->sa_family = tswap16(target_saddr->sa_family);
3373 /* spkt_protocol is big-endian */
3375 unlock_user(target_saddr, target_addr, 0);
3376 return 0;
3379 static TargetFdTrans target_packet_trans = {
3380 .target_to_host_addr = packet_target_to_host_sockaddr,
#ifdef CONFIG_RTNETLINK
/* Convert outgoing NETLINK_ROUTE messages from guest to host layout.
 * Returns the (unchanged) length on success, or a negative target errno.
 */
static abi_long netlink_route_target_to_host(void *buf, size_t len)
{
    abi_long ret = target_to_host_nlmsg_route(buf, len);

    return (ret < 0) ? ret : (abi_long)len;
}

/* Convert incoming NETLINK_ROUTE messages from host to guest layout. */
static abi_long netlink_route_host_to_target(void *buf, size_t len)
{
    abi_long ret = host_to_target_nlmsg_route(buf, len);

    return (ret < 0) ? ret : (abi_long)len;
}

/* fd translator for NETLINK_ROUTE sockets. */
static TargetFdTrans target_netlink_route_trans = {
    .target_to_host_data = netlink_route_target_to_host,
    .host_to_target_data = netlink_route_host_to_target,
};
#endif /* CONFIG_RTNETLINK */
3414 static abi_long netlink_audit_target_to_host(void *buf, size_t len)
3416 abi_long ret;
3418 ret = target_to_host_nlmsg_audit(buf, len);
3419 if (ret < 0) {
3420 return ret;
3423 return len;
3426 static abi_long netlink_audit_host_to_target(void *buf, size_t len)
3428 abi_long ret;
3430 ret = host_to_target_nlmsg_audit(buf, len);
3431 if (ret < 0) {
3432 return ret;
3435 return len;
3438 static TargetFdTrans target_netlink_audit_trans = {
3439 .target_to_host_data = netlink_audit_target_to_host,
3440 .host_to_target_data = netlink_audit_host_to_target,
3443 /* do_socket() Must return target values and target errnos. */
3444 static abi_long do_socket(int domain, int type, int protocol)
3446 int target_type = type;
3447 int ret;
3449 ret = target_to_host_sock_type(&type);
3450 if (ret) {
3451 return ret;
3454 if (domain == PF_NETLINK && !(
3455 #ifdef CONFIG_RTNETLINK
3456 protocol == NETLINK_ROUTE ||
3457 #endif
3458 protocol == NETLINK_KOBJECT_UEVENT ||
3459 protocol == NETLINK_AUDIT)) {
3460 return -EPFNOSUPPORT;
3463 if (domain == AF_PACKET ||
3464 (domain == AF_INET && type == SOCK_PACKET)) {
3465 protocol = tswap16(protocol);
3468 ret = get_errno(socket(domain, type, protocol));
3469 if (ret >= 0) {
3470 ret = sock_flags_fixup(ret, target_type);
3471 if (type == SOCK_PACKET) {
3472 /* Manage an obsolete case :
3473 * if socket type is SOCK_PACKET, bind by name
3475 fd_trans_register(ret, &target_packet_trans);
3476 } else if (domain == PF_NETLINK) {
3477 switch (protocol) {
3478 #ifdef CONFIG_RTNETLINK
3479 case NETLINK_ROUTE:
3480 fd_trans_register(ret, &target_netlink_route_trans);
3481 break;
3482 #endif
3483 case NETLINK_KOBJECT_UEVENT:
3484 /* nothing to do: messages are strings */
3485 break;
3486 case NETLINK_AUDIT:
3487 fd_trans_register(ret, &target_netlink_audit_trans);
3488 break;
3489 default:
3490 g_assert_not_reached();
3494 return ret;
3497 /* do_bind() Must return target values and target errnos. */
3498 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3499 socklen_t addrlen)
3501 void *addr;
3502 abi_long ret;
3504 if ((int)addrlen < 0) {
3505 return -TARGET_EINVAL;
3508 addr = alloca(addrlen+1);
3510 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3511 if (ret)
3512 return ret;
3514 return get_errno(bind(sockfd, addr, addrlen));
3517 /* do_connect() Must return target values and target errnos. */
3518 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3519 socklen_t addrlen)
3521 void *addr;
3522 abi_long ret;
3524 if ((int)addrlen < 0) {
3525 return -TARGET_EINVAL;
3528 addr = alloca(addrlen+1);
3530 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3531 if (ret)
3532 return ret;
3534 return get_errno(safe_connect(sockfd, addr, addrlen));
3537 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3538 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3539 int flags, int send)
3541 abi_long ret, len;
3542 struct msghdr msg;
3543 abi_ulong count;
3544 struct iovec *vec;
3545 abi_ulong target_vec;
3547 if (msgp->msg_name) {
3548 msg.msg_namelen = tswap32(msgp->msg_namelen);
3549 msg.msg_name = alloca(msg.msg_namelen+1);
3550 ret = target_to_host_sockaddr(fd, msg.msg_name,
3551 tswapal(msgp->msg_name),
3552 msg.msg_namelen);
3553 if (ret == -TARGET_EFAULT) {
3554 /* For connected sockets msg_name and msg_namelen must
3555 * be ignored, so returning EFAULT immediately is wrong.
3556 * Instead, pass a bad msg_name to the host kernel, and
3557 * let it decide whether to return EFAULT or not.
3559 msg.msg_name = (void *)-1;
3560 } else if (ret) {
3561 goto out2;
3563 } else {
3564 msg.msg_name = NULL;
3565 msg.msg_namelen = 0;
3567 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3568 msg.msg_control = alloca(msg.msg_controllen);
3569 msg.msg_flags = tswap32(msgp->msg_flags);
3571 count = tswapal(msgp->msg_iovlen);
3572 target_vec = tswapal(msgp->msg_iov);
3574 if (count > IOV_MAX) {
3575 /* sendrcvmsg returns a different errno for this condition than
3576 * readv/writev, so we must catch it here before lock_iovec() does.
3578 ret = -TARGET_EMSGSIZE;
3579 goto out2;
3582 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3583 target_vec, count, send);
3584 if (vec == NULL) {
3585 ret = -host_to_target_errno(errno);
3586 goto out2;
3588 msg.msg_iovlen = count;
3589 msg.msg_iov = vec;
3591 if (send) {
3592 if (fd_trans_target_to_host_data(fd)) {
3593 void *host_msg;
3595 host_msg = g_malloc(msg.msg_iov->iov_len);
3596 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3597 ret = fd_trans_target_to_host_data(fd)(host_msg,
3598 msg.msg_iov->iov_len);
3599 if (ret >= 0) {
3600 msg.msg_iov->iov_base = host_msg;
3601 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3603 g_free(host_msg);
3604 } else {
3605 ret = target_to_host_cmsg(&msg, msgp);
3606 if (ret == 0) {
3607 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3610 } else {
3611 ret = get_errno(safe_recvmsg(fd, &msg, flags));
3612 if (!is_error(ret)) {
3613 len = ret;
3614 if (fd_trans_host_to_target_data(fd)) {
3615 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3616 len);
3617 } else {
3618 ret = host_to_target_cmsg(msgp, &msg);
3620 if (!is_error(ret)) {
3621 msgp->msg_namelen = tswap32(msg.msg_namelen);
3622 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3623 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3624 msg.msg_name, msg.msg_namelen);
3625 if (ret) {
3626 goto out;
3630 ret = len;
3635 out:
3636 unlock_iovec(vec, target_vec, count, !send);
3637 out2:
3638 return ret;
3641 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3642 int flags, int send)
3644 abi_long ret;
3645 struct target_msghdr *msgp;
3647 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3648 msgp,
3649 target_msg,
3650 send ? 1 : 0)) {
3651 return -TARGET_EFAULT;
3653 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3654 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3655 return ret;
3658 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3659 * so it might not have this *mmsg-specific flag either.
3661 #ifndef MSG_WAITFORONE
3662 #define MSG_WAITFORONE 0x10000
3663 #endif
3665 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3666 unsigned int vlen, unsigned int flags,
3667 int send)
3669 struct target_mmsghdr *mmsgp;
3670 abi_long ret = 0;
3671 int i;
3673 if (vlen > UIO_MAXIOV) {
3674 vlen = UIO_MAXIOV;
3677 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3678 if (!mmsgp) {
3679 return -TARGET_EFAULT;
3682 for (i = 0; i < vlen; i++) {
3683 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3684 if (is_error(ret)) {
3685 break;
3687 mmsgp[i].msg_len = tswap32(ret);
3688 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3689 if (flags & MSG_WAITFORONE) {
3690 flags |= MSG_DONTWAIT;
3694 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3696 /* Return number of datagrams sent if we sent any at all;
3697 * otherwise return the error.
3699 if (i) {
3700 return i;
3702 return ret;
3705 /* do_accept4() Must return target values and target errnos. */
3706 static abi_long do_accept4(int fd, abi_ulong target_addr,
3707 abi_ulong target_addrlen_addr, int flags)
3709 socklen_t addrlen;
3710 void *addr;
3711 abi_long ret;
3712 int host_flags;
3714 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3716 if (target_addr == 0) {
3717 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3720 /* linux returns EINVAL if addrlen pointer is invalid */
3721 if (get_user_u32(addrlen, target_addrlen_addr))
3722 return -TARGET_EINVAL;
3724 if ((int)addrlen < 0) {
3725 return -TARGET_EINVAL;
3728 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3729 return -TARGET_EINVAL;
3731 addr = alloca(addrlen);
3733 ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
3734 if (!is_error(ret)) {
3735 host_to_target_sockaddr(target_addr, addr, addrlen);
3736 if (put_user_u32(addrlen, target_addrlen_addr))
3737 ret = -TARGET_EFAULT;
3739 return ret;
3742 /* do_getpeername() Must return target values and target errnos. */
3743 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3744 abi_ulong target_addrlen_addr)
3746 socklen_t addrlen;
3747 void *addr;
3748 abi_long ret;
3750 if (get_user_u32(addrlen, target_addrlen_addr))
3751 return -TARGET_EFAULT;
3753 if ((int)addrlen < 0) {
3754 return -TARGET_EINVAL;
3757 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3758 return -TARGET_EFAULT;
3760 addr = alloca(addrlen);
3762 ret = get_errno(getpeername(fd, addr, &addrlen));
3763 if (!is_error(ret)) {
3764 host_to_target_sockaddr(target_addr, addr, addrlen);
3765 if (put_user_u32(addrlen, target_addrlen_addr))
3766 ret = -TARGET_EFAULT;
3768 return ret;
3771 /* do_getsockname() Must return target values and target errnos. */
3772 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3773 abi_ulong target_addrlen_addr)
3775 socklen_t addrlen;
3776 void *addr;
3777 abi_long ret;
3779 if (get_user_u32(addrlen, target_addrlen_addr))
3780 return -TARGET_EFAULT;
3782 if ((int)addrlen < 0) {
3783 return -TARGET_EINVAL;
3786 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3787 return -TARGET_EFAULT;
3789 addr = alloca(addrlen);
3791 ret = get_errno(getsockname(fd, addr, &addrlen));
3792 if (!is_error(ret)) {
3793 host_to_target_sockaddr(target_addr, addr, addrlen);
3794 if (put_user_u32(addrlen, target_addrlen_addr))
3795 ret = -TARGET_EFAULT;
3797 return ret;
3800 /* do_socketpair() Must return target values and target errnos. */
3801 static abi_long do_socketpair(int domain, int type, int protocol,
3802 abi_ulong target_tab_addr)
3804 int tab[2];
3805 abi_long ret;
3807 target_to_host_sock_type(&type);
3809 ret = get_errno(socketpair(domain, type, protocol, tab));
3810 if (!is_error(ret)) {
3811 if (put_user_s32(tab[0], target_tab_addr)
3812 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3813 ret = -TARGET_EFAULT;
3815 return ret;
3818 /* do_sendto() Must return target values and target errnos. */
3819 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3820 abi_ulong target_addr, socklen_t addrlen)
3822 void *addr;
3823 void *host_msg;
3824 void *copy_msg = NULL;
3825 abi_long ret;
3827 if ((int)addrlen < 0) {
3828 return -TARGET_EINVAL;
3831 host_msg = lock_user(VERIFY_READ, msg, len, 1);
3832 if (!host_msg)
3833 return -TARGET_EFAULT;
3834 if (fd_trans_target_to_host_data(fd)) {
3835 copy_msg = host_msg;
3836 host_msg = g_malloc(len);
3837 memcpy(host_msg, copy_msg, len);
3838 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3839 if (ret < 0) {
3840 goto fail;
3843 if (target_addr) {
3844 addr = alloca(addrlen+1);
3845 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3846 if (ret) {
3847 goto fail;
3849 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3850 } else {
3851 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3853 fail:
3854 if (copy_msg) {
3855 g_free(host_msg);
3856 host_msg = copy_msg;
3858 unlock_user(host_msg, msg, 0);
3859 return ret;
3862 /* do_recvfrom() Must return target values and target errnos. */
3863 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3864 abi_ulong target_addr,
3865 abi_ulong target_addrlen)
3867 socklen_t addrlen;
3868 void *addr;
3869 void *host_msg;
3870 abi_long ret;
3872 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3873 if (!host_msg)
3874 return -TARGET_EFAULT;
3875 if (target_addr) {
3876 if (get_user_u32(addrlen, target_addrlen)) {
3877 ret = -TARGET_EFAULT;
3878 goto fail;
3880 if ((int)addrlen < 0) {
3881 ret = -TARGET_EINVAL;
3882 goto fail;
3884 addr = alloca(addrlen);
3885 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3886 addr, &addrlen));
3887 } else {
3888 addr = NULL; /* To keep compiler quiet. */
3889 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3891 if (!is_error(ret)) {
3892 if (fd_trans_host_to_target_data(fd)) {
3893 ret = fd_trans_host_to_target_data(fd)(host_msg, ret);
3895 if (target_addr) {
3896 host_to_target_sockaddr(target_addr, addr, addrlen);
3897 if (put_user_u32(addrlen, target_addrlen)) {
3898 ret = -TARGET_EFAULT;
3899 goto fail;
3902 unlock_user(host_msg, msg, len);
3903 } else {
3904 fail:
3905 unlock_user(host_msg, msg, 0);
3907 return ret;
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos.
 *
 * Demultiplexes the legacy socketcall(2) interface: reads nargs[num]
 * abi_longs from guest memory at @vptr and forwards to the matching
 * do_* helper.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        gemu_log("Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
4003 #define N_SHM_REGIONS 32
4005 static struct shm_region {
4006 abi_ulong start;
4007 abi_ulong size;
4008 bool in_use;
4009 } shm_regions[N_SHM_REGIONS];
4011 #ifndef TARGET_SEMID64_DS
4012 /* asm-generic version of this struct */
4013 struct target_semid64_ds
4015 struct target_ipc_perm sem_perm;
4016 abi_ulong sem_otime;
4017 #if TARGET_ABI_BITS == 32
4018 abi_ulong __unused1;
4019 #endif
4020 abi_ulong sem_ctime;
4021 #if TARGET_ABI_BITS == 32
4022 abi_ulong __unused2;
4023 #endif
4024 abi_ulong sem_nsems;
4025 abi_ulong __unused3;
4026 abi_ulong __unused4;
4028 #endif
4030 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
4031 abi_ulong target_addr)
4033 struct target_ipc_perm *target_ip;
4034 struct target_semid64_ds *target_sd;
4036 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4037 return -TARGET_EFAULT;
4038 target_ip = &(target_sd->sem_perm);
4039 host_ip->__key = tswap32(target_ip->__key);
4040 host_ip->uid = tswap32(target_ip->uid);
4041 host_ip->gid = tswap32(target_ip->gid);
4042 host_ip->cuid = tswap32(target_ip->cuid);
4043 host_ip->cgid = tswap32(target_ip->cgid);
4044 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4045 host_ip->mode = tswap32(target_ip->mode);
4046 #else
4047 host_ip->mode = tswap16(target_ip->mode);
4048 #endif
4049 #if defined(TARGET_PPC)
4050 host_ip->__seq = tswap32(target_ip->__seq);
4051 #else
4052 host_ip->__seq = tswap16(target_ip->__seq);
4053 #endif
4054 unlock_user_struct(target_sd, target_addr, 0);
4055 return 0;
4058 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
4059 struct ipc_perm *host_ip)
4061 struct target_ipc_perm *target_ip;
4062 struct target_semid64_ds *target_sd;
4064 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4065 return -TARGET_EFAULT;
4066 target_ip = &(target_sd->sem_perm);
4067 target_ip->__key = tswap32(host_ip->__key);
4068 target_ip->uid = tswap32(host_ip->uid);
4069 target_ip->gid = tswap32(host_ip->gid);
4070 target_ip->cuid = tswap32(host_ip->cuid);
4071 target_ip->cgid = tswap32(host_ip->cgid);
4072 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4073 target_ip->mode = tswap32(host_ip->mode);
4074 #else
4075 target_ip->mode = tswap16(host_ip->mode);
4076 #endif
4077 #if defined(TARGET_PPC)
4078 target_ip->__seq = tswap32(host_ip->__seq);
4079 #else
4080 target_ip->__seq = tswap16(host_ip->__seq);
4081 #endif
4082 unlock_user_struct(target_sd, target_addr, 1);
4083 return 0;
4086 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
4087 abi_ulong target_addr)
4089 struct target_semid64_ds *target_sd;
4091 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4092 return -TARGET_EFAULT;
4093 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
4094 return -TARGET_EFAULT;
4095 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
4096 host_sd->sem_otime = tswapal(target_sd->sem_otime);
4097 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
4098 unlock_user_struct(target_sd, target_addr, 0);
4099 return 0;
4102 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
4103 struct semid_ds *host_sd)
4105 struct target_semid64_ds *target_sd;
4107 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4108 return -TARGET_EFAULT;
4109 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
4110 return -TARGET_EFAULT;
4111 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
4112 target_sd->sem_otime = tswapal(host_sd->sem_otime);
4113 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
4114 unlock_user_struct(target_sd, target_addr, 1);
4115 return 0;
/* Guest-layout mirror of the kernel's struct seminfo (semctl IPC_INFO). */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
4131 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
4132 struct seminfo *host_seminfo)
4134 struct target_seminfo *target_seminfo;
4135 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
4136 return -TARGET_EFAULT;
4137 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
4138 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
4139 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
4140 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
4141 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
4142 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
4143 __put_user(host_seminfo->semume, &target_seminfo->semume);
4144 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
4145 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
4146 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
4147 unlock_user_struct(target_seminfo, target_addr, 1);
4148 return 0;
4151 union semun {
4152 int val;
4153 struct semid_ds *buf;
4154 unsigned short *array;
4155 struct seminfo *__buf;
4158 union target_semun {
4159 int val;
4160 abi_ulong buf;
4161 abi_ulong array;
4162 abi_ulong __buf;
4165 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
4166 abi_ulong target_addr)
4168 int nsems;
4169 unsigned short *array;
4170 union semun semun;
4171 struct semid_ds semid_ds;
4172 int i, ret;
4174 semun.buf = &semid_ds;
4176 ret = semctl(semid, 0, IPC_STAT, semun);
4177 if (ret == -1)
4178 return get_errno(ret);
4180 nsems = semid_ds.sem_nsems;
4182 *host_array = g_try_new(unsigned short, nsems);
4183 if (!*host_array) {
4184 return -TARGET_ENOMEM;
4186 array = lock_user(VERIFY_READ, target_addr,
4187 nsems*sizeof(unsigned short), 1);
4188 if (!array) {
4189 g_free(*host_array);
4190 return -TARGET_EFAULT;
4193 for(i=0; i<nsems; i++) {
4194 __get_user((*host_array)[i], &array[i]);
4196 unlock_user(array, target_addr, 0);
4198 return 0;
4201 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4202 unsigned short **host_array)
4204 int nsems;
4205 unsigned short *array;
4206 union semun semun;
4207 struct semid_ds semid_ds;
4208 int i, ret;
4210 semun.buf = &semid_ds;
4212 ret = semctl(semid, 0, IPC_STAT, semun);
4213 if (ret == -1)
4214 return get_errno(ret);
4216 nsems = semid_ds.sem_nsems;
4218 array = lock_user(VERIFY_WRITE, target_addr,
4219 nsems*sizeof(unsigned short), 0);
4220 if (!array)
4221 return -TARGET_EFAULT;
4223 for(i=0; i<nsems; i++) {
4224 __put_user((*host_array)[i], &array[i]);
4226 g_free(*host_array);
4227 unlock_user(array, target_addr, 1);
4229 return 0;
4232 static inline abi_long do_semctl(int semid, int semnum, int cmd,
4233 abi_ulong target_arg)
4235 union target_semun target_su = { .buf = target_arg };
4236 union semun arg;
4237 struct semid_ds dsarg;
4238 unsigned short *array = NULL;
4239 struct seminfo seminfo;
4240 abi_long ret = -TARGET_EINVAL;
4241 abi_long err;
4242 cmd &= 0xff;
4244 switch( cmd ) {
4245 case GETVAL:
4246 case SETVAL:
4247 /* In 64 bit cross-endian situations, we will erroneously pick up
4248 * the wrong half of the union for the "val" element. To rectify
4249 * this, the entire 8-byte structure is byteswapped, followed by
4250 * a swap of the 4 byte val field. In other cases, the data is
4251 * already in proper host byte order. */
4252 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4253 target_su.buf = tswapal(target_su.buf);
4254 arg.val = tswap32(target_su.val);
4255 } else {
4256 arg.val = target_su.val;
4258 ret = get_errno(semctl(semid, semnum, cmd, arg));
4259 break;
4260 case GETALL:
4261 case SETALL:
4262 err = target_to_host_semarray(semid, &array, target_su.array);
4263 if (err)
4264 return err;
4265 arg.array = array;
4266 ret = get_errno(semctl(semid, semnum, cmd, arg));
4267 err = host_to_target_semarray(semid, target_su.array, &array);
4268 if (err)
4269 return err;
4270 break;
4271 case IPC_STAT:
4272 case IPC_SET:
4273 case SEM_STAT:
4274 err = target_to_host_semid_ds(&dsarg, target_su.buf);
4275 if (err)
4276 return err;
4277 arg.buf = &dsarg;
4278 ret = get_errno(semctl(semid, semnum, cmd, arg));
4279 err = host_to_target_semid_ds(target_su.buf, &dsarg);
4280 if (err)
4281 return err;
4282 break;
4283 case IPC_INFO:
4284 case SEM_INFO:
4285 arg.__buf = &seminfo;
4286 ret = get_errno(semctl(semid, semnum, cmd, arg));
4287 err = host_to_target_seminfo(target_su.__buf, &seminfo);
4288 if (err)
4289 return err;
4290 break;
4291 case IPC_RMID:
4292 case GETPID:
4293 case GETNCNT:
4294 case GETZCNT:
4295 ret = get_errno(semctl(semid, semnum, cmd, NULL));
4296 break;
4299 return ret;
/* Guest-layout mirror of struct sembuf (semop operation descriptor). */
struct target_sembuf {
    unsigned short sem_num;  /* semaphore index in set */
    short sem_op;            /* operation: +n, -n, or 0 (wait-for-zero) */
    short sem_flg;           /* IPC_NOWAIT / SEM_UNDO */
};
4308 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4309 abi_ulong target_addr,
4310 unsigned nsops)
4312 struct target_sembuf *target_sembuf;
4313 int i;
4315 target_sembuf = lock_user(VERIFY_READ, target_addr,
4316 nsops*sizeof(struct target_sembuf), 1);
4317 if (!target_sembuf)
4318 return -TARGET_EFAULT;
4320 for(i=0; i<nsops; i++) {
4321 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4322 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4323 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4326 unlock_user(target_sembuf, target_addr, 0);
4328 return 0;
4331 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
4333 struct sembuf sops[nsops];
4335 if (target_to_host_sembuf(sops, ptr, nsops))
4336 return -TARGET_EFAULT;
4338 return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
4341 struct target_msqid_ds
4343 struct target_ipc_perm msg_perm;
4344 abi_ulong msg_stime;
4345 #if TARGET_ABI_BITS == 32
4346 abi_ulong __unused1;
4347 #endif
4348 abi_ulong msg_rtime;
4349 #if TARGET_ABI_BITS == 32
4350 abi_ulong __unused2;
4351 #endif
4352 abi_ulong msg_ctime;
4353 #if TARGET_ABI_BITS == 32
4354 abi_ulong __unused3;
4355 #endif
4356 abi_ulong __msg_cbytes;
4357 abi_ulong msg_qnum;
4358 abi_ulong msg_qbytes;
4359 abi_ulong msg_lspid;
4360 abi_ulong msg_lrpid;
4361 abi_ulong __unused4;
4362 abi_ulong __unused5;
4365 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4366 abi_ulong target_addr)
4368 struct target_msqid_ds *target_md;
4370 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4371 return -TARGET_EFAULT;
4372 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4373 return -TARGET_EFAULT;
4374 host_md->msg_stime = tswapal(target_md->msg_stime);
4375 host_md->msg_rtime = tswapal(target_md->msg_rtime);
4376 host_md->msg_ctime = tswapal(target_md->msg_ctime);
4377 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4378 host_md->msg_qnum = tswapal(target_md->msg_qnum);
4379 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4380 host_md->msg_lspid = tswapal(target_md->msg_lspid);
4381 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4382 unlock_user_struct(target_md, target_addr, 0);
4383 return 0;
4386 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4387 struct msqid_ds *host_md)
4389 struct target_msqid_ds *target_md;
4391 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4392 return -TARGET_EFAULT;
4393 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4394 return -TARGET_EFAULT;
4395 target_md->msg_stime = tswapal(host_md->msg_stime);
4396 target_md->msg_rtime = tswapal(host_md->msg_rtime);
4397 target_md->msg_ctime = tswapal(host_md->msg_ctime);
4398 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4399 target_md->msg_qnum = tswapal(host_md->msg_qnum);
4400 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4401 target_md->msg_lspid = tswapal(host_md->msg_lspid);
4402 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4403 unlock_user_struct(target_md, target_addr, 1);
4404 return 0;
/* Guest-layout mirror of struct msginfo (msgctl IPC_INFO/MSG_INFO). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4418 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4419 struct msginfo *host_msginfo)
4421 struct target_msginfo *target_msginfo;
4422 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4423 return -TARGET_EFAULT;
4424 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4425 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4426 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4427 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4428 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4429 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4430 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4431 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4432 unlock_user_struct(target_msginfo, target_addr, 1);
4433 return 0;
/* Emulate msgctl(2) for the guest: convert the msqid_ds / msginfo
 * payload between guest and host layouts around the host call.
 * Returns the host result or a target errno.
 */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    /* Strip version bits (e.g. TARGET_IPC_64) from the command. */
    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        /* IPC_SET needs the guest struct converted in; STAT commands
         * need the result converted back out -- both paths run through
         * the same in/out conversion pair.
         */
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* Kernel returns a struct msginfo through the msqid_ds arg. */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
/* Guest-ABI layout of struct msgbuf: an abi_long mtype followed by a
 * variable-length mtext payload (the [1] is the pre-C99 flexible-array
 * idiom).
 */
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};
/* Emulate msgsnd(2): copy the guest msgbuf at msgp into a host-layout
 * buffer (byte-swapping mtype) and hand it to the host syscall.
 * Returns the host result or a target errno.
 */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* sizeof(long) covers the host mtype field preceding mtext. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
/* Emulate msgrcv(2): receive into a host-layout buffer, then copy the
 * mtext bytes and the byte-swapped mtype back to the guest msgbuf at
 * msgp.  Returns the received length or a target errno.
 */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    /* sizeof(long) covers the host mtype field preceding mtext. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));

    if (ret > 0) {
        /* ret is the number of mtext bytes received.
         * NOTE(review): the guest mtext offset is computed as
         * sizeof(abi_ulong), i.e. it assumes mtype occupies exactly one
         * abi_ulong in target_msgbuf -- confirm against the abi_long
         * mtype declared in struct target_msgbuf.
         */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
4543 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4544 abi_ulong target_addr)
4546 struct target_shmid_ds *target_sd;
4548 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4549 return -TARGET_EFAULT;
4550 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4551 return -TARGET_EFAULT;
4552 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4553 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4554 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4555 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4556 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4557 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4558 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4559 unlock_user_struct(target_sd, target_addr, 0);
4560 return 0;
4563 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4564 struct shmid_ds *host_sd)
4566 struct target_shmid_ds *target_sd;
4568 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4569 return -TARGET_EFAULT;
4570 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4571 return -TARGET_EFAULT;
4572 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4573 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4574 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4575 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4576 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4577 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4578 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4579 unlock_user_struct(target_sd, target_addr, 1);
4580 return 0;
/* Guest-ABI layout of the kernel's struct shminfo, as filled in by
 * shmctl(IPC_INFO).
 */
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};
4591 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4592 struct shminfo *host_shminfo)
4594 struct target_shminfo *target_shminfo;
4595 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4596 return -TARGET_EFAULT;
4597 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4598 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4599 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4600 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4601 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4602 unlock_user_struct(target_shminfo, target_addr, 1);
4603 return 0;
/* Guest-ABI layout of the kernel's struct shm_info, as filled in by
 * shmctl(SHM_INFO).
 */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};
4615 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4616 struct shm_info *host_shm_info)
4618 struct target_shm_info *target_shm_info;
4619 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4620 return -TARGET_EFAULT;
4621 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4622 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4623 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4624 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4625 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4626 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4627 unlock_user_struct(target_shm_info, target_addr, 1);
4628 return 0;
/* Emulate shmctl(2), converting the shmid_ds / shminfo / shm_info
 * payload between guest and host layouts around the host call.
 * Returns the host result or a target errno.
 */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    /* Strip version bits (e.g. TARGET_IPC_64) from the command. */
    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        /* Convert in for IPC_SET, and back out for the STAT commands. */
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* Kernel returns a struct shminfo through the shmid_ds arg. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        /* Kernel returns a struct shm_info through the shmid_ds arg. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
#endif
/* Emulate shmat(2): attach the segment at a (possibly self-chosen)
 * guest address, update the guest page flags for the mapped range, and
 * record the region in shm_regions[] so do_shmdt() can later undo it.
 * Takes mmap_lock around the attach and bookkeeping.
 */
static inline abi_ulong do_shmat(CPUArchState *cpu_env,
                                 int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;
    abi_ulong shmlba;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    /* Enforce SHMLBA alignment the way the kernel does: round down
     * with SHM_RND, otherwise reject an unaligned address.
     */
    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }

    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        /* No address requested: pick a free guest VA range ourselves
         * and force the host mapping there with SHM_REMAP.
         */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* Remember the region so do_shmdt() can clear the page flags. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}
4756 static inline abi_long do_shmdt(abi_ulong shmaddr)
4758 int i;
4760 for (i = 0; i < N_SHM_REGIONS; ++i) {
4761 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4762 shm_regions[i].in_use = false;
4763 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4764 break;
4768 return get_errno(shmdt(g2h(shmaddr)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    /* The multiplexed ipc(2) syscall encodes an ABI version in the
     * upper 16 bits of the call number.
     */
    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old-ABI msgrcv passes msgp and msgtyp indirectly via
                 * an ipc_kludge structure pointed to by ptr.
                 */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* The attach address is returned through *third. */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
/* kernel structure types definitions */

/* First expansion of syscall_types.h: build an enum of STRUCT_<name>
 * identifiers for every described kernel structure, terminated by
 * STRUCT_MAX.
 */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

/* Second expansion: emit a thunk type-description array for each
 * structure (specials are converted by hand elsewhere).
 */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL
typedef struct IOCTLEntry IOCTLEntry;

/* Signature of a custom ioctl handler used by IOCTL_SPECIAL entries. */
typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg);

/* One row of the ioctl translation table (populated from ioctls.h). */
struct IOCTLEntry {
    int target_cmd;             /* ioctl number in the guest ABI */
    unsigned int host_cmd;      /* corresponding host ioctl number */
    const char *name;
    int access;                 /* IOC_R / IOC_W / IOC_RW */
    do_ioctl_fn *do_ioctl;      /* optional custom handler, NULL for generic */
    const argtype arg_type[5];  /* thunk description of the argument */
};

#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

/* Size of the fixed temporary buffer used by do_ioctl() for argument
 * conversion; larger payloads allocate their own buffers.
 */
#define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* Bound the extent count so the outbufsz computation below
     * cannot overflow.
     */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
5004 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
5005 int fd, int cmd, abi_long arg)
5007 const argtype *arg_type = ie->arg_type;
5008 int target_size;
5009 void *argptr;
5010 int ret;
5011 struct ifconf *host_ifconf;
5012 uint32_t outbufsz;
5013 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
5014 int target_ifreq_size;
5015 int nb_ifreq;
5016 int free_buf = 0;
5017 int i;
5018 int target_ifc_len;
5019 abi_long target_ifc_buf;
5020 int host_ifc_len;
5021 char *host_ifc_buf;
5023 assert(arg_type[0] == TYPE_PTR);
5024 assert(ie->access == IOC_RW);
5026 arg_type++;
5027 target_size = thunk_type_size(arg_type, 0);
5029 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5030 if (!argptr)
5031 return -TARGET_EFAULT;
5032 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5033 unlock_user(argptr, arg, 0);
5035 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
5036 target_ifc_len = host_ifconf->ifc_len;
5037 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
5039 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
5040 nb_ifreq = target_ifc_len / target_ifreq_size;
5041 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
5043 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
5044 if (outbufsz > MAX_STRUCT_SIZE) {
5045 /* We can't fit all the extents into the fixed size buffer.
5046 * Allocate one that is large enough and use it instead.
5048 host_ifconf = malloc(outbufsz);
5049 if (!host_ifconf) {
5050 return -TARGET_ENOMEM;
5052 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
5053 free_buf = 1;
5055 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
5057 host_ifconf->ifc_len = host_ifc_len;
5058 host_ifconf->ifc_buf = host_ifc_buf;
5060 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
5061 if (!is_error(ret)) {
5062 /* convert host ifc_len to target ifc_len */
5064 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
5065 target_ifc_len = nb_ifreq * target_ifreq_size;
5066 host_ifconf->ifc_len = target_ifc_len;
5068 /* restore target ifc_buf */
5070 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
5072 /* copy struct ifconf to target user */
5074 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5075 if (!argptr)
5076 return -TARGET_EFAULT;
5077 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
5078 unlock_user(argptr, arg, target_size);
5080 /* copy ifreq[] to target user */
5082 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
5083 for (i = 0; i < nb_ifreq ; i++) {
5084 thunk_convert(argptr + i * target_ifreq_size,
5085 host_ifc_buf + i * sizeof(struct ifreq),
5086 ifreq_arg_type, THUNK_TARGET);
5088 unlock_user(argptr, target_ifc_buf, target_ifc_len);
5091 if (free_buf) {
5092 free(host_ifconf);
5095 return ret;
/* device-mapper DM_* ioctls: the argument is a struct dm_ioctl header
 * followed by a command-specific variable-length data area starting at
 * data_start.  Both the header and the data area need guest<->host
 * conversion, in both directions.
 */
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            int cmd, abi_long arg)
{
    void *argptr;
    struct dm_ioctl *host_dm;
    abi_long guest_data;
    uint32_t guest_data_size;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    abi_long ret;
    void *big_buf = NULL;
    char *host_data;

    /* Convert just the fixed dm_ioctl header first. */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);
    buf_temp = big_buf;
    host_dm = big_buf;

    guest_data = arg + host_dm->data_start;
    if ((guest_data - arg) < 0) {
        ret = -TARGET_EINVAL;
        goto out;
    }
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char*)host_dm + host_dm->data_start;

    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }

    /* Convert the command-specific input data area. */
    switch (ie->host_cmd) {
    case DM_REMOVE_ALL:
    case DM_LIST_DEVICES:
    case DM_DEV_CREATE:
    case DM_DEV_REMOVE:
    case DM_DEV_SUSPEND:
    case DM_DEV_STATUS:
    case DM_DEV_WAIT:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_TABLE_DEPS:
    case DM_LIST_VERSIONS:
        /* no input data */
        break;
    case DM_DEV_RENAME:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr, guest_data_size);
        break;
    case DM_TARGET_MSG:
        /* Leading 64-bit sector number needs byte-swapping. */
        memcpy(host_data, argptr, guest_data_size);
        *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
        break;
    case DM_TABLE_LOAD:
    {
        /* Walk the chain of dm_target_spec entries, converting each
         * header and copying its trailing parameter string.
         */
        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(arg_type, 0);
        int i;

        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;
            uint32_t next;
            int slen;

            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
            slen = strlen((char*)gspec + spec_size) + 1;
            next = spec->next;
            spec->next = sizeof(*spec) + slen;
            strcpy((char*)&spec[1], gspec + spec_size);
            gspec += next;
            cur_data += spec->next;
        }
        break;
    }
    default:
        ret = -TARGET_EINVAL;
        unlock_user(argptr, guest_data, 0);
        goto out;
    }
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        /* Convert the command-specific output data area back out. */
        switch (ie->host_cmd) {
        case DM_REMOVE_ALL:
        case DM_DEV_CREATE:
        case DM_DEV_REMOVE:
        case DM_DEV_RENAME:
        case DM_DEV_SUSPEND:
        case DM_DEV_STATUS:
        case DM_TABLE_LOAD:
        case DM_TABLE_CLEAR:
        case DM_TARGET_MSG:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
            break;
        case DM_LIST_DEVICES:
        {
            /* Linked list of dm_name_list records, chained via 'next'
             * offsets; recompute the offsets for the guest layout.
             */
            struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */

            while (1) {
                uint32_t next = nl->next;
                if (next) {
                    nl->next = nl_size + (strlen(nl->name) + 1);
                }
                if (remaining_data < nl->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
                cur_data += nl->next;
                remaining_data -= nl->next;
                if (!next) {
                    break;
                }
                nl = (void*)nl + next;
            }
            break;
        }
        case DM_DEV_WAIT:
        case DM_TABLE_STATUS:
        {
            /* Chain of dm_target_spec records plus parameter strings. */
            struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
            int spec_size = thunk_type_size(arg_type, 0);
            int i;

            for (i = 0; i < host_dm->target_count; i++) {
                uint32_t next = spec->next;
                int slen = strlen((char*)&spec[1]) + 1;
                spec->next = (cur_data - argptr) + spec_size + slen;
                if (guest_data_size < spec->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
                strcpy(cur_data + spec_size, (char*)&spec[1]);
                cur_data = argptr + spec->next;
                spec = (void*)host_dm + host_dm->data_start + next;
            }
            break;
        }
        case DM_TABLE_DEPS:
        {
            /* A 32-bit count followed by 64-bit device numbers. */
            void *hdata = (void*)host_dm + host_dm->data_start;
            int count = *(uint32_t*)hdata;
            uint64_t *hdev = hdata + 8;
            uint64_t *gdev = argptr + 8;
            int i;

            *(uint32_t*)argptr = tswap32(count);
            for (i = 0; i < count; i++) {
                *gdev = tswap64(*hdev);
                gdev++;
                hdev++;
            }
            break;
        }
        case DM_LIST_VERSIONS:
        {
            /* Chain of dm_target_versions records plus name strings. */
            struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
            int vers_size = thunk_type_size(arg_type, 0);

            while (1) {
                uint32_t next = vers->next;
                if (next) {
                    vers->next = vers_size + (strlen(vers->name) + 1);
                }
                if (remaining_data < vers->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
                strcpy(cur_data + vers_size, vers->name);
                cur_data += vers->next;
                remaining_data -= vers->next;
                if (!next) {
                    break;
                }
                vers = (void*)vers + next;
            }
            break;
        }
        default:
            unlock_user(argptr, guest_data, 0);
            ret = -TARGET_EINVAL;
            goto out;
        }
        unlock_user(argptr, guest_data, guest_data_size);

        /* Finally write the (possibly flag-updated) header back out. */
        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
        thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);
    }
out:
    g_free(big_buf);
    return ret;
}
/* BLKPG ioctl: struct blkpg_ioctl_arg carries an op code plus a guest
 * pointer to a struct blkpg_partition payload, which needs its own
 * conversion before the host call.
 */
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
5381 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5382 int fd, int cmd, abi_long arg)
5384 const argtype *arg_type = ie->arg_type;
5385 const StructEntry *se;
5386 const argtype *field_types;
5387 const int *dst_offsets, *src_offsets;
5388 int target_size;
5389 void *argptr;
5390 abi_ulong *target_rt_dev_ptr;
5391 unsigned long *host_rt_dev_ptr;
5392 abi_long ret;
5393 int i;
5395 assert(ie->access == IOC_W);
5396 assert(*arg_type == TYPE_PTR);
5397 arg_type++;
5398 assert(*arg_type == TYPE_STRUCT);
5399 target_size = thunk_type_size(arg_type, 0);
5400 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5401 if (!argptr) {
5402 return -TARGET_EFAULT;
5404 arg_type++;
5405 assert(*arg_type == (int)STRUCT_rtentry);
5406 se = struct_entries + *arg_type++;
5407 assert(se->convert[0] == NULL);
5408 /* convert struct here to be able to catch rt_dev string */
5409 field_types = se->field_types;
5410 dst_offsets = se->field_offsets[THUNK_HOST];
5411 src_offsets = se->field_offsets[THUNK_TARGET];
5412 for (i = 0; i < se->nb_fields; i++) {
5413 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5414 assert(*field_types == TYPE_PTRVOID);
5415 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5416 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5417 if (*target_rt_dev_ptr != 0) {
5418 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5419 tswapal(*target_rt_dev_ptr));
5420 if (!*host_rt_dev_ptr) {
5421 unlock_user(argptr, arg, 0);
5422 return -TARGET_EFAULT;
5424 } else {
5425 *host_rt_dev_ptr = 0;
5427 field_types++;
5428 continue;
5430 field_types = thunk_convert(buf_temp + dst_offsets[i],
5431 argptr + src_offsets[i],
5432 field_types, THUNK_HOST);
5434 unlock_user(argptr, arg, 0);
5436 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5437 if (*host_rt_dev_ptr != 0) {
5438 unlock_user((void *)*host_rt_dev_ptr,
5439 *target_rt_dev_ptr, 0);
5441 return ret;
5444 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5445 int fd, int cmd, abi_long arg)
5447 int sig = target_to_host_signal(arg);
5448 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
/* The ioctl translation table, generated from ioctls.h.  A zero
 * target_cmd marks the end of the list.
 */
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
#include "ioctls.h"
    { 0, 0, },
};
/* ??? Implement proper locking for ioctls.  */
/* do_ioctl() Must return target values and target errnos. */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* Linear search of the translation table for the guest command. */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
#if defined(DEBUG)
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
#endif
    /* Commands with special argument handling have their own handler. */
    if (ie->do_ioctl) {
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    }

    /* Generic handling driven by the thunk argument description. */
    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* Kernel fills in the struct; convert it out afterwards. */
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* Kernel only reads the struct; convert it in beforehand. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* Convert in, call, convert back out. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
/* Translation table for termios input-mode (c_iflag) bits between the
 * guest and host ABIs; terminated by an all-zero entry.
 */
static const bitmask_transtbl iflag_tbl[] = {
        { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
        { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
        { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
        { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
        { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
        { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
        { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
        { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
        { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
        { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
        { TARGET_IXON, TARGET_IXON, IXON, IXON },
        { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
        { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
        { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
        { 0, 0, 0, 0 }
};
/* Translation table for termios output-mode (c_oflag) bits.  Multi-bit
 * delay fields (NLDLY, CRDLY, TABDLY, ...) get one row per legal value:
 * the first two columns are the target mask/value, the last two the
 * host mask/value. */
5565 static const bitmask_transtbl oflag_tbl[] = {
5566 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5567 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5568 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5569 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5570 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5571 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5572 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5573 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5574 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5575 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5576 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5577 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5578 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5579 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5580 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5581 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5582 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5583 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5584 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5585 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5586 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5587 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5588 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5589 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5590 { 0, 0, 0, 0 }
/* Translation table for termios control-mode (c_cflag) bits.  Baud
 * rates are a multi-value field under the CBAUD mask (one row per
 * supported rate); CSIZE likewise gets one row per character size. */
5593 static const bitmask_transtbl cflag_tbl[] = {
5594 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5595 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5596 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5597 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5598 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5599 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5600 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5601 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5602 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5603 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5604 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5605 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5606 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5607 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5608 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5609 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5610 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5611 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5612 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5613 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5614 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5615 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5616 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5617 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5618 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5619 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5620 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5621 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5622 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5623 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5624 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5625 { 0, 0, 0, 0 }
/* Translation table for termios local-mode (c_lflag) bits between the
 * target ABI encoding and the host encoding. */
5628 static const bitmask_transtbl lflag_tbl[] = {
5629 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5630 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5631 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5632 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5633 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5634 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5635 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5636 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5637 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5638 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5639 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5640 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5641 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5642 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5643 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5644 { 0, 0, 0, 0 }
/* Convert a guest struct target_termios into a host struct
 * host_termios: each mode word is byte-swapped then mapped through the
 * corresponding translation table, and the control-character array is
 * copied entry by entry (host slots with no target equivalent stay 0). */
5647 static void target_to_host_termios (void *dst, const void *src)
5649 struct host_termios *host = dst;
5650 const struct target_termios *target = src;
5652 host->c_iflag =
5653 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5654 host->c_oflag =
5655 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5656 host->c_cflag =
5657 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5658 host->c_lflag =
5659 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5660 host->c_line = target->c_line;
     /* Zero first so any host c_cc slot we don't fill is well-defined. */
5662 memset(host->c_cc, 0, sizeof(host->c_cc));
5663 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5664 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5665 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5666 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5667 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5668 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5669 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5670 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5671 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5672 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5673 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5674 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5675 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5676 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5677 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5678 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5679 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
/* Inverse of target_to_host_termios(): convert a host struct
 * host_termios into the guest's struct target_termios, mapping each
 * mode word through the translation tables and byte-swapping into the
 * guest's endianness. */
5682 static void host_to_target_termios (void *dst, const void *src)
5684 struct target_termios *target = dst;
5685 const struct host_termios *host = src;
5687 target->c_iflag =
5688 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5689 target->c_oflag =
5690 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5691 target->c_cflag =
5692 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5693 target->c_lflag =
5694 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5695 target->c_line = host->c_line;
     /* Zero first so any target c_cc slot we don't fill is well-defined. */
5697 memset(target->c_cc, 0, sizeof(target->c_cc));
5698 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5699 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5700 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5701 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5702 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5703 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5704 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5705 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5706 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5707 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5708 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5709 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5710 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5711 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5712 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5713 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5714 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
/* Thunk descriptor registering the termios converters above so the
 * generic ioctl marshalling code can translate termios arguments in
 * both directions (index 0: host->target, index 1: target->host). */
5717 static const StructEntry struct_termios_def = {
5718 .convert = { host_to_target_termios, target_to_host_termios },
5719 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5720 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5723 static bitmask_transtbl mmap_flags_tbl[] = {
5724 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5725 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5726 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5727 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
5728 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
5729 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
5730 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
5731 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5732 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
5733 MAP_NORESERVE },
5734 { 0, 0, 0, 0 }
5737 #if defined(TARGET_I386)
5739 /* NOTE: there is really one LDT for all the threads */
5740 static uint8_t *ldt_table;
/* modify_ldt(func=0): copy up to bytecount bytes of the emulated LDT
 * into guest memory at ptr.  Returns the number of bytes copied (0 if
 * no LDT has been allocated yet) or -TARGET_EFAULT if the guest buffer
 * cannot be locked. */
5742 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5744 int size;
5745 void *p;
5747 if (!ldt_table)
5748 return 0;
5749 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5750 if (size > bytecount)
5751 size = bytecount;
5752 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5753 if (!p)
5754 return -TARGET_EFAULT;
5755 /* ??? Should this be byteswapped? */
5756 memcpy(p, ldt_table, size);
5757 unlock_user(p, ptr, size);
5758 return size;
5761 /* XXX: add locking support */
5762 static abi_long write_ldt(CPUX86State *env,
5763 abi_ulong ptr, unsigned long bytecount, int oldmode)
5765 struct target_modify_ldt_ldt_s ldt_info;
5766 struct target_modify_ldt_ldt_s *target_ldt_info;
5767 int seg_32bit, contents, read_exec_only, limit_in_pages;
5768 int seg_not_present, useable, lm;
5769 uint32_t *lp, entry_1, entry_2;
5771 if (bytecount != sizeof(ldt_info))
5772 return -TARGET_EINVAL;
5773 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5774 return -TARGET_EFAULT;
5775 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5776 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5777 ldt_info.limit = tswap32(target_ldt_info->limit);
5778 ldt_info.flags = tswap32(target_ldt_info->flags);
5779 unlock_user_struct(target_ldt_info, ptr, 0);
5781 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5782 return -TARGET_EINVAL;
5783 seg_32bit = ldt_info.flags & 1;
5784 contents = (ldt_info.flags >> 1) & 3;
5785 read_exec_only = (ldt_info.flags >> 3) & 1;
5786 limit_in_pages = (ldt_info.flags >> 4) & 1;
5787 seg_not_present = (ldt_info.flags >> 5) & 1;
5788 useable = (ldt_info.flags >> 6) & 1;
5789 #ifdef TARGET_ABI32
5790 lm = 0;
5791 #else
5792 lm = (ldt_info.flags >> 7) & 1;
5793 #endif
5794 if (contents == 3) {
5795 if (oldmode)
5796 return -TARGET_EINVAL;
5797 if (seg_not_present == 0)
5798 return -TARGET_EINVAL;
5800 /* allocate the LDT */
5801 if (!ldt_table) {
5802 env->ldt.base = target_mmap(0,
5803 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5804 PROT_READ|PROT_WRITE,
5805 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5806 if (env->ldt.base == -1)
5807 return -TARGET_ENOMEM;
5808 memset(g2h(env->ldt.base), 0,
5809 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5810 env->ldt.limit = 0xffff;
5811 ldt_table = g2h(env->ldt.base);
5814 /* NOTE: same code as Linux kernel */
5815 /* Allow LDTs to be cleared by the user. */
5816 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5817 if (oldmode ||
5818 (contents == 0 &&
5819 read_exec_only == 1 &&
5820 seg_32bit == 0 &&
5821 limit_in_pages == 0 &&
5822 seg_not_present == 1 &&
5823 useable == 0 )) {
5824 entry_1 = 0;
5825 entry_2 = 0;
5826 goto install;
5830 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5831 (ldt_info.limit & 0x0ffff);
5832 entry_2 = (ldt_info.base_addr & 0xff000000) |
5833 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5834 (ldt_info.limit & 0xf0000) |
5835 ((read_exec_only ^ 1) << 9) |
5836 (contents << 10) |
5837 ((seg_not_present ^ 1) << 15) |
5838 (seg_32bit << 22) |
5839 (limit_in_pages << 23) |
5840 (lm << 21) |
5841 0x7000;
5842 if (!oldmode)
5843 entry_2 |= (useable << 20);
5845 /* Install the new entry ... */
5846 install:
5847 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5848 lp[0] = tswap32(entry_1);
5849 lp[1] = tswap32(entry_2);
5850 return 0;
5853 /* specific and weird i386 syscalls */
5854 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5855 unsigned long bytecount)
5857 abi_long ret;
5859 switch (func) {
5860 case 0:
5861 ret = read_ldt(ptr, bytecount);
5862 break;
5863 case 1:
5864 ret = write_ldt(env, ptr, bytecount, 1);
5865 break;
5866 case 0x11:
5867 ret = write_ldt(env, ptr, bytecount, 0);
5868 break;
5869 default:
5870 ret = -TARGET_ENOSYS;
5871 break;
5873 return ret;
5876 #if defined(TARGET_I386) && defined(TARGET_ABI32)
/* set_thread_area(2): install a TLS descriptor into the emulated GDT.
 * An entry_number of -1 asks us to pick the first free slot in the
 * TLS range and report the chosen index back to the guest.  Largely
 * mirrors write_ldt() above, but targets the GDT TLS slots. */
5877 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5879 uint64_t *gdt_table = g2h(env->gdt.base);
5880 struct target_modify_ldt_ldt_s ldt_info;
5881 struct target_modify_ldt_ldt_s *target_ldt_info;
5882 int seg_32bit, contents, read_exec_only, limit_in_pages;
5883 int seg_not_present, useable, lm;
5884 uint32_t *lp, entry_1, entry_2;
5885 int i;
5887 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5888 if (!target_ldt_info)
5889 return -TARGET_EFAULT;
5890 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5891 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5892 ldt_info.limit = tswap32(target_ldt_info->limit);
5893 ldt_info.flags = tswap32(target_ldt_info->flags);
     /* -1 means "allocate a free TLS slot for me and tell me which". */
5894 if (ldt_info.entry_number == -1) {
5895 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5896 if (gdt_table[i] == 0) {
5897 ldt_info.entry_number = i;
5898 target_ldt_info->entry_number = tswap32(i);
5899 break;
5903 unlock_user_struct(target_ldt_info, ptr, 1);
5905 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5906 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5907 return -TARGET_EINVAL;
     /* Unpack the user_desc flags word (same layout as the kernel's). */
5908 seg_32bit = ldt_info.flags & 1;
5909 contents = (ldt_info.flags >> 1) & 3;
5910 read_exec_only = (ldt_info.flags >> 3) & 1;
5911 limit_in_pages = (ldt_info.flags >> 4) & 1;
5912 seg_not_present = (ldt_info.flags >> 5) & 1;
5913 useable = (ldt_info.flags >> 6) & 1;
5914 #ifdef TARGET_ABI32
5915 lm = 0;
5916 #else
5917 lm = (ldt_info.flags >> 7) & 1;
5918 #endif
5920 if (contents == 3) {
5921 if (seg_not_present == 0)
5922 return -TARGET_EINVAL;
5925 /* NOTE: same code as Linux kernel */
5926 /* Allow LDTs to be cleared by the user. */
5927 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5928 if ((contents == 0 &&
5929 read_exec_only == 1 &&
5930 seg_32bit == 0 &&
5931 limit_in_pages == 0 &&
5932 seg_not_present == 1 &&
5933 useable == 0 )) {
5934 entry_1 = 0;
5935 entry_2 = 0;
5936 goto install;
     /* Re-encode into the low/high dwords of an x86 descriptor. */
5940 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5941 (ldt_info.limit & 0x0ffff);
5942 entry_2 = (ldt_info.base_addr & 0xff000000) |
5943 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5944 (ldt_info.limit & 0xf0000) |
5945 ((read_exec_only ^ 1) << 9) |
5946 (contents << 10) |
5947 ((seg_not_present ^ 1) << 15) |
5948 (seg_32bit << 22) |
5949 (limit_in_pages << 23) |
5950 (useable << 20) |
5951 (lm << 21) |
5952 0x7000;
5954 /* Install the new entry ... */
5955 install:
5956 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5957 lp[0] = tswap32(entry_1);
5958 lp[1] = tswap32(entry_2);
5959 return 0;
/* get_thread_area(2): read back the TLS descriptor at the requested
 * GDT slot and decode it into the guest's struct user_desc at ptr.
 * Exact inverse of the encoding performed by do_set_thread_area(). */
5962 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5964 struct target_modify_ldt_ldt_s *target_ldt_info;
5965 uint64_t *gdt_table = g2h(env->gdt.base);
5966 uint32_t base_addr, limit, flags;
5967 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5968 int seg_not_present, useable, lm;
5969 uint32_t *lp, entry_1, entry_2;
5971 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5972 if (!target_ldt_info)
5973 return -TARGET_EFAULT;
5974 idx = tswap32(target_ldt_info->entry_number);
5975 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5976 idx > TARGET_GDT_ENTRY_TLS_MAX) {
5977 unlock_user_struct(target_ldt_info, ptr, 1);
5978 return -TARGET_EINVAL;
5980 lp = (uint32_t *)(gdt_table + idx);
5981 entry_1 = tswap32(lp[0]);
5982 entry_2 = tswap32(lp[1]);
     /* Decode the descriptor bits back into user_desc flag fields. */
5984 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5985 contents = (entry_2 >> 10) & 3;
5986 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5987 seg_32bit = (entry_2 >> 22) & 1;
5988 limit_in_pages = (entry_2 >> 23) & 1;
5989 useable = (entry_2 >> 20) & 1;
5990 #ifdef TARGET_ABI32
5991 lm = 0;
5992 #else
5993 lm = (entry_2 >> 21) & 1;
5994 #endif
5995 flags = (seg_32bit << 0) | (contents << 1) |
5996 (read_exec_only << 3) | (limit_in_pages << 4) |
5997 (seg_not_present << 5) | (useable << 6) | (lm << 7);
5998 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
5999 base_addr = (entry_1 >> 16) |
6000 (entry_2 & 0xff000000) |
6001 ((entry_2 & 0xff) << 16);
6002 target_ldt_info->base_addr = tswapal(base_addr);
6003 target_ldt_info->limit = tswap32(limit);
6004 target_ldt_info->flags = tswap32(flags);
6005 unlock_user_struct(target_ldt_info, ptr, 1);
6006 return 0;
6008 #endif /* TARGET_I386 && TARGET_ABI32 */
6010 #ifndef TARGET_ABI32
/* x86-64 arch_prctl(2): set or query the FS/GS segment base for the
 * emulated CPU.  SET variants store addr as the new base; GET variants
 * write the current base to guest memory at addr.  Returns 0,
 * -TARGET_EFAULT on a bad addr, or -TARGET_EINVAL for unknown codes. */
6011 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6013 abi_long ret = 0;
6014 abi_ulong val;
6015 int idx;
6017 switch(code) {
6018 case TARGET_ARCH_SET_GS:
6019 case TARGET_ARCH_SET_FS:
6020 if (code == TARGET_ARCH_SET_GS)
6021 idx = R_GS;
6022 else
6023 idx = R_FS;
     /* load selector 0 first, then overwrite the base directly */
6024 cpu_x86_load_seg(env, idx, 0);
6025 env->segs[idx].base = addr;
6026 break;
6027 case TARGET_ARCH_GET_GS:
6028 case TARGET_ARCH_GET_FS:
6029 if (code == TARGET_ARCH_GET_GS)
6030 idx = R_GS;
6031 else
6032 idx = R_FS;
6033 val = env->segs[idx].base;
6034 if (put_user(val, addr, abi_ulong))
6035 ret = -TARGET_EFAULT;
6036 break;
6037 default:
6038 ret = -TARGET_EINVAL;
6039 break;
6041 return ret;
6045 #endif /* defined(TARGET_I386) */
/* Host stack size for threads created via clone(CLONE_VM). */
6047 #define NEW_STACK_SIZE 0x40000
/* Serialises thread creation so parent-side TLS setup appears atomic
 * to the child (see clone_func / do_fork). */
6050 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Handshake data passed from do_fork() to the new thread's clone_func():
 * the child fills in tid and signals cond once it has initialised. */
6051 typedef struct {
6052 CPUArchState *env;
6053 pthread_mutex_t mutex;
6054 pthread_cond_t cond;
6055 pthread_t thread;
6056 uint32_t tid;
6057 abi_ulong child_tidptr;
6058 abi_ulong parent_tidptr;
6059 sigset_t sigmask;
6060 } new_thread_info;
/* Entry point of a CLONE_VM child thread.  Publishes the child's tid
 * (to guest memory if CLONE_CHILD_SETTID/CLONE_PARENT_SETTID asked for
 * it), restores the signal mask do_fork() saved, signals the parent
 * that initialisation is done, then enters the CPU emulation loop. */
6062 static void *clone_func(void *arg)
6064 new_thread_info *info = arg;
6065 CPUArchState *env;
6066 CPUState *cpu;
6067 TaskState *ts;
6069 rcu_register_thread();
6070 env = info->env;
6071 cpu = ENV_GET_CPU(env);
6072 thread_cpu = cpu;
6073 ts = (TaskState *)cpu->opaque;
6074 info->tid = gettid();
6075 cpu->host_tid = info->tid;
6076 task_settid(ts);
6077 if (info->child_tidptr)
6078 put_user_u32(info->tid, info->child_tidptr);
6079 if (info->parent_tidptr)
6080 put_user_u32(info->tid, info->parent_tidptr);
6081 /* Enable signals. */
6082 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6083 /* Signal to the parent that we're ready. */
6084 pthread_mutex_lock(&info->mutex);
6085 pthread_cond_broadcast(&info->cond);
6086 pthread_mutex_unlock(&info->mutex);
6087 /* Wait until the parent has finished initializing the tls state. */
6088 pthread_mutex_lock(&clone_lock);
6089 pthread_mutex_unlock(&clone_lock);
6090 cpu_loop(env);
6091 /* never exits */
6092 return NULL;
6095 /* do_fork() Must return host values and target errnos (unlike most
6096 do_*() functions). */
/* Implements the guest's clone()/fork()/vfork().  With CLONE_VM the
 * "child process" is really a host pthread sharing this address space;
 * without it we do a real host fork().  Returns the child tid/pid on
 * success, -1 or a target errno on failure. */
6097 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6098 abi_ulong parent_tidptr, target_ulong newtls,
6099 abi_ulong child_tidptr)
6101 CPUState *cpu = ENV_GET_CPU(env);
6102 int ret;
6103 TaskState *ts;
6104 CPUState *new_cpu;
6105 CPUArchState *new_env;
6106 sigset_t sigmask;
6108 flags &= ~CLONE_IGNORED_FLAGS;
6110 /* Emulate vfork() with fork() */
6111 if (flags & CLONE_VFORK)
6112 flags &= ~(CLONE_VFORK | CLONE_VM);
6114 if (flags & CLONE_VM) {
     /* Thread path: new host pthread sharing our address space. */
6115 TaskState *parent_ts = (TaskState *)cpu->opaque;
6116 new_thread_info info;
6117 pthread_attr_t attr;
6119 if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6120 (flags & CLONE_INVALID_THREAD_FLAGS)) {
6121 return -TARGET_EINVAL;
6124 ts = g_new0(TaskState, 1);
6125 init_task_state(ts);
6126 /* we create a new CPU instance. */
6127 new_env = cpu_copy(env);
6128 /* Init regs that differ from the parent. */
6129 cpu_clone_regs(new_env, newsp);
6130 new_cpu = ENV_GET_CPU(new_env);
6131 new_cpu->opaque = ts;
6132 ts->bprm = parent_ts->bprm;
6133 ts->info = parent_ts->info;
6134 ts->signal_mask = parent_ts->signal_mask;
6136 if (flags & CLONE_CHILD_CLEARTID) {
6137 ts->child_tidptr = child_tidptr;
6140 if (flags & CLONE_SETTLS) {
6141 cpu_set_tls (new_env, newtls);
6144 /* Grab a mutex so that thread setup appears atomic. */
6145 pthread_mutex_lock(&clone_lock);
6147 memset(&info, 0, sizeof(info));
6148 pthread_mutex_init(&info.mutex, NULL);
6149 pthread_mutex_lock(&info.mutex);
6150 pthread_cond_init(&info.cond, NULL);
6151 info.env = new_env;
6152 if (flags & CLONE_CHILD_SETTID) {
6153 info.child_tidptr = child_tidptr;
6155 if (flags & CLONE_PARENT_SETTID) {
6156 info.parent_tidptr = parent_tidptr;
     /* NOTE(review): the return values of the pthread_attr_* calls are
        each overwritten by the next; only pthread_create's is checked. */
6159 ret = pthread_attr_init(&attr);
6160 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6161 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6162 /* It is not safe to deliver signals until the child has finished
6163 initializing, so temporarily block all signals. */
6164 sigfillset(&sigmask);
6165 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6167 ret = pthread_create(&info.thread, &attr, clone_func, &info);
6168 /* TODO: Free new CPU state if thread creation failed. */
6170 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6171 pthread_attr_destroy(&attr);
6172 if (ret == 0) {
6173 /* Wait for the child to initialize. */
6174 pthread_cond_wait(&info.cond, &info.mutex);
6175 ret = info.tid;
6176 } else {
6177 ret = -1;
6179 pthread_mutex_unlock(&info.mutex);
6180 pthread_cond_destroy(&info.cond);
6181 pthread_mutex_destroy(&info.mutex);
6182 pthread_mutex_unlock(&clone_lock);
6183 } else {
6184 /* if no CLONE_VM, we consider it is a fork */
6185 if (flags & CLONE_INVALID_FORK_FLAGS) {
6186 return -TARGET_EINVAL;
6189 /* We can't support custom termination signals */
6190 if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6191 return -TARGET_EINVAL;
6194 if (block_signals()) {
6195 return -TARGET_ERESTARTSYS;
6198 fork_start();
6199 ret = fork();
6200 if (ret == 0) {
6201 /* Child Process. */
6202 rcu_after_fork();
6203 cpu_clone_regs(env, newsp);
6204 fork_end(1);
6205 /* There is a race condition here. The parent process could
6206 theoretically read the TID in the child process before the child
6207 tid is set. This would require using either ptrace
6208 (not implemented) or having *_tidptr to point at a shared memory
6209 mapping. We can't repeat the spinlock hack used above because
6210 the child process gets its own copy of the lock. */
6211 if (flags & CLONE_CHILD_SETTID)
6212 put_user_u32(gettid(), child_tidptr)
6213 if (flags & CLONE_PARENT_SETTID)
6214 put_user_u32(gettid(), parent_tidptr);
6215 ts = (TaskState *)cpu->opaque;
6216 if (flags & CLONE_SETTLS)
6217 cpu_set_tls (env, newtls);
6218 if (flags & CLONE_CHILD_CLEARTID)
6219 ts->child_tidptr = child_tidptr;
6220 } else {
6221 fork_end(0);
6224 return ret;
6227 /* warning : doesn't handle linux specific flags... */
/* Map a target fcntl(2) command constant onto the host's constant.
 * Lock commands are always mapped to the 64-bit host variants
 * (F_GETLK64 etc.) so struct flock64 can be used uniformly; unknown
 * commands yield -TARGET_EINVAL. */
6228 static int target_to_host_fcntl_cmd(int cmd)
6230 switch(cmd) {
6231 case TARGET_F_DUPFD:
6232 case TARGET_F_GETFD:
6233 case TARGET_F_SETFD:
6234 case TARGET_F_GETFL:
6235 case TARGET_F_SETFL:
6236 return cmd;
6237 case TARGET_F_GETLK:
6238 return F_GETLK64;
6239 case TARGET_F_SETLK:
6240 return F_SETLK64;
6241 case TARGET_F_SETLKW:
6242 return F_SETLKW64;
6243 case TARGET_F_GETOWN:
6244 return F_GETOWN;
6245 case TARGET_F_SETOWN:
6246 return F_SETOWN;
6247 case TARGET_F_GETSIG:
6248 return F_GETSIG;
6249 case TARGET_F_SETSIG:
6250 return F_SETSIG;
6251 #if TARGET_ABI_BITS == 32
6252 case TARGET_F_GETLK64:
6253 return F_GETLK64;
6254 case TARGET_F_SETLK64:
6255 return F_SETLK64;
6256 case TARGET_F_SETLKW64:
6257 return F_SETLKW64;
6258 #endif
6259 case TARGET_F_SETLEASE:
6260 return F_SETLEASE;
6261 case TARGET_F_GETLEASE:
6262 return F_GETLEASE;
6263 #ifdef F_DUPFD_CLOEXEC
6264 case TARGET_F_DUPFD_CLOEXEC:
6265 return F_DUPFD_CLOEXEC;
6266 #endif
6267 case TARGET_F_NOTIFY:
6268 return F_NOTIFY;
6269 #ifdef F_GETOWN_EX
6270 case TARGET_F_GETOWN_EX:
6271 return F_GETOWN_EX;
6272 #endif
6273 #ifdef F_SETOWN_EX
6274 case TARGET_F_SETOWN_EX:
6275 return F_SETOWN_EX;
6276 #endif
6277 #ifdef F_SETPIPE_SZ
6278 case TARGET_F_SETPIPE_SZ:
6279 return F_SETPIPE_SZ;
6280 case TARGET_F_GETPIPE_SZ:
6281 return F_GETPIPE_SZ;
6282 #endif
6283 default:
6284 return -TARGET_EINVAL;
     /* unreachable: the default case above already returned */
6286 return -TARGET_EINVAL;
/* Translation table for flock l_type values (F_RDLCK etc.).  The -1
 * masks mean "compare the whole value", since l_type is an enumeration
 * rather than independent bit flags. */
6289 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
6290 static const bitmask_transtbl flock_tbl[] = {
6291 TRANSTBL_CONVERT(F_RDLCK),
6292 TRANSTBL_CONVERT(F_WRLCK),
6293 TRANSTBL_CONVERT(F_UNLCK),
6294 TRANSTBL_CONVERT(F_EXLCK),
6295 TRANSTBL_CONVERT(F_SHLCK),
6296 { 0, 0, 0, 0 }
/* Read a guest struct target_flock (the 32/abi-sized layout) into a
 * host struct flock64, translating l_type through flock_tbl.
 * Returns 0 or -TARGET_EFAULT. */
6299 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6300 abi_ulong target_flock_addr)
6302 struct target_flock *target_fl;
6303 short l_type;
6305 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6306 return -TARGET_EFAULT;
6309 __get_user(l_type, &target_fl->l_type);
6310 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6311 __get_user(fl->l_whence, &target_fl->l_whence);
6312 __get_user(fl->l_start, &target_fl->l_start);
6313 __get_user(fl->l_len, &target_fl->l_len);
6314 __get_user(fl->l_pid, &target_fl->l_pid);
6315 unlock_user_struct(target_fl, target_flock_addr, 0);
6316 return 0;
/* Write a host struct flock64 back to the guest's struct target_flock,
 * translating l_type through flock_tbl.  Returns 0 or -TARGET_EFAULT. */
6319 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6320 const struct flock64 *fl)
6322 struct target_flock *target_fl;
6323 short l_type;
6325 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6326 return -TARGET_EFAULT;
6329 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6330 __put_user(l_type, &target_fl->l_type);
6331 __put_user(fl->l_whence, &target_fl->l_whence);
6332 __put_user(fl->l_start, &target_fl->l_start);
6333 __put_user(fl->l_len, &target_fl->l_len);
6334 __put_user(fl->l_pid, &target_fl->l_pid);
6335 unlock_user_struct(target_fl, target_flock_addr, 1);
6336 return 0;
/* Function-pointer types used by do_fcntl64 elsewhere in this file to
 * select the right flock64 marshalling pair at runtime. */
6339 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6340 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6342 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/* ARM EABI variant of the flock64 copy helpers: identical logic to the
 * generic ones below, but using struct target_eabi_flock64, whose
 * field alignment differs under the EABI. */
6343 static inline abi_long copy_from_user_eabi_flock64(struct flock64 *fl,
6344 abi_ulong target_flock_addr)
6346 struct target_eabi_flock64 *target_fl;
6347 short l_type;
6349 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6350 return -TARGET_EFAULT;
6353 __get_user(l_type, &target_fl->l_type);
6354 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6355 __get_user(fl->l_whence, &target_fl->l_whence);
6356 __get_user(fl->l_start, &target_fl->l_start);
6357 __get_user(fl->l_len, &target_fl->l_len);
6358 __get_user(fl->l_pid, &target_fl->l_pid);
6359 unlock_user_struct(target_fl, target_flock_addr, 0);
6360 return 0;
6363 static inline abi_long copy_to_user_eabi_flock64(abi_ulong target_flock_addr,
6364 const struct flock64 *fl)
6366 struct target_eabi_flock64 *target_fl;
6367 short l_type;
6369 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6370 return -TARGET_EFAULT;
6373 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6374 __put_user(l_type, &target_fl->l_type);
6375 __put_user(fl->l_whence, &target_fl->l_whence);
6376 __put_user(fl->l_start, &target_fl->l_start);
6377 __put_user(fl->l_len, &target_fl->l_len);
6378 __put_user(fl->l_pid, &target_fl->l_pid);
6379 unlock_user_struct(target_fl, target_flock_addr, 1);
6380 return 0;
6382 #endif
/* Read a guest struct target_flock64 into a host struct flock64,
 * translating l_type through flock_tbl.  Returns 0 or -TARGET_EFAULT. */
6384 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6385 abi_ulong target_flock_addr)
6387 struct target_flock64 *target_fl;
6388 short l_type;
6390 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6391 return -TARGET_EFAULT;
6394 __get_user(l_type, &target_fl->l_type);
6395 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6396 __get_user(fl->l_whence, &target_fl->l_whence);
6397 __get_user(fl->l_start, &target_fl->l_start);
6398 __get_user(fl->l_len, &target_fl->l_len);
6399 __get_user(fl->l_pid, &target_fl->l_pid);
6400 unlock_user_struct(target_fl, target_flock_addr, 0);
6401 return 0;
/* Write a host struct flock64 back to the guest's struct
 * target_flock64, translating l_type through flock_tbl.
 * Returns 0 or -TARGET_EFAULT. */
6404 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6405 const struct flock64 *fl)
6407 struct target_flock64 *target_fl;
6408 short l_type;
6410 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6411 return -TARGET_EFAULT;
6414 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6415 __put_user(l_type, &target_fl->l_type);
6416 __put_user(fl->l_whence, &target_fl->l_whence);
6417 __put_user(fl->l_start, &target_fl->l_start);
6418 __put_user(fl->l_len, &target_fl->l_len);
6419 __put_user(fl->l_pid, &target_fl->l_pid);
6420 unlock_user_struct(target_fl, target_flock_addr, 1);
6421 return 0;
/* Emulate fcntl(2) for the guest: translate the command and any
 * struct/flag argument to host form, perform the host fcntl, then
 * translate results back.  Lock commands go through the flock64 copy
 * helpers; flag commands go through fcntl_flags_tbl.  Returns the host
 * result or a target errno. */
6424 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6426 struct flock64 fl64;
6427 #ifdef F_GETOWN_EX
6428 struct f_owner_ex fox;
6429 struct target_f_owner_ex *target_fox;
6430 #endif
6431 abi_long ret;
6432 int host_cmd = target_to_host_fcntl_cmd(cmd);
6434 if (host_cmd == -TARGET_EINVAL)
6435 return host_cmd;
6437 switch(cmd) {
6438 case TARGET_F_GETLK:
6439 ret = copy_from_user_flock(&fl64, arg);
6440 if (ret) {
6441 return ret;
6443 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6444 if (ret == 0) {
6445 ret = copy_to_user_flock(arg, &fl64);
6447 break;
6449 case TARGET_F_SETLK:
6450 case TARGET_F_SETLKW:
6451 ret = copy_from_user_flock(&fl64, arg);
6452 if (ret) {
6453 return ret;
6455 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6456 break;
6458 case TARGET_F_GETLK64:
6459 ret = copy_from_user_flock64(&fl64, arg);
6460 if (ret) {
6461 return ret;
6463 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6464 if (ret == 0) {
6465 ret = copy_to_user_flock64(arg, &fl64);
6467 break;
6468 case TARGET_F_SETLK64:
6469 case TARGET_F_SETLKW64:
6470 ret = copy_from_user_flock64(&fl64, arg);
6471 if (ret) {
6472 return ret;
6474 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6475 break;
6477 case TARGET_F_GETFL:
     /* translate the returned status flags into target encoding */
6478 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6479 if (ret >= 0) {
6480 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6482 break;
6484 case TARGET_F_SETFL:
6485 ret = get_errno(safe_fcntl(fd, host_cmd,
6486 target_to_host_bitmask(arg,
6487 fcntl_flags_tbl)));
6488 break;
6490 #ifdef F_GETOWN_EX
6491 case TARGET_F_GETOWN_EX:
6492 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6493 if (ret >= 0) {
6494 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6495 return -TARGET_EFAULT;
6496 target_fox->type = tswap32(fox.type);
6497 target_fox->pid = tswap32(fox.pid);
6498 unlock_user_struct(target_fox, arg, 1);
6500 break;
6501 #endif
6503 #ifdef F_SETOWN_EX
6504 case TARGET_F_SETOWN_EX:
6505 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6506 return -TARGET_EFAULT;
6507 fox.type = tswap32(target_fox->type);
6508 fox.pid = tswap32(target_fox->pid);
6509 unlock_user_struct(target_fox, arg, 0);
6510 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6511 break;
6512 #endif
     /* Commands whose argument is a plain integer: pass through. */
6514 case TARGET_F_SETOWN:
6515 case TARGET_F_GETOWN:
6516 case TARGET_F_SETSIG:
6517 case TARGET_F_GETSIG:
6518 case TARGET_F_SETLEASE:
6519 case TARGET_F_GETLEASE:
6520 case TARGET_F_SETPIPE_SZ:
6521 case TARGET_F_GETPIPE_SZ:
6522 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6523 break;
6525 default:
6526 ret = get_errno(safe_fcntl(fd, cmd, arg));
6527 break;
6529 return ret;
6532 #ifdef USE_UID16
/* Map a full-range uid down to the legacy 16-bit ABI: any value that
 * does not fit in a uid16 is reported as the overflow uid 65534. */
static inline int high2lowuid(int uid)
{
    return uid > 65535 ? 65534 : uid;
}
/* Map a full-range gid down to the legacy 16-bit ABI: any value that
 * does not fit in a gid16 is reported as the overflow gid 65534. */
static inline int high2lowgid(int gid)
{
    return gid > 65535 ? 65534 : gid;
}
/* Widen a 16-bit uid from the guest ABI: the 16-bit sentinel -1
 * (i.e. 0xffff) must become the full-width -1; everything else is
 * passed through unchanged. */
static inline int low2highuid(int uid)
{
    return ((int16_t)uid == -1) ? -1 : uid;
}
/* Widen a 16-bit gid from the guest ABI: the 16-bit sentinel -1
 * (i.e. 0xffff) must become the full-width -1; everything else is
 * passed through unchanged. */
static inline int low2highgid(int gid)
{
    return ((int16_t)gid == -1) ? -1 : gid;
}
/* Byte-swap a uid/gid for the guest: 16-bit wide under USE_UID16. */
6565 static inline int tswapid(int id)
6567 return tswap16(id);
6570 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
#else /* !USE_UID16 */
/* With native 32-bit uid/gid syscalls there is nothing to narrow or
 * widen: all four conversions are identities. */
static inline int high2lowuid(int uid)
    return uid;
static inline int high2lowgid(int gid)
    return gid;
static inline int low2highuid(int uid)
    return uid;
static inline int low2highgid(int gid)
    return gid;
/* ids are full 32-bit values here; swap all four bytes. */
static inline int tswapid(int id)
    return tswap32(id);
/* Store a uid/gid into guest memory as a 32-bit quantity. */
#define put_user_id(x, gaddr) put_user_u32(x, gaddr)
#endif /* USE_UID16 */
/* We must do direct syscalls for setting UID/GID, because we want to
 * implement the Linux system call semantics of "change only for this thread",
 * not the libc/POSIX semantics of "change for all threads in process".
 * (See http://ewontfix.com/17/ for more details.)
 * We use the 32-bit version of the syscalls if present; if it is not
 * then either the host architecture supports 32-bit UIDs natively with
 * the standard syscall, or the 16-bit UID is the best we can do.
 */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

/* Raw syscall wrappers so the ID change applies to the calling thread
 * only (libc wrappers would broadcast it to every thread). */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
/* One-time initialisation of the syscall layer: register the thunk
 * structure descriptors, build the target->host errno mapping, and patch
 * the size field of each ioctl number from its argument thunk type. */
void syscall_init(void)
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        /* An all-ones size field is the "fill me in at runtime" marker. */
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            /* Splice the host-side argument size into the ioctl number. */
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                             (size << TARGET_IOC_SIZESHIFT);

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
#endif
        ie++;
#if TARGET_ABI_BITS == 32
/* Combine the two 32-bit registers of a split 64-bit syscall argument
 * into one 64-bit offset; which register holds the high half depends on
 * the target's endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABIs pass the offset in a single register; word1 is unused. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
    return word0;
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* truncate64 wrapper: on ABIs that require 64-bit register pairs to be
 * aligned, the pair starts one argument slot later, so shift the
 * arguments down before combining them into the 64-bit length. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
#endif
#ifdef TARGET_NR_ftruncate64
/* ftruncate64 wrapper: same register-pair alignment fixup as
 * target_truncate64, but operating on an fd instead of a path. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
#endif
/* Copy a struct timespec from guest memory into *host_ts, swapping
 * each field.  Returns 0 on success, -TARGET_EFAULT on a bad address. */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
        return -TARGET_EFAULT;
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* Read-only lock: 0 means nothing is copied back to the guest. */
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
/* Copy *host_ts out to a struct timespec in guest memory, swapping
 * each field.  Returns 0 on success, -TARGET_EFAULT on a bad address. */
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* Write lock: 1 copies the converted struct back to guest memory. */
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
6754 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6755 abi_ulong target_addr)
6757 struct target_itimerspec *target_itspec;
6759 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6760 return -TARGET_EFAULT;
6763 host_itspec->it_interval.tv_sec =
6764 tswapal(target_itspec->it_interval.tv_sec);
6765 host_itspec->it_interval.tv_nsec =
6766 tswapal(target_itspec->it_interval.tv_nsec);
6767 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6768 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6770 unlock_user_struct(target_itspec, target_addr, 1);
6771 return 0;
6774 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6775 struct itimerspec *host_its)
6777 struct target_itimerspec *target_itspec;
6779 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6780 return -TARGET_EFAULT;
6783 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6784 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6786 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6787 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6789 unlock_user_struct(target_itspec, target_addr, 0);
6790 return 0;
/* Copy a struct timex from guest memory into *host_tx, field by field
 * (used by adjtimex-style syscalls).  Returns 0 on success,
 * -TARGET_EFAULT if the guest address cannot be read. */
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    /* Read-only lock: nothing is copied back to the guest. */
    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
/* Copy *host_tx out to a struct timex in guest memory, field by field
 * (mirror of target_to_host_timex).  Returns 0 on success,
 * -TARGET_EFAULT if the guest address cannot be written. */
static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    /* Write lock: 1 copies the converted struct back to the guest. */
    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
6864 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6865 abi_ulong target_addr)
6867 struct target_sigevent *target_sevp;
6869 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6870 return -TARGET_EFAULT;
6873 /* This union is awkward on 64 bit systems because it has a 32 bit
6874 * integer and a pointer in it; we follow the conversion approach
6875 * used for handling sigval types in signal.c so the guest should get
6876 * the correct value back even if we did a 64 bit byteswap and it's
6877 * using the 32 bit integer.
6879 host_sevp->sigev_value.sival_ptr =
6880 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6881 host_sevp->sigev_signo =
6882 target_to_host_signal(tswap32(target_sevp->sigev_signo));
6883 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6884 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6886 unlock_user_struct(target_sevp, target_addr, 1);
6887 return 0;
6890 #if defined(TARGET_NR_mlockall)
6891 static inline int target_to_host_mlockall_arg(int arg)
6893 int result = 0;
6895 if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
6896 result |= MCL_CURRENT;
6898 if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
6899 result |= MCL_FUTURE;
6901 return result;
6903 #endif
/* Marshal a host struct stat into the guest's 64-bit stat layout at
 * target_addr.  Uses the ARM EABI-specific layout when running an EABI
 * ARM guest, otherwise the generic target_stat64/target_stat layout.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address. */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        /* Zero first so padding and unset fields are deterministic. */
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
        /* Generic layout for every other target. */
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);

    return 0;
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things. However implementing
   futexes locally would make futexes shared between multiple processes
   tricky. However they're probably useless because guest atomic
   operations won't work either. */
/* Dispatch a guest futex() call to the host futex syscall: converts the
 * timeout (when the op takes one) and byte-swaps the compare values that
 * the kernel checks against guest memory. */
static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target. */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            target_to_host_timespec(pts, timeout);
        } else {
            pts = NULL;
        /* val is compared against guest memory, so swap it to host order. */
        return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
                         pts, NULL, val3));
    case FUTEX_WAKE:
        return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_FD:
        return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return get_errno(safe_futex(g2h(uaddr), op, val, pts,
                                    g2h(uaddr2),
                                    (base_op == FUTEX_CMP_REQUEUE
                                     ? tswap32(val3)
                                     : val3)));
    default:
        return -TARGET_ENOSYS;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Implement name_to_handle_at(): read the guest's file_handle buffer size,
 * run the host syscall into a temporary handle, then copy the handle and
 * mount id back out to the guest.  Returns the host syscall result or
 * -TARGET_EFAULT. */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* handle_bytes is the first 32-bit field of the guest structure. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;

    /* NOTE(review): 'size' comes from the guest unchecked; a huge value
     * can overflow total_size or over-allocate -- confirm a sane bound. */
    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;

    return ret;
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Implement open_by_handle_at(): copy the guest's opaque file handle into
 * a host-order duplicate and open it.  Returns the new fd or a negative
 * target errno. */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* handle_bytes is the first 32-bit field of the guest structure.
     * NOTE(review): guest-supplied and unchecked, as in
     * do_name_to_handle_at -- confirm a sane bound. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;

    /* Duplicate, then fix up the two header fields to host byte order;
     * the handle payload itself is opaque and copied verbatim. */
    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
#endif
7107 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
/* signalfd siginfo conversion */

/* Convert one signalfd_siginfo record from host to target byte order.
 * NOTE(review): this is only called with tinfo == info (in-place, from
 * host_to_target_data_signalfd); the reads of tinfo->ssi_signo/ssi_code
 * below rely on that aliasing to see the pre-swap host values. */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    if (tinfo->ssi_signo == SIGBUS &&
        (tinfo->ssi_code == BUS_MCEERR_AR ||
         tinfo->ssi_code == BUS_MCEERR_AO)) {
        /* The extra 16-bit field lives immediately after ssi_addr. */
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
#endif

    tinfo->ssi_signo = tswap32(sig);
    tinfo->ssi_errno = tswap32(tinfo->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
7149 static abi_long host_to_target_data_signalfd(void *buf, size_t len)
7151 int i;
7153 for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
7154 host_to_target_signalfd_siginfo(buf + i, buf + i);
7157 return len;
/* fd translator: byte-swap siginfo records read from a signalfd. */
static TargetFdTrans target_signalfd_trans = {
    .host_to_target_data = host_to_target_data_signalfd,
/* Implement signalfd4(): translate the guest sigset and flags, create the
 * host signalfd, and register a data translator so subsequent reads are
 * converted for the guest.  Returns the fd or a negative target errno. */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    /* Only these two flags are defined for signalfd4. */
    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        /* Reads from this fd must be byte-swapped for the guest. */
        fd_trans_register(ret, &target_signalfd_trans);

    unlock_user_struct(target_mask, mask, 0);

    return ret;
7191 #endif
/* Translate the signal number embedded in a wait()-family status word
 * from host to target numbering; all other status bits are assumed to
 * share the same layout on host and target. */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        int tsig = host_to_target_signal(WTERMSIG(status));

        return tsig | (status & ~0x7f);
    } else if (WIFSTOPPED(status)) {
        int tsig = host_to_target_signal(WSTOPSIG(status));

        return (tsig << 8) | (status & 0xff);
    } else {
        return status;
    }
}
/* Fill FD with the guest-visible /proc/self/cmdline: stream the host
 * file through, dropping the first NUL-terminated word (the path of the
 * qemu binary itself).  Returns 0 on success, -1 with errno set on error. */
static int open_self_cmdline(void *cpu_env, int fd)
    int fd_orig = -1;
    bool word_skipped = false;

    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;

    while (true) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            /* Preserve the read error across close(). */
            int e = errno;
            fd_orig = close(fd_orig);
            errno = e;
            return -1;
        } else if (nb_read == 0) {
            break;

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command. */
            cp_buf = memchr(buf, 0, nb_read);
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;

        if (word_skipped) {
            if (write(fd, cp_buf, nb_read) != nb_read) {
                int e = errno;
                close(fd_orig);
                errno = e;
                return -1;

    return close(fd_orig);
/* Fill FD with a guest-visible /proc/self/maps: parse the host file and
 * emit only ranges that map into guest address space, rewritten with
 * guest (h2g) addresses.  Returns 0 on success, -1 if the host file
 * cannot be opened. */
static int open_self_maps(void *cpu_env, int fd)
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/self/maps", "r");
    if (fp == NULL) {
        return -1;

    while ((read = getline(&line, &len, fp)) != -1) {
        int fields, dev_maj, dev_min, inode;
        uint64_t min, max, offset;
        char flag_r, flag_w, flag_x, flag_p;
        char path[512] = "";
        fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
                        " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
                        &flag_p, &offset, &dev_maj, &dev_min, &inode, path);

        /* 10 fields when the path column is empty, 11 otherwise. */
        if ((fields < 10) || (fields > 11)) {
            continue;
        if (h2g_valid(min)) {
            int flags = page_get_flags(h2g(min));
            /* Clamp ranges that extend past the guest address space. */
            max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
            if (page_check_range(h2g(min), max - min, flags) == -1) {
                continue;
            if (h2g(min) == ts->info->stack_limit) {
                pstrcpy(path, sizeof(path), " [stack]");
            dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
                    " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
                    h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
                    flag_x, flag_p, offset, dev_maj, dev_min, inode,
                    path[0] ? "         " : "", path);

    free(line);
    fclose(fp);

    return 0;
/* Fill FD with a minimal fake /proc/self/stat: real pid, argv[0] and
 * stack-bottom address; the remaining fields are written as 0.
 * Returns 0 on success, -1 on a short write. */
static int open_self_stat(void *cpu_env, int fd)
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong start_stack = ts->info->start_stack;
    int i;

    /* /proc/<pid>/stat has 44 space-separated fields. */
    for (i = 0; i < 44; i++) {
        char buf[128];
        int len;
        uint64_t val = 0;

        if (i == 0) {
            /* pid */
            val = getpid();
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else if (i == 1) {
            /* app name */
            snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
        } else if (i == 27) {
            /* stack bottom */
            val = start_stack;
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else {
            /* for the rest, there is MasterCard */
            snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');

        len = strlen(buf);
        if (write(fd, buf, len) != len) {
            return -1;

    return 0;
/* Fill FD with the guest's ELF auxiliary vector, copied verbatim from
 * the saved copy on the target stack.  Always returns 0 (a failed
 * lock_user or short write is silently ignored). */
static int open_self_auxv(void *cpu_env, int fd)
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            len -= r;
            ptr += r;
        /* Rewind so the reader sees the file from the beginning. */
        lseek(fd, 0, SEEK_SET);
        unlock_user(ptr, auxv, len);

    return 0;
/* Return 1 if FILENAME names the /proc entry ENTRY of the current
 * process -- either as /proc/self/<entry> or /proc/<our-pid>/<entry> --
 * and 0 otherwise. */
static int is_proc_myself(const char *filename, const char *entry)
{
    const char *p = filename;

    if (strncmp(p, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    p += strlen("/proc/");

    if (strncmp(p, "self/", strlen("self/")) == 0) {
        p += strlen("self/");
    } else if (*p >= '1' && *p <= '9') {
        /* A numeric pid: only our own pid counts as "myself". */
        char pid_prefix[80];

        snprintf(pid_prefix, sizeof(pid_prefix), "%d/", getpid());
        if (strncmp(p, pid_prefix, strlen(pid_prefix)) != 0) {
            return 0;
        }
        p += strlen(pid_prefix);
    } else {
        return 0;
    }

    return strcmp(p, entry) == 0;
}
7397 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-match predicate for fake /proc entries that are not per-process. */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
7403 static int open_net_route(void *cpu_env, int fd)
7405 FILE *fp;
7406 char *line = NULL;
7407 size_t len = 0;
7408 ssize_t read;
7410 fp = fopen("/proc/net/route", "r");
7411 if (fp == NULL) {
7412 return -1;
7415 /* read header */
7417 read = getline(&line, &len, fp);
7418 dprintf(fd, "%s", line);
7420 /* read routes */
7422 while ((read = getline(&line, &len, fp)) != -1) {
7423 char iface[16];
7424 uint32_t dest, gw, mask;
7425 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7426 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7427 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7428 &mask, &mtu, &window, &irtt);
7429 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7430 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7431 metric, tswap32(mask), mtu, window, irtt);
7434 free(line);
7435 fclose(fp);
7437 return 0;
7439 #endif
/* openat() with QEMU's /proc interception: certain /proc/self files and
 * (on cross-endian hosts) /proc/net/route are replaced by synthesized
 * content written to an unlinked temp file; everything else goes to the
 * real host openat().  Returns an fd or a negative errno. */
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
    struct fake_open {
        const char *filename;
        int (*fill)(void *cpu_env, int fd);
        int (*cmp)(const char *s1, const char *s2);
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
        { NULL, NULL, NULL }

    /* /proc/self/exe must resolve to the guest binary, not qemu. */
    if (is_proc_myself(pathname, "exe")) {
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        /* Unlink immediately: the fd keeps the file alive. */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        lseek(fd, 0, SEEK_SET);

        return fd;

    return safe_openat(dirfd, path(pathname), flags, mode);
/* Timer IDs handed to the guest are tagged with a magic value in the
 * high 16 bits so stale/forged IDs can be rejected. */
#define TIMER_MAGIC 0x0caf0000
#define TIMER_MAGIC_MASK 0xffff0000

/* Convert QEMU provided timer ID back to internal 16bit index format */
static target_timer_t get_timer_id(abi_long arg)
    target_timer_t timerid = arg;

    if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
        return -TARGET_EINVAL;

    /* The low 16 bits index into g_posix_timers[]. */
    timerid &= 0xffff;

    if (timerid >= ARRAY_SIZE(g_posix_timers)) {
        return -TARGET_EINVAL;

    return timerid;
7522 /* do_syscall() should always have a single exit point at the end so
7523 that actions, such as logging of syscall results, can be performed.
7524 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7525 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
7526 abi_long arg2, abi_long arg3, abi_long arg4,
7527 abi_long arg5, abi_long arg6, abi_long arg7,
7528 abi_long arg8)
7530 CPUState *cpu = ENV_GET_CPU(cpu_env);
7531 abi_long ret;
7532 struct stat st;
7533 struct statfs stfs;
7534 void *p;
7536 #if defined(DEBUG_ERESTARTSYS)
7537 /* Debug-only code for exercising the syscall-restart code paths
7538 * in the per-architecture cpu main loops: restart every syscall
7539 * the guest makes once before letting it through.
7542 static int flag;
7544 flag = !flag;
7545 if (flag) {
7546 return -TARGET_ERESTARTSYS;
7549 #endif
7551 #ifdef DEBUG
7552 gemu_log("syscall %d", num);
7553 #endif
7554 trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
7555 if(do_strace)
7556 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
7558 switch(num) {
7559 case TARGET_NR_exit:
7560 /* In old applications this may be used to implement _exit(2).
7561 However in threaded applictions it is used for thread termination,
7562 and _exit_group is used for application termination.
7563 Do thread termination if we have more then one thread. */
7565 if (block_signals()) {
7566 ret = -TARGET_ERESTARTSYS;
7567 break;
7570 cpu_list_lock();
7572 if (CPU_NEXT(first_cpu)) {
7573 TaskState *ts;
7575 /* Remove the CPU from the list. */
7576 QTAILQ_REMOVE(&cpus, cpu, node);
7578 cpu_list_unlock();
7580 ts = cpu->opaque;
7581 if (ts->child_tidptr) {
7582 put_user_u32(0, ts->child_tidptr);
7583 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7584 NULL, NULL, 0);
7586 thread_cpu = NULL;
7587 object_unref(OBJECT(cpu));
7588 g_free(ts);
7589 rcu_unregister_thread();
7590 pthread_exit(NULL);
7593 cpu_list_unlock();
7594 #ifdef TARGET_GPROF
7595 _mcleanup();
7596 #endif
7597 gdb_exit(cpu_env, arg1);
7598 _exit(arg1);
7599 ret = 0; /* avoid warning */
7600 break;
7601 case TARGET_NR_read:
7602 if (arg3 == 0)
7603 ret = 0;
7604 else {
7605 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7606 goto efault;
7607 ret = get_errno(safe_read(arg1, p, arg3));
7608 if (ret >= 0 &&
7609 fd_trans_host_to_target_data(arg1)) {
7610 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7612 unlock_user(p, arg2, ret);
7614 break;
7615 case TARGET_NR_write:
7616 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7617 goto efault;
7618 ret = get_errno(safe_write(arg1, p, arg3));
7619 unlock_user(p, arg2, 0);
7620 break;
7621 #ifdef TARGET_NR_open
7622 case TARGET_NR_open:
7623 if (!(p = lock_user_string(arg1)))
7624 goto efault;
7625 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7626 target_to_host_bitmask(arg2, fcntl_flags_tbl),
7627 arg3));
7628 fd_trans_unregister(ret);
7629 unlock_user(p, arg1, 0);
7630 break;
7631 #endif
7632 case TARGET_NR_openat:
7633 if (!(p = lock_user_string(arg2)))
7634 goto efault;
7635 ret = get_errno(do_openat(cpu_env, arg1, p,
7636 target_to_host_bitmask(arg3, fcntl_flags_tbl),
7637 arg4));
7638 fd_trans_unregister(ret);
7639 unlock_user(p, arg2, 0);
7640 break;
7641 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7642 case TARGET_NR_name_to_handle_at:
7643 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7644 break;
7645 #endif
7646 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7647 case TARGET_NR_open_by_handle_at:
7648 ret = do_open_by_handle_at(arg1, arg2, arg3);
7649 fd_trans_unregister(ret);
7650 break;
7651 #endif
7652 case TARGET_NR_close:
7653 fd_trans_unregister(arg1);
7654 ret = get_errno(close(arg1));
7655 break;
7656 case TARGET_NR_brk:
7657 ret = do_brk(arg1);
7658 break;
7659 #ifdef TARGET_NR_fork
7660 case TARGET_NR_fork:
7661 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
7662 break;
7663 #endif
7664 #ifdef TARGET_NR_waitpid
7665 case TARGET_NR_waitpid:
7667 int status;
7668 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7669 if (!is_error(ret) && arg2 && ret
7670 && put_user_s32(host_to_target_waitstatus(status), arg2))
7671 goto efault;
7673 break;
7674 #endif
7675 #ifdef TARGET_NR_waitid
7676 case TARGET_NR_waitid:
7678 siginfo_t info;
7679 info.si_pid = 0;
7680 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7681 if (!is_error(ret) && arg3 && info.si_pid != 0) {
7682 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7683 goto efault;
7684 host_to_target_siginfo(p, &info);
7685 unlock_user(p, arg3, sizeof(target_siginfo_t));
7688 break;
7689 #endif
7690 #ifdef TARGET_NR_creat /* not on alpha */
7691 case TARGET_NR_creat:
7692 if (!(p = lock_user_string(arg1)))
7693 goto efault;
7694 ret = get_errno(creat(p, arg2));
7695 fd_trans_unregister(ret);
7696 unlock_user(p, arg1, 0);
7697 break;
7698 #endif
7699 #ifdef TARGET_NR_link
7700 case TARGET_NR_link:
7702 void * p2;
7703 p = lock_user_string(arg1);
7704 p2 = lock_user_string(arg2);
7705 if (!p || !p2)
7706 ret = -TARGET_EFAULT;
7707 else
7708 ret = get_errno(link(p, p2));
7709 unlock_user(p2, arg2, 0);
7710 unlock_user(p, arg1, 0);
7712 break;
7713 #endif
7714 #if defined(TARGET_NR_linkat)
7715 case TARGET_NR_linkat:
7717 void * p2 = NULL;
7718 if (!arg2 || !arg4)
7719 goto efault;
7720 p = lock_user_string(arg2);
7721 p2 = lock_user_string(arg4);
7722 if (!p || !p2)
7723 ret = -TARGET_EFAULT;
7724 else
7725 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7726 unlock_user(p, arg2, 0);
7727 unlock_user(p2, arg4, 0);
7729 break;
7730 #endif
7731 #ifdef TARGET_NR_unlink
7732 case TARGET_NR_unlink:
7733 if (!(p = lock_user_string(arg1)))
7734 goto efault;
7735 ret = get_errno(unlink(p));
7736 unlock_user(p, arg1, 0);
7737 break;
7738 #endif
7739 #if defined(TARGET_NR_unlinkat)
7740 case TARGET_NR_unlinkat:
7741 if (!(p = lock_user_string(arg2)))
7742 goto efault;
7743 ret = get_errno(unlinkat(arg1, p, arg3));
7744 unlock_user(p, arg2, 0);
7745 break;
7746 #endif
7747 case TARGET_NR_execve:
7749 char **argp, **envp;
7750 int argc, envc;
7751 abi_ulong gp;
7752 abi_ulong guest_argp;
7753 abi_ulong guest_envp;
7754 abi_ulong addr;
7755 char **q;
7756 int total_size = 0;
7758 argc = 0;
7759 guest_argp = arg2;
7760 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7761 if (get_user_ual(addr, gp))
7762 goto efault;
7763 if (!addr)
7764 break;
7765 argc++;
7767 envc = 0;
7768 guest_envp = arg3;
7769 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7770 if (get_user_ual(addr, gp))
7771 goto efault;
7772 if (!addr)
7773 break;
7774 envc++;
7777 argp = alloca((argc + 1) * sizeof(void *));
7778 envp = alloca((envc + 1) * sizeof(void *));
7780 for (gp = guest_argp, q = argp; gp;
7781 gp += sizeof(abi_ulong), q++) {
7782 if (get_user_ual(addr, gp))
7783 goto execve_efault;
7784 if (!addr)
7785 break;
7786 if (!(*q = lock_user_string(addr)))
7787 goto execve_efault;
7788 total_size += strlen(*q) + 1;
7790 *q = NULL;
7792 for (gp = guest_envp, q = envp; gp;
7793 gp += sizeof(abi_ulong), q++) {
7794 if (get_user_ual(addr, gp))
7795 goto execve_efault;
7796 if (!addr)
7797 break;
7798 if (!(*q = lock_user_string(addr)))
7799 goto execve_efault;
7800 total_size += strlen(*q) + 1;
7802 *q = NULL;
7804 if (!(p = lock_user_string(arg1)))
7805 goto execve_efault;
7806 /* Although execve() is not an interruptible syscall it is
7807 * a special case where we must use the safe_syscall wrapper:
7808 * if we allow a signal to happen before we make the host
7809 * syscall then we will 'lose' it, because at the point of
7810 * execve the process leaves QEMU's control. So we use the
7811 * safe syscall wrapper to ensure that we either take the
7812 * signal as a guest signal, or else it does not happen
7813 * before the execve completes and makes it the other
7814 * program's problem.
7816 ret = get_errno(safe_execve(p, argp, envp));
7817 unlock_user(p, arg1, 0);
7819 goto execve_end;
7821 execve_efault:
7822 ret = -TARGET_EFAULT;
7824 execve_end:
7825 for (gp = guest_argp, q = argp; *q;
7826 gp += sizeof(abi_ulong), q++) {
7827 if (get_user_ual(addr, gp)
7828 || !addr)
7829 break;
7830 unlock_user(*q, addr, 0);
7832 for (gp = guest_envp, q = envp; *q;
7833 gp += sizeof(abi_ulong), q++) {
7834 if (get_user_ual(addr, gp)
7835 || !addr)
7836 break;
7837 unlock_user(*q, addr, 0);
7840 break;
7841 case TARGET_NR_chdir:
7842 if (!(p = lock_user_string(arg1)))
7843 goto efault;
7844 ret = get_errno(chdir(p));
7845 unlock_user(p, arg1, 0);
7846 break;
7847 #ifdef TARGET_NR_time
7848 case TARGET_NR_time:
7850 time_t host_time;
7851 ret = get_errno(time(&host_time));
7852 if (!is_error(ret)
7853 && arg1
7854 && put_user_sal(host_time, arg1))
7855 goto efault;
7857 break;
7858 #endif
7859 #ifdef TARGET_NR_mknod
7860 case TARGET_NR_mknod:
7861 if (!(p = lock_user_string(arg1)))
7862 goto efault;
7863 ret = get_errno(mknod(p, arg2, arg3));
7864 unlock_user(p, arg1, 0);
7865 break;
7866 #endif
7867 #if defined(TARGET_NR_mknodat)
7868 case TARGET_NR_mknodat:
7869 if (!(p = lock_user_string(arg2)))
7870 goto efault;
7871 ret = get_errno(mknodat(arg1, p, arg3, arg4));
7872 unlock_user(p, arg2, 0);
7873 break;
7874 #endif
7875 #ifdef TARGET_NR_chmod
7876 case TARGET_NR_chmod:
7877 if (!(p = lock_user_string(arg1)))
7878 goto efault;
7879 ret = get_errno(chmod(p, arg2));
7880 unlock_user(p, arg1, 0);
7881 break;
7882 #endif
7883 #ifdef TARGET_NR_break
7884 case TARGET_NR_break:
7885 goto unimplemented;
7886 #endif
7887 #ifdef TARGET_NR_oldstat
7888 case TARGET_NR_oldstat:
7889 goto unimplemented;
7890 #endif
7891 case TARGET_NR_lseek:
7892 ret = get_errno(lseek(arg1, arg2, arg3));
7893 break;
7894 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7895 /* Alpha specific */
7896 case TARGET_NR_getxpid:
7897 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7898 ret = get_errno(getpid());
7899 break;
7900 #endif
7901 #ifdef TARGET_NR_getpid
7902 case TARGET_NR_getpid:
7903 ret = get_errno(getpid());
7904 break;
7905 #endif
7906 case TARGET_NR_mount:
7908 /* need to look at the data field */
7909 void *p2, *p3;
7911 if (arg1) {
7912 p = lock_user_string(arg1);
7913 if (!p) {
7914 goto efault;
7916 } else {
7917 p = NULL;
7920 p2 = lock_user_string(arg2);
7921 if (!p2) {
7922 if (arg1) {
7923 unlock_user(p, arg1, 0);
7925 goto efault;
7928 if (arg3) {
7929 p3 = lock_user_string(arg3);
7930 if (!p3) {
7931 if (arg1) {
7932 unlock_user(p, arg1, 0);
7934 unlock_user(p2, arg2, 0);
7935 goto efault;
7937 } else {
7938 p3 = NULL;
7941 /* FIXME - arg5 should be locked, but it isn't clear how to
7942 * do that since it's not guaranteed to be a NULL-terminated
7943 * string.
7945 if (!arg5) {
7946 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7947 } else {
7948 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7950 ret = get_errno(ret);
7952 if (arg1) {
7953 unlock_user(p, arg1, 0);
7955 unlock_user(p2, arg2, 0);
7956 if (arg3) {
7957 unlock_user(p3, arg3, 0);
7960 break;
7961 #ifdef TARGET_NR_umount
7962 case TARGET_NR_umount:
7963 if (!(p = lock_user_string(arg1)))
7964 goto efault;
7965 ret = get_errno(umount(p));
7966 unlock_user(p, arg1, 0);
7967 break;
7968 #endif
7969 #ifdef TARGET_NR_stime /* not on alpha */
7970 case TARGET_NR_stime:
7972 time_t host_time;
7973 if (get_user_sal(host_time, arg1))
7974 goto efault;
7975 ret = get_errno(stime(&host_time));
7977 break;
7978 #endif
7979 case TARGET_NR_ptrace:
7980 goto unimplemented;
7981 #ifdef TARGET_NR_alarm /* not on alpha */
7982 case TARGET_NR_alarm:
7983 ret = alarm(arg1);
7984 break;
7985 #endif
7986 #ifdef TARGET_NR_oldfstat
7987 case TARGET_NR_oldfstat:
7988 goto unimplemented;
7989 #endif
7990 #ifdef TARGET_NR_pause /* not on alpha */
7991 case TARGET_NR_pause:
7992 if (!block_signals()) {
7993 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7995 ret = -TARGET_EINTR;
7996 break;
7997 #endif
7998 #ifdef TARGET_NR_utime
7999 case TARGET_NR_utime:
8001 struct utimbuf tbuf, *host_tbuf;
8002 struct target_utimbuf *target_tbuf;
8003 if (arg2) {
8004 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8005 goto efault;
8006 tbuf.actime = tswapal(target_tbuf->actime);
8007 tbuf.modtime = tswapal(target_tbuf->modtime);
8008 unlock_user_struct(target_tbuf, arg2, 0);
8009 host_tbuf = &tbuf;
8010 } else {
8011 host_tbuf = NULL;
8013 if (!(p = lock_user_string(arg1)))
8014 goto efault;
8015 ret = get_errno(utime(p, host_tbuf));
8016 unlock_user(p, arg1, 0);
8018 break;
8019 #endif
8020 #ifdef TARGET_NR_utimes
8021 case TARGET_NR_utimes:
8023 struct timeval *tvp, tv[2];
8024 if (arg2) {
8025 if (copy_from_user_timeval(&tv[0], arg2)
8026 || copy_from_user_timeval(&tv[1],
8027 arg2 + sizeof(struct target_timeval)))
8028 goto efault;
8029 tvp = tv;
8030 } else {
8031 tvp = NULL;
8033 if (!(p = lock_user_string(arg1)))
8034 goto efault;
8035 ret = get_errno(utimes(p, tvp));
8036 unlock_user(p, arg1, 0);
8038 break;
8039 #endif
8040 #if defined(TARGET_NR_futimesat)
8041 case TARGET_NR_futimesat:
8043 struct timeval *tvp, tv[2];
8044 if (arg3) {
8045 if (copy_from_user_timeval(&tv[0], arg3)
8046 || copy_from_user_timeval(&tv[1],
8047 arg3 + sizeof(struct target_timeval)))
8048 goto efault;
8049 tvp = tv;
8050 } else {
8051 tvp = NULL;
8053 if (!(p = lock_user_string(arg2)))
8054 goto efault;
8055 ret = get_errno(futimesat(arg1, path(p), tvp));
8056 unlock_user(p, arg2, 0);
8058 break;
8059 #endif
8060 #ifdef TARGET_NR_stty
8061 case TARGET_NR_stty:
8062 goto unimplemented;
8063 #endif
8064 #ifdef TARGET_NR_gtty
8065 case TARGET_NR_gtty:
8066 goto unimplemented;
8067 #endif
8068 #ifdef TARGET_NR_access
8069 case TARGET_NR_access:
8070 if (!(p = lock_user_string(arg1)))
8071 goto efault;
8072 ret = get_errno(access(path(p), arg2));
8073 unlock_user(p, arg1, 0);
8074 break;
8075 #endif
8076 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8077 case TARGET_NR_faccessat:
8078 if (!(p = lock_user_string(arg2)))
8079 goto efault;
8080 ret = get_errno(faccessat(arg1, p, arg3, 0));
8081 unlock_user(p, arg2, 0);
8082 break;
8083 #endif
8084 #ifdef TARGET_NR_nice /* not on alpha */
8085 case TARGET_NR_nice:
8086 ret = get_errno(nice(arg1));
8087 break;
8088 #endif
8089 #ifdef TARGET_NR_ftime
8090 case TARGET_NR_ftime:
8091 goto unimplemented;
8092 #endif
8093 case TARGET_NR_sync:
8094 sync();
8095 ret = 0;
8096 break;
8097 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8098 case TARGET_NR_syncfs:
8099 ret = get_errno(syncfs(arg1));
8100 break;
8101 #endif
8102 case TARGET_NR_kill:
8103 ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8104 break;
8105 #ifdef TARGET_NR_rename
8106 case TARGET_NR_rename:
8108 void *p2;
8109 p = lock_user_string(arg1);
8110 p2 = lock_user_string(arg2);
8111 if (!p || !p2)
8112 ret = -TARGET_EFAULT;
8113 else
8114 ret = get_errno(rename(p, p2));
8115 unlock_user(p2, arg2, 0);
8116 unlock_user(p, arg1, 0);
8118 break;
8119 #endif
8120 #if defined(TARGET_NR_renameat)
8121 case TARGET_NR_renameat:
8123 void *p2;
8124 p = lock_user_string(arg2);
8125 p2 = lock_user_string(arg4);
8126 if (!p || !p2)
8127 ret = -TARGET_EFAULT;
8128 else
8129 ret = get_errno(renameat(arg1, p, arg3, p2));
8130 unlock_user(p2, arg4, 0);
8131 unlock_user(p, arg2, 0);
8133 break;
8134 #endif
8135 #ifdef TARGET_NR_mkdir
8136 case TARGET_NR_mkdir:
8137 if (!(p = lock_user_string(arg1)))
8138 goto efault;
8139 ret = get_errno(mkdir(p, arg2));
8140 unlock_user(p, arg1, 0);
8141 break;
8142 #endif
8143 #if defined(TARGET_NR_mkdirat)
8144 case TARGET_NR_mkdirat:
8145 if (!(p = lock_user_string(arg2)))
8146 goto efault;
8147 ret = get_errno(mkdirat(arg1, p, arg3));
8148 unlock_user(p, arg2, 0);
8149 break;
8150 #endif
8151 #ifdef TARGET_NR_rmdir
8152 case TARGET_NR_rmdir:
8153 if (!(p = lock_user_string(arg1)))
8154 goto efault;
8155 ret = get_errno(rmdir(p));
8156 unlock_user(p, arg1, 0);
8157 break;
8158 #endif
8159 case TARGET_NR_dup:
8160 ret = get_errno(dup(arg1));
8161 if (ret >= 0) {
8162 fd_trans_dup(arg1, ret);
8164 break;
8165 #ifdef TARGET_NR_pipe
8166 case TARGET_NR_pipe:
8167 ret = do_pipe(cpu_env, arg1, 0, 0);
8168 break;
8169 #endif
8170 #ifdef TARGET_NR_pipe2
8171 case TARGET_NR_pipe2:
8172 ret = do_pipe(cpu_env, arg1,
8173 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8174 break;
8175 #endif
8176 case TARGET_NR_times:
8178 struct target_tms *tmsp;
8179 struct tms tms;
8180 ret = get_errno(times(&tms));
8181 if (arg1) {
8182 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8183 if (!tmsp)
8184 goto efault;
8185 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8186 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8187 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8188 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8190 if (!is_error(ret))
8191 ret = host_to_target_clock_t(ret);
8193 break;
8194 #ifdef TARGET_NR_prof
8195 case TARGET_NR_prof:
8196 goto unimplemented;
8197 #endif
8198 #ifdef TARGET_NR_signal
8199 case TARGET_NR_signal:
8200 goto unimplemented;
8201 #endif
8202 case TARGET_NR_acct:
8203 if (arg1 == 0) {
8204 ret = get_errno(acct(NULL));
8205 } else {
8206 if (!(p = lock_user_string(arg1)))
8207 goto efault;
8208 ret = get_errno(acct(path(p)));
8209 unlock_user(p, arg1, 0);
8211 break;
8212 #ifdef TARGET_NR_umount2
8213 case TARGET_NR_umount2:
8214 if (!(p = lock_user_string(arg1)))
8215 goto efault;
8216 ret = get_errno(umount2(p, arg2));
8217 unlock_user(p, arg1, 0);
8218 break;
8219 #endif
8220 #ifdef TARGET_NR_lock
8221 case TARGET_NR_lock:
8222 goto unimplemented;
8223 #endif
8224 case TARGET_NR_ioctl:
8225 ret = do_ioctl(arg1, arg2, arg3);
8226 break;
8227 case TARGET_NR_fcntl:
8228 ret = do_fcntl(arg1, arg2, arg3);
8229 break;
8230 #ifdef TARGET_NR_mpx
8231 case TARGET_NR_mpx:
8232 goto unimplemented;
8233 #endif
8234 case TARGET_NR_setpgid:
8235 ret = get_errno(setpgid(arg1, arg2));
8236 break;
8237 #ifdef TARGET_NR_ulimit
8238 case TARGET_NR_ulimit:
8239 goto unimplemented;
8240 #endif
8241 #ifdef TARGET_NR_oldolduname
8242 case TARGET_NR_oldolduname:
8243 goto unimplemented;
8244 #endif
8245 case TARGET_NR_umask:
8246 ret = get_errno(umask(arg1));
8247 break;
8248 case TARGET_NR_chroot:
8249 if (!(p = lock_user_string(arg1)))
8250 goto efault;
8251 ret = get_errno(chroot(p));
8252 unlock_user(p, arg1, 0);
8253 break;
8254 #ifdef TARGET_NR_ustat
8255 case TARGET_NR_ustat:
8256 goto unimplemented;
8257 #endif
8258 #ifdef TARGET_NR_dup2
8259 case TARGET_NR_dup2:
8260 ret = get_errno(dup2(arg1, arg2));
8261 if (ret >= 0) {
8262 fd_trans_dup(arg1, arg2);
8264 break;
8265 #endif
8266 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8267 case TARGET_NR_dup3:
8268 ret = get_errno(dup3(arg1, arg2, arg3));
8269 if (ret >= 0) {
8270 fd_trans_dup(arg1, arg2);
8272 break;
8273 #endif
8274 #ifdef TARGET_NR_getppid /* not on alpha */
8275 case TARGET_NR_getppid:
8276 ret = get_errno(getppid());
8277 break;
8278 #endif
8279 #ifdef TARGET_NR_getpgrp
8280 case TARGET_NR_getpgrp:
8281 ret = get_errno(getpgrp());
8282 break;
8283 #endif
8284 case TARGET_NR_setsid:
8285 ret = get_errno(setsid());
8286 break;
8287 #ifdef TARGET_NR_sigaction
8288 case TARGET_NR_sigaction:
8290 #if defined(TARGET_ALPHA)
8291 struct target_sigaction act, oact, *pact = 0;
8292 struct target_old_sigaction *old_act;
8293 if (arg2) {
8294 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8295 goto efault;
8296 act._sa_handler = old_act->_sa_handler;
8297 target_siginitset(&act.sa_mask, old_act->sa_mask);
8298 act.sa_flags = old_act->sa_flags;
8299 act.sa_restorer = 0;
8300 unlock_user_struct(old_act, arg2, 0);
8301 pact = &act;
8303 ret = get_errno(do_sigaction(arg1, pact, &oact));
8304 if (!is_error(ret) && arg3) {
8305 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8306 goto efault;
8307 old_act->_sa_handler = oact._sa_handler;
8308 old_act->sa_mask = oact.sa_mask.sig[0];
8309 old_act->sa_flags = oact.sa_flags;
8310 unlock_user_struct(old_act, arg3, 1);
8312 #elif defined(TARGET_MIPS)
8313 struct target_sigaction act, oact, *pact, *old_act;
8315 if (arg2) {
8316 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8317 goto efault;
8318 act._sa_handler = old_act->_sa_handler;
8319 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8320 act.sa_flags = old_act->sa_flags;
8321 unlock_user_struct(old_act, arg2, 0);
8322 pact = &act;
8323 } else {
8324 pact = NULL;
8327 ret = get_errno(do_sigaction(arg1, pact, &oact));
8329 if (!is_error(ret) && arg3) {
8330 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8331 goto efault;
8332 old_act->_sa_handler = oact._sa_handler;
8333 old_act->sa_flags = oact.sa_flags;
8334 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8335 old_act->sa_mask.sig[1] = 0;
8336 old_act->sa_mask.sig[2] = 0;
8337 old_act->sa_mask.sig[3] = 0;
8338 unlock_user_struct(old_act, arg3, 1);
8340 #else
8341 struct target_old_sigaction *old_act;
8342 struct target_sigaction act, oact, *pact;
8343 if (arg2) {
8344 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8345 goto efault;
8346 act._sa_handler = old_act->_sa_handler;
8347 target_siginitset(&act.sa_mask, old_act->sa_mask);
8348 act.sa_flags = old_act->sa_flags;
8349 act.sa_restorer = old_act->sa_restorer;
8350 unlock_user_struct(old_act, arg2, 0);
8351 pact = &act;
8352 } else {
8353 pact = NULL;
8355 ret = get_errno(do_sigaction(arg1, pact, &oact));
8356 if (!is_error(ret) && arg3) {
8357 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8358 goto efault;
8359 old_act->_sa_handler = oact._sa_handler;
8360 old_act->sa_mask = oact.sa_mask.sig[0];
8361 old_act->sa_flags = oact.sa_flags;
8362 old_act->sa_restorer = oact.sa_restorer;
8363 unlock_user_struct(old_act, arg3, 1);
8365 #endif
8367 break;
8368 #endif
8369 case TARGET_NR_rt_sigaction:
8371 #if defined(TARGET_ALPHA)
8372 struct target_sigaction act, oact, *pact = 0;
8373 struct target_rt_sigaction *rt_act;
8375 if (arg4 != sizeof(target_sigset_t)) {
8376 ret = -TARGET_EINVAL;
8377 break;
8379 if (arg2) {
8380 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8381 goto efault;
8382 act._sa_handler = rt_act->_sa_handler;
8383 act.sa_mask = rt_act->sa_mask;
8384 act.sa_flags = rt_act->sa_flags;
8385 act.sa_restorer = arg5;
8386 unlock_user_struct(rt_act, arg2, 0);
8387 pact = &act;
8389 ret = get_errno(do_sigaction(arg1, pact, &oact));
8390 if (!is_error(ret) && arg3) {
8391 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8392 goto efault;
8393 rt_act->_sa_handler = oact._sa_handler;
8394 rt_act->sa_mask = oact.sa_mask;
8395 rt_act->sa_flags = oact.sa_flags;
8396 unlock_user_struct(rt_act, arg3, 1);
8398 #else
8399 struct target_sigaction *act;
8400 struct target_sigaction *oact;
8402 if (arg4 != sizeof(target_sigset_t)) {
8403 ret = -TARGET_EINVAL;
8404 break;
8406 if (arg2) {
8407 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
8408 goto efault;
8409 } else
8410 act = NULL;
8411 if (arg3) {
8412 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8413 ret = -TARGET_EFAULT;
8414 goto rt_sigaction_fail;
8416 } else
8417 oact = NULL;
8418 ret = get_errno(do_sigaction(arg1, act, oact));
8419 rt_sigaction_fail:
8420 if (act)
8421 unlock_user_struct(act, arg2, 0);
8422 if (oact)
8423 unlock_user_struct(oact, arg3, 1);
8424 #endif
8426 break;
8427 #ifdef TARGET_NR_sgetmask /* not on alpha */
8428 case TARGET_NR_sgetmask:
8430 sigset_t cur_set;
8431 abi_ulong target_set;
8432 ret = do_sigprocmask(0, NULL, &cur_set);
8433 if (!ret) {
8434 host_to_target_old_sigset(&target_set, &cur_set);
8435 ret = target_set;
8438 break;
8439 #endif
8440 #ifdef TARGET_NR_ssetmask /* not on alpha */
8441 case TARGET_NR_ssetmask:
8443 sigset_t set, oset, cur_set;
8444 abi_ulong target_set = arg1;
8445 /* We only have one word of the new mask so we must read
8446 * the rest of it with do_sigprocmask() and OR in this word.
8447 * We are guaranteed that a do_sigprocmask() that only queries
8448 * the signal mask will not fail.
8450 ret = do_sigprocmask(0, NULL, &cur_set);
8451 assert(!ret);
8452 target_to_host_old_sigset(&set, &target_set);
8453 sigorset(&set, &set, &cur_set);
8454 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8455 if (!ret) {
8456 host_to_target_old_sigset(&target_set, &oset);
8457 ret = target_set;
8460 break;
8461 #endif
8462 #ifdef TARGET_NR_sigprocmask
8463 case TARGET_NR_sigprocmask:
8465 #if defined(TARGET_ALPHA)
8466 sigset_t set, oldset;
8467 abi_ulong mask;
8468 int how;
8470 switch (arg1) {
8471 case TARGET_SIG_BLOCK:
8472 how = SIG_BLOCK;
8473 break;
8474 case TARGET_SIG_UNBLOCK:
8475 how = SIG_UNBLOCK;
8476 break;
8477 case TARGET_SIG_SETMASK:
8478 how = SIG_SETMASK;
8479 break;
8480 default:
8481 ret = -TARGET_EINVAL;
8482 goto fail;
8484 mask = arg2;
8485 target_to_host_old_sigset(&set, &mask);
8487 ret = do_sigprocmask(how, &set, &oldset);
8488 if (!is_error(ret)) {
8489 host_to_target_old_sigset(&mask, &oldset);
8490 ret = mask;
8491 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8493 #else
8494 sigset_t set, oldset, *set_ptr;
8495 int how;
8497 if (arg2) {
8498 switch (arg1) {
8499 case TARGET_SIG_BLOCK:
8500 how = SIG_BLOCK;
8501 break;
8502 case TARGET_SIG_UNBLOCK:
8503 how = SIG_UNBLOCK;
8504 break;
8505 case TARGET_SIG_SETMASK:
8506 how = SIG_SETMASK;
8507 break;
8508 default:
8509 ret = -TARGET_EINVAL;
8510 goto fail;
8512 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8513 goto efault;
8514 target_to_host_old_sigset(&set, p);
8515 unlock_user(p, arg2, 0);
8516 set_ptr = &set;
8517 } else {
8518 how = 0;
8519 set_ptr = NULL;
8521 ret = do_sigprocmask(how, set_ptr, &oldset);
8522 if (!is_error(ret) && arg3) {
8523 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8524 goto efault;
8525 host_to_target_old_sigset(p, &oldset);
8526 unlock_user(p, arg3, sizeof(target_sigset_t));
8528 #endif
8530 break;
8531 #endif
8532 case TARGET_NR_rt_sigprocmask:
8534 int how = arg1;
8535 sigset_t set, oldset, *set_ptr;
8537 if (arg4 != sizeof(target_sigset_t)) {
8538 ret = -TARGET_EINVAL;
8539 break;
8542 if (arg2) {
8543 switch(how) {
8544 case TARGET_SIG_BLOCK:
8545 how = SIG_BLOCK;
8546 break;
8547 case TARGET_SIG_UNBLOCK:
8548 how = SIG_UNBLOCK;
8549 break;
8550 case TARGET_SIG_SETMASK:
8551 how = SIG_SETMASK;
8552 break;
8553 default:
8554 ret = -TARGET_EINVAL;
8555 goto fail;
8557 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8558 goto efault;
8559 target_to_host_sigset(&set, p);
8560 unlock_user(p, arg2, 0);
8561 set_ptr = &set;
8562 } else {
8563 how = 0;
8564 set_ptr = NULL;
8566 ret = do_sigprocmask(how, set_ptr, &oldset);
8567 if (!is_error(ret) && arg3) {
8568 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8569 goto efault;
8570 host_to_target_sigset(p, &oldset);
8571 unlock_user(p, arg3, sizeof(target_sigset_t));
8574 break;
8575 #ifdef TARGET_NR_sigpending
8576 case TARGET_NR_sigpending:
8578 sigset_t set;
8579 ret = get_errno(sigpending(&set));
8580 if (!is_error(ret)) {
8581 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8582 goto efault;
8583 host_to_target_old_sigset(p, &set);
8584 unlock_user(p, arg1, sizeof(target_sigset_t));
8587 break;
8588 #endif
8589 case TARGET_NR_rt_sigpending:
8591 sigset_t set;
8593 /* Yes, this check is >, not != like most. We follow the kernel's
8594 * logic and it does it like this because it implements
8595 * NR_sigpending through the same code path, and in that case
8596 * the old_sigset_t is smaller in size.
8598 if (arg2 > sizeof(target_sigset_t)) {
8599 ret = -TARGET_EINVAL;
8600 break;
8603 ret = get_errno(sigpending(&set));
8604 if (!is_error(ret)) {
8605 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8606 goto efault;
8607 host_to_target_sigset(p, &set);
8608 unlock_user(p, arg1, sizeof(target_sigset_t));
8611 break;
8612 #ifdef TARGET_NR_sigsuspend
8613 case TARGET_NR_sigsuspend:
8615 TaskState *ts = cpu->opaque;
8616 #if defined(TARGET_ALPHA)
8617 abi_ulong mask = arg1;
8618 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8619 #else
8620 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8621 goto efault;
8622 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8623 unlock_user(p, arg1, 0);
8624 #endif
8625 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8626 SIGSET_T_SIZE));
8627 if (ret != -TARGET_ERESTARTSYS) {
8628 ts->in_sigsuspend = 1;
8631 break;
8632 #endif
8633 case TARGET_NR_rt_sigsuspend:
8635 TaskState *ts = cpu->opaque;
8637 if (arg2 != sizeof(target_sigset_t)) {
8638 ret = -TARGET_EINVAL;
8639 break;
8641 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8642 goto efault;
8643 target_to_host_sigset(&ts->sigsuspend_mask, p);
8644 unlock_user(p, arg1, 0);
8645 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8646 SIGSET_T_SIZE));
8647 if (ret != -TARGET_ERESTARTSYS) {
8648 ts->in_sigsuspend = 1;
8651 break;
8652 case TARGET_NR_rt_sigtimedwait:
8654 sigset_t set;
8655 struct timespec uts, *puts;
8656 siginfo_t uinfo;
8658 if (arg4 != sizeof(target_sigset_t)) {
8659 ret = -TARGET_EINVAL;
8660 break;
8663 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8664 goto efault;
8665 target_to_host_sigset(&set, p);
8666 unlock_user(p, arg1, 0);
8667 if (arg3) {
8668 puts = &uts;
8669 target_to_host_timespec(puts, arg3);
8670 } else {
8671 puts = NULL;
8673 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8674 SIGSET_T_SIZE));
8675 if (!is_error(ret)) {
8676 if (arg2) {
8677 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8679 if (!p) {
8680 goto efault;
8682 host_to_target_siginfo(p, &uinfo);
8683 unlock_user(p, arg2, sizeof(target_siginfo_t));
8685 ret = host_to_target_signal(ret);
8688 break;
8689 case TARGET_NR_rt_sigqueueinfo:
8691 siginfo_t uinfo;
8693 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8694 if (!p) {
8695 goto efault;
8697 target_to_host_siginfo(&uinfo, p);
8698 unlock_user(p, arg1, 0);
8699 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8701 break;
8702 #ifdef TARGET_NR_sigreturn
8703 case TARGET_NR_sigreturn:
8704 if (block_signals()) {
8705 ret = -TARGET_ERESTARTSYS;
8706 } else {
8707 ret = do_sigreturn(cpu_env);
8709 break;
8710 #endif
8711 case TARGET_NR_rt_sigreturn:
8712 if (block_signals()) {
8713 ret = -TARGET_ERESTARTSYS;
8714 } else {
8715 ret = do_rt_sigreturn(cpu_env);
8717 break;
8718 case TARGET_NR_sethostname:
8719 if (!(p = lock_user_string(arg1)))
8720 goto efault;
8721 ret = get_errno(sethostname(p, arg2));
8722 unlock_user(p, arg1, 0);
8723 break;
8724 case TARGET_NR_setrlimit:
8726 int resource = target_to_host_resource(arg1);
8727 struct target_rlimit *target_rlim;
8728 struct rlimit rlim;
8729 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8730 goto efault;
8731 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8732 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8733 unlock_user_struct(target_rlim, arg2, 0);
8734 ret = get_errno(setrlimit(resource, &rlim));
8736 break;
8737 case TARGET_NR_getrlimit:
8739 int resource = target_to_host_resource(arg1);
8740 struct target_rlimit *target_rlim;
8741 struct rlimit rlim;
8743 ret = get_errno(getrlimit(resource, &rlim));
8744 if (!is_error(ret)) {
8745 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8746 goto efault;
8747 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8748 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8749 unlock_user_struct(target_rlim, arg2, 1);
8752 break;
8753 case TARGET_NR_getrusage:
8755 struct rusage rusage;
8756 ret = get_errno(getrusage(arg1, &rusage));
8757 if (!is_error(ret)) {
8758 ret = host_to_target_rusage(arg2, &rusage);
8761 break;
8762 case TARGET_NR_gettimeofday:
8764 struct timeval tv;
8765 ret = get_errno(gettimeofday(&tv, NULL));
8766 if (!is_error(ret)) {
8767 if (copy_to_user_timeval(arg1, &tv))
8768 goto efault;
8771 break;
8772 case TARGET_NR_settimeofday:
8774 struct timeval tv, *ptv = NULL;
8775 struct timezone tz, *ptz = NULL;
8777 if (arg1) {
8778 if (copy_from_user_timeval(&tv, arg1)) {
8779 goto efault;
8781 ptv = &tv;
8784 if (arg2) {
8785 if (copy_from_user_timezone(&tz, arg2)) {
8786 goto efault;
8788 ptz = &tz;
8791 ret = get_errno(settimeofday(ptv, ptz));
8793 break;
8794 #if defined(TARGET_NR_select)
8795 case TARGET_NR_select:
8796 #if defined(TARGET_WANT_NI_OLD_SELECT)
8797 /* some architectures used to have old_select here
8798 * but now ENOSYS it.
8800 ret = -TARGET_ENOSYS;
8801 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8802 ret = do_old_select(arg1);
8803 #else
8804 ret = do_select(arg1, arg2, arg3, arg4, arg5);
8805 #endif
8806 break;
8807 #endif
8808 #ifdef TARGET_NR_pselect6
8809 case TARGET_NR_pselect6:
8811 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8812 fd_set rfds, wfds, efds;
8813 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8814 struct timespec ts, *ts_ptr;
8817 * The 6th arg is actually two args smashed together,
8818 * so we cannot use the C library.
8820 sigset_t set;
8821 struct {
8822 sigset_t *set;
8823 size_t size;
8824 } sig, *sig_ptr;
8826 abi_ulong arg_sigset, arg_sigsize, *arg7;
8827 target_sigset_t *target_sigset;
8829 n = arg1;
8830 rfd_addr = arg2;
8831 wfd_addr = arg3;
8832 efd_addr = arg4;
8833 ts_addr = arg5;
8835 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8836 if (ret) {
8837 goto fail;
8839 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8840 if (ret) {
8841 goto fail;
8843 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8844 if (ret) {
8845 goto fail;
8849 * This takes a timespec, and not a timeval, so we cannot
8850 * use the do_select() helper ...
8852 if (ts_addr) {
8853 if (target_to_host_timespec(&ts, ts_addr)) {
8854 goto efault;
8856 ts_ptr = &ts;
8857 } else {
8858 ts_ptr = NULL;
8861 /* Extract the two packed args for the sigset */
8862 if (arg6) {
8863 sig_ptr = &sig;
8864 sig.size = SIGSET_T_SIZE;
8866 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8867 if (!arg7) {
8868 goto efault;
8870 arg_sigset = tswapal(arg7[0]);
8871 arg_sigsize = tswapal(arg7[1]);
8872 unlock_user(arg7, arg6, 0);
8874 if (arg_sigset) {
8875 sig.set = &set;
8876 if (arg_sigsize != sizeof(*target_sigset)) {
8877 /* Like the kernel, we enforce correct size sigsets */
8878 ret = -TARGET_EINVAL;
8879 goto fail;
8881 target_sigset = lock_user(VERIFY_READ, arg_sigset,
8882 sizeof(*target_sigset), 1);
8883 if (!target_sigset) {
8884 goto efault;
8886 target_to_host_sigset(&set, target_sigset);
8887 unlock_user(target_sigset, arg_sigset, 0);
8888 } else {
8889 sig.set = NULL;
8891 } else {
8892 sig_ptr = NULL;
8895 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8896 ts_ptr, sig_ptr));
8898 if (!is_error(ret)) {
8899 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8900 goto efault;
8901 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8902 goto efault;
8903 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8904 goto efault;
8906 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8907 goto efault;
8910 break;
8911 #endif
8912 #ifdef TARGET_NR_symlink
8913 case TARGET_NR_symlink:
8915 void *p2;
8916 p = lock_user_string(arg1);
8917 p2 = lock_user_string(arg2);
8918 if (!p || !p2)
8919 ret = -TARGET_EFAULT;
8920 else
8921 ret = get_errno(symlink(p, p2));
8922 unlock_user(p2, arg2, 0);
8923 unlock_user(p, arg1, 0);
8925 break;
8926 #endif
8927 #if defined(TARGET_NR_symlinkat)
8928 case TARGET_NR_symlinkat:
8930 void *p2;
8931 p = lock_user_string(arg1);
8932 p2 = lock_user_string(arg3);
8933 if (!p || !p2)
8934 ret = -TARGET_EFAULT;
8935 else
8936 ret = get_errno(symlinkat(p, arg2, p2));
8937 unlock_user(p2, arg3, 0);
8938 unlock_user(p, arg1, 0);
8940 break;
8941 #endif
8942 #ifdef TARGET_NR_oldlstat
8943 case TARGET_NR_oldlstat:
8944 goto unimplemented;
8945 #endif
8946 #ifdef TARGET_NR_readlink
8947 case TARGET_NR_readlink:
8949 void *p2;
8950 p = lock_user_string(arg1);
8951 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8952 if (!p || !p2) {
8953 ret = -TARGET_EFAULT;
8954 } else if (!arg3) {
8955 /* Short circuit this for the magic exe check. */
8956 ret = -TARGET_EINVAL;
8957 } else if (is_proc_myself((const char *)p, "exe")) {
8958 char real[PATH_MAX], *temp;
8959 temp = realpath(exec_path, real);
8960 /* Return value is # of bytes that we wrote to the buffer. */
8961 if (temp == NULL) {
8962 ret = get_errno(-1);
8963 } else {
8964 /* Don't worry about sign mismatch as earlier mapping
8965 * logic would have thrown a bad address error. */
8966 ret = MIN(strlen(real), arg3);
8967 /* We cannot NUL terminate the string. */
8968 memcpy(p2, real, ret);
8970 } else {
8971 ret = get_errno(readlink(path(p), p2, arg3));
8973 unlock_user(p2, arg2, ret);
8974 unlock_user(p, arg1, 0);
8976 break;
8977 #endif
8978 #if defined(TARGET_NR_readlinkat)
8979 case TARGET_NR_readlinkat:
8981 void *p2;
8982 p = lock_user_string(arg2);
8983 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8984 if (!p || !p2) {
8985 ret = -TARGET_EFAULT;
8986 } else if (is_proc_myself((const char *)p, "exe")) {
8987 char real[PATH_MAX], *temp;
8988 temp = realpath(exec_path, real);
8989 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
8990 snprintf((char *)p2, arg4, "%s", real);
8991 } else {
8992 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8994 unlock_user(p2, arg3, ret);
8995 unlock_user(p, arg2, 0);
8997 break;
8998 #endif
8999 #ifdef TARGET_NR_uselib
9000 case TARGET_NR_uselib:
9001 goto unimplemented;
9002 #endif
9003 #ifdef TARGET_NR_swapon
9004 case TARGET_NR_swapon:
9005 if (!(p = lock_user_string(arg1)))
9006 goto efault;
9007 ret = get_errno(swapon(p, arg2));
9008 unlock_user(p, arg1, 0);
9009 break;
9010 #endif
9011 case TARGET_NR_reboot:
9012 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9013 /* arg4 must be ignored in all other cases */
9014 p = lock_user_string(arg4);
9015 if (!p) {
9016 goto efault;
9018 ret = get_errno(reboot(arg1, arg2, arg3, p));
9019 unlock_user(p, arg4, 0);
9020 } else {
9021 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9023 break;
9024 #ifdef TARGET_NR_readdir
9025 case TARGET_NR_readdir:
9026 goto unimplemented;
9027 #endif
9028 #ifdef TARGET_NR_mmap
9029 case TARGET_NR_mmap:
9030 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9031 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9032 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9033 || defined(TARGET_S390X)
9035 abi_ulong *v;
9036 abi_ulong v1, v2, v3, v4, v5, v6;
9037 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9038 goto efault;
9039 v1 = tswapal(v[0]);
9040 v2 = tswapal(v[1]);
9041 v3 = tswapal(v[2]);
9042 v4 = tswapal(v[3]);
9043 v5 = tswapal(v[4]);
9044 v6 = tswapal(v[5]);
9045 unlock_user(v, arg1, 0);
9046 ret = get_errno(target_mmap(v1, v2, v3,
9047 target_to_host_bitmask(v4, mmap_flags_tbl),
9048 v5, v6));
9050 #else
9051 ret = get_errno(target_mmap(arg1, arg2, arg3,
9052 target_to_host_bitmask(arg4, mmap_flags_tbl),
9053 arg5,
9054 arg6));
9055 #endif
9056 break;
9057 #endif
9058 #ifdef TARGET_NR_mmap2
9059 case TARGET_NR_mmap2:
9060 #ifndef MMAP_SHIFT
9061 #define MMAP_SHIFT 12
9062 #endif
9063 ret = get_errno(target_mmap(arg1, arg2, arg3,
9064 target_to_host_bitmask(arg4, mmap_flags_tbl),
9065 arg5,
9066 arg6 << MMAP_SHIFT));
9067 break;
9068 #endif
9069 case TARGET_NR_munmap:
9070 ret = get_errno(target_munmap(arg1, arg2));
9071 break;
9072 case TARGET_NR_mprotect:
9074 TaskState *ts = cpu->opaque;
9075 /* Special hack to detect libc making the stack executable. */
9076 if ((arg3 & PROT_GROWSDOWN)
9077 && arg1 >= ts->info->stack_limit
9078 && arg1 <= ts->info->start_stack) {
9079 arg3 &= ~PROT_GROWSDOWN;
9080 arg2 = arg2 + arg1 - ts->info->stack_limit;
9081 arg1 = ts->info->stack_limit;
9084 ret = get_errno(target_mprotect(arg1, arg2, arg3));
9085 break;
9086 #ifdef TARGET_NR_mremap
9087 case TARGET_NR_mremap:
9088 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9089 break;
9090 #endif
9091 /* ??? msync/mlock/munlock are broken for softmmu. */
9092 #ifdef TARGET_NR_msync
9093 case TARGET_NR_msync:
9094 ret = get_errno(msync(g2h(arg1), arg2, arg3));
9095 break;
9096 #endif
9097 #ifdef TARGET_NR_mlock
9098 case TARGET_NR_mlock:
9099 ret = get_errno(mlock(g2h(arg1), arg2));
9100 break;
9101 #endif
9102 #ifdef TARGET_NR_munlock
9103 case TARGET_NR_munlock:
9104 ret = get_errno(munlock(g2h(arg1), arg2));
9105 break;
9106 #endif
9107 #ifdef TARGET_NR_mlockall
9108 case TARGET_NR_mlockall:
9109 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9110 break;
9111 #endif
9112 #ifdef TARGET_NR_munlockall
9113 case TARGET_NR_munlockall:
9114 ret = get_errno(munlockall());
9115 break;
9116 #endif
9117 case TARGET_NR_truncate:
9118 if (!(p = lock_user_string(arg1)))
9119 goto efault;
9120 ret = get_errno(truncate(p, arg2));
9121 unlock_user(p, arg1, 0);
9122 break;
9123 case TARGET_NR_ftruncate:
9124 ret = get_errno(ftruncate(arg1, arg2));
9125 break;
9126 case TARGET_NR_fchmod:
9127 ret = get_errno(fchmod(arg1, arg2));
9128 break;
9129 #if defined(TARGET_NR_fchmodat)
9130 case TARGET_NR_fchmodat:
9131 if (!(p = lock_user_string(arg2)))
9132 goto efault;
9133 ret = get_errno(fchmodat(arg1, p, arg3, 0));
9134 unlock_user(p, arg2, 0);
9135 break;
9136 #endif
9137 case TARGET_NR_getpriority:
9138 /* Note that negative values are valid for getpriority, so we must
9139 differentiate based on errno settings. */
9140 errno = 0;
9141 ret = getpriority(arg1, arg2);
9142 if (ret == -1 && errno != 0) {
9143 ret = -host_to_target_errno(errno);
9144 break;
9146 #ifdef TARGET_ALPHA
9147 /* Return value is the unbiased priority. Signal no error. */
9148 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9149 #else
9150 /* Return value is a biased priority to avoid negative numbers. */
9151 ret = 20 - ret;
9152 #endif
9153 break;
9154 case TARGET_NR_setpriority:
9155 ret = get_errno(setpriority(arg1, arg2, arg3));
9156 break;
9157 #ifdef TARGET_NR_profil
9158 case TARGET_NR_profil:
9159 goto unimplemented;
9160 #endif
9161 case TARGET_NR_statfs:
9162 if (!(p = lock_user_string(arg1)))
9163 goto efault;
9164 ret = get_errno(statfs(path(p), &stfs));
9165 unlock_user(p, arg1, 0);
9166 convert_statfs:
9167 if (!is_error(ret)) {
9168 struct target_statfs *target_stfs;
9170 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9171 goto efault;
9172 __put_user(stfs.f_type, &target_stfs->f_type);
9173 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9174 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9175 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9176 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9177 __put_user(stfs.f_files, &target_stfs->f_files);
9178 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9179 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9180 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9181 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9182 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9183 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9184 unlock_user_struct(target_stfs, arg2, 1);
9186 break;
9187 case TARGET_NR_fstatfs:
9188 ret = get_errno(fstatfs(arg1, &stfs));
9189 goto convert_statfs;
9190 #ifdef TARGET_NR_statfs64
9191 case TARGET_NR_statfs64:
9192 if (!(p = lock_user_string(arg1)))
9193 goto efault;
9194 ret = get_errno(statfs(path(p), &stfs));
9195 unlock_user(p, arg1, 0);
9196 convert_statfs64:
9197 if (!is_error(ret)) {
9198 struct target_statfs64 *target_stfs;
9200 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9201 goto efault;
9202 __put_user(stfs.f_type, &target_stfs->f_type);
9203 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9204 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9205 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9206 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9207 __put_user(stfs.f_files, &target_stfs->f_files);
9208 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9209 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9210 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9211 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9212 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9213 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9214 unlock_user_struct(target_stfs, arg3, 1);
9216 break;
9217 case TARGET_NR_fstatfs64:
9218 ret = get_errno(fstatfs(arg1, &stfs));
9219 goto convert_statfs64;
9220 #endif
9221 #ifdef TARGET_NR_ioperm
9222 case TARGET_NR_ioperm:
9223 goto unimplemented;
9224 #endif
9225 #ifdef TARGET_NR_socketcall
9226 case TARGET_NR_socketcall:
9227 ret = do_socketcall(arg1, arg2);
9228 break;
9229 #endif
9230 #ifdef TARGET_NR_accept
9231 case TARGET_NR_accept:
9232 ret = do_accept4(arg1, arg2, arg3, 0);
9233 break;
9234 #endif
9235 #ifdef TARGET_NR_accept4
9236 case TARGET_NR_accept4:
9237 ret = do_accept4(arg1, arg2, arg3, arg4);
9238 break;
9239 #endif
9240 #ifdef TARGET_NR_bind
9241 case TARGET_NR_bind:
9242 ret = do_bind(arg1, arg2, arg3);
9243 break;
9244 #endif
9245 #ifdef TARGET_NR_connect
9246 case TARGET_NR_connect:
9247 ret = do_connect(arg1, arg2, arg3);
9248 break;
9249 #endif
9250 #ifdef TARGET_NR_getpeername
9251 case TARGET_NR_getpeername:
9252 ret = do_getpeername(arg1, arg2, arg3);
9253 break;
9254 #endif
9255 #ifdef TARGET_NR_getsockname
9256 case TARGET_NR_getsockname:
9257 ret = do_getsockname(arg1, arg2, arg3);
9258 break;
9259 #endif
9260 #ifdef TARGET_NR_getsockopt
9261 case TARGET_NR_getsockopt:
9262 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9263 break;
9264 #endif
9265 #ifdef TARGET_NR_listen
9266 case TARGET_NR_listen:
9267 ret = get_errno(listen(arg1, arg2));
9268 break;
9269 #endif
9270 #ifdef TARGET_NR_recv
9271 case TARGET_NR_recv:
9272 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9273 break;
9274 #endif
9275 #ifdef TARGET_NR_recvfrom
9276 case TARGET_NR_recvfrom:
9277 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9278 break;
9279 #endif
9280 #ifdef TARGET_NR_recvmsg
9281 case TARGET_NR_recvmsg:
9282 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
9283 break;
9284 #endif
9285 #ifdef TARGET_NR_send
9286 case TARGET_NR_send:
9287 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9288 break;
9289 #endif
9290 #ifdef TARGET_NR_sendmsg
9291 case TARGET_NR_sendmsg:
9292 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
9293 break;
9294 #endif
9295 #ifdef TARGET_NR_sendmmsg
9296 case TARGET_NR_sendmmsg:
9297 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9298 break;
9299 case TARGET_NR_recvmmsg:
9300 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9301 break;
9302 #endif
9303 #ifdef TARGET_NR_sendto
9304 case TARGET_NR_sendto:
9305 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9306 break;
9307 #endif
9308 #ifdef TARGET_NR_shutdown
9309 case TARGET_NR_shutdown:
9310 ret = get_errno(shutdown(arg1, arg2));
9311 break;
9312 #endif
9313 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9314 case TARGET_NR_getrandom:
9315 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9316 if (!p) {
9317 goto efault;
9319 ret = get_errno(getrandom(p, arg2, arg3));
9320 unlock_user(p, arg1, ret);
9321 break;
9322 #endif
9323 #ifdef TARGET_NR_socket
9324 case TARGET_NR_socket:
9325 ret = do_socket(arg1, arg2, arg3);
9326 fd_trans_unregister(ret);
9327 break;
9328 #endif
9329 #ifdef TARGET_NR_socketpair
9330 case TARGET_NR_socketpair:
9331 ret = do_socketpair(arg1, arg2, arg3, arg4);
9332 break;
9333 #endif
9334 #ifdef TARGET_NR_setsockopt
9335 case TARGET_NR_setsockopt:
9336 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9337 break;
9338 #endif
9339 #if defined(TARGET_NR_syslog)
9340 case TARGET_NR_syslog:
9342 int len = arg2;
9344 switch (arg1) {
9345 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
9346 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
9347 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
9348 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
9349 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
9350 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9351 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
9352 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
9354 ret = get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9356 break;
9357 case TARGET_SYSLOG_ACTION_READ: /* Read from log */
9358 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
9359 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
9361 ret = -TARGET_EINVAL;
9362 if (len < 0) {
9363 goto fail;
9365 ret = 0;
9366 if (len == 0) {
9367 break;
9369 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9370 if (!p) {
9371 ret = -TARGET_EFAULT;
9372 goto fail;
9374 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9375 unlock_user(p, arg2, arg3);
9377 break;
9378 default:
9379 ret = -EINVAL;
9380 break;
9383 break;
9384 #endif
9385 case TARGET_NR_setitimer:
9387 struct itimerval value, ovalue, *pvalue;
9389 if (arg2) {
9390 pvalue = &value;
9391 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9392 || copy_from_user_timeval(&pvalue->it_value,
9393 arg2 + sizeof(struct target_timeval)))
9394 goto efault;
9395 } else {
9396 pvalue = NULL;
9398 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9399 if (!is_error(ret) && arg3) {
9400 if (copy_to_user_timeval(arg3,
9401 &ovalue.it_interval)
9402 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9403 &ovalue.it_value))
9404 goto efault;
9407 break;
9408 case TARGET_NR_getitimer:
9410 struct itimerval value;
9412 ret = get_errno(getitimer(arg1, &value));
9413 if (!is_error(ret) && arg2) {
9414 if (copy_to_user_timeval(arg2,
9415 &value.it_interval)
9416 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9417 &value.it_value))
9418 goto efault;
9421 break;
9422 #ifdef TARGET_NR_stat
9423 case TARGET_NR_stat:
9424 if (!(p = lock_user_string(arg1)))
9425 goto efault;
9426 ret = get_errno(stat(path(p), &st));
9427 unlock_user(p, arg1, 0);
9428 goto do_stat;
9429 #endif
9430 #ifdef TARGET_NR_lstat
9431 case TARGET_NR_lstat:
9432 if (!(p = lock_user_string(arg1)))
9433 goto efault;
9434 ret = get_errno(lstat(path(p), &st));
9435 unlock_user(p, arg1, 0);
9436 goto do_stat;
9437 #endif
9438 case TARGET_NR_fstat:
9440 ret = get_errno(fstat(arg1, &st));
9441 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9442 do_stat:
9443 #endif
9444 if (!is_error(ret)) {
9445 struct target_stat *target_st;
9447 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9448 goto efault;
9449 memset(target_st, 0, sizeof(*target_st));
9450 __put_user(st.st_dev, &target_st->st_dev);
9451 __put_user(st.st_ino, &target_st->st_ino);
9452 __put_user(st.st_mode, &target_st->st_mode);
9453 __put_user(st.st_uid, &target_st->st_uid);
9454 __put_user(st.st_gid, &target_st->st_gid);
9455 __put_user(st.st_nlink, &target_st->st_nlink);
9456 __put_user(st.st_rdev, &target_st->st_rdev);
9457 __put_user(st.st_size, &target_st->st_size);
9458 __put_user(st.st_blksize, &target_st->st_blksize);
9459 __put_user(st.st_blocks, &target_st->st_blocks);
9460 __put_user(st.st_atime, &target_st->target_st_atime);
9461 __put_user(st.st_mtime, &target_st->target_st_mtime);
9462 __put_user(st.st_ctime, &target_st->target_st_ctime);
9463 unlock_user_struct(target_st, arg2, 1);
9466 break;
9467 #ifdef TARGET_NR_olduname
9468 case TARGET_NR_olduname:
9469 goto unimplemented;
9470 #endif
9471 #ifdef TARGET_NR_iopl
9472 case TARGET_NR_iopl:
9473 goto unimplemented;
9474 #endif
9475 case TARGET_NR_vhangup:
9476 ret = get_errno(vhangup());
9477 break;
9478 #ifdef TARGET_NR_idle
9479 case TARGET_NR_idle:
9480 goto unimplemented;
9481 #endif
9482 #ifdef TARGET_NR_syscall
9483 case TARGET_NR_syscall:
9484 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9485 arg6, arg7, arg8, 0);
9486 break;
9487 #endif
9488 case TARGET_NR_wait4:
9490 int status;
9491 abi_long status_ptr = arg2;
9492 struct rusage rusage, *rusage_ptr;
9493 abi_ulong target_rusage = arg4;
9494 abi_long rusage_err;
9495 if (target_rusage)
9496 rusage_ptr = &rusage;
9497 else
9498 rusage_ptr = NULL;
9499 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9500 if (!is_error(ret)) {
9501 if (status_ptr && ret) {
9502 status = host_to_target_waitstatus(status);
9503 if (put_user_s32(status, status_ptr))
9504 goto efault;
9506 if (target_rusage) {
9507 rusage_err = host_to_target_rusage(target_rusage, &rusage);
9508 if (rusage_err) {
9509 ret = rusage_err;
9514 break;
9515 #ifdef TARGET_NR_swapoff
9516 case TARGET_NR_swapoff:
9517 if (!(p = lock_user_string(arg1)))
9518 goto efault;
9519 ret = get_errno(swapoff(p));
9520 unlock_user(p, arg1, 0);
9521 break;
9522 #endif
9523 case TARGET_NR_sysinfo:
9525 struct target_sysinfo *target_value;
9526 struct sysinfo value;
9527 ret = get_errno(sysinfo(&value));
9528 if (!is_error(ret) && arg1)
9530 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9531 goto efault;
9532 __put_user(value.uptime, &target_value->uptime);
9533 __put_user(value.loads[0], &target_value->loads[0]);
9534 __put_user(value.loads[1], &target_value->loads[1]);
9535 __put_user(value.loads[2], &target_value->loads[2]);
9536 __put_user(value.totalram, &target_value->totalram);
9537 __put_user(value.freeram, &target_value->freeram);
9538 __put_user(value.sharedram, &target_value->sharedram);
9539 __put_user(value.bufferram, &target_value->bufferram);
9540 __put_user(value.totalswap, &target_value->totalswap);
9541 __put_user(value.freeswap, &target_value->freeswap);
9542 __put_user(value.procs, &target_value->procs);
9543 __put_user(value.totalhigh, &target_value->totalhigh);
9544 __put_user(value.freehigh, &target_value->freehigh);
9545 __put_user(value.mem_unit, &target_value->mem_unit);
9546 unlock_user_struct(target_value, arg1, 1);
9549 break;
9550 #ifdef TARGET_NR_ipc
9551 case TARGET_NR_ipc:
9552 ret = do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9553 break;
9554 #endif
9555 #ifdef TARGET_NR_semget
9556 case TARGET_NR_semget:
9557 ret = get_errno(semget(arg1, arg2, arg3));
9558 break;
9559 #endif
9560 #ifdef TARGET_NR_semop
9561 case TARGET_NR_semop:
9562 ret = do_semop(arg1, arg2, arg3);
9563 break;
9564 #endif
9565 #ifdef TARGET_NR_semctl
9566 case TARGET_NR_semctl:
9567 ret = do_semctl(arg1, arg2, arg3, arg4);
9568 break;
9569 #endif
9570 #ifdef TARGET_NR_msgctl
9571 case TARGET_NR_msgctl:
9572 ret = do_msgctl(arg1, arg2, arg3);
9573 break;
9574 #endif
9575 #ifdef TARGET_NR_msgget
9576 case TARGET_NR_msgget:
9577 ret = get_errno(msgget(arg1, arg2));
9578 break;
9579 #endif
9580 #ifdef TARGET_NR_msgrcv
9581 case TARGET_NR_msgrcv:
9582 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9583 break;
9584 #endif
9585 #ifdef TARGET_NR_msgsnd
9586 case TARGET_NR_msgsnd:
9587 ret = do_msgsnd(arg1, arg2, arg3, arg4);
9588 break;
9589 #endif
9590 #ifdef TARGET_NR_shmget
9591 case TARGET_NR_shmget:
9592 ret = get_errno(shmget(arg1, arg2, arg3));
9593 break;
9594 #endif
9595 #ifdef TARGET_NR_shmctl
9596 case TARGET_NR_shmctl:
9597 ret = do_shmctl(arg1, arg2, arg3);
9598 break;
9599 #endif
9600 #ifdef TARGET_NR_shmat
9601 case TARGET_NR_shmat:
9602 ret = do_shmat(cpu_env, arg1, arg2, arg3);
9603 break;
9604 #endif
9605 #ifdef TARGET_NR_shmdt
9606 case TARGET_NR_shmdt:
9607 ret = do_shmdt(arg1);
9608 break;
9609 #endif
9610 case TARGET_NR_fsync:
9611 ret = get_errno(fsync(arg1));
9612 break;
9613 case TARGET_NR_clone:
9614 /* Linux manages to have three different orderings for its
9615 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9616 * match the kernel's CONFIG_CLONE_* settings.
9617 * Microblaze is further special in that it uses a sixth
9618 * implicit argument to clone for the TLS pointer.
9620 #if defined(TARGET_MICROBLAZE)
9621 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9622 #elif defined(TARGET_CLONE_BACKWARDS)
9623 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9624 #elif defined(TARGET_CLONE_BACKWARDS2)
9625 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9626 #else
9627 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9628 #endif
9629 break;
9630 #ifdef __NR_exit_group
9631 /* new thread calls */
9632 case TARGET_NR_exit_group:
9633 #ifdef TARGET_GPROF
9634 _mcleanup();
9635 #endif
9636 gdb_exit(cpu_env, arg1);
9637 ret = get_errno(exit_group(arg1));
9638 break;
9639 #endif
9640 case TARGET_NR_setdomainname:
9641 if (!(p = lock_user_string(arg1)))
9642 goto efault;
9643 ret = get_errno(setdomainname(p, arg2));
9644 unlock_user(p, arg1, 0);
9645 break;
9646 case TARGET_NR_uname:
9647 /* no need to transcode because we use the linux syscall */
9649 struct new_utsname * buf;
9651 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9652 goto efault;
9653 ret = get_errno(sys_uname(buf));
9654 if (!is_error(ret)) {
9655 /* Overwrite the native machine name with whatever is being
9656 emulated. */
9657 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
9658 /* Allow the user to override the reported release. */
9659 if (qemu_uname_release && *qemu_uname_release) {
9660 g_strlcpy(buf->release, qemu_uname_release,
9661 sizeof(buf->release));
9664 unlock_user_struct(buf, arg1, 1);
9666 break;
9667 #ifdef TARGET_I386
9668 case TARGET_NR_modify_ldt:
9669 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
9670 break;
9671 #if !defined(TARGET_X86_64)
9672 case TARGET_NR_vm86old:
9673 goto unimplemented;
9674 case TARGET_NR_vm86:
9675 ret = do_vm86(cpu_env, arg1, arg2);
9676 break;
9677 #endif
9678 #endif
9679 case TARGET_NR_adjtimex:
9681 struct timex host_buf;
9683 if (target_to_host_timex(&host_buf, arg1) != 0) {
9684 goto efault;
9686 ret = get_errno(adjtimex(&host_buf));
9687 if (!is_error(ret)) {
9688 if (host_to_target_timex(arg1, &host_buf) != 0) {
9689 goto efault;
9693 break;
9694 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9695 case TARGET_NR_clock_adjtime:
9697 struct timex htx, *phtx = &htx;
9699 if (target_to_host_timex(phtx, arg2) != 0) {
9700 goto efault;
9702 ret = get_errno(clock_adjtime(arg1, phtx));
9703 if (!is_error(ret) && phtx) {
9704 if (host_to_target_timex(arg2, phtx) != 0) {
9705 goto efault;
9709 break;
9710 #endif
9711 #ifdef TARGET_NR_create_module
9712 case TARGET_NR_create_module:
9713 #endif
9714 case TARGET_NR_init_module:
9715 case TARGET_NR_delete_module:
9716 #ifdef TARGET_NR_get_kernel_syms
9717 case TARGET_NR_get_kernel_syms:
9718 #endif
9719 goto unimplemented;
9720 case TARGET_NR_quotactl:
9721 goto unimplemented;
9722 case TARGET_NR_getpgid:
9723 ret = get_errno(getpgid(arg1));
9724 break;
9725 case TARGET_NR_fchdir:
9726 ret = get_errno(fchdir(arg1));
9727 break;
9728 #ifdef TARGET_NR_bdflush /* not on x86_64 */
9729 case TARGET_NR_bdflush:
9730 goto unimplemented;
9731 #endif
9732 #ifdef TARGET_NR_sysfs
9733 case TARGET_NR_sysfs:
9734 goto unimplemented;
9735 #endif
9736 case TARGET_NR_personality:
9737 ret = get_errno(personality(arg1));
9738 break;
9739 #ifdef TARGET_NR_afs_syscall
9740 case TARGET_NR_afs_syscall:
9741 goto unimplemented;
9742 #endif
9743 #ifdef TARGET_NR__llseek /* Not on alpha */
9744 case TARGET_NR__llseek:
9746 int64_t res;
9747 #if !defined(__NR_llseek)
9748 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9749 if (res == -1) {
9750 ret = get_errno(res);
9751 } else {
9752 ret = 0;
9754 #else
9755 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9756 #endif
9757 if ((ret == 0) && put_user_s64(res, arg4)) {
9758 goto efault;
9761 break;
9762 #endif
9763 #ifdef TARGET_NR_getdents
9764 case TARGET_NR_getdents:
9765 #ifdef __NR_getdents
9766 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9768 struct target_dirent *target_dirp;
9769 struct linux_dirent *dirp;
9770 abi_long count = arg3;
9772 dirp = g_try_malloc(count);
9773 if (!dirp) {
9774 ret = -TARGET_ENOMEM;
9775 goto fail;
9778 ret = get_errno(sys_getdents(arg1, dirp, count));
9779 if (!is_error(ret)) {
9780 struct linux_dirent *de;
9781 struct target_dirent *tde;
9782 int len = ret;
9783 int reclen, treclen;
9784 int count1, tnamelen;
9786 count1 = 0;
9787 de = dirp;
9788 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9789 goto efault;
9790 tde = target_dirp;
9791 while (len > 0) {
9792 reclen = de->d_reclen;
9793 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9794 assert(tnamelen >= 0);
9795 treclen = tnamelen + offsetof(struct target_dirent, d_name);
9796 assert(count1 + treclen <= count);
9797 tde->d_reclen = tswap16(treclen);
9798 tde->d_ino = tswapal(de->d_ino);
9799 tde->d_off = tswapal(de->d_off);
9800 memcpy(tde->d_name, de->d_name, tnamelen);
9801 de = (struct linux_dirent *)((char *)de + reclen);
9802 len -= reclen;
9803 tde = (struct target_dirent *)((char *)tde + treclen);
9804 count1 += treclen;
9806 ret = count1;
9807 unlock_user(target_dirp, arg2, ret);
9809 g_free(dirp);
9811 #else
9813 struct linux_dirent *dirp;
9814 abi_long count = arg3;
9816 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9817 goto efault;
9818 ret = get_errno(sys_getdents(arg1, dirp, count));
9819 if (!is_error(ret)) {
9820 struct linux_dirent *de;
9821 int len = ret;
9822 int reclen;
9823 de = dirp;
9824 while (len > 0) {
9825 reclen = de->d_reclen;
9826 if (reclen > len)
9827 break;
9828 de->d_reclen = tswap16(reclen);
9829 tswapls(&de->d_ino);
9830 tswapls(&de->d_off);
9831 de = (struct linux_dirent *)((char *)de + reclen);
9832 len -= reclen;
9835 unlock_user(dirp, arg2, ret);
9837 #endif
9838 #else
9839 /* Implement getdents in terms of getdents64 */
9841 struct linux_dirent64 *dirp;
9842 abi_long count = arg3;
9844 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9845 if (!dirp) {
9846 goto efault;
9848 ret = get_errno(sys_getdents64(arg1, dirp, count));
9849 if (!is_error(ret)) {
9850 /* Convert the dirent64 structs to target dirent. We do this
9851 * in-place, since we can guarantee that a target_dirent is no
9852 * larger than a dirent64; however this means we have to be
9853 * careful to read everything before writing in the new format.
9855 struct linux_dirent64 *de;
9856 struct target_dirent *tde;
9857 int len = ret;
9858 int tlen = 0;
9860 de = dirp;
9861 tde = (struct target_dirent *)dirp;
9862 while (len > 0) {
9863 int namelen, treclen;
9864 int reclen = de->d_reclen;
9865 uint64_t ino = de->d_ino;
9866 int64_t off = de->d_off;
9867 uint8_t type = de->d_type;
9869 namelen = strlen(de->d_name);
9870 treclen = offsetof(struct target_dirent, d_name)
9871 + namelen + 2;
9872 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9874 memmove(tde->d_name, de->d_name, namelen + 1);
9875 tde->d_ino = tswapal(ino);
9876 tde->d_off = tswapal(off);
9877 tde->d_reclen = tswap16(treclen);
9878 /* The target_dirent type is in what was formerly a padding
9879 * byte at the end of the structure:
9881 *(((char *)tde) + treclen - 1) = type;
9883 de = (struct linux_dirent64 *)((char *)de + reclen);
9884 tde = (struct target_dirent *)((char *)tde + treclen);
9885 len -= reclen;
9886 tlen += treclen;
9888 ret = tlen;
9890 unlock_user(dirp, arg2, ret);
9892 #endif
9893 break;
9894 #endif /* TARGET_NR_getdents */
9895 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9896 case TARGET_NR_getdents64:
9898 struct linux_dirent64 *dirp;
9899 abi_long count = arg3;
9900 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9901 goto efault;
9902 ret = get_errno(sys_getdents64(arg1, dirp, count));
9903 if (!is_error(ret)) {
9904 struct linux_dirent64 *de;
9905 int len = ret;
9906 int reclen;
9907 de = dirp;
9908 while (len > 0) {
9909 reclen = de->d_reclen;
9910 if (reclen > len)
9911 break;
9912 de->d_reclen = tswap16(reclen);
9913 tswap64s((uint64_t *)&de->d_ino);
9914 tswap64s((uint64_t *)&de->d_off);
9915 de = (struct linux_dirent64 *)((char *)de + reclen);
9916 len -= reclen;
9919 unlock_user(dirp, arg2, ret);
9921 break;
9922 #endif /* TARGET_NR_getdents64 */
9923 #if defined(TARGET_NR__newselect)
9924 case TARGET_NR__newselect:
9925 ret = do_select(arg1, arg2, arg3, arg4, arg5);
9926 break;
9927 #endif
9928 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9929 # ifdef TARGET_NR_poll
9930 case TARGET_NR_poll:
9931 # endif
9932 # ifdef TARGET_NR_ppoll
9933 case TARGET_NR_ppoll:
9934 # endif
9936 struct target_pollfd *target_pfd;
9937 unsigned int nfds = arg2;
9938 struct pollfd *pfd;
9939 unsigned int i;
9941 pfd = NULL;
9942 target_pfd = NULL;
9943 if (nfds) {
9944 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9945 ret = -TARGET_EINVAL;
9946 break;
9949 target_pfd = lock_user(VERIFY_WRITE, arg1,
9950 sizeof(struct target_pollfd) * nfds, 1);
9951 if (!target_pfd) {
9952 goto efault;
9955 pfd = alloca(sizeof(struct pollfd) * nfds);
9956 for (i = 0; i < nfds; i++) {
9957 pfd[i].fd = tswap32(target_pfd[i].fd);
9958 pfd[i].events = tswap16(target_pfd[i].events);
9962 switch (num) {
9963 # ifdef TARGET_NR_ppoll
9964 case TARGET_NR_ppoll:
9966 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9967 target_sigset_t *target_set;
9968 sigset_t _set, *set = &_set;
9970 if (arg3) {
9971 if (target_to_host_timespec(timeout_ts, arg3)) {
9972 unlock_user(target_pfd, arg1, 0);
9973 goto efault;
9975 } else {
9976 timeout_ts = NULL;
9979 if (arg4) {
9980 if (arg5 != sizeof(target_sigset_t)) {
9981 unlock_user(target_pfd, arg1, 0);
9982 ret = -TARGET_EINVAL;
9983 break;
9986 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9987 if (!target_set) {
9988 unlock_user(target_pfd, arg1, 0);
9989 goto efault;
9991 target_to_host_sigset(set, target_set);
9992 } else {
9993 set = NULL;
9996 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9997 set, SIGSET_T_SIZE));
9999 if (!is_error(ret) && arg3) {
10000 host_to_target_timespec(arg3, timeout_ts);
10002 if (arg4) {
10003 unlock_user(target_set, arg4, 0);
10005 break;
10007 # endif
10008 # ifdef TARGET_NR_poll
10009 case TARGET_NR_poll:
10011 struct timespec ts, *pts;
10013 if (arg3 >= 0) {
10014 /* Convert ms to secs, ns */
10015 ts.tv_sec = arg3 / 1000;
10016 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
10017 pts = &ts;
10018 } else {
10019 /* -ve poll() timeout means "infinite" */
10020 pts = NULL;
10022 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
10023 break;
10025 # endif
10026 default:
10027 g_assert_not_reached();
10030 if (!is_error(ret)) {
10031 for(i = 0; i < nfds; i++) {
10032 target_pfd[i].revents = tswap16(pfd[i].revents);
10035 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
10037 break;
10038 #endif
10039 case TARGET_NR_flock:
10040 /* NOTE: the flock constant seems to be the same for every
10041 Linux platform */
10042 ret = get_errno(safe_flock(arg1, arg2));
10043 break;
10044 case TARGET_NR_readv:
10046 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10047 if (vec != NULL) {
10048 ret = get_errno(safe_readv(arg1, vec, arg3));
10049 unlock_iovec(vec, arg2, arg3, 1);
10050 } else {
10051 ret = -host_to_target_errno(errno);
10054 break;
10055 case TARGET_NR_writev:
10057 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10058 if (vec != NULL) {
10059 ret = get_errno(safe_writev(arg1, vec, arg3));
10060 unlock_iovec(vec, arg2, arg3, 0);
10061 } else {
10062 ret = -host_to_target_errno(errno);
10065 break;
10066 #if defined(TARGET_NR_preadv)
10067 case TARGET_NR_preadv:
10069 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10070 if (vec != NULL) {
10071 ret = get_errno(safe_preadv(arg1, vec, arg3, arg4, arg5));
10072 unlock_iovec(vec, arg2, arg3, 1);
10073 } else {
10074 ret = -host_to_target_errno(errno);
10077 break;
10078 #endif
10079 #if defined(TARGET_NR_pwritev)
10080 case TARGET_NR_pwritev:
10082 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10083 if (vec != NULL) {
10084 ret = get_errno(safe_pwritev(arg1, vec, arg3, arg4, arg5));
10085 unlock_iovec(vec, arg2, arg3, 0);
10086 } else {
10087 ret = -host_to_target_errno(errno);
10090 break;
10091 #endif
10092 case TARGET_NR_getsid:
10093 ret = get_errno(getsid(arg1));
10094 break;
10095 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10096 case TARGET_NR_fdatasync:
10097 ret = get_errno(fdatasync(arg1));
10098 break;
10099 #endif
10100 #ifdef TARGET_NR__sysctl
10101 case TARGET_NR__sysctl:
10102 /* We don't implement this, but ENOTDIR is always a safe
10103 return value. */
10104 ret = -TARGET_ENOTDIR;
10105 break;
10106 #endif
10107 case TARGET_NR_sched_getaffinity:
10109 unsigned int mask_size;
10110 unsigned long *mask;
10113 * sched_getaffinity needs multiples of ulong, so need to take
10114 * care of mismatches between target ulong and host ulong sizes.
10116 if (arg2 & (sizeof(abi_ulong) - 1)) {
10117 ret = -TARGET_EINVAL;
10118 break;
10120 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10122 mask = alloca(mask_size);
10123 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10125 if (!is_error(ret)) {
10126 if (ret > arg2) {
10127 /* More data returned than the caller's buffer will fit.
10128 * This only happens if sizeof(abi_long) < sizeof(long)
10129 * and the caller passed us a buffer holding an odd number
10130 * of abi_longs. If the host kernel is actually using the
10131 * extra 4 bytes then fail EINVAL; otherwise we can just
10132 * ignore them and only copy the interesting part.
10134 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10135 if (numcpus > arg2 * 8) {
10136 ret = -TARGET_EINVAL;
10137 break;
10139 ret = arg2;
10142 if (copy_to_user(arg3, mask, ret)) {
10143 goto efault;
10147 break;
10148 case TARGET_NR_sched_setaffinity:
10150 unsigned int mask_size;
10151 unsigned long *mask;
10154 * sched_setaffinity needs multiples of ulong, so need to take
10155 * care of mismatches between target ulong and host ulong sizes.
10157 if (arg2 & (sizeof(abi_ulong) - 1)) {
10158 ret = -TARGET_EINVAL;
10159 break;
10161 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10163 mask = alloca(mask_size);
10164 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
10165 goto efault;
10167 memcpy(mask, p, arg2);
10168 unlock_user_struct(p, arg2, 0);
10170 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10172 break;
10173 case TARGET_NR_sched_setparam:
10175 struct sched_param *target_schp;
10176 struct sched_param schp;
10178 if (arg2 == 0) {
10179 return -TARGET_EINVAL;
10181 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10182 goto efault;
10183 schp.sched_priority = tswap32(target_schp->sched_priority);
10184 unlock_user_struct(target_schp, arg2, 0);
10185 ret = get_errno(sched_setparam(arg1, &schp));
10187 break;
10188 case TARGET_NR_sched_getparam:
10190 struct sched_param *target_schp;
10191 struct sched_param schp;
10193 if (arg2 == 0) {
10194 return -TARGET_EINVAL;
10196 ret = get_errno(sched_getparam(arg1, &schp));
10197 if (!is_error(ret)) {
10198 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10199 goto efault;
10200 target_schp->sched_priority = tswap32(schp.sched_priority);
10201 unlock_user_struct(target_schp, arg2, 1);
10204 break;
10205 case TARGET_NR_sched_setscheduler:
10207 struct sched_param *target_schp;
10208 struct sched_param schp;
10209 if (arg3 == 0) {
10210 return -TARGET_EINVAL;
10212 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10213 goto efault;
10214 schp.sched_priority = tswap32(target_schp->sched_priority);
10215 unlock_user_struct(target_schp, arg3, 0);
10216 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
10218 break;
10219 case TARGET_NR_sched_getscheduler:
10220 ret = get_errno(sched_getscheduler(arg1));
10221 break;
10222 case TARGET_NR_sched_yield:
10223 ret = get_errno(sched_yield());
10224 break;
10225 case TARGET_NR_sched_get_priority_max:
10226 ret = get_errno(sched_get_priority_max(arg1));
10227 break;
10228 case TARGET_NR_sched_get_priority_min:
10229 ret = get_errno(sched_get_priority_min(arg1));
10230 break;
10231 case TARGET_NR_sched_rr_get_interval:
10233 struct timespec ts;
10234 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10235 if (!is_error(ret)) {
10236 ret = host_to_target_timespec(arg2, &ts);
10239 break;
10240 case TARGET_NR_nanosleep:
10242 struct timespec req, rem;
10243 target_to_host_timespec(&req, arg1);
10244 ret = get_errno(safe_nanosleep(&req, &rem));
10245 if (is_error(ret) && arg2) {
10246 host_to_target_timespec(arg2, &rem);
10249 break;
10250 #ifdef TARGET_NR_query_module
10251 case TARGET_NR_query_module:
10252 goto unimplemented;
10253 #endif
10254 #ifdef TARGET_NR_nfsservctl
10255 case TARGET_NR_nfsservctl:
10256 goto unimplemented;
10257 #endif
10258 case TARGET_NR_prctl:
10259 switch (arg1) {
10260 case PR_GET_PDEATHSIG:
10262 int deathsig;
10263 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10264 if (!is_error(ret) && arg2
10265 && put_user_ual(deathsig, arg2)) {
10266 goto efault;
10268 break;
10270 #ifdef PR_GET_NAME
10271 case PR_GET_NAME:
10273 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10274 if (!name) {
10275 goto efault;
10277 ret = get_errno(prctl(arg1, (unsigned long)name,
10278 arg3, arg4, arg5));
10279 unlock_user(name, arg2, 16);
10280 break;
10282 case PR_SET_NAME:
10284 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10285 if (!name) {
10286 goto efault;
10288 ret = get_errno(prctl(arg1, (unsigned long)name,
10289 arg3, arg4, arg5));
10290 unlock_user(name, arg2, 0);
10291 break;
10293 #endif
10294 default:
10295 /* Most prctl options have no pointer arguments */
10296 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10297 break;
10299 break;
10300 #ifdef TARGET_NR_arch_prctl
10301 case TARGET_NR_arch_prctl:
10302 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10303 ret = do_arch_prctl(cpu_env, arg1, arg2);
10304 break;
10305 #else
10306 goto unimplemented;
10307 #endif
10308 #endif
10309 #ifdef TARGET_NR_pread64
10310 case TARGET_NR_pread64:
10311 if (regpairs_aligned(cpu_env)) {
10312 arg4 = arg5;
10313 arg5 = arg6;
10315 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
10316 goto efault;
10317 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10318 unlock_user(p, arg2, ret);
10319 break;
10320 case TARGET_NR_pwrite64:
10321 if (regpairs_aligned(cpu_env)) {
10322 arg4 = arg5;
10323 arg5 = arg6;
10325 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
10326 goto efault;
10327 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10328 unlock_user(p, arg2, 0);
10329 break;
10330 #endif
10331 case TARGET_NR_getcwd:
10332 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10333 goto efault;
10334 ret = get_errno(sys_getcwd1(p, arg2));
10335 unlock_user(p, arg1, ret);
10336 break;
10337 case TARGET_NR_capget:
10338 case TARGET_NR_capset:
10340 struct target_user_cap_header *target_header;
10341 struct target_user_cap_data *target_data = NULL;
10342 struct __user_cap_header_struct header;
10343 struct __user_cap_data_struct data[2];
10344 struct __user_cap_data_struct *dataptr = NULL;
10345 int i, target_datalen;
10346 int data_items = 1;
10348 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10349 goto efault;
10351 header.version = tswap32(target_header->version);
10352 header.pid = tswap32(target_header->pid);
10354 if (header.version != _LINUX_CAPABILITY_VERSION) {
10355 /* Version 2 and up takes pointer to two user_data structs */
10356 data_items = 2;
10359 target_datalen = sizeof(*target_data) * data_items;
10361 if (arg2) {
10362 if (num == TARGET_NR_capget) {
10363 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10364 } else {
10365 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10367 if (!target_data) {
10368 unlock_user_struct(target_header, arg1, 0);
10369 goto efault;
10372 if (num == TARGET_NR_capset) {
10373 for (i = 0; i < data_items; i++) {
10374 data[i].effective = tswap32(target_data[i].effective);
10375 data[i].permitted = tswap32(target_data[i].permitted);
10376 data[i].inheritable = tswap32(target_data[i].inheritable);
10380 dataptr = data;
10383 if (num == TARGET_NR_capget) {
10384 ret = get_errno(capget(&header, dataptr));
10385 } else {
10386 ret = get_errno(capset(&header, dataptr));
10389 /* The kernel always updates version for both capget and capset */
10390 target_header->version = tswap32(header.version);
10391 unlock_user_struct(target_header, arg1, 1);
10393 if (arg2) {
10394 if (num == TARGET_NR_capget) {
10395 for (i = 0; i < data_items; i++) {
10396 target_data[i].effective = tswap32(data[i].effective);
10397 target_data[i].permitted = tswap32(data[i].permitted);
10398 target_data[i].inheritable = tswap32(data[i].inheritable);
10400 unlock_user(target_data, arg2, target_datalen);
10401 } else {
10402 unlock_user(target_data, arg2, 0);
10405 break;
10407 case TARGET_NR_sigaltstack:
10408 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
10409 break;
10411 #ifdef CONFIG_SENDFILE
10412 case TARGET_NR_sendfile:
10414 off_t *offp = NULL;
10415 off_t off;
10416 if (arg3) {
10417 ret = get_user_sal(off, arg3);
10418 if (is_error(ret)) {
10419 break;
10421 offp = &off;
10423 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10424 if (!is_error(ret) && arg3) {
10425 abi_long ret2 = put_user_sal(off, arg3);
10426 if (is_error(ret2)) {
10427 ret = ret2;
10430 break;
10432 #ifdef TARGET_NR_sendfile64
10433 case TARGET_NR_sendfile64:
10435 off_t *offp = NULL;
10436 off_t off;
10437 if (arg3) {
10438 ret = get_user_s64(off, arg3);
10439 if (is_error(ret)) {
10440 break;
10442 offp = &off;
10444 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10445 if (!is_error(ret) && arg3) {
10446 abi_long ret2 = put_user_s64(off, arg3);
10447 if (is_error(ret2)) {
10448 ret = ret2;
10451 break;
10453 #endif
10454 #else
10455 case TARGET_NR_sendfile:
10456 #ifdef TARGET_NR_sendfile64
10457 case TARGET_NR_sendfile64:
10458 #endif
10459 goto unimplemented;
10460 #endif
10462 #ifdef TARGET_NR_getpmsg
10463 case TARGET_NR_getpmsg:
10464 goto unimplemented;
10465 #endif
10466 #ifdef TARGET_NR_putpmsg
10467 case TARGET_NR_putpmsg:
10468 goto unimplemented;
10469 #endif
10470 #ifdef TARGET_NR_vfork
10471 case TARGET_NR_vfork:
10472 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
10473 0, 0, 0, 0));
10474 break;
10475 #endif
10476 #ifdef TARGET_NR_ugetrlimit
10477 case TARGET_NR_ugetrlimit:
10479 struct rlimit rlim;
10480 int resource = target_to_host_resource(arg1);
10481 ret = get_errno(getrlimit(resource, &rlim));
10482 if (!is_error(ret)) {
10483 struct target_rlimit *target_rlim;
10484 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10485 goto efault;
10486 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10487 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10488 unlock_user_struct(target_rlim, arg2, 1);
10490 break;
10492 #endif
10493 #ifdef TARGET_NR_truncate64
10494 case TARGET_NR_truncate64:
10495 if (!(p = lock_user_string(arg1)))
10496 goto efault;
10497 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10498 unlock_user(p, arg1, 0);
10499 break;
10500 #endif
10501 #ifdef TARGET_NR_ftruncate64
10502 case TARGET_NR_ftruncate64:
10503 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10504 break;
10505 #endif
10506 #ifdef TARGET_NR_stat64
10507 case TARGET_NR_stat64:
10508 if (!(p = lock_user_string(arg1)))
10509 goto efault;
10510 ret = get_errno(stat(path(p), &st));
10511 unlock_user(p, arg1, 0);
10512 if (!is_error(ret))
10513 ret = host_to_target_stat64(cpu_env, arg2, &st);
10514 break;
10515 #endif
10516 #ifdef TARGET_NR_lstat64
10517 case TARGET_NR_lstat64:
10518 if (!(p = lock_user_string(arg1)))
10519 goto efault;
10520 ret = get_errno(lstat(path(p), &st));
10521 unlock_user(p, arg1, 0);
10522 if (!is_error(ret))
10523 ret = host_to_target_stat64(cpu_env, arg2, &st);
10524 break;
10525 #endif
10526 #ifdef TARGET_NR_fstat64
10527 case TARGET_NR_fstat64:
10528 ret = get_errno(fstat(arg1, &st));
10529 if (!is_error(ret))
10530 ret = host_to_target_stat64(cpu_env, arg2, &st);
10531 break;
10532 #endif
10533 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10534 #ifdef TARGET_NR_fstatat64
10535 case TARGET_NR_fstatat64:
10536 #endif
10537 #ifdef TARGET_NR_newfstatat
10538 case TARGET_NR_newfstatat:
10539 #endif
10540 if (!(p = lock_user_string(arg2)))
10541 goto efault;
10542 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10543 if (!is_error(ret))
10544 ret = host_to_target_stat64(cpu_env, arg3, &st);
10545 break;
10546 #endif
10547 #ifdef TARGET_NR_lchown
10548 case TARGET_NR_lchown:
10549 if (!(p = lock_user_string(arg1)))
10550 goto efault;
10551 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10552 unlock_user(p, arg1, 0);
10553 break;
10554 #endif
10555 #ifdef TARGET_NR_getuid
10556 case TARGET_NR_getuid:
10557 ret = get_errno(high2lowuid(getuid()));
10558 break;
10559 #endif
10560 #ifdef TARGET_NR_getgid
10561 case TARGET_NR_getgid:
10562 ret = get_errno(high2lowgid(getgid()));
10563 break;
10564 #endif
10565 #ifdef TARGET_NR_geteuid
10566 case TARGET_NR_geteuid:
10567 ret = get_errno(high2lowuid(geteuid()));
10568 break;
10569 #endif
10570 #ifdef TARGET_NR_getegid
10571 case TARGET_NR_getegid:
10572 ret = get_errno(high2lowgid(getegid()));
10573 break;
10574 #endif
10575 case TARGET_NR_setreuid:
10576 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10577 break;
10578 case TARGET_NR_setregid:
10579 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10580 break;
10581 case TARGET_NR_getgroups:
10583 int gidsetsize = arg1;
10584 target_id *target_grouplist;
10585 gid_t *grouplist;
10586 int i;
10588 grouplist = alloca(gidsetsize * sizeof(gid_t));
10589 ret = get_errno(getgroups(gidsetsize, grouplist));
10590 if (gidsetsize == 0)
10591 break;
10592 if (!is_error(ret)) {
10593 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10594 if (!target_grouplist)
10595 goto efault;
10596 for(i = 0;i < ret; i++)
10597 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10598 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10601 break;
10602 case TARGET_NR_setgroups:
10604 int gidsetsize = arg1;
10605 target_id *target_grouplist;
10606 gid_t *grouplist = NULL;
10607 int i;
10608 if (gidsetsize) {
10609 grouplist = alloca(gidsetsize * sizeof(gid_t));
10610 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10611 if (!target_grouplist) {
10612 ret = -TARGET_EFAULT;
10613 goto fail;
10615 for (i = 0; i < gidsetsize; i++) {
10616 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10618 unlock_user(target_grouplist, arg2, 0);
10620 ret = get_errno(setgroups(gidsetsize, grouplist));
10622 break;
10623 case TARGET_NR_fchown:
10624 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10625 break;
10626 #if defined(TARGET_NR_fchownat)
10627 case TARGET_NR_fchownat:
10628 if (!(p = lock_user_string(arg2)))
10629 goto efault;
10630 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10631 low2highgid(arg4), arg5));
10632 unlock_user(p, arg2, 0);
10633 break;
10634 #endif
10635 #ifdef TARGET_NR_setresuid
10636 case TARGET_NR_setresuid:
10637 ret = get_errno(sys_setresuid(low2highuid(arg1),
10638 low2highuid(arg2),
10639 low2highuid(arg3)));
10640 break;
10641 #endif
10642 #ifdef TARGET_NR_getresuid
10643 case TARGET_NR_getresuid:
10645 uid_t ruid, euid, suid;
10646 ret = get_errno(getresuid(&ruid, &euid, &suid));
10647 if (!is_error(ret)) {
10648 if (put_user_id(high2lowuid(ruid), arg1)
10649 || put_user_id(high2lowuid(euid), arg2)
10650 || put_user_id(high2lowuid(suid), arg3))
10651 goto efault;
10654 break;
10655 #endif
10656 #ifdef TARGET_NR_getresgid
10657 case TARGET_NR_setresgid:
10658 ret = get_errno(sys_setresgid(low2highgid(arg1),
10659 low2highgid(arg2),
10660 low2highgid(arg3)));
10661 break;
10662 #endif
10663 #ifdef TARGET_NR_getresgid
10664 case TARGET_NR_getresgid:
10666 gid_t rgid, egid, sgid;
10667 ret = get_errno(getresgid(&rgid, &egid, &sgid));
10668 if (!is_error(ret)) {
10669 if (put_user_id(high2lowgid(rgid), arg1)
10670 || put_user_id(high2lowgid(egid), arg2)
10671 || put_user_id(high2lowgid(sgid), arg3))
10672 goto efault;
10675 break;
10676 #endif
10677 #ifdef TARGET_NR_chown
10678 case TARGET_NR_chown:
10679 if (!(p = lock_user_string(arg1)))
10680 goto efault;
10681 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10682 unlock_user(p, arg1, 0);
10683 break;
10684 #endif
10685 case TARGET_NR_setuid:
10686 ret = get_errno(sys_setuid(low2highuid(arg1)));
10687 break;
10688 case TARGET_NR_setgid:
10689 ret = get_errno(sys_setgid(low2highgid(arg1)));
10690 break;
10691 case TARGET_NR_setfsuid:
10692 ret = get_errno(setfsuid(arg1));
10693 break;
10694 case TARGET_NR_setfsgid:
10695 ret = get_errno(setfsgid(arg1));
10696 break;
10698 #ifdef TARGET_NR_lchown32
10699 case TARGET_NR_lchown32:
10700 if (!(p = lock_user_string(arg1)))
10701 goto efault;
10702 ret = get_errno(lchown(p, arg2, arg3));
10703 unlock_user(p, arg1, 0);
10704 break;
10705 #endif
10706 #ifdef TARGET_NR_getuid32
10707 case TARGET_NR_getuid32:
10708 ret = get_errno(getuid());
10709 break;
10710 #endif
10712 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10713 /* Alpha specific */
10714 case TARGET_NR_getxuid:
10716 uid_t euid;
10717 euid=geteuid();
10718 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
10720 ret = get_errno(getuid());
10721 break;
10722 #endif
10723 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10724 /* Alpha specific */
10725 case TARGET_NR_getxgid:
10727 uid_t egid;
10728 egid=getegid();
10729 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
10731 ret = get_errno(getgid());
10732 break;
10733 #endif
10734 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10735 /* Alpha specific */
10736 case TARGET_NR_osf_getsysinfo:
10737 ret = -TARGET_EOPNOTSUPP;
10738 switch (arg1) {
10739 case TARGET_GSI_IEEE_FP_CONTROL:
10741 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
10743 /* Copied from linux ieee_fpcr_to_swcr. */
10744 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
10745 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
10746 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
10747 | SWCR_TRAP_ENABLE_DZE
10748 | SWCR_TRAP_ENABLE_OVF);
10749 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
10750 | SWCR_TRAP_ENABLE_INE);
10751 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
10752 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
10754 if (put_user_u64 (swcr, arg2))
10755 goto efault;
10756 ret = 0;
10758 break;
10760 /* case GSI_IEEE_STATE_AT_SIGNAL:
10761 -- Not implemented in linux kernel.
10762 case GSI_UACPROC:
10763 -- Retrieves current unaligned access state; not much used.
10764 case GSI_PROC_TYPE:
10765 -- Retrieves implver information; surely not used.
10766 case GSI_GET_HWRPB:
10767 -- Grabs a copy of the HWRPB; surely not used.
10770 break;
10771 #endif
10772 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10773 /* Alpha specific */
10774 case TARGET_NR_osf_setsysinfo:
10775 ret = -TARGET_EOPNOTSUPP;
10776 switch (arg1) {
10777 case TARGET_SSI_IEEE_FP_CONTROL:
10779 uint64_t swcr, fpcr, orig_fpcr;
10781 if (get_user_u64 (swcr, arg2)) {
10782 goto efault;
10784 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10785 fpcr = orig_fpcr & FPCR_DYN_MASK;
10787 /* Copied from linux ieee_swcr_to_fpcr. */
10788 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
10789 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
10790 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
10791 | SWCR_TRAP_ENABLE_DZE
10792 | SWCR_TRAP_ENABLE_OVF)) << 48;
10793 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
10794 | SWCR_TRAP_ENABLE_INE)) << 57;
10795 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
10796 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
10798 cpu_alpha_store_fpcr(cpu_env, fpcr);
10799 ret = 0;
10801 break;
10803 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10805 uint64_t exc, fpcr, orig_fpcr;
10806 int si_code;
10808 if (get_user_u64(exc, arg2)) {
10809 goto efault;
10812 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10814 /* We only add to the exception status here. */
10815 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
10817 cpu_alpha_store_fpcr(cpu_env, fpcr);
10818 ret = 0;
10820 /* Old exceptions are not signaled. */
10821 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
10823 /* If any exceptions set by this call,
10824 and are unmasked, send a signal. */
10825 si_code = 0;
10826 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
10827 si_code = TARGET_FPE_FLTRES;
10829 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
10830 si_code = TARGET_FPE_FLTUND;
10832 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
10833 si_code = TARGET_FPE_FLTOVF;
10835 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
10836 si_code = TARGET_FPE_FLTDIV;
10838 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
10839 si_code = TARGET_FPE_FLTINV;
10841 if (si_code != 0) {
10842 target_siginfo_t info;
10843 info.si_signo = SIGFPE;
10844 info.si_errno = 0;
10845 info.si_code = si_code;
10846 info._sifields._sigfault._addr
10847 = ((CPUArchState *)cpu_env)->pc;
10848 queue_signal((CPUArchState *)cpu_env, info.si_signo,
10849 QEMU_SI_FAULT, &info);
10852 break;
10854 /* case SSI_NVPAIRS:
10855 -- Used with SSIN_UACPROC to enable unaligned accesses.
10856 case SSI_IEEE_STATE_AT_SIGNAL:
10857 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10858 -- Not implemented in linux kernel
10861 break;
10862 #endif
10863 #ifdef TARGET_NR_osf_sigprocmask
10864 /* Alpha specific. */
10865 case TARGET_NR_osf_sigprocmask:
10867 abi_ulong mask;
10868 int how;
10869 sigset_t set, oldset;
10871 switch(arg1) {
10872 case TARGET_SIG_BLOCK:
10873 how = SIG_BLOCK;
10874 break;
10875 case TARGET_SIG_UNBLOCK:
10876 how = SIG_UNBLOCK;
10877 break;
10878 case TARGET_SIG_SETMASK:
10879 how = SIG_SETMASK;
10880 break;
10881 default:
10882 ret = -TARGET_EINVAL;
10883 goto fail;
10885 mask = arg2;
10886 target_to_host_old_sigset(&set, &mask);
10887 ret = do_sigprocmask(how, &set, &oldset);
10888 if (!ret) {
10889 host_to_target_old_sigset(&mask, &oldset);
10890 ret = mask;
10893 break;
10894 #endif
10896 #ifdef TARGET_NR_getgid32
10897 case TARGET_NR_getgid32:
10898 ret = get_errno(getgid());
10899 break;
10900 #endif
10901 #ifdef TARGET_NR_geteuid32
10902 case TARGET_NR_geteuid32:
10903 ret = get_errno(geteuid());
10904 break;
10905 #endif
10906 #ifdef TARGET_NR_getegid32
10907 case TARGET_NR_getegid32:
10908 ret = get_errno(getegid());
10909 break;
10910 #endif
10911 #ifdef TARGET_NR_setreuid32
10912 case TARGET_NR_setreuid32:
10913 ret = get_errno(setreuid(arg1, arg2));
10914 break;
10915 #endif
10916 #ifdef TARGET_NR_setregid32
10917 case TARGET_NR_setregid32:
10918 ret = get_errno(setregid(arg1, arg2));
10919 break;
10920 #endif
10921 #ifdef TARGET_NR_getgroups32
10922 case TARGET_NR_getgroups32:
10924 int gidsetsize = arg1;
10925 uint32_t *target_grouplist;
10926 gid_t *grouplist;
10927 int i;
10929 grouplist = alloca(gidsetsize * sizeof(gid_t));
10930 ret = get_errno(getgroups(gidsetsize, grouplist));
10931 if (gidsetsize == 0)
10932 break;
10933 if (!is_error(ret)) {
10934 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10935 if (!target_grouplist) {
10936 ret = -TARGET_EFAULT;
10937 goto fail;
10939 for(i = 0;i < ret; i++)
10940 target_grouplist[i] = tswap32(grouplist[i]);
10941 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10944 break;
10945 #endif
10946 #ifdef TARGET_NR_setgroups32
10947 case TARGET_NR_setgroups32:
10949 int gidsetsize = arg1;
10950 uint32_t *target_grouplist;
10951 gid_t *grouplist;
10952 int i;
10954 grouplist = alloca(gidsetsize * sizeof(gid_t));
10955 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10956 if (!target_grouplist) {
10957 ret = -TARGET_EFAULT;
10958 goto fail;
10960 for(i = 0;i < gidsetsize; i++)
10961 grouplist[i] = tswap32(target_grouplist[i]);
10962 unlock_user(target_grouplist, arg2, 0);
10963 ret = get_errno(setgroups(gidsetsize, grouplist));
10965 break;
10966 #endif
10967 #ifdef TARGET_NR_fchown32
10968 case TARGET_NR_fchown32:
10969 ret = get_errno(fchown(arg1, arg2, arg3));
10970 break;
10971 #endif
10972 #ifdef TARGET_NR_setresuid32
10973 case TARGET_NR_setresuid32:
10974 ret = get_errno(sys_setresuid(arg1, arg2, arg3));
10975 break;
10976 #endif
10977 #ifdef TARGET_NR_getresuid32
10978 case TARGET_NR_getresuid32:
10980 uid_t ruid, euid, suid;
10981 ret = get_errno(getresuid(&ruid, &euid, &suid));
10982 if (!is_error(ret)) {
10983 if (put_user_u32(ruid, arg1)
10984 || put_user_u32(euid, arg2)
10985 || put_user_u32(suid, arg3))
10986 goto efault;
10989 break;
10990 #endif
10991 #ifdef TARGET_NR_setresgid32
10992 case TARGET_NR_setresgid32:
10993 ret = get_errno(sys_setresgid(arg1, arg2, arg3));
10994 break;
10995 #endif
10996 #ifdef TARGET_NR_getresgid32
10997 case TARGET_NR_getresgid32:
10999 gid_t rgid, egid, sgid;
11000 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11001 if (!is_error(ret)) {
11002 if (put_user_u32(rgid, arg1)
11003 || put_user_u32(egid, arg2)
11004 || put_user_u32(sgid, arg3))
11005 goto efault;
11008 break;
11009 #endif
11010 #ifdef TARGET_NR_chown32
11011 case TARGET_NR_chown32:
11012 if (!(p = lock_user_string(arg1)))
11013 goto efault;
11014 ret = get_errno(chown(p, arg2, arg3));
11015 unlock_user(p, arg1, 0);
11016 break;
11017 #endif
11018 #ifdef TARGET_NR_setuid32
11019 case TARGET_NR_setuid32:
11020 ret = get_errno(sys_setuid(arg1));
11021 break;
11022 #endif
11023 #ifdef TARGET_NR_setgid32
11024 case TARGET_NR_setgid32:
11025 ret = get_errno(sys_setgid(arg1));
11026 break;
11027 #endif
11028 #ifdef TARGET_NR_setfsuid32
11029 case TARGET_NR_setfsuid32:
11030 ret = get_errno(setfsuid(arg1));
11031 break;
11032 #endif
11033 #ifdef TARGET_NR_setfsgid32
11034 case TARGET_NR_setfsgid32:
11035 ret = get_errno(setfsgid(arg1));
11036 break;
11037 #endif
11039 case TARGET_NR_pivot_root:
11040 goto unimplemented;
11041 #ifdef TARGET_NR_mincore
11042 case TARGET_NR_mincore:
11044 void *a;
11045 ret = -TARGET_EFAULT;
11046 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
11047 goto efault;
11048 if (!(p = lock_user_string(arg3)))
11049 goto mincore_fail;
11050 ret = get_errno(mincore(a, arg2, p));
11051 unlock_user(p, arg3, ret);
11052 mincore_fail:
11053 unlock_user(a, arg1, 0);
11055 break;
11056 #endif
11057 #ifdef TARGET_NR_arm_fadvise64_64
11058 case TARGET_NR_arm_fadvise64_64:
11059 /* arm_fadvise64_64 looks like fadvise64_64 but
11060 * with different argument order: fd, advice, offset, len
11061 * rather than the usual fd, offset, len, advice.
11062 * Note that offset and len are both 64-bit so appear as
11063 * pairs of 32-bit registers.
11065 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11066 target_offset64(arg5, arg6), arg2);
11067 ret = -host_to_target_errno(ret);
11068 break;
11069 #endif
11071 #if TARGET_ABI_BITS == 32
11073 #ifdef TARGET_NR_fadvise64_64
11074 case TARGET_NR_fadvise64_64:
11075 /* 6 args: fd, offset (high, low), len (high, low), advice */
11076 if (regpairs_aligned(cpu_env)) {
11077 /* offset is in (3,4), len in (5,6) and advice in 7 */
11078 arg2 = arg3;
11079 arg3 = arg4;
11080 arg4 = arg5;
11081 arg5 = arg6;
11082 arg6 = arg7;
11084 ret = -host_to_target_errno(posix_fadvise(arg1,
11085 target_offset64(arg2, arg3),
11086 target_offset64(arg4, arg5),
11087 arg6));
11088 break;
11089 #endif
11091 #ifdef TARGET_NR_fadvise64
11092 case TARGET_NR_fadvise64:
11093 /* 5 args: fd, offset (high, low), len, advice */
11094 if (regpairs_aligned(cpu_env)) {
11095 /* offset is in (3,4), len in 5 and advice in 6 */
11096 arg2 = arg3;
11097 arg3 = arg4;
11098 arg4 = arg5;
11099 arg5 = arg6;
11101 ret = -host_to_target_errno(posix_fadvise(arg1,
11102 target_offset64(arg2, arg3),
11103 arg4, arg5));
11104 break;
11105 #endif
11107 #else /* not a 32-bit ABI */
11108 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11109 #ifdef TARGET_NR_fadvise64_64
11110 case TARGET_NR_fadvise64_64:
11111 #endif
11112 #ifdef TARGET_NR_fadvise64
11113 case TARGET_NR_fadvise64:
11114 #endif
11115 #ifdef TARGET_S390X
11116 switch (arg4) {
11117 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11118 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11119 case 6: arg4 = POSIX_FADV_DONTNEED; break;
11120 case 7: arg4 = POSIX_FADV_NOREUSE; break;
11121 default: break;
11123 #endif
11124 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11125 break;
11126 #endif
11127 #endif /* end of 64-bit ABI fadvise handling */
11129 #ifdef TARGET_NR_madvise
11130 case TARGET_NR_madvise:
11131 /* A straight passthrough may not be safe because qemu sometimes
11132 turns private file-backed mappings into anonymous mappings.
11133 This will break MADV_DONTNEED.
11134 This is a hint, so ignoring and returning success is ok. */
11135 ret = get_errno(0);
11136 break;
11137 #endif
11138 #if TARGET_ABI_BITS == 32
11139 case TARGET_NR_fcntl64:
11141 int cmd;
11142 struct flock64 fl;
11143 from_flock64_fn *copyfrom = copy_from_user_flock64;
11144 to_flock64_fn *copyto = copy_to_user_flock64;
11146 #ifdef TARGET_ARM
11147 if (((CPUARMState *)cpu_env)->eabi) {
11148 copyfrom = copy_from_user_eabi_flock64;
11149 copyto = copy_to_user_eabi_flock64;
11151 #endif
11153 cmd = target_to_host_fcntl_cmd(arg2);
11154 if (cmd == -TARGET_EINVAL) {
11155 ret = cmd;
11156 break;
11159 switch(arg2) {
11160 case TARGET_F_GETLK64:
11161 ret = copyfrom(&fl, arg3);
11162 if (ret) {
11163 break;
11165 ret = get_errno(fcntl(arg1, cmd, &fl));
11166 if (ret == 0) {
11167 ret = copyto(arg3, &fl);
11169 break;
11171 case TARGET_F_SETLK64:
11172 case TARGET_F_SETLKW64:
11173 ret = copyfrom(&fl, arg3);
11174 if (ret) {
11175 break;
11177 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11178 break;
11179 default:
11180 ret = do_fcntl(arg1, arg2, arg3);
11181 break;
11183 break;
11185 #endif
11186 #ifdef TARGET_NR_cacheflush
11187 case TARGET_NR_cacheflush:
11188 /* self-modifying code is handled automatically, so nothing needed */
11189 ret = 0;
11190 break;
11191 #endif
11192 #ifdef TARGET_NR_security
11193 case TARGET_NR_security:
11194 goto unimplemented;
11195 #endif
11196 #ifdef TARGET_NR_getpagesize
11197 case TARGET_NR_getpagesize:
11198 ret = TARGET_PAGE_SIZE;
11199 break;
11200 #endif
11201 case TARGET_NR_gettid:
11202 ret = get_errno(gettid());
11203 break;
11204 #ifdef TARGET_NR_readahead
11205 case TARGET_NR_readahead:
11206 #if TARGET_ABI_BITS == 32
11207 if (regpairs_aligned(cpu_env)) {
11208 arg2 = arg3;
11209 arg3 = arg4;
11210 arg4 = arg5;
11212 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
11213 #else
11214 ret = get_errno(readahead(arg1, arg2, arg3));
11215 #endif
11216 break;
11217 #endif
11218 #ifdef CONFIG_ATTR
11219 #ifdef TARGET_NR_setxattr
11220 case TARGET_NR_listxattr:
11221 case TARGET_NR_llistxattr:
11223 void *p, *b = 0;
11224 if (arg2) {
11225 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11226 if (!b) {
11227 ret = -TARGET_EFAULT;
11228 break;
11231 p = lock_user_string(arg1);
11232 if (p) {
11233 if (num == TARGET_NR_listxattr) {
11234 ret = get_errno(listxattr(p, b, arg3));
11235 } else {
11236 ret = get_errno(llistxattr(p, b, arg3));
11238 } else {
11239 ret = -TARGET_EFAULT;
11241 unlock_user(p, arg1, 0);
11242 unlock_user(b, arg2, arg3);
11243 break;
11245 case TARGET_NR_flistxattr:
11247 void *b = 0;
11248 if (arg2) {
11249 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11250 if (!b) {
11251 ret = -TARGET_EFAULT;
11252 break;
11255 ret = get_errno(flistxattr(arg1, b, arg3));
11256 unlock_user(b, arg2, arg3);
11257 break;
11259 case TARGET_NR_setxattr:
11260 case TARGET_NR_lsetxattr:
11262 void *p, *n, *v = 0;
11263 if (arg3) {
11264 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11265 if (!v) {
11266 ret = -TARGET_EFAULT;
11267 break;
11270 p = lock_user_string(arg1);
11271 n = lock_user_string(arg2);
11272 if (p && n) {
11273 if (num == TARGET_NR_setxattr) {
11274 ret = get_errno(setxattr(p, n, v, arg4, arg5));
11275 } else {
11276 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11278 } else {
11279 ret = -TARGET_EFAULT;
11281 unlock_user(p, arg1, 0);
11282 unlock_user(n, arg2, 0);
11283 unlock_user(v, arg3, 0);
11285 break;
11286 case TARGET_NR_fsetxattr:
11288 void *n, *v = 0;
11289 if (arg3) {
11290 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11291 if (!v) {
11292 ret = -TARGET_EFAULT;
11293 break;
11296 n = lock_user_string(arg2);
11297 if (n) {
11298 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11299 } else {
11300 ret = -TARGET_EFAULT;
11302 unlock_user(n, arg2, 0);
11303 unlock_user(v, arg3, 0);
11305 break;
11306 case TARGET_NR_getxattr:
11307 case TARGET_NR_lgetxattr:
11309 void *p, *n, *v = 0;
11310 if (arg3) {
11311 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11312 if (!v) {
11313 ret = -TARGET_EFAULT;
11314 break;
11317 p = lock_user_string(arg1);
11318 n = lock_user_string(arg2);
11319 if (p && n) {
11320 if (num == TARGET_NR_getxattr) {
11321 ret = get_errno(getxattr(p, n, v, arg4));
11322 } else {
11323 ret = get_errno(lgetxattr(p, n, v, arg4));
11325 } else {
11326 ret = -TARGET_EFAULT;
11328 unlock_user(p, arg1, 0);
11329 unlock_user(n, arg2, 0);
11330 unlock_user(v, arg3, arg4);
11332 break;
11333 case TARGET_NR_fgetxattr:
11335 void *n, *v = 0;
11336 if (arg3) {
11337 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11338 if (!v) {
11339 ret = -TARGET_EFAULT;
11340 break;
11343 n = lock_user_string(arg2);
11344 if (n) {
11345 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11346 } else {
11347 ret = -TARGET_EFAULT;
11349 unlock_user(n, arg2, 0);
11350 unlock_user(v, arg3, arg4);
11352 break;
11353 case TARGET_NR_removexattr:
11354 case TARGET_NR_lremovexattr:
11356 void *p, *n;
11357 p = lock_user_string(arg1);
11358 n = lock_user_string(arg2);
11359 if (p && n) {
11360 if (num == TARGET_NR_removexattr) {
11361 ret = get_errno(removexattr(p, n));
11362 } else {
11363 ret = get_errno(lremovexattr(p, n));
11365 } else {
11366 ret = -TARGET_EFAULT;
11368 unlock_user(p, arg1, 0);
11369 unlock_user(n, arg2, 0);
11371 break;
11372 case TARGET_NR_fremovexattr:
11374 void *n;
11375 n = lock_user_string(arg2);
11376 if (n) {
11377 ret = get_errno(fremovexattr(arg1, n));
11378 } else {
11379 ret = -TARGET_EFAULT;
11381 unlock_user(n, arg2, 0);
11383 break;
11384 #endif
11385 #endif /* CONFIG_ATTR */
11386 #ifdef TARGET_NR_set_thread_area
11387 case TARGET_NR_set_thread_area:
11388 #if defined(TARGET_MIPS)
11389 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11390 ret = 0;
11391 break;
11392 #elif defined(TARGET_CRIS)
11393 if (arg1 & 0xff)
11394 ret = -TARGET_EINVAL;
11395 else {
11396 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11397 ret = 0;
11399 break;
11400 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11401 ret = do_set_thread_area(cpu_env, arg1);
11402 break;
11403 #elif defined(TARGET_M68K)
11405 TaskState *ts = cpu->opaque;
11406 ts->tp_value = arg1;
11407 ret = 0;
11408 break;
11410 #else
11411 goto unimplemented_nowarn;
11412 #endif
11413 #endif
11414 #ifdef TARGET_NR_get_thread_area
11415 case TARGET_NR_get_thread_area:
11416 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11417 ret = do_get_thread_area(cpu_env, arg1);
11418 break;
11419 #elif defined(TARGET_M68K)
11421 TaskState *ts = cpu->opaque;
11422 ret = ts->tp_value;
11423 break;
11425 #else
11426 goto unimplemented_nowarn;
11427 #endif
11428 #endif
11429 #ifdef TARGET_NR_getdomainname
11430 case TARGET_NR_getdomainname:
11431 goto unimplemented_nowarn;
11432 #endif
11434 #ifdef TARGET_NR_clock_gettime
11435 case TARGET_NR_clock_gettime:
11437 struct timespec ts;
11438 ret = get_errno(clock_gettime(arg1, &ts));
11439 if (!is_error(ret)) {
11440 host_to_target_timespec(arg2, &ts);
11442 break;
11444 #endif
11445 #ifdef TARGET_NR_clock_getres
11446 case TARGET_NR_clock_getres:
11448 struct timespec ts;
11449 ret = get_errno(clock_getres(arg1, &ts));
11450 if (!is_error(ret)) {
11451 host_to_target_timespec(arg2, &ts);
11453 break;
11455 #endif
11456 #ifdef TARGET_NR_clock_nanosleep
11457 case TARGET_NR_clock_nanosleep:
11459 struct timespec ts;
11460 target_to_host_timespec(&ts, arg3);
11461 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11462 &ts, arg4 ? &ts : NULL));
11463 if (arg4)
11464 host_to_target_timespec(arg4, &ts);
11466 #if defined(TARGET_PPC)
11467 /* clock_nanosleep is odd in that it returns positive errno values.
11468 * On PPC, CR0 bit 3 should be set in such a situation. */
11469 if (ret && ret != -TARGET_ERESTARTSYS) {
11470 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11472 #endif
11473 break;
11475 #endif
11477 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11478 case TARGET_NR_set_tid_address:
11479 ret = get_errno(set_tid_address((int *)g2h(arg1)));
11480 break;
11481 #endif
11483 case TARGET_NR_tkill:
11484 ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11485 break;
11487 case TARGET_NR_tgkill:
11488 ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
11489 target_to_host_signal(arg3)));
11490 break;
11492 #ifdef TARGET_NR_set_robust_list
11493 case TARGET_NR_set_robust_list:
11494 case TARGET_NR_get_robust_list:
11495 /* The ABI for supporting robust futexes has userspace pass
11496 * the kernel a pointer to a linked list which is updated by
11497 * userspace after the syscall; the list is walked by the kernel
11498 * when the thread exits. Since the linked list in QEMU guest
11499 * memory isn't a valid linked list for the host and we have
11500 * no way to reliably intercept the thread-death event, we can't
11501 * support these. Silently return ENOSYS so that guest userspace
11502 * falls back to a non-robust futex implementation (which should
11503 * be OK except in the corner case of the guest crashing while
11504 * holding a mutex that is shared with another process via
11505 * shared memory).
11507 goto unimplemented_nowarn;
11508 #endif
11510 #if defined(TARGET_NR_utimensat)
11511 case TARGET_NR_utimensat:
11513 struct timespec *tsp, ts[2];
11514 if (!arg3) {
11515 tsp = NULL;
11516 } else {
11517 target_to_host_timespec(ts, arg3);
11518 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11519 tsp = ts;
11521 if (!arg2)
11522 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11523 else {
11524 if (!(p = lock_user_string(arg2))) {
11525 ret = -TARGET_EFAULT;
11526 goto fail;
11528 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11529 unlock_user(p, arg2, 0);
11532 break;
11533 #endif
11534 case TARGET_NR_futex:
11535 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11536 break;
11537 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11538 case TARGET_NR_inotify_init:
11539 ret = get_errno(sys_inotify_init());
11540 break;
11541 #endif
11542 #ifdef CONFIG_INOTIFY1
11543 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11544 case TARGET_NR_inotify_init1:
11545 ret = get_errno(sys_inotify_init1(arg1));
11546 break;
11547 #endif
11548 #endif
11549 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11550 case TARGET_NR_inotify_add_watch:
11551 p = lock_user_string(arg2);
11552 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11553 unlock_user(p, arg2, 0);
11554 break;
11555 #endif
11556 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11557 case TARGET_NR_inotify_rm_watch:
11558 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
11559 break;
11560 #endif
11562 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11563 case TARGET_NR_mq_open:
11565 struct mq_attr posix_mq_attr;
11566 int host_flags;
11568 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11569 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11570 goto efault;
11572 p = lock_user_string(arg1 - 1);
11573 if (!p) {
11574 goto efault;
11576 ret = get_errno(mq_open(p, host_flags, arg3, &posix_mq_attr));
11577 unlock_user (p, arg1, 0);
11579 break;
11581 case TARGET_NR_mq_unlink:
11582 p = lock_user_string(arg1 - 1);
11583 if (!p) {
11584 ret = -TARGET_EFAULT;
11585 break;
11587 ret = get_errno(mq_unlink(p));
11588 unlock_user (p, arg1, 0);
11589 break;
11591 case TARGET_NR_mq_timedsend:
11593 struct timespec ts;
11595 p = lock_user (VERIFY_READ, arg2, arg3, 1);
11596 if (arg5 != 0) {
11597 target_to_host_timespec(&ts, arg5);
11598 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11599 host_to_target_timespec(arg5, &ts);
11600 } else {
11601 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11603 unlock_user (p, arg2, arg3);
11605 break;
11607 case TARGET_NR_mq_timedreceive:
11609 struct timespec ts;
11610 unsigned int prio;
11612 p = lock_user (VERIFY_READ, arg2, arg3, 1);
11613 if (arg5 != 0) {
11614 target_to_host_timespec(&ts, arg5);
11615 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11616 &prio, &ts));
11617 host_to_target_timespec(arg5, &ts);
11618 } else {
11619 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11620 &prio, NULL));
11622 unlock_user (p, arg2, arg3);
11623 if (arg4 != 0)
11624 put_user_u32(prio, arg4);
11626 break;
11628 /* Not implemented for now... */
11629 /* case TARGET_NR_mq_notify: */
11630 /* break; */
11632 case TARGET_NR_mq_getsetattr:
11634 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11635 ret = 0;
11636 if (arg3 != 0) {
11637 ret = mq_getattr(arg1, &posix_mq_attr_out);
11638 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11640 if (arg2 != 0) {
11641 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11642 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
11646 break;
11647 #endif
11649 #ifdef CONFIG_SPLICE
11650 #ifdef TARGET_NR_tee
11651 case TARGET_NR_tee:
11653 ret = get_errno(tee(arg1,arg2,arg3,arg4));
11655 break;
11656 #endif
11657 #ifdef TARGET_NR_splice
11658 case TARGET_NR_splice:
11660 loff_t loff_in, loff_out;
11661 loff_t *ploff_in = NULL, *ploff_out = NULL;
11662 if (arg2) {
11663 if (get_user_u64(loff_in, arg2)) {
11664 goto efault;
11666 ploff_in = &loff_in;
11668 if (arg4) {
11669 if (get_user_u64(loff_out, arg4)) {
11670 goto efault;
11672 ploff_out = &loff_out;
11674 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11675 if (arg2) {
11676 if (put_user_u64(loff_in, arg2)) {
11677 goto efault;
11680 if (arg4) {
11681 if (put_user_u64(loff_out, arg4)) {
11682 goto efault;
11686 break;
11687 #endif
11688 #ifdef TARGET_NR_vmsplice
11689 case TARGET_NR_vmsplice:
11691 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11692 if (vec != NULL) {
11693 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11694 unlock_iovec(vec, arg2, arg3, 0);
11695 } else {
11696 ret = -host_to_target_errno(errno);
11699 break;
11700 #endif
11701 #endif /* CONFIG_SPLICE */
11702 #ifdef CONFIG_EVENTFD
11703 #if defined(TARGET_NR_eventfd)
11704 case TARGET_NR_eventfd:
11705 ret = get_errno(eventfd(arg1, 0));
11706 fd_trans_unregister(ret);
11707 break;
11708 #endif
11709 #if defined(TARGET_NR_eventfd2)
11710 case TARGET_NR_eventfd2:
11712 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11713 if (arg2 & TARGET_O_NONBLOCK) {
11714 host_flags |= O_NONBLOCK;
11716 if (arg2 & TARGET_O_CLOEXEC) {
11717 host_flags |= O_CLOEXEC;
11719 ret = get_errno(eventfd(arg1, host_flags));
11720 fd_trans_unregister(ret);
11721 break;
11723 #endif
11724 #endif /* CONFIG_EVENTFD */
11725 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11726 case TARGET_NR_fallocate:
11727 #if TARGET_ABI_BITS == 32
11728 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11729 target_offset64(arg5, arg6)));
11730 #else
11731 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11732 #endif
11733 break;
11734 #endif
11735 #if defined(CONFIG_SYNC_FILE_RANGE)
11736 #if defined(TARGET_NR_sync_file_range)
11737 case TARGET_NR_sync_file_range:
11738 #if TARGET_ABI_BITS == 32
11739 #if defined(TARGET_MIPS)
11740 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11741 target_offset64(arg5, arg6), arg7));
11742 #else
11743 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11744 target_offset64(arg4, arg5), arg6));
11745 #endif /* !TARGET_MIPS */
11746 #else
11747 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11748 #endif
11749 break;
11750 #endif
11751 #if defined(TARGET_NR_sync_file_range2)
11752 case TARGET_NR_sync_file_range2:
11753 /* This is like sync_file_range but the arguments are reordered */
11754 #if TARGET_ABI_BITS == 32
11755 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11756 target_offset64(arg5, arg6), arg2));
11757 #else
11758 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11759 #endif
11760 break;
11761 #endif
11762 #endif
11763 #if defined(TARGET_NR_signalfd4)
11764 case TARGET_NR_signalfd4:
11765 ret = do_signalfd4(arg1, arg2, arg4);
11766 break;
11767 #endif
11768 #if defined(TARGET_NR_signalfd)
11769 case TARGET_NR_signalfd:
11770 ret = do_signalfd4(arg1, arg2, 0);
11771 break;
11772 #endif
11773 #if defined(CONFIG_EPOLL)
11774 #if defined(TARGET_NR_epoll_create)
11775 case TARGET_NR_epoll_create:
11776 ret = get_errno(epoll_create(arg1));
11777 break;
11778 #endif
11779 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11780 case TARGET_NR_epoll_create1:
11781 ret = get_errno(epoll_create1(arg1));
11782 break;
11783 #endif
11784 #if defined(TARGET_NR_epoll_ctl)
11785 case TARGET_NR_epoll_ctl:
11787 struct epoll_event ep;
11788 struct epoll_event *epp = 0;
11789 if (arg4) {
11790 struct target_epoll_event *target_ep;
11791 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11792 goto efault;
11794 ep.events = tswap32(target_ep->events);
11795 /* The epoll_data_t union is just opaque data to the kernel,
11796 * so we transfer all 64 bits across and need not worry what
11797 * actual data type it is.
11799 ep.data.u64 = tswap64(target_ep->data.u64);
11800 unlock_user_struct(target_ep, arg4, 0);
11801 epp = &ep;
11803 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11804 break;
11806 #endif
11808 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11809 #if defined(TARGET_NR_epoll_wait)
11810 case TARGET_NR_epoll_wait:
11811 #endif
11812 #if defined(TARGET_NR_epoll_pwait)
11813 case TARGET_NR_epoll_pwait:
11814 #endif
11816 struct target_epoll_event *target_ep;
11817 struct epoll_event *ep;
11818 int epfd = arg1;
11819 int maxevents = arg3;
11820 int timeout = arg4;
11822 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11823 ret = -TARGET_EINVAL;
11824 break;
11827 target_ep = lock_user(VERIFY_WRITE, arg2,
11828 maxevents * sizeof(struct target_epoll_event), 1);
11829 if (!target_ep) {
11830 goto efault;
11833 ep = g_try_new(struct epoll_event, maxevents);
11834 if (!ep) {
11835 unlock_user(target_ep, arg2, 0);
11836 ret = -TARGET_ENOMEM;
11837 break;
11840 switch (num) {
11841 #if defined(TARGET_NR_epoll_pwait)
11842 case TARGET_NR_epoll_pwait:
11844 target_sigset_t *target_set;
11845 sigset_t _set, *set = &_set;
11847 if (arg5) {
11848 if (arg6 != sizeof(target_sigset_t)) {
11849 ret = -TARGET_EINVAL;
11850 break;
11853 target_set = lock_user(VERIFY_READ, arg5,
11854 sizeof(target_sigset_t), 1);
11855 if (!target_set) {
11856 ret = -TARGET_EFAULT;
11857 break;
11859 target_to_host_sigset(set, target_set);
11860 unlock_user(target_set, arg5, 0);
11861 } else {
11862 set = NULL;
11865 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11866 set, SIGSET_T_SIZE));
11867 break;
11869 #endif
11870 #if defined(TARGET_NR_epoll_wait)
11871 case TARGET_NR_epoll_wait:
11872 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11873 NULL, 0));
11874 break;
11875 #endif
11876 default:
11877 ret = -TARGET_ENOSYS;
11879 if (!is_error(ret)) {
11880 int i;
11881 for (i = 0; i < ret; i++) {
11882 target_ep[i].events = tswap32(ep[i].events);
11883 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11885 unlock_user(target_ep, arg2,
11886 ret * sizeof(struct target_epoll_event));
11887 } else {
11888 unlock_user(target_ep, arg2, 0);
11890 g_free(ep);
11891 break;
11893 #endif
11894 #endif
11895 #ifdef TARGET_NR_prlimit64
11896 case TARGET_NR_prlimit64:
11898 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11899 struct target_rlimit64 *target_rnew, *target_rold;
11900 struct host_rlimit64 rnew, rold, *rnewp = 0;
11901 int resource = target_to_host_resource(arg2);
11902 if (arg3) {
11903 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11904 goto efault;
11906 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11907 rnew.rlim_max = tswap64(target_rnew->rlim_max);
11908 unlock_user_struct(target_rnew, arg3, 0);
11909 rnewp = &rnew;
11912 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11913 if (!is_error(ret) && arg4) {
11914 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11915 goto efault;
11917 target_rold->rlim_cur = tswap64(rold.rlim_cur);
11918 target_rold->rlim_max = tswap64(rold.rlim_max);
11919 unlock_user_struct(target_rold, arg4, 1);
11921 break;
11923 #endif
11924 #ifdef TARGET_NR_gethostname
11925 case TARGET_NR_gethostname:
11927 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11928 if (name) {
11929 ret = get_errno(gethostname(name, arg2));
11930 unlock_user(name, arg1, arg2);
11931 } else {
11932 ret = -TARGET_EFAULT;
11934 break;
11936 #endif
11937 #ifdef TARGET_NR_atomic_cmpxchg_32
11938 case TARGET_NR_atomic_cmpxchg_32:
11940 /* should use start_exclusive from main.c */
11941 abi_ulong mem_value;
11942 if (get_user_u32(mem_value, arg6)) {
11943 target_siginfo_t info;
11944 info.si_signo = SIGSEGV;
11945 info.si_errno = 0;
11946 info.si_code = TARGET_SEGV_MAPERR;
11947 info._sifields._sigfault._addr = arg6;
11948 queue_signal((CPUArchState *)cpu_env, info.si_signo,
11949 QEMU_SI_FAULT, &info);
11950 ret = 0xdeadbeef;
11953 if (mem_value == arg2)
11954 put_user_u32(arg1, arg6);
11955 ret = mem_value;
11956 break;
11958 #endif
11959 #ifdef TARGET_NR_atomic_barrier
11960 case TARGET_NR_atomic_barrier:
11962 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
11963 ret = 0;
11964 break;
11966 #endif
11968 #ifdef TARGET_NR_timer_create
11969 case TARGET_NR_timer_create:
11971 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11973 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
11975 int clkid = arg1;
11976 int timer_index = next_free_host_timer();
11978 if (timer_index < 0) {
11979 ret = -TARGET_EAGAIN;
11980 } else {
11981 timer_t *phtimer = g_posix_timers + timer_index;
11983 if (arg2) {
11984 phost_sevp = &host_sevp;
11985 ret = target_to_host_sigevent(phost_sevp, arg2);
11986 if (ret != 0) {
11987 break;
11991 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
11992 if (ret) {
11993 phtimer = NULL;
11994 } else {
11995 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
11996 goto efault;
12000 break;
12002 #endif
12004 #ifdef TARGET_NR_timer_settime
12005 case TARGET_NR_timer_settime:
12007 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12008 * struct itimerspec * old_value */
12009 target_timer_t timerid = get_timer_id(arg1);
12011 if (timerid < 0) {
12012 ret = timerid;
12013 } else if (arg3 == 0) {
12014 ret = -TARGET_EINVAL;
12015 } else {
12016 timer_t htimer = g_posix_timers[timerid];
12017 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12019 target_to_host_itimerspec(&hspec_new, arg3);
12020 ret = get_errno(
12021 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12022 host_to_target_itimerspec(arg2, &hspec_old);
12024 break;
12026 #endif
12028 #ifdef TARGET_NR_timer_gettime
12029 case TARGET_NR_timer_gettime:
12031 /* args: timer_t timerid, struct itimerspec *curr_value */
12032 target_timer_t timerid = get_timer_id(arg1);
12034 if (timerid < 0) {
12035 ret = timerid;
12036 } else if (!arg2) {
12037 ret = -TARGET_EFAULT;
12038 } else {
12039 timer_t htimer = g_posix_timers[timerid];
12040 struct itimerspec hspec;
12041 ret = get_errno(timer_gettime(htimer, &hspec));
12043 if (host_to_target_itimerspec(arg2, &hspec)) {
12044 ret = -TARGET_EFAULT;
12047 break;
12049 #endif
12051 #ifdef TARGET_NR_timer_getoverrun
12052 case TARGET_NR_timer_getoverrun:
12054 /* args: timer_t timerid */
12055 target_timer_t timerid = get_timer_id(arg1);
12057 if (timerid < 0) {
12058 ret = timerid;
12059 } else {
12060 timer_t htimer = g_posix_timers[timerid];
12061 ret = get_errno(timer_getoverrun(htimer));
12063 fd_trans_unregister(ret);
12064 break;
12066 #endif
12068 #ifdef TARGET_NR_timer_delete
12069 case TARGET_NR_timer_delete:
12071 /* args: timer_t timerid */
12072 target_timer_t timerid = get_timer_id(arg1);
12074 if (timerid < 0) {
12075 ret = timerid;
12076 } else {
12077 timer_t htimer = g_posix_timers[timerid];
12078 ret = get_errno(timer_delete(htimer));
12079 g_posix_timers[timerid] = 0;
12081 break;
12083 #endif
12085 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12086 case TARGET_NR_timerfd_create:
12087 ret = get_errno(timerfd_create(arg1,
12088 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12089 break;
12090 #endif
12092 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12093 case TARGET_NR_timerfd_gettime:
12095 struct itimerspec its_curr;
12097 ret = get_errno(timerfd_gettime(arg1, &its_curr));
12099 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12100 goto efault;
12103 break;
12104 #endif
12106 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12107 case TARGET_NR_timerfd_settime:
12109 struct itimerspec its_new, its_old, *p_new;
12111 if (arg3) {
12112 if (target_to_host_itimerspec(&its_new, arg3)) {
12113 goto efault;
12115 p_new = &its_new;
12116 } else {
12117 p_new = NULL;
12120 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12122 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12123 goto efault;
12126 break;
12127 #endif
12129 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12130 case TARGET_NR_ioprio_get:
12131 ret = get_errno(ioprio_get(arg1, arg2));
12132 break;
12133 #endif
12135 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12136 case TARGET_NR_ioprio_set:
12137 ret = get_errno(ioprio_set(arg1, arg2, arg3));
12138 break;
12139 #endif
12141 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12142 case TARGET_NR_setns:
12143 ret = get_errno(setns(arg1, arg2));
12144 break;
12145 #endif
12146 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12147 case TARGET_NR_unshare:
12148 ret = get_errno(unshare(arg1));
12149 break;
12150 #endif
12151 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12152 case TARGET_NR_kcmp:
12153 ret = get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12154 break;
12155 #endif
12157 default:
12158 unimplemented:
12159 gemu_log("qemu: Unsupported syscall: %d\n", num);
12160 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
12161 unimplemented_nowarn:
12162 #endif
12163 ret = -TARGET_ENOSYS;
12164 break;
12166 fail:
12167 #ifdef DEBUG
12168 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
12169 #endif
12170 if(do_strace)
12171 print_syscall_ret(num, ret);
12172 trace_guest_user_syscall_ret(cpu, num, ret);
12173 return ret;
12174 efault:
12175 ret = -TARGET_EFAULT;
12176 goto fail;