linux-user: Add support for syncfs() syscall
[qemu/ar7.git] / linux-user / syscall.c
blob14c52072f94df303d81eb2f68be7df1fb04fe9dd
1 /*
2 * Linux syscalls
4 * Copyright (c) 2003 Fabrice Bellard
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #ifdef __ia64__
40 int __clone2(int (*fn)(void *), void *child_stack_base,
41 size_t stack_size, int flags, void *arg, ...);
42 #endif
43 #include <sys/socket.h>
44 #include <sys/un.h>
45 #include <sys/uio.h>
46 #include <poll.h>
47 #include <sys/times.h>
48 #include <sys/shm.h>
49 #include <sys/sem.h>
50 #include <sys/statfs.h>
51 #include <time.h>
52 #include <utime.h>
53 #include <sys/sysinfo.h>
54 #include <sys/signalfd.h>
55 //#include <sys/user.h>
56 #include <netinet/ip.h>
57 #include <netinet/tcp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include "qemu-common.h"
61 #ifdef CONFIG_TIMERFD
62 #include <sys/timerfd.h>
63 #endif
64 #ifdef TARGET_GPROF
65 #include <sys/gmon.h>
66 #endif
67 #ifdef CONFIG_EVENTFD
68 #include <sys/eventfd.h>
69 #endif
70 #ifdef CONFIG_EPOLL
71 #include <sys/epoll.h>
72 #endif
73 #ifdef CONFIG_ATTR
74 #include "qemu/xattr.h"
75 #endif
76 #ifdef CONFIG_SENDFILE
77 #include <sys/sendfile.h>
78 #endif
80 #define termios host_termios
81 #define winsize host_winsize
82 #define termio host_termio
83 #define sgttyb host_sgttyb /* same as target */
84 #define tchars host_tchars /* same as target */
85 #define ltchars host_ltchars /* same as target */
87 #include <linux/termios.h>
88 #include <linux/unistd.h>
89 #include <linux/cdrom.h>
90 #include <linux/hdreg.h>
91 #include <linux/soundcard.h>
92 #include <linux/kd.h>
93 #include <linux/mtio.h>
94 #include <linux/fs.h>
95 #if defined(CONFIG_FIEMAP)
96 #include <linux/fiemap.h>
97 #endif
98 #include <linux/fb.h>
99 #include <linux/vt.h>
100 #include <linux/dm-ioctl.h>
101 #include <linux/reboot.h>
102 #include <linux/route.h>
103 #include <linux/filter.h>
104 #include <linux/blkpg.h>
105 #include <netpacket/packet.h>
106 #include <linux/netlink.h>
107 #ifdef CONFIG_RTNETLINK
108 #include <linux/rtnetlink.h>
109 #include <linux/if_bridge.h>
110 #endif
111 #include <linux/audit.h>
112 #include "linux_loop.h"
113 #include "uname.h"
115 #include "qemu.h"
117 #ifndef CLONE_IO
118 #define CLONE_IO 0x80000000 /* Clone io context */
119 #endif
121 /* We can't directly call the host clone syscall, because this will
122 * badly confuse libc (breaking mutexes, for example). So we must
123 * divide clone flags into:
124 * * flag combinations that look like pthread_create()
125 * * flag combinations that look like fork()
126 * * flags we can implement within QEMU itself
127 * * flags we can't support and will return an error for
129 /* For thread creation, all these flags must be present; for
130 * fork, none must be present.
132 #define CLONE_THREAD_FLAGS \
133 (CLONE_VM | CLONE_FS | CLONE_FILES | \
134 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
136 /* These flags are ignored:
137 * CLONE_DETACHED is now ignored by the kernel;
138 * CLONE_IO is just an optimisation hint to the I/O scheduler
140 #define CLONE_IGNORED_FLAGS \
141 (CLONE_DETACHED | CLONE_IO)
143 /* Flags for fork which we can implement within QEMU itself */
144 #define CLONE_OPTIONAL_FORK_FLAGS \
145 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
146 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
148 /* Flags for thread creation which we can implement within QEMU itself */
149 #define CLONE_OPTIONAL_THREAD_FLAGS \
150 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
151 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
153 #define CLONE_INVALID_FORK_FLAGS \
154 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
156 #define CLONE_INVALID_THREAD_FLAGS \
157 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
158 CLONE_IGNORED_FLAGS))
160 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
161 * have almost all been allocated. We cannot support any of
162 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
163 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
164 * The checks against the invalid thread masks above will catch these.
165 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
168 //#define DEBUG
169 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
170 * once. This exercises the codepaths for restart.
172 //#define DEBUG_ERESTARTSYS
174 //#include <linux/msdos_fs.h>
175 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
176 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
178 #undef _syscall0
179 #undef _syscall1
180 #undef _syscall2
181 #undef _syscall3
182 #undef _syscall4
183 #undef _syscall5
184 #undef _syscall6
186 #define _syscall0(type,name) \
187 static type name (void) \
189 return syscall(__NR_##name); \
192 #define _syscall1(type,name,type1,arg1) \
193 static type name (type1 arg1) \
195 return syscall(__NR_##name, arg1); \
198 #define _syscall2(type,name,type1,arg1,type2,arg2) \
199 static type name (type1 arg1,type2 arg2) \
201 return syscall(__NR_##name, arg1, arg2); \
204 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
205 static type name (type1 arg1,type2 arg2,type3 arg3) \
207 return syscall(__NR_##name, arg1, arg2, arg3); \
210 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
211 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
213 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
216 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
217 type5,arg5) \
218 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
220 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
224 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
225 type5,arg5,type6,arg6) \
226 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
227 type6 arg6) \
229 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
233 #define __NR_sys_uname __NR_uname
234 #define __NR_sys_getcwd1 __NR_getcwd
235 #define __NR_sys_getdents __NR_getdents
236 #define __NR_sys_getdents64 __NR_getdents64
237 #define __NR_sys_getpriority __NR_getpriority
238 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
239 #define __NR_sys_syslog __NR_syslog
240 #define __NR_sys_futex __NR_futex
241 #define __NR_sys_inotify_init __NR_inotify_init
242 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
243 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
245 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
246 defined(__s390x__)
247 #define __NR__llseek __NR_lseek
248 #endif
250 /* Newer kernel ports have llseek() instead of _llseek() */
251 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
252 #define TARGET_NR__llseek TARGET_NR_llseek
253 #endif
255 #ifdef __NR_gettid
256 _syscall0(int, gettid)
257 #else
258 /* This is a replacement for the host gettid() and must return a host
259 errno. */
static int gettid(void)
{
    /* Host kernel has no __NR_gettid; stand-in must return a host errno. */
    return -ENOSYS;
}
263 #endif
264 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
265 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
266 #endif
267 #if !defined(__NR_getdents) || \
268 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
269 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
270 #endif
271 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
272 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
273 loff_t *, res, uint, wh);
274 #endif
275 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
276 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
277 #ifdef __NR_exit_group
278 _syscall1(int,exit_group,int,error_code)
279 #endif
280 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
281 _syscall1(int,set_tid_address,int *,tidptr)
282 #endif
283 #if defined(TARGET_NR_futex) && defined(__NR_futex)
284 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
285 const struct timespec *,timeout,int *,uaddr2,int,val3)
286 #endif
287 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
288 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
289 unsigned long *, user_mask_ptr);
290 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
291 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
292 unsigned long *, user_mask_ptr);
293 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
294 void *, arg);
295 _syscall2(int, capget, struct __user_cap_header_struct *, header,
296 struct __user_cap_data_struct *, data);
297 _syscall2(int, capset, struct __user_cap_header_struct *, header,
298 struct __user_cap_data_struct *, data);
299 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
300 _syscall2(int, ioprio_get, int, which, int, who)
301 #endif
302 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
303 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
304 #endif
305 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
306 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
307 #endif
309 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
310 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
311 unsigned long, idx1, unsigned long, idx2)
312 #endif
314 static bitmask_transtbl fcntl_flags_tbl[] = {
315 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
316 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
317 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
318 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
319 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
320 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
321 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
322 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
323 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
324 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
325 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
326 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
327 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
328 #if defined(O_DIRECT)
329 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
330 #endif
331 #if defined(O_NOATIME)
332 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
333 #endif
334 #if defined(O_CLOEXEC)
335 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
336 #endif
337 #if defined(O_PATH)
338 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
339 #endif
340 /* Don't terminate the list prematurely on 64-bit host+guest. */
341 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
342 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
343 #endif
344 { 0, 0, 0, 0 }
347 enum {
348 QEMU_IFLA_BR_UNSPEC,
349 QEMU_IFLA_BR_FORWARD_DELAY,
350 QEMU_IFLA_BR_HELLO_TIME,
351 QEMU_IFLA_BR_MAX_AGE,
352 QEMU_IFLA_BR_AGEING_TIME,
353 QEMU_IFLA_BR_STP_STATE,
354 QEMU_IFLA_BR_PRIORITY,
355 QEMU_IFLA_BR_VLAN_FILTERING,
356 QEMU_IFLA_BR_VLAN_PROTOCOL,
357 QEMU_IFLA_BR_GROUP_FWD_MASK,
358 QEMU_IFLA_BR_ROOT_ID,
359 QEMU_IFLA_BR_BRIDGE_ID,
360 QEMU_IFLA_BR_ROOT_PORT,
361 QEMU_IFLA_BR_ROOT_PATH_COST,
362 QEMU_IFLA_BR_TOPOLOGY_CHANGE,
363 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
364 QEMU_IFLA_BR_HELLO_TIMER,
365 QEMU_IFLA_BR_TCN_TIMER,
366 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER,
367 QEMU_IFLA_BR_GC_TIMER,
368 QEMU_IFLA_BR_GROUP_ADDR,
369 QEMU_IFLA_BR_FDB_FLUSH,
370 QEMU_IFLA_BR_MCAST_ROUTER,
371 QEMU_IFLA_BR_MCAST_SNOOPING,
372 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR,
373 QEMU_IFLA_BR_MCAST_QUERIER,
374 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY,
375 QEMU_IFLA_BR_MCAST_HASH_MAX,
376 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT,
377 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT,
378 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL,
379 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL,
380 QEMU_IFLA_BR_MCAST_QUERIER_INTVL,
381 QEMU_IFLA_BR_MCAST_QUERY_INTVL,
382 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
383 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
384 QEMU_IFLA_BR_NF_CALL_IPTABLES,
385 QEMU_IFLA_BR_NF_CALL_IP6TABLES,
386 QEMU_IFLA_BR_NF_CALL_ARPTABLES,
387 QEMU_IFLA_BR_VLAN_DEFAULT_PVID,
388 QEMU_IFLA_BR_PAD,
389 QEMU_IFLA_BR_VLAN_STATS_ENABLED,
390 QEMU_IFLA_BR_MCAST_STATS_ENABLED,
391 QEMU___IFLA_BR_MAX,
394 enum {
395 QEMU_IFLA_UNSPEC,
396 QEMU_IFLA_ADDRESS,
397 QEMU_IFLA_BROADCAST,
398 QEMU_IFLA_IFNAME,
399 QEMU_IFLA_MTU,
400 QEMU_IFLA_LINK,
401 QEMU_IFLA_QDISC,
402 QEMU_IFLA_STATS,
403 QEMU_IFLA_COST,
404 QEMU_IFLA_PRIORITY,
405 QEMU_IFLA_MASTER,
406 QEMU_IFLA_WIRELESS,
407 QEMU_IFLA_PROTINFO,
408 QEMU_IFLA_TXQLEN,
409 QEMU_IFLA_MAP,
410 QEMU_IFLA_WEIGHT,
411 QEMU_IFLA_OPERSTATE,
412 QEMU_IFLA_LINKMODE,
413 QEMU_IFLA_LINKINFO,
414 QEMU_IFLA_NET_NS_PID,
415 QEMU_IFLA_IFALIAS,
416 QEMU_IFLA_NUM_VF,
417 QEMU_IFLA_VFINFO_LIST,
418 QEMU_IFLA_STATS64,
419 QEMU_IFLA_VF_PORTS,
420 QEMU_IFLA_PORT_SELF,
421 QEMU_IFLA_AF_SPEC,
422 QEMU_IFLA_GROUP,
423 QEMU_IFLA_NET_NS_FD,
424 QEMU_IFLA_EXT_MASK,
425 QEMU_IFLA_PROMISCUITY,
426 QEMU_IFLA_NUM_TX_QUEUES,
427 QEMU_IFLA_NUM_RX_QUEUES,
428 QEMU_IFLA_CARRIER,
429 QEMU_IFLA_PHYS_PORT_ID,
430 QEMU_IFLA_CARRIER_CHANGES,
431 QEMU_IFLA_PHYS_SWITCH_ID,
432 QEMU_IFLA_LINK_NETNSID,
433 QEMU_IFLA_PHYS_PORT_NAME,
434 QEMU_IFLA_PROTO_DOWN,
435 QEMU_IFLA_GSO_MAX_SEGS,
436 QEMU_IFLA_GSO_MAX_SIZE,
437 QEMU_IFLA_PAD,
438 QEMU_IFLA_XDP,
439 QEMU___IFLA_MAX
442 enum {
443 QEMU_IFLA_BRPORT_UNSPEC,
444 QEMU_IFLA_BRPORT_STATE,
445 QEMU_IFLA_BRPORT_PRIORITY,
446 QEMU_IFLA_BRPORT_COST,
447 QEMU_IFLA_BRPORT_MODE,
448 QEMU_IFLA_BRPORT_GUARD,
449 QEMU_IFLA_BRPORT_PROTECT,
450 QEMU_IFLA_BRPORT_FAST_LEAVE,
451 QEMU_IFLA_BRPORT_LEARNING,
452 QEMU_IFLA_BRPORT_UNICAST_FLOOD,
453 QEMU_IFLA_BRPORT_PROXYARP,
454 QEMU_IFLA_BRPORT_LEARNING_SYNC,
455 QEMU_IFLA_BRPORT_PROXYARP_WIFI,
456 QEMU_IFLA_BRPORT_ROOT_ID,
457 QEMU_IFLA_BRPORT_BRIDGE_ID,
458 QEMU_IFLA_BRPORT_DESIGNATED_PORT,
459 QEMU_IFLA_BRPORT_DESIGNATED_COST,
460 QEMU_IFLA_BRPORT_ID,
461 QEMU_IFLA_BRPORT_NO,
462 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
463 QEMU_IFLA_BRPORT_CONFIG_PENDING,
464 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER,
465 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER,
466 QEMU_IFLA_BRPORT_HOLD_TIMER,
467 QEMU_IFLA_BRPORT_FLUSH,
468 QEMU_IFLA_BRPORT_MULTICAST_ROUTER,
469 QEMU_IFLA_BRPORT_PAD,
470 QEMU___IFLA_BRPORT_MAX
473 enum {
474 QEMU_IFLA_INFO_UNSPEC,
475 QEMU_IFLA_INFO_KIND,
476 QEMU_IFLA_INFO_DATA,
477 QEMU_IFLA_INFO_XSTATS,
478 QEMU_IFLA_INFO_SLAVE_KIND,
479 QEMU_IFLA_INFO_SLAVE_DATA,
480 QEMU___IFLA_INFO_MAX,
483 enum {
484 QEMU_IFLA_INET_UNSPEC,
485 QEMU_IFLA_INET_CONF,
486 QEMU___IFLA_INET_MAX,
489 enum {
490 QEMU_IFLA_INET6_UNSPEC,
491 QEMU_IFLA_INET6_FLAGS,
492 QEMU_IFLA_INET6_CONF,
493 QEMU_IFLA_INET6_STATS,
494 QEMU_IFLA_INET6_MCAST,
495 QEMU_IFLA_INET6_CACHEINFO,
496 QEMU_IFLA_INET6_ICMP6STATS,
497 QEMU_IFLA_INET6_TOKEN,
498 QEMU_IFLA_INET6_ADDR_GEN_MODE,
499 QEMU___IFLA_INET6_MAX
502 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
503 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
504 typedef struct TargetFdTrans {
505 TargetFdDataFunc host_to_target_data;
506 TargetFdDataFunc target_to_host_data;
507 TargetFdAddrFunc target_to_host_addr;
508 } TargetFdTrans;
510 static TargetFdTrans **target_fd_trans;
512 static unsigned int target_fd_max;
514 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
516 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
517 return target_fd_trans[fd]->target_to_host_data;
519 return NULL;
522 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
524 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
525 return target_fd_trans[fd]->host_to_target_data;
527 return NULL;
530 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
532 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
533 return target_fd_trans[fd]->target_to_host_addr;
535 return NULL;
538 static void fd_trans_register(int fd, TargetFdTrans *trans)
540 unsigned int oldmax;
542 if (fd >= target_fd_max) {
543 oldmax = target_fd_max;
544 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
545 target_fd_trans = g_renew(TargetFdTrans *,
546 target_fd_trans, target_fd_max);
547 memset((void *)(target_fd_trans + oldmax), 0,
548 (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
550 target_fd_trans[fd] = trans;
553 static void fd_trans_unregister(int fd)
555 if (fd >= 0 && fd < target_fd_max) {
556 target_fd_trans[fd] = NULL;
560 static void fd_trans_dup(int oldfd, int newfd)
562 fd_trans_unregister(newfd);
563 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
564 fd_trans_register(newfd, target_fd_trans[oldfd]);
static int sys_getcwd1(char *buf, size_t size)
{
    /* getcwd() with syscall-style return: on success, the length of the
     * path including its NUL terminator; on failure, -1 with errno
     * already set by getcwd(). */
    if (getcwd(buf, size) == NULL) {
        return -1;
    }
    return strlen(buf) + 1;
}
577 #ifdef TARGET_NR_utimensat
578 #if defined(__NR_utimensat)
579 #define __NR_sys_utimensat __NR_utimensat
580 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
581 const struct timespec *,tsp,int,flags)
582 #else
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    /* Host lacks __NR_utimensat: report "not implemented" via errno. */
    errno = ENOSYS;
    return -1;
}
589 #endif
590 #endif /* TARGET_NR_utimensat */
592 #ifdef CONFIG_INOTIFY
593 #include <sys/inotify.h>
595 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
    /* Thin pass-through so the syscall dispatch can use a uniform name. */
    return inotify_init();
}
600 #endif
601 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    /* Thin pass-through to the host inotify_add_watch(). */
    return inotify_add_watch(fd, pathname, mask);
}
606 #endif
607 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    /* Thin pass-through to the host inotify_rm_watch(). */
    return inotify_rm_watch(fd, wd);
}
612 #endif
613 #ifdef CONFIG_INOTIFY1
614 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
    /* Thin pass-through to the host inotify_init1(). */
    return inotify_init1(flags);
}
619 #endif
620 #endif
621 #else
622 /* Userspace can usually survive runtime without inotify */
623 #undef TARGET_NR_inotify_init
624 #undef TARGET_NR_inotify_init1
625 #undef TARGET_NR_inotify_add_watch
626 #undef TARGET_NR_inotify_rm_watch
627 #endif /* CONFIG_INOTIFY */
629 #if defined(TARGET_NR_prlimit64)
630 #ifndef __NR_prlimit64
631 # define __NR_prlimit64 -1
632 #endif
633 #define __NR_sys_prlimit64 __NR_prlimit64
634 /* The glibc rlimit structure may not be that used by the underlying syscall */
635 struct host_rlimit64 {
636 uint64_t rlim_cur;
637 uint64_t rlim_max;
639 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
640 const struct host_rlimit64 *, new_limit,
641 struct host_rlimit64 *, old_limit)
642 #endif
645 #if defined(TARGET_NR_timer_create)
646 /* Maxiumum of 32 active POSIX timers allowed at any one time. */
647 static timer_t g_posix_timers[32] = { 0, } ;
649 static inline int next_free_host_timer(void)
651 int k ;
652 /* FIXME: Does finding the next free slot require a lock? */
653 for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
654 if (g_posix_timers[k] == 0) {
655 g_posix_timers[k] = (timer_t) 1;
656 return k;
659 return -1;
661 #endif
663 /* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
664 #ifdef TARGET_ARM
665 static inline int regpairs_aligned(void *cpu_env) {
666 return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
668 #elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
669 static inline int regpairs_aligned(void *cpu_env) { return 1; }
670 #elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
671 /* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
672 * of registers which translates to the same as ARM/MIPS, because we start with
673 * r3 as arg1 */
674 static inline int regpairs_aligned(void *cpu_env) { return 1; }
675 #else
676 static inline int regpairs_aligned(void *cpu_env) { return 0; }
677 #endif
679 #define ERRNO_TABLE_SIZE 1200
681 /* target_to_host_errno_table[] is initialized from
682 * host_to_target_errno_table[] in syscall_init(). */
683 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
687 * This list is the union of errno values overridden in asm-<arch>/errno.h
688 * minus the errnos that are not actually generic to all archs.
690 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
691 [EAGAIN] = TARGET_EAGAIN,
692 [EIDRM] = TARGET_EIDRM,
693 [ECHRNG] = TARGET_ECHRNG,
694 [EL2NSYNC] = TARGET_EL2NSYNC,
695 [EL3HLT] = TARGET_EL3HLT,
696 [EL3RST] = TARGET_EL3RST,
697 [ELNRNG] = TARGET_ELNRNG,
698 [EUNATCH] = TARGET_EUNATCH,
699 [ENOCSI] = TARGET_ENOCSI,
700 [EL2HLT] = TARGET_EL2HLT,
701 [EDEADLK] = TARGET_EDEADLK,
702 [ENOLCK] = TARGET_ENOLCK,
703 [EBADE] = TARGET_EBADE,
704 [EBADR] = TARGET_EBADR,
705 [EXFULL] = TARGET_EXFULL,
706 [ENOANO] = TARGET_ENOANO,
707 [EBADRQC] = TARGET_EBADRQC,
708 [EBADSLT] = TARGET_EBADSLT,
709 [EBFONT] = TARGET_EBFONT,
710 [ENOSTR] = TARGET_ENOSTR,
711 [ENODATA] = TARGET_ENODATA,
712 [ETIME] = TARGET_ETIME,
713 [ENOSR] = TARGET_ENOSR,
714 [ENONET] = TARGET_ENONET,
715 [ENOPKG] = TARGET_ENOPKG,
716 [EREMOTE] = TARGET_EREMOTE,
717 [ENOLINK] = TARGET_ENOLINK,
718 [EADV] = TARGET_EADV,
719 [ESRMNT] = TARGET_ESRMNT,
720 [ECOMM] = TARGET_ECOMM,
721 [EPROTO] = TARGET_EPROTO,
722 [EDOTDOT] = TARGET_EDOTDOT,
723 [EMULTIHOP] = TARGET_EMULTIHOP,
724 [EBADMSG] = TARGET_EBADMSG,
725 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
726 [EOVERFLOW] = TARGET_EOVERFLOW,
727 [ENOTUNIQ] = TARGET_ENOTUNIQ,
728 [EBADFD] = TARGET_EBADFD,
729 [EREMCHG] = TARGET_EREMCHG,
730 [ELIBACC] = TARGET_ELIBACC,
731 [ELIBBAD] = TARGET_ELIBBAD,
732 [ELIBSCN] = TARGET_ELIBSCN,
733 [ELIBMAX] = TARGET_ELIBMAX,
734 [ELIBEXEC] = TARGET_ELIBEXEC,
735 [EILSEQ] = TARGET_EILSEQ,
736 [ENOSYS] = TARGET_ENOSYS,
737 [ELOOP] = TARGET_ELOOP,
738 [ERESTART] = TARGET_ERESTART,
739 [ESTRPIPE] = TARGET_ESTRPIPE,
740 [ENOTEMPTY] = TARGET_ENOTEMPTY,
741 [EUSERS] = TARGET_EUSERS,
742 [ENOTSOCK] = TARGET_ENOTSOCK,
743 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
744 [EMSGSIZE] = TARGET_EMSGSIZE,
745 [EPROTOTYPE] = TARGET_EPROTOTYPE,
746 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
747 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
748 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
749 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
750 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
751 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
752 [EADDRINUSE] = TARGET_EADDRINUSE,
753 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
754 [ENETDOWN] = TARGET_ENETDOWN,
755 [ENETUNREACH] = TARGET_ENETUNREACH,
756 [ENETRESET] = TARGET_ENETRESET,
757 [ECONNABORTED] = TARGET_ECONNABORTED,
758 [ECONNRESET] = TARGET_ECONNRESET,
759 [ENOBUFS] = TARGET_ENOBUFS,
760 [EISCONN] = TARGET_EISCONN,
761 [ENOTCONN] = TARGET_ENOTCONN,
762 [EUCLEAN] = TARGET_EUCLEAN,
763 [ENOTNAM] = TARGET_ENOTNAM,
764 [ENAVAIL] = TARGET_ENAVAIL,
765 [EISNAM] = TARGET_EISNAM,
766 [EREMOTEIO] = TARGET_EREMOTEIO,
767 [EDQUOT] = TARGET_EDQUOT,
768 [ESHUTDOWN] = TARGET_ESHUTDOWN,
769 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
770 [ETIMEDOUT] = TARGET_ETIMEDOUT,
771 [ECONNREFUSED] = TARGET_ECONNREFUSED,
772 [EHOSTDOWN] = TARGET_EHOSTDOWN,
773 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
774 [EALREADY] = TARGET_EALREADY,
775 [EINPROGRESS] = TARGET_EINPROGRESS,
776 [ESTALE] = TARGET_ESTALE,
777 [ECANCELED] = TARGET_ECANCELED,
778 [ENOMEDIUM] = TARGET_ENOMEDIUM,
779 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
780 #ifdef ENOKEY
781 [ENOKEY] = TARGET_ENOKEY,
782 #endif
783 #ifdef EKEYEXPIRED
784 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
785 #endif
786 #ifdef EKEYREVOKED
787 [EKEYREVOKED] = TARGET_EKEYREVOKED,
788 #endif
789 #ifdef EKEYREJECTED
790 [EKEYREJECTED] = TARGET_EKEYREJECTED,
791 #endif
792 #ifdef EOWNERDEAD
793 [EOWNERDEAD] = TARGET_EOWNERDEAD,
794 #endif
795 #ifdef ENOTRECOVERABLE
796 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
797 #endif
798 #ifdef ENOMSG
799 [ENOMSG] = TARGET_ENOMSG,
800 #endif
803 static inline int host_to_target_errno(int err)
805 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
806 host_to_target_errno_table[err]) {
807 return host_to_target_errno_table[err];
809 return err;
812 static inline int target_to_host_errno(int err)
814 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
815 target_to_host_errno_table[err]) {
816 return target_to_host_errno_table[err];
818 return err;
821 static inline abi_long get_errno(abi_long ret)
823 if (ret == -1)
824 return -host_to_target_errno(errno);
825 else
826 return ret;
829 static inline int is_error(abi_long ret)
831 return (abi_ulong)ret >= (abi_ulong)(-4096);
834 const char *target_strerror(int err)
836 if (err == TARGET_ERESTARTSYS) {
837 return "To be restarted";
839 if (err == TARGET_QEMU_ESIGRETURN) {
840 return "Successful exit from sigreturn";
843 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
844 return NULL;
846 return strerror(target_to_host_errno(err));
849 #define safe_syscall0(type, name) \
850 static type safe_##name(void) \
852 return safe_syscall(__NR_##name); \
855 #define safe_syscall1(type, name, type1, arg1) \
856 static type safe_##name(type1 arg1) \
858 return safe_syscall(__NR_##name, arg1); \
861 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
862 static type safe_##name(type1 arg1, type2 arg2) \
864 return safe_syscall(__NR_##name, arg1, arg2); \
867 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
868 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
870 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
873 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
874 type4, arg4) \
875 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
877 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
880 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
881 type4, arg4, type5, arg5) \
882 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
883 type5 arg5) \
885 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
888 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
889 type4, arg4, type5, arg5, type6, arg6) \
890 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
891 type5 arg5, type6 arg6) \
893 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
896 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
897 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
898 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
899 int, flags, mode_t, mode)
900 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
901 struct rusage *, rusage)
902 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
903 int, options, struct rusage *, rusage)
904 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
905 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
906 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
907 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
908 struct timespec *, tsp, const sigset_t *, sigmask,
909 size_t, sigsetsize)
910 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
911 int, maxevents, int, timeout, const sigset_t *, sigmask,
912 size_t, sigsetsize)
913 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
914 const struct timespec *,timeout,int *,uaddr2,int,val3)
915 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
916 safe_syscall2(int, kill, pid_t, pid, int, sig)
917 safe_syscall2(int, tkill, int, tid, int, sig)
918 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
919 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
920 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
921 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
922 socklen_t, addrlen)
923 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
924 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
925 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
926 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
927 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
928 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
929 safe_syscall2(int, flock, int, fd, int, operation)
930 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
931 const struct timespec *, uts, size_t, sigsetsize)
932 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
933 int, flags)
934 safe_syscall2(int, nanosleep, const struct timespec *, req,
935 struct timespec *, rem)
936 #ifdef TARGET_NR_clock_nanosleep
937 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
938 const struct timespec *, req, struct timespec *, rem)
939 #endif
940 #ifdef __NR_msgsnd
941 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
942 int, flags)
943 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
944 long, msgtype, int, flags)
945 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
946 unsigned, nsops, const struct timespec *, timeout)
947 #else
948 /* This host kernel architecture uses a single ipc syscall; fake up
949 * wrappers for the sub-operations to hide this implementation detail.
950 * Annoyingly we can't include linux/ipc.h to get the constant definitions
951 * for the call parameter because some structs in there conflict with the
952 * sys/ipc.h ones. So we just define them here, and rely on them being
953 * the same for all host architectures.
955 #define Q_SEMTIMEDOP 4
956 #define Q_MSGSND 11
957 #define Q_MSGRCV 12
958 #define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))
960 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
961 void *, ptr, long, fifth)
962 static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
964 return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
966 static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
968 return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
970 static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
971 const struct timespec *timeout)
973 return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
974 (long)timeout);
976 #endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
/* POSIX message queue send/receive; wrapped only when both the target and
 * the host provide the mq syscalls.
 */
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
999 static inline int host_to_target_sock_type(int host_type)
1001 int target_type;
1003 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
1004 case SOCK_DGRAM:
1005 target_type = TARGET_SOCK_DGRAM;
1006 break;
1007 case SOCK_STREAM:
1008 target_type = TARGET_SOCK_STREAM;
1009 break;
1010 default:
1011 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
1012 break;
1015 #if defined(SOCK_CLOEXEC)
1016 if (host_type & SOCK_CLOEXEC) {
1017 target_type |= TARGET_SOCK_CLOEXEC;
1019 #endif
1021 #if defined(SOCK_NONBLOCK)
1022 if (host_type & SOCK_NONBLOCK) {
1023 target_type |= TARGET_SOCK_NONBLOCK;
1025 #endif
1027 return target_type;
/* Current target program break. */
static abi_ulong target_brk;
/* The break value recorded at program start; brk below this is refused. */
static abi_ulong target_original_brk;
/* Host-page-aligned top of the area already mapped for the target heap. */
static abi_ulong brk_page;
/* Record the initial program break for the target (called at startup). */
void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}
/* Uncomment the first definition to trace do_brk() decisions to stderr. */
//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)
/* do_brk() must return target values and target errnos.
 *
 * Grows or shrinks the target heap.  Shrinking (or a query with new_brk==0)
 * never unmaps; growth within the already-reserved pages just moves the
 * break, while growth past brk_page maps new anonymous memory.
 */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    /* brk(0) is a query: report the current break. */
    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    /* Refuse to move the break below the initial heap start. */
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done... */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages. */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contains garbage data due to a previous heap usage (grown
         * then shrunken). */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value. */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
1121 static inline abi_long copy_from_user_fdset(fd_set *fds,
1122 abi_ulong target_fds_addr,
1123 int n)
1125 int i, nw, j, k;
1126 abi_ulong b, *target_fds;
1128 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1129 if (!(target_fds = lock_user(VERIFY_READ,
1130 target_fds_addr,
1131 sizeof(abi_ulong) * nw,
1132 1)))
1133 return -TARGET_EFAULT;
1135 FD_ZERO(fds);
1136 k = 0;
1137 for (i = 0; i < nw; i++) {
1138 /* grab the abi_ulong */
1139 __get_user(b, &target_fds[i]);
1140 for (j = 0; j < TARGET_ABI_BITS; j++) {
1141 /* check the bit inside the abi_ulong */
1142 if ((b >> j) & 1)
1143 FD_SET(k, fds);
1144 k++;
1148 unlock_user(target_fds, target_fds_addr, 0);
1150 return 0;
1153 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1154 abi_ulong target_fds_addr,
1155 int n)
1157 if (target_fds_addr) {
1158 if (copy_from_user_fdset(fds, target_fds_addr, n))
1159 return -TARGET_EFAULT;
1160 *fds_ptr = fds;
1161 } else {
1162 *fds_ptr = NULL;
1164 return 0;
1167 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1168 const fd_set *fds,
1169 int n)
1171 int i, nw, j, k;
1172 abi_long v;
1173 abi_ulong *target_fds;
1175 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1176 if (!(target_fds = lock_user(VERIFY_WRITE,
1177 target_fds_addr,
1178 sizeof(abi_ulong) * nw,
1179 0)))
1180 return -TARGET_EFAULT;
1182 k = 0;
1183 for (i = 0; i < nw; i++) {
1184 v = 0;
1185 for (j = 0; j < TARGET_ABI_BITS; j++) {
1186 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1187 k++;
1189 __put_user(v, &target_fds[i]);
1192 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1194 return 0;
1197 #if defined(__alpha__)
1198 #define HOST_HZ 1024
1199 #else
1200 #define HOST_HZ 100
1201 #endif
/* Convert a host clock_t tick count to target ticks, rescaling when the
 * host and target HZ differ.
 */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    /* 64-bit intermediate avoids overflow in the multiplication. */
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
/* Copy a host struct rusage to guest memory at target_addr, byte-swapping
 * every field to target endianness.
 * Returns 0 on success, -TARGET_EFAULT if the guest address is bad.
 */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
1242 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1244 abi_ulong target_rlim_swap;
1245 rlim_t result;
1247 target_rlim_swap = tswapal(target_rlim);
1248 if (target_rlim_swap == TARGET_RLIM_INFINITY)
1249 return RLIM_INFINITY;
1251 result = target_rlim_swap;
1252 if (target_rlim_swap != (rlim_t)result)
1253 return RLIM_INFINITY;
1255 return result;
1258 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1260 abi_ulong target_rlim_swap;
1261 abi_ulong result;
1263 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1264 target_rlim_swap = TARGET_RLIM_INFINITY;
1265 else
1266 target_rlim_swap = rlim;
1267 result = tswapal(target_rlim_swap);
1269 return result;
/* Map a target RLIMIT_* resource code to the host constant.  Unknown codes
 * are passed through unchanged so the host getrlimit/setrlimit can reject
 * genuinely invalid input itself.
 */
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
1310 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1311 abi_ulong target_tv_addr)
1313 struct target_timeval *target_tv;
1315 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1316 return -TARGET_EFAULT;
1318 __get_user(tv->tv_sec, &target_tv->tv_sec);
1319 __get_user(tv->tv_usec, &target_tv->tv_usec);
1321 unlock_user_struct(target_tv, target_tv_addr, 0);
1323 return 0;
1326 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1327 const struct timeval *tv)
1329 struct target_timeval *target_tv;
1331 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1332 return -TARGET_EFAULT;
1334 __put_user(tv->tv_sec, &target_tv->tv_sec);
1335 __put_user(tv->tv_usec, &target_tv->tv_usec);
1337 unlock_user_struct(target_tv, target_tv_addr, 1);
1339 return 0;
1342 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1343 abi_ulong target_tz_addr)
1345 struct target_timezone *target_tz;
1347 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1348 return -TARGET_EFAULT;
1351 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1352 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1354 unlock_user_struct(target_tz, target_tz_addr, 0);
1356 return 0;
1359 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1360 #include <mqueue.h>
1362 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1363 abi_ulong target_mq_attr_addr)
1365 struct target_mq_attr *target_mq_attr;
1367 if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1368 target_mq_attr_addr, 1))
1369 return -TARGET_EFAULT;
1371 __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1372 __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1373 __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1374 __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1376 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1378 return 0;
1381 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1382 const struct mq_attr *attr)
1384 struct target_mq_attr *target_mq_attr;
1386 if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1387 target_mq_attr_addr, 0))
1388 return -TARGET_EFAULT;
1390 __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1391 __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1392 __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1393 __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1395 unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1397 return 0;
1399 #endif
1401 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1402 /* do_select() must return target values and target errnos. */
/* do_select() must return target values and target errnos.
 *
 * Emulates select(2) on top of the host pselect6: the guest fd sets and
 * timeval are copied in, the (possibly updated) sets and remaining time
 * are copied back out on success.
 */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /* select takes a timeval but pselect6 wants a timespec. */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        /* Report the remaining timeout back to the guest, as Linux
         * select does.
         */
        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
1459 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1460 static abi_long do_old_select(abi_ulong arg1)
1462 struct target_sel_arg_struct *sel;
1463 abi_ulong inp, outp, exp, tvp;
1464 long nsel;
1466 if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1467 return -TARGET_EFAULT;
1470 nsel = tswapal(sel->n);
1471 inp = tswapal(sel->inp);
1472 outp = tswapal(sel->outp);
1473 exp = tswapal(sel->exp);
1474 tvp = tswapal(sel->tvp);
1476 unlock_user_struct(sel, arg1, 0);
1478 return do_select(nsel, inp, outp, exp, tvp);
1480 #endif
1481 #endif
/* Call the host pipe2() when configure detected it; otherwise report
 * ENOSYS so the caller can only service the plain pipe path.
 */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}
/* Common implementation for the pipe and pipe2 syscalls.
 * flags != 0 forces the pipe2 path; is_pipe2 selects the calling
 * convention (several targets return the second fd in a register for
 * the original pipe syscall instead of writing both to memory).
 */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    /* Default convention: write both fds to guest memory at pipedes. */
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
1526 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1527 abi_ulong target_addr,
1528 socklen_t len)
1530 struct target_ip_mreqn *target_smreqn;
1532 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1533 if (!target_smreqn)
1534 return -TARGET_EFAULT;
1535 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1536 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1537 if (len == sizeof(struct target_ip_mreqn))
1538 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1539 unlock_user(target_smreqn, target_addr, 0);
1541 return 0;
/* Convert a guest sockaddr into the host representation, including the
 * per-family quirks (AF_UNIX length fixup, AF_NETLINK/AF_PACKET field
 * swaps).  fds with a registered address translator delegate entirely.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* Last byte non-zero and the next one zero: extend the
             * length by one so the terminator is included.
             */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
/* Copy a host sockaddr out to the guest, byte-swapping the family and the
 * AF_NETLINK / AF_PACKET fields.  A zero len is a successful no-op, and
 * sa_family is only written when len covers it (the guest may legally
 * supply a short buffer).
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
        struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
/* Convert the ancillary data (control messages) of a guest msghdr into the
 * host msgh, converting SCM_RIGHTS fd arrays and SCM_CREDENTIALS; other
 * payload types are copied verbatim with a warning.  On return
 * msgh->msg_controllen reflects the space actually used.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest control buffer.
 */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length excluding the (aligned) header. */
        int len = tswapal(target_cmsg->cmsg_len)
            - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            gemu_log("Host cmsg overflow\n");
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* Array of file descriptors: each int is byte-swapped. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            /* Unknown payload: pass the bytes through unswapped. */
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
/* Convert the ancillary data of a host msghdr back into the guest's
 * control buffer, handling SCM_RIGHTS, SO_TIMESTAMP and SCM_CREDENTIALS
 * payload conversion and reporting guest-buffer truncation via MSG_CTRUNC.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest control buffer.
 */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        tgt_len = TARGET_CMSG_LEN(len);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
        default:
            break;
        }

        if (msg_controllen < tgt_len) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen;
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            /* Unknown payload: raw copy, zero-padding any extra space. */
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(tgt_len);
        tgt_space = TARGET_CMSG_SPACE(len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
/* Byte-swap every multi-byte field of a netlink message header in place. */
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
    nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
    nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
}
/* Walk a buffer of host netlink messages of total size len, converting the
 * payload of each via the supplied callback and then byte-swapping each
 * header for the guest.  Stops at DONE/ERROR messages or a malformed
 * length.  Returns 0, or the first negative callback result.
 */
static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*host_to_target_nlmsg)
                                              (struct nlmsghdr *))
{
    uint32_t nlmsg_len;
    abi_long ret;

    while (len > sizeof(struct nlmsghdr)) {

        /* Snapshot the host-order length before the header is swapped. */
        nlmsg_len = nlh->nlmsg_len;
        if (nlmsg_len < sizeof(struct nlmsghdr) ||
            nlmsg_len > len) {
            break;
        }

        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            tswap_nlmsghdr(nlh);
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            tswap_nlmsghdr(&e->msg);
            tswap_nlmsghdr(nlh);
            return 0;
        }
        default:
            ret = host_to_target_nlmsg(nlh);
            if (ret < 0) {
                tswap_nlmsghdr(nlh);
                return ret;
            }
            break;
        }
        tswap_nlmsghdr(nlh);
        len -= NLMSG_ALIGN(nlmsg_len);
        nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
    }
    return 0;
}
/* Walk a buffer of guest netlink messages, byte-swapping each header to
 * host order and converting the payload of data messages via the callback.
 * Stops at DONE/ERROR messages or a malformed length.  Returns 0, or the
 * first negative callback result.
 */
static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*target_to_host_nlmsg)
                                              (struct nlmsghdr *))
{
    int ret;

    while (len > sizeof(struct nlmsghdr)) {
        /* Length is still guest-order here, hence the explicit swaps. */
        if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
            tswap32(nlh->nlmsg_len) > len) {
            break;
        }
        tswap_nlmsghdr(nlh);
        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            tswap_nlmsghdr(&e->msg);
            return 0;
        }
        default:
            ret = target_to_host_nlmsg(nlh);
            if (ret < 0) {
                return ret;
            }
        }
        len -= NLMSG_ALIGN(nlh->nlmsg_len);
        nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
    }
    return 0;
}
1950 #ifdef CONFIG_RTNETLINK
/* Iterate over a run of host netlink attributes of total size len, letting
 * the callback convert each payload (with an optional context pointer)
 * before the attribute header itself is byte-swapped for the guest.
 * Returns 0, or the first negative callback result.
 */
static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
                                               size_t len, void *context,
                                               abi_long (*host_to_target_nlattr)
                                               (struct nlattr *,
                                                void *context))
{
    unsigned short nla_len;
    abi_long ret;

    while (len > sizeof(struct nlattr)) {
        /* Save the host-order length before the header is swapped. */
        nla_len = nlattr->nla_len;
        if (nla_len < sizeof(struct nlattr) ||
            nla_len > len) {
            break;
        }
        ret = host_to_target_nlattr(nlattr, context);
        nlattr->nla_len = tswap16(nlattr->nla_len);
        nlattr->nla_type = tswap16(nlattr->nla_type);
        if (ret < 0) {
            return ret;
        }
        len -= NLA_ALIGN(nla_len);
        nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
    }
    return 0;
}
1978 static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
1979 size_t len,
1980 abi_long (*host_to_target_rtattr)
1981 (struct rtattr *))
1983 unsigned short rta_len;
1984 abi_long ret;
1986 while (len > sizeof(struct rtattr)) {
1987 rta_len = rtattr->rta_len;
1988 if (rta_len < sizeof(struct rtattr) ||
1989 rta_len > len) {
1990 break;
1992 ret = host_to_target_rtattr(rtattr);
1993 rtattr->rta_len = tswap16(rtattr->rta_len);
1994 rtattr->rta_type = tswap16(rtattr->rta_type);
1995 if (ret < 0) {
1996 return ret;
1998 len -= RTA_ALIGN(rta_len);
1999 rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
2001 return 0;
2004 #define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)
/* Byte-swap the payload of one IFLA_BR_* (bridge) attribute in place,
 * dispatching on the attribute type's known payload width.  Unknown types
 * are logged and left untouched.  Always returns 0.
 */
static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
                                                  void *context)
{
    uint16_t *u16;
    uint32_t *u32;
    uint64_t *u64;

    switch (nlattr->nla_type) {
    /* no data */
    case QEMU_IFLA_BR_FDB_FLUSH:
        break;
    /* binary */
    case QEMU_IFLA_BR_GROUP_ADDR:
        break;
    /* uint8_t */
    case QEMU_IFLA_BR_VLAN_FILTERING:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
    case QEMU_IFLA_BR_MCAST_ROUTER:
    case QEMU_IFLA_BR_MCAST_SNOOPING:
    case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
    case QEMU_IFLA_BR_MCAST_QUERIER:
    case QEMU_IFLA_BR_NF_CALL_IPTABLES:
    case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
    case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
        /* single bytes need no swapping */
        break;
    /* uint16_t */
    case QEMU_IFLA_BR_PRIORITY:
    case QEMU_IFLA_BR_VLAN_PROTOCOL:
    case QEMU_IFLA_BR_GROUP_FWD_MASK:
    case QEMU_IFLA_BR_ROOT_PORT:
    case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
        u16 = NLA_DATA(nlattr);
        *u16 = tswap16(*u16);
        break;
    /* uint32_t */
    case QEMU_IFLA_BR_FORWARD_DELAY:
    case QEMU_IFLA_BR_HELLO_TIME:
    case QEMU_IFLA_BR_MAX_AGE:
    case QEMU_IFLA_BR_AGEING_TIME:
    case QEMU_IFLA_BR_STP_STATE:
    case QEMU_IFLA_BR_ROOT_PATH_COST:
    case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
    case QEMU_IFLA_BR_MCAST_HASH_MAX:
    case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
    case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint64_t */
    case QEMU_IFLA_BR_HELLO_TIMER:
    case QEMU_IFLA_BR_TCN_TIMER:
    case QEMU_IFLA_BR_GC_TIMER:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
    case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
    case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
    case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
        u64 = NLA_DATA(nlattr);
        *u64 = tswap64(*u64);
        break;
    /* ifla_bridge_id: uin8_t[] */
    case QEMU_IFLA_BR_ROOT_ID:
    case QEMU_IFLA_BR_BRIDGE_ID:
        break;
    default:
        gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}
/* Byte-swap the payload of one IFLA_BRPORT_* (bridge port) attribute in
 * place, dispatching on the attribute type's known payload width.  Unknown
 * types are logged and left untouched.  Always returns 0.
 */
static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
                                                        void *context)
{
    uint16_t *u16;
    uint32_t *u32;
    uint64_t *u64;

    switch (nlattr->nla_type) {
    /* uint8_t */
    case QEMU_IFLA_BRPORT_STATE:
    case QEMU_IFLA_BRPORT_MODE:
    case QEMU_IFLA_BRPORT_GUARD:
    case QEMU_IFLA_BRPORT_PROTECT:
    case QEMU_IFLA_BRPORT_FAST_LEAVE:
    case QEMU_IFLA_BRPORT_LEARNING:
    case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
    case QEMU_IFLA_BRPORT_PROXYARP:
    case QEMU_IFLA_BRPORT_LEARNING_SYNC:
    case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
    case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
    case QEMU_IFLA_BRPORT_CONFIG_PENDING:
    case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
        /* single bytes need no swapping */
        break;
    /* uint16_t */
    case QEMU_IFLA_BRPORT_PRIORITY:
    case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
    case QEMU_IFLA_BRPORT_DESIGNATED_COST:
    case QEMU_IFLA_BRPORT_ID:
    case QEMU_IFLA_BRPORT_NO:
        u16 = NLA_DATA(nlattr);
        *u16 = tswap16(*u16);
        break;
    /* uin32_t */
    case QEMU_IFLA_BRPORT_COST:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint64_t */
    case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
    case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
    case QEMU_IFLA_BRPORT_HOLD_TIMER:
        u64 = NLA_DATA(nlattr);
        *u64 = tswap64(*u64);
        break;
    /* ifla_bridge_id: uint8_t[] */
    case QEMU_IFLA_BRPORT_ROOT_ID:
    case QEMU_IFLA_BRPORT_BRIDGE_ID:
        break;
    default:
        gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}
/*
 * Scratch state threaded through IFLA_LINKINFO attribute walking:
 * remembers the IFLA_INFO_KIND / IFLA_INFO_SLAVE_KIND strings (and their
 * lengths) so that the nested IFLA_INFO_DATA / IFLA_INFO_SLAVE_DATA
 * attributes can be dispatched to the right per-kind converter.
 * The name pointers alias the netlink buffer; nothing is owned here.
 */
struct linkinfo_context {
    int len;          /* length of @name, excluding the attr header */
    char *name;       /* IFLA_INFO_KIND payload ("bridge", ...) */
    int slave_len;    /* length of @slave_name */
    char *slave_name; /* IFLA_INFO_SLAVE_KIND payload */
};
2142 static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
2143 void *context)
2145 struct linkinfo_context *li_context = context;
2147 switch (nlattr->nla_type) {
2148 /* string */
2149 case QEMU_IFLA_INFO_KIND:
2150 li_context->name = NLA_DATA(nlattr);
2151 li_context->len = nlattr->nla_len - NLA_HDRLEN;
2152 break;
2153 case QEMU_IFLA_INFO_SLAVE_KIND:
2154 li_context->slave_name = NLA_DATA(nlattr);
2155 li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
2156 break;
2157 /* stats */
2158 case QEMU_IFLA_INFO_XSTATS:
2159 /* FIXME: only used by CAN */
2160 break;
2161 /* nested */
2162 case QEMU_IFLA_INFO_DATA:
2163 if (strncmp(li_context->name, "bridge",
2164 li_context->len) == 0) {
2165 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2166 nlattr->nla_len,
2167 NULL,
2168 host_to_target_data_bridge_nlattr);
2169 } else {
2170 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
2172 break;
2173 case QEMU_IFLA_INFO_SLAVE_DATA:
2174 if (strncmp(li_context->slave_name, "bridge",
2175 li_context->slave_len) == 0) {
2176 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2177 nlattr->nla_len,
2178 NULL,
2179 host_to_target_slave_data_bridge_nlattr);
2180 } else {
2181 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2182 li_context->slave_name);
2184 break;
2185 default:
2186 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
2187 break;
2190 return 0;
2193 static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
2194 void *context)
2196 uint32_t *u32;
2197 int i;
2199 switch (nlattr->nla_type) {
2200 case QEMU_IFLA_INET_CONF:
2201 u32 = NLA_DATA(nlattr);
2202 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2203 i++) {
2204 u32[i] = tswap32(u32[i]);
2206 break;
2207 default:
2208 gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
2210 return 0;
2213 static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
2214 void *context)
2216 uint32_t *u32;
2217 uint64_t *u64;
2218 struct ifla_cacheinfo *ci;
2219 int i;
2221 switch (nlattr->nla_type) {
2222 /* binaries */
2223 case QEMU_IFLA_INET6_TOKEN:
2224 break;
2225 /* uint8_t */
2226 case QEMU_IFLA_INET6_ADDR_GEN_MODE:
2227 break;
2228 /* uint32_t */
2229 case QEMU_IFLA_INET6_FLAGS:
2230 u32 = NLA_DATA(nlattr);
2231 *u32 = tswap32(*u32);
2232 break;
2233 /* uint32_t[] */
2234 case QEMU_IFLA_INET6_CONF:
2235 u32 = NLA_DATA(nlattr);
2236 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2237 i++) {
2238 u32[i] = tswap32(u32[i]);
2240 break;
2241 /* ifla_cacheinfo */
2242 case QEMU_IFLA_INET6_CACHEINFO:
2243 ci = NLA_DATA(nlattr);
2244 ci->max_reasm_len = tswap32(ci->max_reasm_len);
2245 ci->tstamp = tswap32(ci->tstamp);
2246 ci->reachable_time = tswap32(ci->reachable_time);
2247 ci->retrans_time = tswap32(ci->retrans_time);
2248 break;
2249 /* uint64_t[] */
2250 case QEMU_IFLA_INET6_STATS:
2251 case QEMU_IFLA_INET6_ICMP6STATS:
2252 u64 = NLA_DATA(nlattr);
2253 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
2254 i++) {
2255 u64[i] = tswap64(u64[i]);
2257 break;
2258 default:
2259 gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
2261 return 0;
2264 static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
2265 void *context)
2267 switch (nlattr->nla_type) {
2268 case AF_INET:
2269 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2270 NULL,
2271 host_to_target_data_inet_nlattr);
2272 case AF_INET6:
2273 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2274 NULL,
2275 host_to_target_data_inet6_nlattr);
2276 default:
2277 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
2278 break;
2280 return 0;
2283 static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
2285 uint32_t *u32;
2286 struct rtnl_link_stats *st;
2287 struct rtnl_link_stats64 *st64;
2288 struct rtnl_link_ifmap *map;
2289 struct linkinfo_context li_context;
2291 switch (rtattr->rta_type) {
2292 /* binary stream */
2293 case QEMU_IFLA_ADDRESS:
2294 case QEMU_IFLA_BROADCAST:
2295 /* string */
2296 case QEMU_IFLA_IFNAME:
2297 case QEMU_IFLA_QDISC:
2298 break;
2299 /* uin8_t */
2300 case QEMU_IFLA_OPERSTATE:
2301 case QEMU_IFLA_LINKMODE:
2302 case QEMU_IFLA_CARRIER:
2303 case QEMU_IFLA_PROTO_DOWN:
2304 break;
2305 /* uint32_t */
2306 case QEMU_IFLA_MTU:
2307 case QEMU_IFLA_LINK:
2308 case QEMU_IFLA_WEIGHT:
2309 case QEMU_IFLA_TXQLEN:
2310 case QEMU_IFLA_CARRIER_CHANGES:
2311 case QEMU_IFLA_NUM_RX_QUEUES:
2312 case QEMU_IFLA_NUM_TX_QUEUES:
2313 case QEMU_IFLA_PROMISCUITY:
2314 case QEMU_IFLA_EXT_MASK:
2315 case QEMU_IFLA_LINK_NETNSID:
2316 case QEMU_IFLA_GROUP:
2317 case QEMU_IFLA_MASTER:
2318 case QEMU_IFLA_NUM_VF:
2319 u32 = RTA_DATA(rtattr);
2320 *u32 = tswap32(*u32);
2321 break;
2322 /* struct rtnl_link_stats */
2323 case QEMU_IFLA_STATS:
2324 st = RTA_DATA(rtattr);
2325 st->rx_packets = tswap32(st->rx_packets);
2326 st->tx_packets = tswap32(st->tx_packets);
2327 st->rx_bytes = tswap32(st->rx_bytes);
2328 st->tx_bytes = tswap32(st->tx_bytes);
2329 st->rx_errors = tswap32(st->rx_errors);
2330 st->tx_errors = tswap32(st->tx_errors);
2331 st->rx_dropped = tswap32(st->rx_dropped);
2332 st->tx_dropped = tswap32(st->tx_dropped);
2333 st->multicast = tswap32(st->multicast);
2334 st->collisions = tswap32(st->collisions);
2336 /* detailed rx_errors: */
2337 st->rx_length_errors = tswap32(st->rx_length_errors);
2338 st->rx_over_errors = tswap32(st->rx_over_errors);
2339 st->rx_crc_errors = tswap32(st->rx_crc_errors);
2340 st->rx_frame_errors = tswap32(st->rx_frame_errors);
2341 st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
2342 st->rx_missed_errors = tswap32(st->rx_missed_errors);
2344 /* detailed tx_errors */
2345 st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
2346 st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
2347 st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
2348 st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
2349 st->tx_window_errors = tswap32(st->tx_window_errors);
2351 /* for cslip etc */
2352 st->rx_compressed = tswap32(st->rx_compressed);
2353 st->tx_compressed = tswap32(st->tx_compressed);
2354 break;
2355 /* struct rtnl_link_stats64 */
2356 case QEMU_IFLA_STATS64:
2357 st64 = RTA_DATA(rtattr);
2358 st64->rx_packets = tswap64(st64->rx_packets);
2359 st64->tx_packets = tswap64(st64->tx_packets);
2360 st64->rx_bytes = tswap64(st64->rx_bytes);
2361 st64->tx_bytes = tswap64(st64->tx_bytes);
2362 st64->rx_errors = tswap64(st64->rx_errors);
2363 st64->tx_errors = tswap64(st64->tx_errors);
2364 st64->rx_dropped = tswap64(st64->rx_dropped);
2365 st64->tx_dropped = tswap64(st64->tx_dropped);
2366 st64->multicast = tswap64(st64->multicast);
2367 st64->collisions = tswap64(st64->collisions);
2369 /* detailed rx_errors: */
2370 st64->rx_length_errors = tswap64(st64->rx_length_errors);
2371 st64->rx_over_errors = tswap64(st64->rx_over_errors);
2372 st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
2373 st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
2374 st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
2375 st64->rx_missed_errors = tswap64(st64->rx_missed_errors);
2377 /* detailed tx_errors */
2378 st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
2379 st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
2380 st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
2381 st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
2382 st64->tx_window_errors = tswap64(st64->tx_window_errors);
2384 /* for cslip etc */
2385 st64->rx_compressed = tswap64(st64->rx_compressed);
2386 st64->tx_compressed = tswap64(st64->tx_compressed);
2387 break;
2388 /* struct rtnl_link_ifmap */
2389 case QEMU_IFLA_MAP:
2390 map = RTA_DATA(rtattr);
2391 map->mem_start = tswap64(map->mem_start);
2392 map->mem_end = tswap64(map->mem_end);
2393 map->base_addr = tswap64(map->base_addr);
2394 map->irq = tswap16(map->irq);
2395 break;
2396 /* nested */
2397 case QEMU_IFLA_LINKINFO:
2398 memset(&li_context, 0, sizeof(li_context));
2399 return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2400 &li_context,
2401 host_to_target_data_linkinfo_nlattr);
2402 case QEMU_IFLA_AF_SPEC:
2403 return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2404 NULL,
2405 host_to_target_data_spec_nlattr);
2406 default:
2407 gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
2408 break;
2410 return 0;
2413 static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
2415 uint32_t *u32;
2416 struct ifa_cacheinfo *ci;
2418 switch (rtattr->rta_type) {
2419 /* binary: depends on family type */
2420 case IFA_ADDRESS:
2421 case IFA_LOCAL:
2422 break;
2423 /* string */
2424 case IFA_LABEL:
2425 break;
2426 /* u32 */
2427 case IFA_FLAGS:
2428 case IFA_BROADCAST:
2429 u32 = RTA_DATA(rtattr);
2430 *u32 = tswap32(*u32);
2431 break;
2432 /* struct ifa_cacheinfo */
2433 case IFA_CACHEINFO:
2434 ci = RTA_DATA(rtattr);
2435 ci->ifa_prefered = tswap32(ci->ifa_prefered);
2436 ci->ifa_valid = tswap32(ci->ifa_valid);
2437 ci->cstamp = tswap32(ci->cstamp);
2438 ci->tstamp = tswap32(ci->tstamp);
2439 break;
2440 default:
2441 gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
2442 break;
2444 return 0;
2447 static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
2449 uint32_t *u32;
2450 switch (rtattr->rta_type) {
2451 /* binary: depends on family type */
2452 case RTA_GATEWAY:
2453 case RTA_DST:
2454 case RTA_PREFSRC:
2455 break;
2456 /* u32 */
2457 case RTA_PRIORITY:
2458 case RTA_TABLE:
2459 case RTA_OIF:
2460 u32 = RTA_DATA(rtattr);
2461 *u32 = tswap32(*u32);
2462 break;
2463 default:
2464 gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
2465 break;
2467 return 0;
2470 static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
2471 uint32_t rtattr_len)
2473 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2474 host_to_target_data_link_rtattr);
2477 static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
2478 uint32_t rtattr_len)
2480 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2481 host_to_target_data_addr_rtattr);
2484 static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
2485 uint32_t rtattr_len)
2487 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2488 host_to_target_data_route_rtattr);
2491 static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
2493 uint32_t nlmsg_len;
2494 struct ifinfomsg *ifi;
2495 struct ifaddrmsg *ifa;
2496 struct rtmsg *rtm;
2498 nlmsg_len = nlh->nlmsg_len;
2499 switch (nlh->nlmsg_type) {
2500 case RTM_NEWLINK:
2501 case RTM_DELLINK:
2502 case RTM_GETLINK:
2503 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2504 ifi = NLMSG_DATA(nlh);
2505 ifi->ifi_type = tswap16(ifi->ifi_type);
2506 ifi->ifi_index = tswap32(ifi->ifi_index);
2507 ifi->ifi_flags = tswap32(ifi->ifi_flags);
2508 ifi->ifi_change = tswap32(ifi->ifi_change);
2509 host_to_target_link_rtattr(IFLA_RTA(ifi),
2510 nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
2512 break;
2513 case RTM_NEWADDR:
2514 case RTM_DELADDR:
2515 case RTM_GETADDR:
2516 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2517 ifa = NLMSG_DATA(nlh);
2518 ifa->ifa_index = tswap32(ifa->ifa_index);
2519 host_to_target_addr_rtattr(IFA_RTA(ifa),
2520 nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
2522 break;
2523 case RTM_NEWROUTE:
2524 case RTM_DELROUTE:
2525 case RTM_GETROUTE:
2526 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2527 rtm = NLMSG_DATA(nlh);
2528 rtm->rtm_flags = tswap32(rtm->rtm_flags);
2529 host_to_target_route_rtattr(RTM_RTA(rtm),
2530 nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
2532 break;
2533 default:
2534 return -TARGET_EINVAL;
2536 return 0;
2539 static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
2540 size_t len)
2542 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
2545 static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
2546 size_t len,
2547 abi_long (*target_to_host_rtattr)
2548 (struct rtattr *))
2550 abi_long ret;
2552 while (len >= sizeof(struct rtattr)) {
2553 if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
2554 tswap16(rtattr->rta_len) > len) {
2555 break;
2557 rtattr->rta_len = tswap16(rtattr->rta_len);
2558 rtattr->rta_type = tswap16(rtattr->rta_type);
2559 ret = target_to_host_rtattr(rtattr);
2560 if (ret < 0) {
2561 return ret;
2563 len -= RTA_ALIGN(rtattr->rta_len);
2564 rtattr = (struct rtattr *)(((char *)rtattr) +
2565 RTA_ALIGN(rtattr->rta_len));
2567 return 0;
2570 static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
2572 switch (rtattr->rta_type) {
2573 default:
2574 gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
2575 break;
2577 return 0;
2580 static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
2582 switch (rtattr->rta_type) {
2583 /* binary: depends on family type */
2584 case IFA_LOCAL:
2585 case IFA_ADDRESS:
2586 break;
2587 default:
2588 gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
2589 break;
2591 return 0;
2594 static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
2596 uint32_t *u32;
2597 switch (rtattr->rta_type) {
2598 /* binary: depends on family type */
2599 case RTA_DST:
2600 case RTA_SRC:
2601 case RTA_GATEWAY:
2602 break;
2603 /* u32 */
2604 case RTA_PRIORITY:
2605 case RTA_OIF:
2606 u32 = RTA_DATA(rtattr);
2607 *u32 = tswap32(*u32);
2608 break;
2609 default:
2610 gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
2611 break;
2613 return 0;
2616 static void target_to_host_link_rtattr(struct rtattr *rtattr,
2617 uint32_t rtattr_len)
2619 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2620 target_to_host_data_link_rtattr);
2623 static void target_to_host_addr_rtattr(struct rtattr *rtattr,
2624 uint32_t rtattr_len)
2626 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2627 target_to_host_data_addr_rtattr);
2630 static void target_to_host_route_rtattr(struct rtattr *rtattr,
2631 uint32_t rtattr_len)
2633 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2634 target_to_host_data_route_rtattr);
2637 static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
2639 struct ifinfomsg *ifi;
2640 struct ifaddrmsg *ifa;
2641 struct rtmsg *rtm;
2643 switch (nlh->nlmsg_type) {
2644 case RTM_GETLINK:
2645 break;
2646 case RTM_NEWLINK:
2647 case RTM_DELLINK:
2648 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2649 ifi = NLMSG_DATA(nlh);
2650 ifi->ifi_type = tswap16(ifi->ifi_type);
2651 ifi->ifi_index = tswap32(ifi->ifi_index);
2652 ifi->ifi_flags = tswap32(ifi->ifi_flags);
2653 ifi->ifi_change = tswap32(ifi->ifi_change);
2654 target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
2655 NLMSG_LENGTH(sizeof(*ifi)));
2657 break;
2658 case RTM_GETADDR:
2659 case RTM_NEWADDR:
2660 case RTM_DELADDR:
2661 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2662 ifa = NLMSG_DATA(nlh);
2663 ifa->ifa_index = tswap32(ifa->ifa_index);
2664 target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
2665 NLMSG_LENGTH(sizeof(*ifa)));
2667 break;
2668 case RTM_GETROUTE:
2669 break;
2670 case RTM_NEWROUTE:
2671 case RTM_DELROUTE:
2672 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2673 rtm = NLMSG_DATA(nlh);
2674 rtm->rtm_flags = tswap32(rtm->rtm_flags);
2675 target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
2676 NLMSG_LENGTH(sizeof(*rtm)));
2678 break;
2679 default:
2680 return -TARGET_EOPNOTSUPP;
2682 return 0;
2685 static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
2687 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
2689 #endif /* CONFIG_RTNETLINK */
2691 static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
2693 switch (nlh->nlmsg_type) {
2694 default:
2695 gemu_log("Unknown host audit message type %d\n",
2696 nlh->nlmsg_type);
2697 return -TARGET_EINVAL;
2699 return 0;
2702 static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
2703 size_t len)
2705 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
2708 static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
2710 switch (nlh->nlmsg_type) {
2711 case AUDIT_USER:
2712 case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
2713 case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
2714 break;
2715 default:
2716 gemu_log("Unknown target audit message type %d\n",
2717 nlh->nlmsg_type);
2718 return -TARGET_EINVAL;
2721 return 0;
2724 static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
2726 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
2729 /* do_setsockopt() Must return target values and target errnos. */
2730 static abi_long do_setsockopt(int sockfd, int level, int optname,
2731 abi_ulong optval_addr, socklen_t optlen)
2733 abi_long ret;
2734 int val;
2735 struct ip_mreqn *ip_mreq;
2736 struct ip_mreq_source *ip_mreq_source;
2738 switch(level) {
2739 case SOL_TCP:
2740 /* TCP options all take an 'int' value. */
2741 if (optlen < sizeof(uint32_t))
2742 return -TARGET_EINVAL;
2744 if (get_user_u32(val, optval_addr))
2745 return -TARGET_EFAULT;
2746 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2747 break;
2748 case SOL_IP:
2749 switch(optname) {
2750 case IP_TOS:
2751 case IP_TTL:
2752 case IP_HDRINCL:
2753 case IP_ROUTER_ALERT:
2754 case IP_RECVOPTS:
2755 case IP_RETOPTS:
2756 case IP_PKTINFO:
2757 case IP_MTU_DISCOVER:
2758 case IP_RECVERR:
2759 case IP_RECVTOS:
2760 #ifdef IP_FREEBIND
2761 case IP_FREEBIND:
2762 #endif
2763 case IP_MULTICAST_TTL:
2764 case IP_MULTICAST_LOOP:
2765 val = 0;
2766 if (optlen >= sizeof(uint32_t)) {
2767 if (get_user_u32(val, optval_addr))
2768 return -TARGET_EFAULT;
2769 } else if (optlen >= 1) {
2770 if (get_user_u8(val, optval_addr))
2771 return -TARGET_EFAULT;
2773 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2774 break;
2775 case IP_ADD_MEMBERSHIP:
2776 case IP_DROP_MEMBERSHIP:
2777 if (optlen < sizeof (struct target_ip_mreq) ||
2778 optlen > sizeof (struct target_ip_mreqn))
2779 return -TARGET_EINVAL;
2781 ip_mreq = (struct ip_mreqn *) alloca(optlen);
2782 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2783 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2784 break;
2786 case IP_BLOCK_SOURCE:
2787 case IP_UNBLOCK_SOURCE:
2788 case IP_ADD_SOURCE_MEMBERSHIP:
2789 case IP_DROP_SOURCE_MEMBERSHIP:
2790 if (optlen != sizeof (struct target_ip_mreq_source))
2791 return -TARGET_EINVAL;
2793 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2794 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2795 unlock_user (ip_mreq_source, optval_addr, 0);
2796 break;
2798 default:
2799 goto unimplemented;
2801 break;
2802 case SOL_IPV6:
2803 switch (optname) {
2804 case IPV6_MTU_DISCOVER:
2805 case IPV6_MTU:
2806 case IPV6_V6ONLY:
2807 case IPV6_RECVPKTINFO:
2808 val = 0;
2809 if (optlen < sizeof(uint32_t)) {
2810 return -TARGET_EINVAL;
2812 if (get_user_u32(val, optval_addr)) {
2813 return -TARGET_EFAULT;
2815 ret = get_errno(setsockopt(sockfd, level, optname,
2816 &val, sizeof(val)));
2817 break;
2818 default:
2819 goto unimplemented;
2821 break;
2822 case SOL_RAW:
2823 switch (optname) {
2824 case ICMP_FILTER:
2825 /* struct icmp_filter takes an u32 value */
2826 if (optlen < sizeof(uint32_t)) {
2827 return -TARGET_EINVAL;
2830 if (get_user_u32(val, optval_addr)) {
2831 return -TARGET_EFAULT;
2833 ret = get_errno(setsockopt(sockfd, level, optname,
2834 &val, sizeof(val)));
2835 break;
2837 default:
2838 goto unimplemented;
2840 break;
2841 case TARGET_SOL_SOCKET:
2842 switch (optname) {
2843 case TARGET_SO_RCVTIMEO:
2845 struct timeval tv;
2847 optname = SO_RCVTIMEO;
2849 set_timeout:
2850 if (optlen != sizeof(struct target_timeval)) {
2851 return -TARGET_EINVAL;
2854 if (copy_from_user_timeval(&tv, optval_addr)) {
2855 return -TARGET_EFAULT;
2858 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2859 &tv, sizeof(tv)));
2860 return ret;
2862 case TARGET_SO_SNDTIMEO:
2863 optname = SO_SNDTIMEO;
2864 goto set_timeout;
2865 case TARGET_SO_ATTACH_FILTER:
2867 struct target_sock_fprog *tfprog;
2868 struct target_sock_filter *tfilter;
2869 struct sock_fprog fprog;
2870 struct sock_filter *filter;
2871 int i;
2873 if (optlen != sizeof(*tfprog)) {
2874 return -TARGET_EINVAL;
2876 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2877 return -TARGET_EFAULT;
2879 if (!lock_user_struct(VERIFY_READ, tfilter,
2880 tswapal(tfprog->filter), 0)) {
2881 unlock_user_struct(tfprog, optval_addr, 1);
2882 return -TARGET_EFAULT;
2885 fprog.len = tswap16(tfprog->len);
2886 filter = g_try_new(struct sock_filter, fprog.len);
2887 if (filter == NULL) {
2888 unlock_user_struct(tfilter, tfprog->filter, 1);
2889 unlock_user_struct(tfprog, optval_addr, 1);
2890 return -TARGET_ENOMEM;
2892 for (i = 0; i < fprog.len; i++) {
2893 filter[i].code = tswap16(tfilter[i].code);
2894 filter[i].jt = tfilter[i].jt;
2895 filter[i].jf = tfilter[i].jf;
2896 filter[i].k = tswap32(tfilter[i].k);
2898 fprog.filter = filter;
2900 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2901 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2902 g_free(filter);
2904 unlock_user_struct(tfilter, tfprog->filter, 1);
2905 unlock_user_struct(tfprog, optval_addr, 1);
2906 return ret;
2908 case TARGET_SO_BINDTODEVICE:
2910 char *dev_ifname, *addr_ifname;
2912 if (optlen > IFNAMSIZ - 1) {
2913 optlen = IFNAMSIZ - 1;
2915 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2916 if (!dev_ifname) {
2917 return -TARGET_EFAULT;
2919 optname = SO_BINDTODEVICE;
2920 addr_ifname = alloca(IFNAMSIZ);
2921 memcpy(addr_ifname, dev_ifname, optlen);
2922 addr_ifname[optlen] = 0;
2923 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2924 addr_ifname, optlen));
2925 unlock_user (dev_ifname, optval_addr, 0);
2926 return ret;
2928 /* Options with 'int' argument. */
2929 case TARGET_SO_DEBUG:
2930 optname = SO_DEBUG;
2931 break;
2932 case TARGET_SO_REUSEADDR:
2933 optname = SO_REUSEADDR;
2934 break;
2935 case TARGET_SO_TYPE:
2936 optname = SO_TYPE;
2937 break;
2938 case TARGET_SO_ERROR:
2939 optname = SO_ERROR;
2940 break;
2941 case TARGET_SO_DONTROUTE:
2942 optname = SO_DONTROUTE;
2943 break;
2944 case TARGET_SO_BROADCAST:
2945 optname = SO_BROADCAST;
2946 break;
2947 case TARGET_SO_SNDBUF:
2948 optname = SO_SNDBUF;
2949 break;
2950 case TARGET_SO_SNDBUFFORCE:
2951 optname = SO_SNDBUFFORCE;
2952 break;
2953 case TARGET_SO_RCVBUF:
2954 optname = SO_RCVBUF;
2955 break;
2956 case TARGET_SO_RCVBUFFORCE:
2957 optname = SO_RCVBUFFORCE;
2958 break;
2959 case TARGET_SO_KEEPALIVE:
2960 optname = SO_KEEPALIVE;
2961 break;
2962 case TARGET_SO_OOBINLINE:
2963 optname = SO_OOBINLINE;
2964 break;
2965 case TARGET_SO_NO_CHECK:
2966 optname = SO_NO_CHECK;
2967 break;
2968 case TARGET_SO_PRIORITY:
2969 optname = SO_PRIORITY;
2970 break;
2971 #ifdef SO_BSDCOMPAT
2972 case TARGET_SO_BSDCOMPAT:
2973 optname = SO_BSDCOMPAT;
2974 break;
2975 #endif
2976 case TARGET_SO_PASSCRED:
2977 optname = SO_PASSCRED;
2978 break;
2979 case TARGET_SO_PASSSEC:
2980 optname = SO_PASSSEC;
2981 break;
2982 case TARGET_SO_TIMESTAMP:
2983 optname = SO_TIMESTAMP;
2984 break;
2985 case TARGET_SO_RCVLOWAT:
2986 optname = SO_RCVLOWAT;
2987 break;
2988 break;
2989 default:
2990 goto unimplemented;
2992 if (optlen < sizeof(uint32_t))
2993 return -TARGET_EINVAL;
2995 if (get_user_u32(val, optval_addr))
2996 return -TARGET_EFAULT;
2997 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2998 break;
2999 default:
3000 unimplemented:
3001 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
3002 ret = -TARGET_ENOPROTOOPT;
3004 return ret;
3007 /* do_getsockopt() Must return target values and target errnos. */
3008 static abi_long do_getsockopt(int sockfd, int level, int optname,
3009 abi_ulong optval_addr, abi_ulong optlen)
3011 abi_long ret;
3012 int len, val;
3013 socklen_t lv;
3015 switch(level) {
3016 case TARGET_SOL_SOCKET:
3017 level = SOL_SOCKET;
3018 switch (optname) {
3019 /* These don't just return a single integer */
3020 case TARGET_SO_LINGER:
3021 case TARGET_SO_RCVTIMEO:
3022 case TARGET_SO_SNDTIMEO:
3023 case TARGET_SO_PEERNAME:
3024 goto unimplemented;
3025 case TARGET_SO_PEERCRED: {
3026 struct ucred cr;
3027 socklen_t crlen;
3028 struct target_ucred *tcr;
3030 if (get_user_u32(len, optlen)) {
3031 return -TARGET_EFAULT;
3033 if (len < 0) {
3034 return -TARGET_EINVAL;
3037 crlen = sizeof(cr);
3038 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
3039 &cr, &crlen));
3040 if (ret < 0) {
3041 return ret;
3043 if (len > crlen) {
3044 len = crlen;
3046 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
3047 return -TARGET_EFAULT;
3049 __put_user(cr.pid, &tcr->pid);
3050 __put_user(cr.uid, &tcr->uid);
3051 __put_user(cr.gid, &tcr->gid);
3052 unlock_user_struct(tcr, optval_addr, 1);
3053 if (put_user_u32(len, optlen)) {
3054 return -TARGET_EFAULT;
3056 break;
3058 /* Options with 'int' argument. */
3059 case TARGET_SO_DEBUG:
3060 optname = SO_DEBUG;
3061 goto int_case;
3062 case TARGET_SO_REUSEADDR:
3063 optname = SO_REUSEADDR;
3064 goto int_case;
3065 case TARGET_SO_TYPE:
3066 optname = SO_TYPE;
3067 goto int_case;
3068 case TARGET_SO_ERROR:
3069 optname = SO_ERROR;
3070 goto int_case;
3071 case TARGET_SO_DONTROUTE:
3072 optname = SO_DONTROUTE;
3073 goto int_case;
3074 case TARGET_SO_BROADCAST:
3075 optname = SO_BROADCAST;
3076 goto int_case;
3077 case TARGET_SO_SNDBUF:
3078 optname = SO_SNDBUF;
3079 goto int_case;
3080 case TARGET_SO_RCVBUF:
3081 optname = SO_RCVBUF;
3082 goto int_case;
3083 case TARGET_SO_KEEPALIVE:
3084 optname = SO_KEEPALIVE;
3085 goto int_case;
3086 case TARGET_SO_OOBINLINE:
3087 optname = SO_OOBINLINE;
3088 goto int_case;
3089 case TARGET_SO_NO_CHECK:
3090 optname = SO_NO_CHECK;
3091 goto int_case;
3092 case TARGET_SO_PRIORITY:
3093 optname = SO_PRIORITY;
3094 goto int_case;
3095 #ifdef SO_BSDCOMPAT
3096 case TARGET_SO_BSDCOMPAT:
3097 optname = SO_BSDCOMPAT;
3098 goto int_case;
3099 #endif
3100 case TARGET_SO_PASSCRED:
3101 optname = SO_PASSCRED;
3102 goto int_case;
3103 case TARGET_SO_TIMESTAMP:
3104 optname = SO_TIMESTAMP;
3105 goto int_case;
3106 case TARGET_SO_RCVLOWAT:
3107 optname = SO_RCVLOWAT;
3108 goto int_case;
3109 case TARGET_SO_ACCEPTCONN:
3110 optname = SO_ACCEPTCONN;
3111 goto int_case;
3112 default:
3113 goto int_case;
3115 break;
3116 case SOL_TCP:
3117 /* TCP options all take an 'int' value. */
3118 int_case:
3119 if (get_user_u32(len, optlen))
3120 return -TARGET_EFAULT;
3121 if (len < 0)
3122 return -TARGET_EINVAL;
3123 lv = sizeof(lv);
3124 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
3125 if (ret < 0)
3126 return ret;
3127 if (optname == SO_TYPE) {
3128 val = host_to_target_sock_type(val);
3130 if (len > lv)
3131 len = lv;
3132 if (len == 4) {
3133 if (put_user_u32(val, optval_addr))
3134 return -TARGET_EFAULT;
3135 } else {
3136 if (put_user_u8(val, optval_addr))
3137 return -TARGET_EFAULT;
3139 if (put_user_u32(len, optlen))
3140 return -TARGET_EFAULT;
3141 break;
3142 case SOL_IP:
3143 switch(optname) {
3144 case IP_TOS:
3145 case IP_TTL:
3146 case IP_HDRINCL:
3147 case IP_ROUTER_ALERT:
3148 case IP_RECVOPTS:
3149 case IP_RETOPTS:
3150 case IP_PKTINFO:
3151 case IP_MTU_DISCOVER:
3152 case IP_RECVERR:
3153 case IP_RECVTOS:
3154 #ifdef IP_FREEBIND
3155 case IP_FREEBIND:
3156 #endif
3157 case IP_MULTICAST_TTL:
3158 case IP_MULTICAST_LOOP:
3159 if (get_user_u32(len, optlen))
3160 return -TARGET_EFAULT;
3161 if (len < 0)
3162 return -TARGET_EINVAL;
3163 lv = sizeof(lv);
3164 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
3165 if (ret < 0)
3166 return ret;
3167 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
3168 len = 1;
3169 if (put_user_u32(len, optlen)
3170 || put_user_u8(val, optval_addr))
3171 return -TARGET_EFAULT;
3172 } else {
3173 if (len > sizeof(int))
3174 len = sizeof(int);
3175 if (put_user_u32(len, optlen)
3176 || put_user_u32(val, optval_addr))
3177 return -TARGET_EFAULT;
3179 break;
3180 default:
3181 ret = -TARGET_ENOPROTOOPT;
3182 break;
3184 break;
3185 default:
3186 unimplemented:
3187 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
3188 level, optname);
3189 ret = -TARGET_EOPNOTSUPP;
3190 break;
3192 return ret;
3195 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3196 abi_ulong count, int copy)
3198 struct target_iovec *target_vec;
3199 struct iovec *vec;
3200 abi_ulong total_len, max_len;
3201 int i;
3202 int err = 0;
3203 bool bad_address = false;
3205 if (count == 0) {
3206 errno = 0;
3207 return NULL;
3209 if (count > IOV_MAX) {
3210 errno = EINVAL;
3211 return NULL;
3214 vec = g_try_new0(struct iovec, count);
3215 if (vec == NULL) {
3216 errno = ENOMEM;
3217 return NULL;
3220 target_vec = lock_user(VERIFY_READ, target_addr,
3221 count * sizeof(struct target_iovec), 1);
3222 if (target_vec == NULL) {
3223 err = EFAULT;
3224 goto fail2;
3227 /* ??? If host page size > target page size, this will result in a
3228 value larger than what we can actually support. */
3229 max_len = 0x7fffffff & TARGET_PAGE_MASK;
3230 total_len = 0;
3232 for (i = 0; i < count; i++) {
3233 abi_ulong base = tswapal(target_vec[i].iov_base);
3234 abi_long len = tswapal(target_vec[i].iov_len);
3236 if (len < 0) {
3237 err = EINVAL;
3238 goto fail;
3239 } else if (len == 0) {
3240 /* Zero length pointer is ignored. */
3241 vec[i].iov_base = 0;
3242 } else {
3243 vec[i].iov_base = lock_user(type, base, len, copy);
3244 /* If the first buffer pointer is bad, this is a fault. But
3245 * subsequent bad buffers will result in a partial write; this
3246 * is realized by filling the vector with null pointers and
3247 * zero lengths. */
3248 if (!vec[i].iov_base) {
3249 if (i == 0) {
3250 err = EFAULT;
3251 goto fail;
3252 } else {
3253 bad_address = true;
3256 if (bad_address) {
3257 len = 0;
3259 if (len > max_len - total_len) {
3260 len = max_len - total_len;
3263 vec[i].iov_len = len;
3264 total_len += len;
3267 unlock_user(target_vec, target_addr, 0);
3268 return vec;
3270 fail:
3271 while (--i >= 0) {
3272 if (tswapal(target_vec[i].iov_len) > 0) {
3273 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3276 unlock_user(target_vec, target_addr, 0);
3277 fail2:
3278 g_free(vec);
3279 errno = err;
3280 return NULL;
/*
 * unlock_iovec: release the guest-memory buffers pinned by lock_iovec().
 * Re-reads the target iovec array to recover each guest base address.
 * 'copy' selects whether modified host data is written back to the guest
 * (non-zero after a read-style syscall, zero after a write-style one).
 * Entries with negative lengths terminate the walk, mirroring lock_iovec()'s
 * validation; zero-length entries were stored as NULL and unlock_user()
 * tolerates them.
 */
3283 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3284 abi_ulong count, int copy)
3286 struct target_iovec *target_vec;
3287 int i;
3289 target_vec = lock_user(VERIFY_READ, target_addr,
3290 count * sizeof(struct target_iovec), 1)4;
3291 if (target_vec) {
3292 for (i = 0; i < count; i++) {
3293 abi_ulong base = tswapal(target_vec[i].iov_base);
3294 abi_long len = tswapal(target_vec[i].iov_len);
3295 if (len < 0) {
3296 break;
/* write back (and unpin) the individual buffer */
3298 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3300 unlock_user(target_vec, target_addr, 0);
/* the host-side iovec array itself was g_malloc'ed by lock_iovec() */
3303 g_free(vec);
/*
 * Translate a target socket type (base type plus TARGET_SOCK_CLOEXEC /
 * TARGET_SOCK_NONBLOCK flags) into the host's encoding, in place.
 * Returns 0 on success or -TARGET_EINVAL when the host cannot represent
 * a requested flag at socket-creation time (NONBLOCK may still be
 * emulated afterwards via sock_flags_fixup() when O_NONBLOCK exists).
 */
3306 static inline int target_to_host_sock_type(int *type)
3308 int host_type = 0;
3309 int target_type = *type;
3311 switch (target_type & TARGET_SOCK_TYPE_MASK) {
3312 case TARGET_SOCK_DGRAM:
3313 host_type = SOCK_DGRAM;
3314 break;
3315 case TARGET_SOCK_STREAM:
3316 host_type = SOCK_STREAM;
3317 break;
3318 default:
/* other base types happen to share numeric values on Linux hosts */
3319 host_type = target_type & TARGET_SOCK_TYPE_MASK;
3320 break;
3322 if (target_type & TARGET_SOCK_CLOEXEC) {
3323 #if defined(SOCK_CLOEXEC)
3324 host_type |= SOCK_CLOEXEC;
3325 #else
3326 return -TARGET_EINVAL;
3327 #endif
3329 if (target_type & TARGET_SOCK_NONBLOCK) {
3330 #if defined(SOCK_NONBLOCK)
3331 host_type |= SOCK_NONBLOCK;
3332 #elif !defined(O_NONBLOCK)
/* no way to emulate non-blocking sockets on this host at all */
3333 return -TARGET_EINVAL;
3334 #endif
3336 *type = host_type;
3337 return 0;
3340 /* Try to emulate socket type flags after socket creation. */
3341 static int sock_flags_fixup(int fd, int target_type)
3343 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3344 if (target_type & TARGET_SOCK_NONBLOCK) {
3345 int flags = fcntl(fd, F_GETFL);
3346 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3347 close(fd);
3348 return -TARGET_EINVAL;
3351 #endif
3352 return fd;
/*
 * Address translator registered for SOCK_PACKET fds: copy the raw
 * target sockaddr and byte-swap only sa_family.  The remaining bytes
 * (device name / spkt_protocol) are left as-is; spkt_protocol is
 * big-endian on both sides, so no swap is needed.
 */
3355 static abi_long packet_target_to_host_sockaddr(void *host_addr,
3356 abi_ulong target_addr,
3357 socklen_t len)
3359 struct sockaddr *addr = host_addr;
3360 struct target_sockaddr *target_saddr;
3362 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
3363 if (!target_saddr) {
3364 return -TARGET_EFAULT;
3367 memcpy(addr, target_saddr, len);
3368 addr->sa_family = tswap16(target_saddr->sa_family);
3369 /* spkt_protocol is big-endian */
3371 unlock_user(target_saddr, target_addr, 0);
3372 return 0;
/* fd translator installed by do_socket() for obsolete SOCK_PACKET fds */
3375 static TargetFdTrans target_packet_trans = {
3376 .target_to_host_addr = packet_target_to_host_sockaddr,
3379 #ifdef CONFIG_RTNETLINK
/*
 * Data translators for NETLINK_ROUTE sockets: convert the netlink
 * message stream between target and host byte order in place.  On
 * success the full buffer length is reported back to the caller so the
 * syscall result is unchanged; conversion errors are propagated.
 */
3380 static abi_long netlink_route_target_to_host(void *buf, size_t len)
3382 abi_long ret;
3384 ret = target_to_host_nlmsg_route(buf, len);
3385 if (ret < 0) {
3386 return ret;
3389 return len;
3392 static abi_long netlink_route_host_to_target(void *buf, size_t len)
3394 abi_long ret;
3396 ret = host_to_target_nlmsg_route(buf, len);
3397 if (ret < 0) {
3398 return ret;
3401 return len;
/* registered by do_socket() for PF_NETLINK/NETLINK_ROUTE fds */
3404 static TargetFdTrans target_netlink_route_trans = {
3405 .target_to_host_data = netlink_route_target_to_host,
3406 .host_to_target_data = netlink_route_host_to_target,
3408 #endif /* CONFIG_RTNETLINK */
/*
 * Data translators for NETLINK_AUDIT sockets, mirroring the
 * NETLINK_ROUTE pair above: byteswap the message stream in place and
 * report the original length on success.
 */
3410 static abi_long netlink_audit_target_to_host(void *buf, size_t len)
3412 abi_long ret;
3414 ret = target_to_host_nlmsg_audit(buf, len);
3415 if (ret < 0) {
3416 return ret;
3419 return len;
3422 static abi_long netlink_audit_host_to_target(void *buf, size_t len)
3424 abi_long ret;
3426 ret = host_to_target_nlmsg_audit(buf, len);
3427 if (ret < 0) {
3428 return ret;
3431 return len;
/* registered by do_socket() for PF_NETLINK/NETLINK_AUDIT fds */
3434 static TargetFdTrans target_netlink_audit_trans = {
3435 .target_to_host_data = netlink_audit_target_to_host,
3436 .host_to_target_data = netlink_audit_host_to_target,
3439 /* do_socket() Must return target values and target errnos. */
3440 static abi_long do_socket(int domain, int type, int protocol)
3442 int target_type = type;
3443 int ret;
3445 ret = target_to_host_sock_type(&type);
3446 if (ret) {
3447 return ret;
3450 if (domain == PF_NETLINK && !(
3451 #ifdef CONFIG_RTNETLINK
3452 protocol == NETLINK_ROUTE ||
3453 #endif
3454 protocol == NETLINK_KOBJECT_UEVENT ||
3455 protocol == NETLINK_AUDIT)) {
3456 return -EPFNOSUPPORT;
3459 if (domain == AF_PACKET ||
3460 (domain == AF_INET && type == SOCK_PACKET)) {
3461 protocol = tswap16(protocol);
3464 ret = get_errno(socket(domain, type, protocol));
3465 if (ret >= 0) {
3466 ret = sock_flags_fixup(ret, target_type);
3467 if (type == SOCK_PACKET) {
3468 /* Manage an obsolete case :
3469 * if socket type is SOCK_PACKET, bind by name
3471 fd_trans_register(ret, &target_packet_trans);
3472 } else if (domain == PF_NETLINK) {
3473 switch (protocol) {
3474 #ifdef CONFIG_RTNETLINK
3475 case NETLINK_ROUTE:
3476 fd_trans_register(ret, &target_netlink_route_trans);
3477 break;
3478 #endif
3479 case NETLINK_KOBJECT_UEVENT:
3480 /* nothing to do: messages are strings */
3481 break;
3482 case NETLINK_AUDIT:
3483 fd_trans_register(ret, &target_netlink_audit_trans);
3484 break;
3485 default:
3486 g_assert_not_reached();
3490 return ret;
3493 /* do_bind() Must return target values and target errnos. */
3494 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3495 socklen_t addrlen)
3497 void *addr;
3498 abi_long ret;
3500 if ((int)addrlen < 0) {
3501 return -TARGET_EINVAL;
3504 addr = alloca(addrlen+1);
3506 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3507 if (ret)
3508 return ret;
3510 return get_errno(bind(sockfd, addr, addrlen));
3513 /* do_connect() Must return target values and target errnos. */
3514 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3515 socklen_t addrlen)
3517 void *addr;
3518 abi_long ret;
3520 if ((int)addrlen < 0) {
3521 return -TARGET_EINVAL;
3524 addr = alloca(addrlen+1);
3526 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3527 if (ret)
3528 return ret;
3530 return get_errno(safe_connect(sockfd, addr, addrlen));
/* do_sendrecvmsg_locked() Must return target values and target errnos. */
/*
 * Core of sendmsg/recvmsg emulation: builds a host msghdr from the
 * already-locked target_msghdr 'msgp', dispatches to safe_sendmsg or
 * safe_recvmsg depending on 'send', and translates ancillary data and
 * the peer address back to the guest on receive.
 */
3534 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3535 int flags, int send)
3537 abi_long ret, len;
3538 struct msghdr msg;
3539 abi_ulong count;
3540 struct iovec *vec;
3541 abi_ulong target_vec;
3543 if (msgp->msg_name) {
3544 msg.msg_namelen = tswap32(msgp->msg_namelen);
3545 msg.msg_name = alloca(msg.msg_namelen+1);
3546 ret = target_to_host_sockaddr(fd, msg.msg_name,
3547 tswapal(msgp->msg_name),
3548 msg.msg_namelen);
3549 if (ret == -TARGET_EFAULT) {
3550 /* For connected sockets msg_name and msg_namelen must
3551 * be ignored, so returning EFAULT immediately is wrong.
3552 * Instead, pass a bad msg_name to the host kernel, and
3553 * let it decide whether to return EFAULT or not.
3555 msg.msg_name = (void *)-1;
3556 } else if (ret) {
3557 goto out2;
3559 } else {
3560 msg.msg_name = NULL;
3561 msg.msg_namelen = 0;
/* 2x: host cmsg headers may be larger than the target's encoding */
3563 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3564 msg.msg_control = alloca(msg.msg_controllen);
3565 msg.msg_flags = tswap32(msgp->msg_flags);
3567 count = tswapal(msgp->msg_iovlen);
3568 target_vec = tswapal(msgp->msg_iov);
3570 if (count > IOV_MAX) {
3571 /* sendrcvmsg returns a different errno for this condition than
3572 * readv/writev, so we must catch it here before lock_iovec() does.
3574 ret = -TARGET_EMSGSIZE;
3575 goto out2;
3578 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3579 target_vec, count, send);
3580 if (vec == NULL) {
/* lock_iovec reports failures through errno */
3581 ret = -host_to_target_errno(errno);
3582 goto out2;
3584 msg.msg_iovlen = count;
3585 msg.msg_iov = vec;
3587 if (send) {
3588 if (fd_trans_target_to_host_data(fd)) {
/* fd has a data translator (e.g. netlink): translate a private
 * copy of the first iovec element before sending */
3589 void *host_msg;
3591 host_msg = g_malloc(msg.msg_iov->iov_len);
3592 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3593 ret = fd_trans_target_to_host_data(fd)(host_msg,
3594 msg.msg_iov->iov_len);
3595 if (ret >= 0) {
3596 msg.msg_iov->iov_base = host_msg;
3597 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3599 g_free(host_msg);
3600 } else {
3601 ret = target_to_host_cmsg(&msg, msgp);
3602 if (ret == 0) {
3603 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3606 } else {
3607 ret = get_errno(safe_recvmsg(fd, &msg, flags));
3608 if (!is_error(ret)) {
3609 len = ret;
3610 if (fd_trans_host_to_target_data(fd)) {
3611 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3612 len);
3613 } else {
3614 ret = host_to_target_cmsg(msgp, &msg);
3616 if (!is_error(ret)) {
3617 msgp->msg_namelen = tswap32(msg.msg_namelen);
3618 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3619 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3620 msg.msg_name, msg.msg_namelen);
3621 if (ret) {
3622 goto out;
/* on success, the syscall result is the received byte count */
3626 ret = len;
3631 out:
3632 unlock_iovec(vec, target_vec, count, !send);
3633 out2:
3634 return ret;
/*
 * sendmsg/recvmsg entry point: lock the guest msghdr struct (read-only
 * for send, read/write for receive) and delegate to
 * do_sendrecvmsg_locked(), which does all the translation work.
 */
3637 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3638 int flags, int send)
3640 abi_long ret;
3641 struct target_msghdr *msgp;
3643 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3644 msgp,
3645 target_msg,
3646 send ? 1 : 0)) {
3647 return -TARGET_EFAULT;
3649 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
/* copy the struct back to the guest only on the receive path */
3650 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3651 return ret;
3654 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3655 * so it might not have this *mmsg-specific flag either.
3657 #ifndef MSG_WAITFORONE
3658 #define MSG_WAITFORONE 0x10000
3659 #endif
/*
 * Emulate sendmmsg/recvmmsg by looping over do_sendrecvmsg_locked() on
 * each mmsghdr entry, matching the kernel's semantics: per-message byte
 * counts go into msg_len, MSG_WAITFORONE switches to non-blocking after
 * the first received packet, and if any datagrams were processed before
 * an error the count (not the error) is returned.
 */
3661 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3662 unsigned int vlen, unsigned int flags,
3663 int send)
3665 struct target_mmsghdr *mmsgp;
3666 abi_long ret = 0;
3667 int i;
/* kernel silently caps the vector length the same way */
3669 if (vlen > UIO_MAXIOV) {
3670 vlen = UIO_MAXIOV;
3673 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3674 if (!mmsgp) {
3675 return -TARGET_EFAULT;
3678 for (i = 0; i < vlen; i++) {
3679 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3680 if (is_error(ret)) {
3681 break;
3683 mmsgp[i].msg_len = tswap32(ret);
3684 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3685 if (flags & MSG_WAITFORONE) {
3686 flags |= MSG_DONTWAIT;
/* write back only the entries actually processed */
3690 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3692 /* Return number of datagrams sent if we sent any at all;
3693 * otherwise return the error.
3695 if (i) {
3696 return i;
3698 return ret;
3701 /* do_accept4() Must return target values and target errnos. */
/*
 * accept/accept4 emulation: translate the flag bits, call the host
 * accept4(), then copy the peer address and its (possibly shrunk)
 * length back to the guest.  A NULL guest addr skips all address
 * handling, matching kernel behaviour.
 */
3702 static abi_long do_accept4(int fd, abi_ulong target_addr,
3703 abi_ulong target_addrlen_addr, int flags)
3705 socklen_t addrlen;
3706 void *addr;
3707 abi_long ret;
3708 int host_flags;
3710 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3712 if (target_addr == 0) {
3713 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3716 /* linux returns EINVAL if addrlen pointer is invalid */
3717 if (get_user_u32(addrlen, target_addrlen_addr))
3718 return -TARGET_EINVAL;
3720 if ((int)addrlen < 0) {
3721 return -TARGET_EINVAL;
/* EINVAL (not EFAULT) for an unwritable addr buffer, as Linux does */
3724 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3725 return -TARGET_EINVAL;
3727 addr = alloca(addrlen);
3729 ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
3730 if (!is_error(ret)) {
3731 host_to_target_sockaddr(target_addr, addr, addrlen);
3732 if (put_user_u32(addrlen, target_addrlen_addr))
3733 ret = -TARGET_EFAULT;
3735 return ret;
3738 /* do_getpeername() Must return target values and target errnos. */
/*
 * getpeername emulation: read the guest's addrlen, validate it, fetch
 * the peer address into a host stack buffer, and write the translated
 * address plus updated length back to the guest.
 */
3739 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3740 abi_ulong target_addrlen_addr)
3742 socklen_t addrlen;
3743 void *addr;
3744 abi_long ret;
3746 if (get_user_u32(addrlen, target_addrlen_addr))
3747 return -TARGET_EFAULT;
3749 if ((int)addrlen < 0) {
3750 return -TARGET_EINVAL;
3753 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3754 return -TARGET_EFAULT;
3756 addr = alloca(addrlen);
3758 ret = get_errno(getpeername(fd, addr, &addrlen));
3759 if (!is_error(ret)) {
3760 host_to_target_sockaddr(target_addr, addr, addrlen);
3761 if (put_user_u32(addrlen, target_addrlen_addr))
3762 ret = -TARGET_EFAULT;
3764 return ret;
3767 /* do_getsockname() Must return target values and target errnos. */
/*
 * getsockname emulation; identical structure to do_getpeername() above,
 * but queries the local address of the socket instead of the peer's.
 */
3768 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3769 abi_ulong target_addrlen_addr)
3771 socklen_t addrlen;
3772 void *addr;
3773 abi_long ret;
3775 if (get_user_u32(addrlen, target_addrlen_addr))
3776 return -TARGET_EFAULT;
3778 if ((int)addrlen < 0) {
3779 return -TARGET_EINVAL;
3782 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3783 return -TARGET_EFAULT;
3785 addr = alloca(addrlen);
3787 ret = get_errno(getsockname(fd, addr, &addrlen));
3788 if (!is_error(ret)) {
3789 host_to_target_sockaddr(target_addr, addr, addrlen);
3790 if (put_user_u32(addrlen, target_addrlen_addr))
3791 ret = -TARGET_EFAULT;
3793 return ret;
3796 /* do_socketpair() Must return target values and target errnos. */
3797 static abi_long do_socketpair(int domain, int type, int protocol,
3798 abi_ulong target_tab_addr)
3800 int tab[2];
3801 abi_long ret;
3803 target_to_host_sock_type(&type);
3805 ret = get_errno(socketpair(domain, type, protocol, tab));
3806 if (!is_error(ret)) {
3807 if (put_user_s32(tab[0], target_tab_addr)
3808 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3809 ret = -TARGET_EFAULT;
3811 return ret;
3814 /* do_sendto() Must return target values and target errnos. */
/*
 * send/sendto emulation: pin the guest payload, run it through the fd's
 * data translator if one is registered (on a private copy, so the guest
 * buffer is untouched), translate the optional destination address, and
 * call the host sendto().
 */
3815 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3816 abi_ulong target_addr, socklen_t addrlen)
3818 void *addr;
3819 void *host_msg;
/* non-NULL iff we substituted a translated copy for host_msg */
3820 void *copy_msg = NULL;
3821 abi_long ret;
3823 if ((int)addrlen < 0) {
3824 return -TARGET_EINVAL;
3827 host_msg = lock_user(VERIFY_READ, msg, len, 1);
3828 if (!host_msg)
3829 return -TARGET_EFAULT;
3830 if (fd_trans_target_to_host_data(fd)) {
3831 copy_msg = host_msg;
3832 host_msg = g_malloc(len);
3833 memcpy(host_msg, copy_msg, len);
3834 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3835 if (ret < 0) {
3836 goto fail;
3839 if (target_addr) {
3840 addr = alloca(addrlen+1);
3841 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3842 if (ret) {
3843 goto fail;
3845 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3846 } else {
3847 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3849 fail:
/* restore the original locked pointer before unlocking the guest page */
3850 if (copy_msg) {
3851 g_free(host_msg);
3852 host_msg = copy_msg;
3854 unlock_user(host_msg, msg, 0);
3855 return ret;
3858 /* do_recvfrom() Must return target values and target errnos. */
/*
 * recv/recvfrom emulation: pin the guest buffer for writing, receive
 * into it directly, translate the payload in place via the fd's data
 * translator if any, and copy the source address back to the guest when
 * one was requested.
 */
3859 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3860 abi_ulong target_addr,
3861 abi_ulong target_addrlen)
3863 socklen_t addrlen;
3864 void *addr;
3865 void *host_msg;
3866 abi_long ret;
3868 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3869 if (!host_msg)
3870 return -TARGET_EFAULT;
3871 if (target_addr) {
3872 if (get_user_u32(addrlen, target_addrlen)) {
3873 ret = -TARGET_EFAULT;
3874 goto fail;
3876 if ((int)addrlen < 0) {
3877 ret = -TARGET_EINVAL;
3878 goto fail;
3880 addr = alloca(addrlen);
3881 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3882 addr, &addrlen));
3883 } else {
3884 addr = NULL; /* To keep compiler quiet. */
3885 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3887 if (!is_error(ret)) {
3888 if (fd_trans_host_to_target_data(fd)) {
3889 ret = fd_trans_host_to_target_data(fd)(host_msg, ret);
3891 if (target_addr) {
3892 host_to_target_sockaddr(target_addr, addr, addrlen);
3893 if (put_user_u32(addrlen, target_addrlen)) {
3894 ret = -TARGET_EFAULT;
3895 goto fail;
/* success: write the received bytes back to the guest buffer */
3898 unlock_user(host_msg, msg, len);
3899 } else {
3900 fail:
/* error: unlock without copying anything back */
3901 unlock_user(host_msg, msg, 0);
3903 return ret;
3906 #ifdef TARGET_NR_socketcall
3907 /* do_socketcall() must return target values and target errnos. */
/*
 * Demultiplexer for the legacy socketcall(2) syscall used by some
 * targets (e.g. 32-bit x86): fetch the per-operation argument vector
 * from guest memory and dispatch to the individual do_* helpers above.
 */
3908 static abi_long do_socketcall(int num, abi_ulong vptr)
3910 static const unsigned nargs[] = { /* number of arguments per operation */
3911 [TARGET_SYS_SOCKET] = 3, /* domain, type, protocol */
3912 [TARGET_SYS_BIND] = 3, /* fd, addr, addrlen */
3913 [TARGET_SYS_CONNECT] = 3, /* fd, addr, addrlen */
3914 [TARGET_SYS_LISTEN] = 2, /* fd, backlog */
3915 [TARGET_SYS_ACCEPT] = 3, /* fd, addr, addrlen */
3916 [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3917 [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3918 [TARGET_SYS_SOCKETPAIR] = 4, /* domain, type, protocol, tab */
3919 [TARGET_SYS_SEND] = 4, /* fd, msg, len, flags */
3920 [TARGET_SYS_RECV] = 4, /* fd, msg, len, flags */
3921 [TARGET_SYS_SENDTO] = 6, /* fd, msg, len, flags, addr, addrlen */
3922 [TARGET_SYS_RECVFROM] = 6, /* fd, msg, len, flags, addr, addrlen */
3923 [TARGET_SYS_SHUTDOWN] = 2, /* fd, how */
3924 [TARGET_SYS_SETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3925 [TARGET_SYS_GETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3926 [TARGET_SYS_SENDMSG] = 3, /* fd, msg, flags */
3927 [TARGET_SYS_RECVMSG] = 3, /* fd, msg, flags */
3928 [TARGET_SYS_ACCEPT4] = 4, /* fd, addr, addrlen, flags */
3929 [TARGET_SYS_RECVMMSG] = 4, /* fd, msgvec, vlen, flags */
3930 [TARGET_SYS_SENDMMSG] = 4, /* fd, msgvec, vlen, flags */
3932 abi_long a[6]; /* max 6 args */
3933 unsigned i;
3935 /* check the range of the first argument num */
3936 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3937 if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3938 return -TARGET_EINVAL;
3940 /* ensure we have space for args */
3941 if (nargs[num] > ARRAY_SIZE(a)) {
3942 return -TARGET_EINVAL;
3944 /* collect the arguments in a[] according to nargs[] */
3945 for (i = 0; i < nargs[num]; ++i) {
3946 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3947 return -TARGET_EFAULT;
3950 /* now when we have the args, invoke the appropriate underlying function */
3951 switch (num) {
3952 case TARGET_SYS_SOCKET: /* domain, type, protocol */
3953 return do_socket(a[0], a[1], a[2]);
3954 case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3955 return do_bind(a[0], a[1], a[2]);
3956 case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3957 return do_connect(a[0], a[1], a[2]);
3958 case TARGET_SYS_LISTEN: /* sockfd, backlog */
3959 return get_errno(listen(a[0], a[1]));
3960 case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3961 return do_accept4(a[0], a[1], a[2], 0);
3962 case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3963 return do_getsockname(a[0], a[1], a[2]);
3964 case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3965 return do_getpeername(a[0], a[1], a[2]);
3966 case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3967 return do_socketpair(a[0], a[1], a[2], a[3]);
3968 case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3969 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3970 case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3971 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3972 case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3973 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3974 case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3975 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3976 case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3977 return get_errno(shutdown(a[0], a[1]));
3978 case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3979 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3980 case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3981 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3982 case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3983 return do_sendrecvmsg(a[0], a[1], a[2], 1);
3984 case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3985 return do_sendrecvmsg(a[0], a[1], a[2], 0);
3986 case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3987 return do_accept4(a[0], a[1], a[2], a[3]);
3988 case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3989 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3990 case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3991 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3992 default:
3993 gemu_log("Unsupported socketcall: %d\n", num);
3994 return -TARGET_EINVAL;
3997 #endif
/* Fixed-size table tracking guest shmat() mappings so shmdt() can
 * find the size to unmap. */
3999 #define N_SHM_REGIONS 32
4001 static struct shm_region {
4002 abi_ulong start;
4003 abi_ulong size;
4004 bool in_use;
4005 } shm_regions[N_SHM_REGIONS];
4007 #ifndef TARGET_SEMID64_DS
4008 /* asm-generic version of this struct */
/* Guest-ABI layout of semid64_ds for targets that don't provide their
 * own definition; the 32-bit padding words mirror the kernel's
 * asm-generic header. */
4009 struct target_semid64_ds
4011 struct target_ipc_perm sem_perm;
4012 abi_ulong sem_otime;
4013 #if TARGET_ABI_BITS == 32
4014 abi_ulong __unused1;
4015 #endif
4016 abi_ulong sem_ctime;
4017 #if TARGET_ABI_BITS == 32
4018 abi_ulong __unused2;
4019 #endif
4020 abi_ulong sem_nsems;
4021 abi_ulong __unused3;
4022 abi_ulong __unused4;
4024 #endif
/*
 * Copy a guest ipc_perm (embedded at the head of a guest semid64_ds at
 * 'target_addr') into the host struct, byte-swapping each field.  mode
 * and __seq are 16-bit on most ABIs but 32-bit on Alpha/MIPS/PPC.
 * Returns 0 or -TARGET_EFAULT.
 */
4026 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
4027 abi_ulong target_addr)
4029 struct target_ipc_perm *target_ip;
4030 struct target_semid64_ds *target_sd;
4032 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4033 return -TARGET_EFAULT;
4034 target_ip = &(target_sd->sem_perm);
4035 host_ip->__key = tswap32(target_ip->__key);
4036 host_ip->uid = tswap32(target_ip->uid);
4037 host_ip->gid = tswap32(target_ip->gid);
4038 host_ip->cuid = tswap32(target_ip->cuid);
4039 host_ip->cgid = tswap32(target_ip->cgid);
4040 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4041 host_ip->mode = tswap32(target_ip->mode);
4042 #else
4043 host_ip->mode = tswap16(target_ip->mode);
4044 #endif
4045 #if defined(TARGET_PPC)
4046 host_ip->__seq = tswap32(target_ip->__seq);
4047 #else
4048 host_ip->__seq = tswap16(target_ip->__seq);
4049 #endif
4050 unlock_user_struct(target_sd, target_addr, 0);
4051 return 0;
/*
 * Inverse of target_to_host_ipc_perm(): write the host ipc_perm fields
 * back into the guest semid64_ds at 'target_addr', applying the same
 * per-target field-width rules.  Returns 0 or -TARGET_EFAULT.
 */
4054 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
4055 struct ipc_perm *host_ip)
4057 struct target_ipc_perm *target_ip;
4058 struct target_semid64_ds *target_sd;
4060 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4061 return -TARGET_EFAULT;
4062 target_ip = &(target_sd->sem_perm);
4063 target_ip->__key = tswap32(host_ip->__key);
4064 target_ip->uid = tswap32(host_ip->uid);
4065 target_ip->gid = tswap32(host_ip->gid);
4066 target_ip->cuid = tswap32(host_ip->cuid);
4067 target_ip->cgid = tswap32(host_ip->cgid);
4068 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4069 target_ip->mode = tswap32(host_ip->mode);
4070 #else
4071 target_ip->mode = tswap16(host_ip->mode);
4072 #endif
4073 #if defined(TARGET_PPC)
4074 target_ip->__seq = tswap32(host_ip->__seq);
4075 #else
4076 target_ip->__seq = tswap16(host_ip->__seq);
4077 #endif
4078 unlock_user_struct(target_sd, target_addr, 1);
4079 return 0;
/*
 * Convert a guest semid64_ds into the host semid_ds: the embedded
 * ipc_perm is handled by target_to_host_ipc_perm() (which re-locks the
 * same guest struct), then the scalar fields are swapped.
 * NOTE(review): on the nested-EFAULT path the outer lock_user_struct is
 * not released — looks like a lock leak; confirm against lock_user
 * semantics before relying on it.
 */
4082 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
4083 abi_ulong target_addr)
4085 struct target_semid64_ds *target_sd;
4087 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4088 return -TARGET_EFAULT;
4089 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
4090 return -TARGET_EFAULT;
4091 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
4092 host_sd->sem_otime = tswapal(target_sd->sem_otime);
4093 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
4094 unlock_user_struct(target_sd, target_addr, 0);
4095 return 0;
/*
 * Inverse of target_to_host_semid_ds(): write the host semid_ds back
 * into the guest semid64_ds, delegating the ipc_perm header to
 * host_to_target_ipc_perm().  Returns 0 or -TARGET_EFAULT.
 */
4098 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
4099 struct semid_ds *host_sd)
4101 struct target_semid64_ds *target_sd;
4103 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4104 return -TARGET_EFAULT;
4105 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
4106 return -TARGET_EFAULT;
4107 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
4108 target_sd->sem_otime = tswapal(host_sd->sem_otime);
4109 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
4110 unlock_user_struct(target_sd, target_addr, 1);
4111 return 0;
/* Guest-ABI layout of struct seminfo (IPC_INFO/SEM_INFO results). */
4114 struct target_seminfo {
4115 int semmap;
4116 int semmni;
4117 int semmns;
4118 int semmnu;
4119 int semmsl;
4120 int semopm;
4121 int semume;
4122 int semusz;
4123 int semvmx;
4124 int semaem;
/*
 * Copy a host seminfo into the guest struct field by field, letting
 * __put_user() handle the byte swapping.  Returns 0 or -TARGET_EFAULT.
 */
4127 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
4128 struct seminfo *host_seminfo)
4130 struct target_seminfo *target_seminfo;
4131 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
4132 return -TARGET_EFAULT;
4133 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
4134 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
4135 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
4136 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
4137 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
4138 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
4139 __put_user(host_seminfo->semume, &target_seminfo->semume);
4140 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
4141 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
4142 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
4143 unlock_user_struct(target_seminfo, target_addr, 1);
4144 return 0;
/* Host-side semctl() argument union (glibc requires the caller to
 * define it). */
4147 union semun {
4148 int val;
4149 struct semid_ds *buf;
4150 unsigned short *array;
4151 struct seminfo *__buf;
/* Guest-ABI view of the same union: all pointer members are guest
 * addresses (abi_ulong). */
4154 union target_semun {
4155 int val;
4156 abi_ulong buf;
4157 abi_ulong array;
4158 abi_ulong __buf;
/*
 * For SETALL: allocate a host array of nsems semaphore values (size
 * queried via IPC_STAT) and fill it from the guest array.  On success
 * *host_array is owned by the caller and is freed by the matching
 * host_to_target_semarray() call.  Returns 0 or a target errno.
 */
4161 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
4162 abi_ulong target_addr)
4164 int nsems;
4165 unsigned short *array;
4166 union semun semun;
4167 struct semid_ds semid_ds;
4168 int i, ret;
4170 semun.buf = &semid_ds;
/* query the semaphore count so we know how much to copy */
4172 ret = semctl(semid, 0, IPC_STAT, semun);
4173 if (ret == -1)
4174 return get_errno(ret);
4176 nsems = semid_ds.sem_nsems;
4178 *host_array = g_try_new(unsigned short, nsems);
4179 if (!*host_array) {
4180 return -TARGET_ENOMEM;
4182 array = lock_user(VERIFY_READ, target_addr,
4183 nsems*sizeof(unsigned short), 1);
4184 if (!array) {
4185 g_free(*host_array);
4186 return -TARGET_EFAULT;
4189 for(i=0; i<nsems; i++) {
4190 __get_user((*host_array)[i], &array[i]);
4192 unlock_user(array, target_addr, 0);
4194 return 0;
4197 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4198 unsigned short **host_array)
4200 int nsems;
4201 unsigned short *array;
4202 union semun semun;
4203 struct semid_ds semid_ds;
4204 int i, ret;
4206 semun.buf = &semid_ds;
4208 ret = semctl(semid, 0, IPC_STAT, semun);
4209 if (ret == -1)
4210 return get_errno(ret);
4212 nsems = semid_ds.sem_nsems;
4214 array = lock_user(VERIFY_WRITE, target_addr,
4215 nsems*sizeof(unsigned short), 0);
4216 if (!array)
4217 return -TARGET_EFAULT;
4219 for(i=0; i<nsems; i++) {
4220 __put_user((*host_array)[i], &array[i]);
4222 g_free(*host_array);
4223 unlock_user(array, target_addr, 1);
4225 return 0;
/*
 * semctl(2) emulation: translate the union argument per command class
 * (value, array, semid_ds, seminfo, or none) and forward to the host.
 * Commands are masked with 0xff to strip target IPC_64-style modifier
 * bits.  Returns the host result or a target errno.
 */
4228 static inline abi_long do_semctl(int semid, int semnum, int cmd,
4229 abi_ulong target_arg)
4231 union target_semun target_su = { .buf = target_arg };
4232 union semun arg;
4233 struct semid_ds dsarg;
4234 unsigned short *array = NULL;
4235 struct seminfo seminfo;
4236 abi_long ret = -TARGET_EINVAL;
4237 abi_long err;
4238 cmd &= 0xff;
4240 switch( cmd ) {
4241 case GETVAL:
4242 case SETVAL:
4243 /* In 64 bit cross-endian situations, we will erroneously pick up
4244 * the wrong half of the union for the "val" element. To rectify
4245 * this, the entire 8-byte structure is byteswapped, followed by
4246 * a swap of the 4 byte val field. In other cases, the data is
4247 * already in proper host byte order. */
4248 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4249 target_su.buf = tswapal(target_su.buf);
4250 arg.val = tswap32(target_su.val);
4251 } else {
4252 arg.val = target_su.val;
4254 ret = get_errno(semctl(semid, semnum, cmd, arg));
4255 break;
4256 case GETALL:
4257 case SETALL:
/* array staging buffer is allocated here and freed by the
 * writeback helper */
4258 err = target_to_host_semarray(semid, &array, target_su.array);
4259 if (err)
4260 return err;
4261 arg.array = array;
4262 ret = get_errno(semctl(semid, semnum, cmd, arg));
4263 err = host_to_target_semarray(semid, target_su.array, &array);
4264 if (err)
4265 return err;
4266 break;
4267 case IPC_STAT:
4268 case IPC_SET:
4269 case SEM_STAT:
4270 err = target_to_host_semid_ds(&dsarg, target_su.buf);
4271 if (err)
4272 return err;
4273 arg.buf = &dsarg;
4274 ret = get_errno(semctl(semid, semnum, cmd, arg));
4275 err = host_to_target_semid_ds(target_su.buf, &dsarg);
4276 if (err)
4277 return err;
4278 break;
4279 case IPC_INFO:
4280 case SEM_INFO:
4281 arg.__buf = &seminfo;
4282 ret = get_errno(semctl(semid, semnum, cmd, arg));
4283 err = host_to_target_seminfo(target_su.__buf, &seminfo);
4284 if (err)
4285 return err;
4286 break;
4287 case IPC_RMID:
4288 case GETPID:
4289 case GETNCNT:
4290 case GETZCNT:
/* commands that ignore the union argument entirely */
4291 ret = get_errno(semctl(semid, semnum, cmd, NULL));
4292 break;
4295 return ret;
/* Guest-ABI layout of struct sembuf (semop operation entry). */
4298 struct target_sembuf {
4299 unsigned short sem_num;
4300 short sem_op;
4301 short sem_flg;
/*
 * Copy an array of nsops guest sembuf entries into host_sembuf,
 * byte-swapping via __get_user.  Returns 0 or -TARGET_EFAULT.
 */
4304 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4305 abi_ulong target_addr,
4306 unsigned nsops)
4308 struct target_sembuf *target_sembuf;
4309 int i;
4311 target_sembuf = lock_user(VERIFY_READ, target_addr,
4312 nsops*sizeof(struct target_sembuf), 1);
4313 if (!target_sembuf)
4314 return -TARGET_EFAULT;
4316 for(i=0; i<nsops; i++) {
4317 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4318 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4319 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4322 unlock_user(target_sembuf, target_addr, 0);
4324 return 0;
/*
 * semop(2) emulation: translate the guest op array and issue
 * semtimedop with no timeout.
 * NOTE(review): 'sops' is a VLA sized by the guest-supplied nsops; a
 * huge value could overflow the stack — confirm callers bound it.
 */
4327 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
4329 struct sembuf sops[nsops];
4331 if (target_to_host_sembuf(sops, ptr, nsops))
4332 return -TARGET_EFAULT;
4334 return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
/* Guest-ABI layout of msqid64_ds (asm-generic style, with 32-bit
 * padding words on 32-bit ABIs). */
4337 struct target_msqid_ds
4339 struct target_ipc_perm msg_perm;
4340 abi_ulong msg_stime;
4341 #if TARGET_ABI_BITS == 32
4342 abi_ulong __unused1;
4343 #endif
4344 abi_ulong msg_rtime;
4345 #if TARGET_ABI_BITS == 32
4346 abi_ulong __unused2;
4347 #endif
4348 abi_ulong msg_ctime;
4349 #if TARGET_ABI_BITS == 32
4350 abi_ulong __unused3;
4351 #endif
4352 abi_ulong __msg_cbytes;
4353 abi_ulong msg_qnum;
4354 abi_ulong msg_qbytes;
4355 abi_ulong msg_lspid;
4356 abi_ulong msg_lrpid;
4357 abi_ulong __unused4;
4358 abi_ulong __unused5;
/*
 * Convert a guest msqid_ds to the host struct; the ipc_perm header is
 * handled by target_to_host_ipc_perm().  Returns 0 or -TARGET_EFAULT.
 */
4361 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4362 abi_ulong target_addr)
4364 struct target_msqid_ds *target_md;
4366 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4367 return -TARGET_EFAULT;
4368 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4369 return -TARGET_EFAULT;
4370 host_md->msg_stime = tswapal(target_md->msg_stime);
4371 host_md->msg_rtime = tswapal(target_md->msg_rtime);
4372 host_md->msg_ctime = tswapal(target_md->msg_ctime);
4373 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4374 host_md->msg_qnum = tswapal(target_md->msg_qnum);
4375 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4376 host_md->msg_lspid = tswapal(target_md->msg_lspid);
4377 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4378 unlock_user_struct(target_md, target_addr, 0);
4379 return 0;
/*
 * Inverse conversion: write a host msqid_ds back to the guest struct.
 * Returns 0 or -TARGET_EFAULT.
 */
4382 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4383 struct msqid_ds *host_md)
4385 struct target_msqid_ds *target_md;
4387 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4388 return -TARGET_EFAULT;
4389 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4390 return -TARGET_EFAULT;
4391 target_md->msg_stime = tswapal(host_md->msg_stime);
4392 target_md->msg_rtime = tswapal(host_md->msg_rtime);
4393 target_md->msg_ctime = tswapal(host_md->msg_ctime);
4394 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4395 target_md->msg_qnum = tswapal(host_md->msg_qnum);
4396 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4397 target_md->msg_lspid = tswapal(host_md->msg_lspid);
4398 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4399 unlock_user_struct(target_md, target_addr, 1);
4400 return 0;
/* Guest-ABI layout of struct msginfo (IPC_INFO/MSG_INFO results). */
4403 struct target_msginfo {
4404 int msgpool;
4405 int msgmap;
4406 int msgmax;
4407 int msgmnb;
4408 int msgmni;
4409 int msgssz;
4410 int msgtql;
4411 unsigned short int msgseg;
/*
 * Copy a host msginfo into the guest struct field by field, letting
 * __put_user() handle the byte swapping.  Returns 0 or -TARGET_EFAULT.
 */
4414 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4415 struct msginfo *host_msginfo)
4417 struct target_msginfo *target_msginfo;
4418 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4419 return -TARGET_EFAULT;
4420 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4421 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4422 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4423 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4424 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4425 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4426 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4427 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4428 unlock_user_struct(target_msginfo, target_addr, 1);
4429 return 0;
/* Emulate msgctl(2).  ptr is a guest pointer whose interpretation
 * depends on cmd (target_msqid_ds or target_msginfo).
 * Returns the host result via get_errno(), or target errnos for
 * conversion failures.
 */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    /* Strip any IPC_64-style flag bits the guest ORed into cmd; the
     * host interface takes the bare command value. */
    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        /* Converted in both directions: IPC_SET consumes the inbound
         * copy, IPC_STAT/MSG_STAT fill in the outbound one. */
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* For these commands the kernel returns a struct msginfo
         * through the msqid_ds-typed argument. */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
/* Guest-ABI layout of struct msgbuf: the message type followed by the
 * variable-length message text (declared [1], used as a flexible
 * trailing array). */
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};
/* Emulate msgsnd(2): copy the guest msgbuf (mtype plus msgsz payload
 * bytes) into a host-side buffer, fixing the mtype byte order, and
 * forward to the host.  Returns 0 or a target errno.
 */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* Host buffer: a native long mtype slot followed by the payload. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
/* Emulate msgrcv(2): receive into a host buffer, then copy the
 * byte-swapped mtype and the received payload back into the guest
 * msgbuf at msgp.  Returns the number of bytes received or a target
 * errno.
 */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));

    if (ret > 0) {
        /* The payload area is locked separately: it starts one
         * abi_ulong (the mtype slot) past msgp. */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    /* target_mb is always valid here (a failed lock returns early
     * above); the guard is kept for safety. */
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
/* Copy a guest target_shmid_ds at target_addr into *host_sd,
 * swapping each field to host byte order.
 * Returns 0 on success, -TARGET_EFAULT on bad guest memory.
 */
static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    /* shm_perm is at the start of the structure, at target_addr. */
    if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
        return -TARGET_EFAULT;
    __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __get_user(host_sd->shm_atime, &target_sd->shm_atime);
    __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
/* Copy a host struct shmid_ds out to guest memory at target_addr,
 * swapping each field to target byte order.
 * Returns 0 on success, -TARGET_EFAULT on bad guest memory.
 */
static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
                                               struct shmid_ds *host_sd)
{
    struct target_shmid_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
        return -TARGET_EFAULT;
    __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
    __put_user(host_sd->shm_atime, &target_sd->shm_atime);
    __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
    __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
    __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
    __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
    __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
/* Guest-ABI layout of struct shminfo, returned by shmctl(IPC_INFO). */
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};
/* Copy a host struct shminfo out to the guest buffer at target_addr.
 * Returns 0 on success, -TARGET_EFAULT on bad guest memory.
 */
static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}
/* Guest-ABI layout of struct shm_info, returned by shmctl(SHM_INFO). */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};
/* Copy a host struct shm_info out to the guest buffer at target_addr.
 * Returns 0 on success, -TARGET_EFAULT on bad guest memory.
 */
static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}
/* Emulate shmctl(2).  buf is a guest pointer whose interpretation
 * depends on cmd (target_shmid_ds, target_shminfo or target_shm_info).
 * Returns the host result via get_errno(), or target errnos for
 * conversion failures.
 */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    /* Strip any IPC_64-style flag bits; the host takes the bare cmd. */
    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        /* Converted in both directions: IPC_SET consumes the inbound
         * copy, IPC_STAT/SHM_STAT fill in the outbound one. */
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* Kernel returns a struct shminfo through the shmid_ds arg. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        /* Kernel returns a struct shm_info through the shmid_ds arg. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
/* Default SHMLBA for targets that don't force their own value. */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
#endif
/* Emulate shmat(2): attach the SysV shared memory segment shmid at
 * guest address shmaddr (or pick a free region when shmaddr is 0),
 * record the mapping in shm_regions[] and mark the guest pages
 * readable/writable.  Returns the guest attach address or a target
 * errno.  Takes mmap_lock while manipulating the guest address space.
 */
static inline abi_ulong do_shmat(CPUArchState *cpu_env,
                                 int shmid, abi_ulong shmaddr, int shmflg)
{
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;
    abi_ulong shmlba;

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    /* Enforce the target's SHMLBA alignment: round down with SHM_RND,
     * otherwise reject misaligned addresses like the kernel does. */
    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }

    mmap_lock();

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, shm_info.shm_segsz);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP: the reserved VMA found above is replaced. */
            host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_READ |
                   ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));

    /* Remember the segment so do_shmdt() can clear the page flags. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}
4752 static inline abi_long do_shmdt(abi_ulong shmaddr)
4754 int i;
4756 for (i = 0; i < N_SHM_REGIONS; ++i) {
4757 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4758 shm_regions[i].in_use = false;
4759 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4760 break;
4764 return get_errno(shmdt(g2h(shmaddr)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings. */
/* do_ipc() must return target values and target errnos.
 *
 * Multiplexer for the legacy ipc(2) syscall: the low 16 bits of
 * 'call' select the operation, the high 16 bits carry the version.
 * first..fifth are the raw guest arguments, forwarded to the
 * per-operation helpers above.
 */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old-style (version 0) msgrcv passes msgp and msgtyp
                 * packed in a kludge structure pointed to by ptr. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }

        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* The attach address is returned through *third. */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
/* kernel structure types definitions */

/* First pass over syscall_types.h: generate a STRUCT_<name> enum
 * constant for every structure description. */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

/* Second pass: emit a thunk type-description array for each regular
 * structure; STRUCT_SPECIAL entries are converted by hand elsewhere. */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL
typedef struct IOCTLEntry IOCTLEntry;

/* Hook type for ioctls that need custom argument conversion instead of
 * the generic thunk-based path in do_ioctl(). */
typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg);

/* One row of the ioctl translation table (populated from ioctls.h). */
struct IOCTLEntry {
    int target_cmd;            /* ioctl number in the guest ABI */
    unsigned int host_cmd;     /* corresponding host ioctl number */
    const char *name;
    int access;                /* IOC_R / IOC_W / IOC_RW */
    do_ioctl_fn *do_ioctl;     /* non-NULL for special-cased ioctls */
    const argtype arg_type[5];
};

#define IOC_R 0x0001
#define IOC_W 0x0002
#define IOC_RW (IOC_R | IOC_W)

/* Upper bound on structures converted through the on-stack buf_temp. */
#define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

/* Special-case handler for FS_IOC_FIEMAP: variable-length in/out
 * payload, so the generic thunk path cannot be used.
 */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
5000 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
5001 int fd, int cmd, abi_long arg)
5003 const argtype *arg_type = ie->arg_type;
5004 int target_size;
5005 void *argptr;
5006 int ret;
5007 struct ifconf *host_ifconf;
5008 uint32_t outbufsz;
5009 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
5010 int target_ifreq_size;
5011 int nb_ifreq;
5012 int free_buf = 0;
5013 int i;
5014 int target_ifc_len;
5015 abi_long target_ifc_buf;
5016 int host_ifc_len;
5017 char *host_ifc_buf;
5019 assert(arg_type[0] == TYPE_PTR);
5020 assert(ie->access == IOC_RW);
5022 arg_type++;
5023 target_size = thunk_type_size(arg_type, 0);
5025 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5026 if (!argptr)
5027 return -TARGET_EFAULT;
5028 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5029 unlock_user(argptr, arg, 0);
5031 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
5032 target_ifc_len = host_ifconf->ifc_len;
5033 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
5035 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
5036 nb_ifreq = target_ifc_len / target_ifreq_size;
5037 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
5039 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
5040 if (outbufsz > MAX_STRUCT_SIZE) {
5041 /* We can't fit all the extents into the fixed size buffer.
5042 * Allocate one that is large enough and use it instead.
5044 host_ifconf = malloc(outbufsz);
5045 if (!host_ifconf) {
5046 return -TARGET_ENOMEM;
5048 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
5049 free_buf = 1;
5051 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
5053 host_ifconf->ifc_len = host_ifc_len;
5054 host_ifconf->ifc_buf = host_ifc_buf;
5056 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
5057 if (!is_error(ret)) {
5058 /* convert host ifc_len to target ifc_len */
5060 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
5061 target_ifc_len = nb_ifreq * target_ifreq_size;
5062 host_ifconf->ifc_len = target_ifc_len;
5064 /* restore target ifc_buf */
5066 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
5068 /* copy struct ifconf to target user */
5070 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5071 if (!argptr)
5072 return -TARGET_EFAULT;
5073 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
5074 unlock_user(argptr, arg, target_size);
5076 /* copy ifreq[] to target user */
5078 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
5079 for (i = 0; i < nb_ifreq ; i++) {
5080 thunk_convert(argptr + i * target_ifreq_size,
5081 host_ifc_buf + i * sizeof(struct ifreq),
5082 ifreq_arg_type, THUNK_TARGET);
5084 unlock_user(argptr, target_ifc_buf, target_ifc_len);
5087 if (free_buf) {
5088 free(host_ifconf);
5091 return ret;
/* Special-case handler for the device-mapper DM_* ioctls.  Each takes
 * a struct dm_ioctl header followed by a variable-sized payload at
 * data_start; both directions need command-specific conversion, so the
 * generic thunk path cannot be used.
 */
static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                            int cmd, abi_long arg)
{
    void *argptr;
    struct dm_ioctl *host_dm;
    abi_long guest_data;
    uint32_t guest_data_size;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    abi_long ret;
    void *big_buf = NULL;
    char *host_data;

    /* Convert the fixed dm_ioctl header from the guest. */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* buf_temp is too small, so fetch things into a bigger buffer */
    big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
    memcpy(big_buf, buf_temp, target_size);
    buf_temp = big_buf;
    host_dm = big_buf;

    guest_data = arg + host_dm->data_start;
    if ((guest_data - arg) < 0) {
        ret = -TARGET_EINVAL;
        goto out;
    }
    guest_data_size = host_dm->data_size - host_dm->data_start;
    host_data = (char*)host_dm + host_dm->data_start;

    argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }

    /* Convert the command-specific input payload, guest -> host. */
    switch (ie->host_cmd) {
    case DM_REMOVE_ALL:
    case DM_LIST_DEVICES:
    case DM_DEV_CREATE:
    case DM_DEV_REMOVE:
    case DM_DEV_SUSPEND:
    case DM_DEV_STATUS:
    case DM_DEV_WAIT:
    case DM_TABLE_STATUS:
    case DM_TABLE_CLEAR:
    case DM_TABLE_DEPS:
    case DM_LIST_VERSIONS:
        /* no input data */
        break;
    case DM_DEV_RENAME:
    case DM_DEV_SET_GEOMETRY:
        /* data contains only strings */
        memcpy(host_data, argptr, guest_data_size);
        break;
    case DM_TARGET_MSG:
        memcpy(host_data, argptr, guest_data_size);
        /* First 8 bytes are the target sector; fix its byte order. */
        *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
        break;
    case DM_TABLE_LOAD:
    {
        /* Payload: a chain of dm_target_spec structs, each followed by
         * its parameter string, linked by the 'next' offsets. */
        void *gspec = argptr;
        void *cur_data = host_data;
        const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
        int spec_size = thunk_type_size(arg_type, 0);
        int i;

        for (i = 0; i < host_dm->target_count; i++) {
            struct dm_target_spec *spec = cur_data;
            uint32_t next;
            int slen;

            thunk_convert(spec, gspec, arg_type, THUNK_HOST);
            slen = strlen((char*)gspec + spec_size) + 1;
            next = spec->next;
            spec->next = sizeof(*spec) + slen;
            strcpy((char*)&spec[1], gspec + spec_size);
            gspec += next;
            cur_data += spec->next;
        }
        break;
    }
    default:
        ret = -TARGET_EINVAL;
        unlock_user(argptr, guest_data, 0);
        goto out;
    }
    unlock_user(argptr, guest_data, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (!is_error(ret)) {
        guest_data = arg + host_dm->data_start;
        guest_data_size = host_dm->data_size - host_dm->data_start;
        argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
        /* Convert the command-specific output payload, host -> guest. */
        switch (ie->host_cmd) {
        case DM_REMOVE_ALL:
        case DM_DEV_CREATE:
        case DM_DEV_REMOVE:
        case DM_DEV_RENAME:
        case DM_DEV_SUSPEND:
        case DM_DEV_STATUS:
        case DM_TABLE_LOAD:
        case DM_TABLE_CLEAR:
        case DM_TARGET_MSG:
        case DM_DEV_SET_GEOMETRY:
            /* no return data */
            break;
        case DM_LIST_DEVICES:
        {
            struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
            int nl_size = 12; /* can't use thunk_size due to alignment */

            while (1) {
                uint32_t next = nl->next;
                if (next) {
                    nl->next = nl_size + (strlen(nl->name) + 1);
                }
                if (remaining_data < nl->next) {
                    /* Guest buffer exhausted; tell it to retry bigger. */
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
                strcpy(cur_data + nl_size, nl->name);
                cur_data += nl->next;
                remaining_data -= nl->next;
                if (!next) {
                    break;
                }
                nl = (void*)nl + next;
            }
            break;
        }
        case DM_DEV_WAIT:
        case DM_TABLE_STATUS:
        {
            struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
            int spec_size = thunk_type_size(arg_type, 0);
            int i;

            for (i = 0; i < host_dm->target_count; i++) {
                uint32_t next = spec->next;
                int slen = strlen((char*)&spec[1]) + 1;
                spec->next = (cur_data - argptr) + spec_size + slen;
                if (guest_data_size < spec->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
                strcpy(cur_data + spec_size, (char*)&spec[1]);
                cur_data = argptr + spec->next;
                spec = (void*)host_dm + host_dm->data_start + next;
            }
            break;
        }
        case DM_TABLE_DEPS:
        {
            /* Payload: a 32-bit count followed (at offset 8) by an
             * array of 64-bit device numbers. */
            void *hdata = (void*)host_dm + host_dm->data_start;
            int count = *(uint32_t*)hdata;
            uint64_t *hdev = hdata + 8;
            uint64_t *gdev = argptr + 8;
            int i;

            *(uint32_t*)argptr = tswap32(count);
            for (i = 0; i < count; i++) {
                *gdev = tswap64(*hdev);
                gdev++;
                hdev++;
            }
            break;
        }
        case DM_LIST_VERSIONS:
        {
            struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
            uint32_t remaining_data = guest_data_size;
            void *cur_data = argptr;
            const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
            int vers_size = thunk_type_size(arg_type, 0);

            while (1) {
                uint32_t next = vers->next;
                if (next) {
                    vers->next = vers_size + (strlen(vers->name) + 1);
                }
                if (remaining_data < vers->next) {
                    host_dm->flags |= DM_BUFFER_FULL_FLAG;
                    break;
                }
                thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
                strcpy(cur_data + vers_size, vers->name);
                cur_data += vers->next;
                remaining_data -= vers->next;
                if (!next) {
                    break;
                }
                vers = (void*)vers + next;
            }
            break;
        }
        default:
            unlock_user(argptr, guest_data, 0);
            ret = -TARGET_EINVAL;
            goto out;
        }
        unlock_user(argptr, guest_data, guest_data_size);

        /* Write the (possibly updated) dm_ioctl header back out. */
        argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
            goto out;
        }
        thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
        unlock_user(argptr, arg, target_size);
    }
out:
    g_free(big_buf);
    return ret;
}
/* Special-case handler for BLKPG: the struct blkpg_ioctl_arg carries a
 * guest pointer ('data') to a struct blkpg_partition, which must be
 * converted separately and re-pointed at a host-side copy.
 */
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
5377 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5378 int fd, int cmd, abi_long arg)
5380 const argtype *arg_type = ie->arg_type;
5381 const StructEntry *se;
5382 const argtype *field_types;
5383 const int *dst_offsets, *src_offsets;
5384 int target_size;
5385 void *argptr;
5386 abi_ulong *target_rt_dev_ptr;
5387 unsigned long *host_rt_dev_ptr;
5388 abi_long ret;
5389 int i;
5391 assert(ie->access == IOC_W);
5392 assert(*arg_type == TYPE_PTR);
5393 arg_type++;
5394 assert(*arg_type == TYPE_STRUCT);
5395 target_size = thunk_type_size(arg_type, 0);
5396 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5397 if (!argptr) {
5398 return -TARGET_EFAULT;
5400 arg_type++;
5401 assert(*arg_type == (int)STRUCT_rtentry);
5402 se = struct_entries + *arg_type++;
5403 assert(se->convert[0] == NULL);
5404 /* convert struct here to be able to catch rt_dev string */
5405 field_types = se->field_types;
5406 dst_offsets = se->field_offsets[THUNK_HOST];
5407 src_offsets = se->field_offsets[THUNK_TARGET];
5408 for (i = 0; i < se->nb_fields; i++) {
5409 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5410 assert(*field_types == TYPE_PTRVOID);
5411 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5412 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5413 if (*target_rt_dev_ptr != 0) {
5414 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5415 tswapal(*target_rt_dev_ptr));
5416 if (!*host_rt_dev_ptr) {
5417 unlock_user(argptr, arg, 0);
5418 return -TARGET_EFAULT;
5420 } else {
5421 *host_rt_dev_ptr = 0;
5423 field_types++;
5424 continue;
5426 field_types = thunk_convert(buf_temp + dst_offsets[i],
5427 argptr + src_offsets[i],
5428 field_types, THUNK_HOST);
5430 unlock_user(argptr, arg, 0);
5432 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5433 if (*host_rt_dev_ptr != 0) {
5434 unlock_user((void *)*host_rt_dev_ptr,
5435 *target_rt_dev_ptr, 0);
5437 return ret;
5440 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5441 int fd, int cmd, abi_long arg)
5443 int sig = target_to_host_signal(arg);
5444 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
/* The full guest->host ioctl translation table, generated from
 * ioctls.h; terminated by an all-zero sentinel entry that do_ioctl()
 * scans for. */
static IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
#include "ioctls.h"
    { 0, 0, },
};
/* ??? Implement proper locking for ioctls. */
/* do_ioctl() Must return target values and target errnos.
 *
 * Generic ioctl dispatcher: look the guest command up in
 * ioctl_entries[], delegate to a special-case handler if one is
 * registered, otherwise convert the argument through the thunk layer
 * according to the entry's access mode.
 */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* Linear scan; the table ends with a target_cmd == 0 sentinel. */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
#if defined(DEBUG)
    gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
#endif
    if (ie->do_ioctl) {
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
        /* Scalar or opaque pointer: pass the value straight through. */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* Read-only for the guest: run the ioctl into buf_temp,
             * then convert the result out to guest memory. */
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* Write-only: convert the guest argument in, then call. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* Read-write: convert in, call, convert back out. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                 (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
/* termios c_iflag translation: {target mask, target bits, host mask,
 * host bits}, terminated by an all-zero entry. */
static const bitmask_transtbl iflag_tbl[] = {
        { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
        { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
        { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
        { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
        { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
        { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
        { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
        { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
        { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
        { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
        { TARGET_IXON, TARGET_IXON, IXON, IXON },
        { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
        { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
        { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
        { 0, 0, 0, 0 }
};
5561 static const bitmask_transtbl oflag_tbl[] = {
5562 { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5563 { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5564 { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5565 { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5566 { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5567 { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5568 { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5569 { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5570 { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5571 { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5572 { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5573 { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5574 { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5575 { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5576 { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5577 { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5578 { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5579 { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5580 { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5581 { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5582 { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5583 { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5584 { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5585 { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5586 { 0, 0, 0, 0 }
5589 static const bitmask_transtbl cflag_tbl[] = {
5590 { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5591 { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5592 { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5593 { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5594 { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5595 { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5596 { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5597 { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5598 { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5599 { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5600 { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5601 { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5602 { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5603 { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5604 { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5605 { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5606 { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5607 { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5608 { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5609 { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5610 { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5611 { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5612 { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5613 { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5614 { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5615 { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5616 { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5617 { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5618 { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5619 { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5620 { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5621 { 0, 0, 0, 0 }
5624 static const bitmask_transtbl lflag_tbl[] = {
5625 { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5626 { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5627 { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5628 { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5629 { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5630 { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5631 { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5632 { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5633 { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5634 { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5635 { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5636 { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5637 { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5638 { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5639 { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5640 { 0, 0, 0, 0 }
5643 static void target_to_host_termios (void *dst, const void *src)
5645 struct host_termios *host = dst;
5646 const struct target_termios *target = src;
5648 host->c_iflag =
5649 target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5650 host->c_oflag =
5651 target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5652 host->c_cflag =
5653 target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5654 host->c_lflag =
5655 target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5656 host->c_line = target->c_line;
5658 memset(host->c_cc, 0, sizeof(host->c_cc));
5659 host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5660 host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5661 host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5662 host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5663 host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5664 host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5665 host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5666 host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5667 host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5668 host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5669 host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5670 host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5671 host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5672 host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5673 host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5674 host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5675 host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5678 static void host_to_target_termios (void *dst, const void *src)
5680 struct target_termios *target = dst;
5681 const struct host_termios *host = src;
5683 target->c_iflag =
5684 tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5685 target->c_oflag =
5686 tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5687 target->c_cflag =
5688 tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5689 target->c_lflag =
5690 tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5691 target->c_line = host->c_line;
5693 memset(target->c_cc, 0, sizeof(target->c_cc));
5694 target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5695 target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5696 target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5697 target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5698 target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5699 target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5700 target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5701 target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5702 target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5703 target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5704 target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5705 target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5706 target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5707 target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5708 target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5709 target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5710 target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5713 static const StructEntry struct_termios_def = {
5714 .convert = { host_to_target_termios, target_to_host_termios },
5715 .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5716 .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5719 static bitmask_transtbl mmap_flags_tbl[] = {
5720 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5721 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5722 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5723 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
5724 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
5725 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
5726 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
5727 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5728 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
5729 MAP_NORESERVE },
5730 { 0, 0, 0, 0 }
5733 #if defined(TARGET_I386)
5735 /* NOTE: there is really one LDT for all the threads */
5736 static uint8_t *ldt_table;
5738 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5740 int size;
5741 void *p;
5743 if (!ldt_table)
5744 return 0;
5745 size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5746 if (size > bytecount)
5747 size = bytecount;
5748 p = lock_user(VERIFY_WRITE, ptr, size, 0);
5749 if (!p)
5750 return -TARGET_EFAULT;
5751 /* ??? Should this by byteswapped? */
5752 memcpy(p, ldt_table, size);
5753 unlock_user(p, ptr, size);
5754 return size;
5757 /* XXX: add locking support */
5758 static abi_long write_ldt(CPUX86State *env,
5759 abi_ulong ptr, unsigned long bytecount, int oldmode)
5761 struct target_modify_ldt_ldt_s ldt_info;
5762 struct target_modify_ldt_ldt_s *target_ldt_info;
5763 int seg_32bit, contents, read_exec_only, limit_in_pages;
5764 int seg_not_present, useable, lm;
5765 uint32_t *lp, entry_1, entry_2;
5767 if (bytecount != sizeof(ldt_info))
5768 return -TARGET_EINVAL;
5769 if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
5770 return -TARGET_EFAULT;
5771 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5772 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5773 ldt_info.limit = tswap32(target_ldt_info->limit);
5774 ldt_info.flags = tswap32(target_ldt_info->flags);
5775 unlock_user_struct(target_ldt_info, ptr, 0);
5777 if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
5778 return -TARGET_EINVAL;
5779 seg_32bit = ldt_info.flags & 1;
5780 contents = (ldt_info.flags >> 1) & 3;
5781 read_exec_only = (ldt_info.flags >> 3) & 1;
5782 limit_in_pages = (ldt_info.flags >> 4) & 1;
5783 seg_not_present = (ldt_info.flags >> 5) & 1;
5784 useable = (ldt_info.flags >> 6) & 1;
5785 #ifdef TARGET_ABI32
5786 lm = 0;
5787 #else
5788 lm = (ldt_info.flags >> 7) & 1;
5789 #endif
5790 if (contents == 3) {
5791 if (oldmode)
5792 return -TARGET_EINVAL;
5793 if (seg_not_present == 0)
5794 return -TARGET_EINVAL;
5796 /* allocate the LDT */
5797 if (!ldt_table) {
5798 env->ldt.base = target_mmap(0,
5799 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
5800 PROT_READ|PROT_WRITE,
5801 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
5802 if (env->ldt.base == -1)
5803 return -TARGET_ENOMEM;
5804 memset(g2h(env->ldt.base), 0,
5805 TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
5806 env->ldt.limit = 0xffff;
5807 ldt_table = g2h(env->ldt.base);
5810 /* NOTE: same code as Linux kernel */
5811 /* Allow LDTs to be cleared by the user. */
5812 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5813 if (oldmode ||
5814 (contents == 0 &&
5815 read_exec_only == 1 &&
5816 seg_32bit == 0 &&
5817 limit_in_pages == 0 &&
5818 seg_not_present == 1 &&
5819 useable == 0 )) {
5820 entry_1 = 0;
5821 entry_2 = 0;
5822 goto install;
5826 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5827 (ldt_info.limit & 0x0ffff);
5828 entry_2 = (ldt_info.base_addr & 0xff000000) |
5829 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5830 (ldt_info.limit & 0xf0000) |
5831 ((read_exec_only ^ 1) << 9) |
5832 (contents << 10) |
5833 ((seg_not_present ^ 1) << 15) |
5834 (seg_32bit << 22) |
5835 (limit_in_pages << 23) |
5836 (lm << 21) |
5837 0x7000;
5838 if (!oldmode)
5839 entry_2 |= (useable << 20);
5841 /* Install the new entry ... */
5842 install:
5843 lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
5844 lp[0] = tswap32(entry_1);
5845 lp[1] = tswap32(entry_2);
5846 return 0;
5849 /* specific and weird i386 syscalls */
5850 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
5851 unsigned long bytecount)
5853 abi_long ret;
5855 switch (func) {
5856 case 0:
5857 ret = read_ldt(ptr, bytecount);
5858 break;
5859 case 1:
5860 ret = write_ldt(env, ptr, bytecount, 1);
5861 break;
5862 case 0x11:
5863 ret = write_ldt(env, ptr, bytecount, 0);
5864 break;
5865 default:
5866 ret = -TARGET_ENOSYS;
5867 break;
5869 return ret;
5872 #if defined(TARGET_I386) && defined(TARGET_ABI32)
5873 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
5875 uint64_t *gdt_table = g2h(env->gdt.base);
5876 struct target_modify_ldt_ldt_s ldt_info;
5877 struct target_modify_ldt_ldt_s *target_ldt_info;
5878 int seg_32bit, contents, read_exec_only, limit_in_pages;
5879 int seg_not_present, useable, lm;
5880 uint32_t *lp, entry_1, entry_2;
5881 int i;
5883 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5884 if (!target_ldt_info)
5885 return -TARGET_EFAULT;
5886 ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
5887 ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
5888 ldt_info.limit = tswap32(target_ldt_info->limit);
5889 ldt_info.flags = tswap32(target_ldt_info->flags);
5890 if (ldt_info.entry_number == -1) {
5891 for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
5892 if (gdt_table[i] == 0) {
5893 ldt_info.entry_number = i;
5894 target_ldt_info->entry_number = tswap32(i);
5895 break;
5899 unlock_user_struct(target_ldt_info, ptr, 1);
5901 if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
5902 ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
5903 return -TARGET_EINVAL;
5904 seg_32bit = ldt_info.flags & 1;
5905 contents = (ldt_info.flags >> 1) & 3;
5906 read_exec_only = (ldt_info.flags >> 3) & 1;
5907 limit_in_pages = (ldt_info.flags >> 4) & 1;
5908 seg_not_present = (ldt_info.flags >> 5) & 1;
5909 useable = (ldt_info.flags >> 6) & 1;
5910 #ifdef TARGET_ABI32
5911 lm = 0;
5912 #else
5913 lm = (ldt_info.flags >> 7) & 1;
5914 #endif
5916 if (contents == 3) {
5917 if (seg_not_present == 0)
5918 return -TARGET_EINVAL;
5921 /* NOTE: same code as Linux kernel */
5922 /* Allow LDTs to be cleared by the user. */
5923 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
5924 if ((contents == 0 &&
5925 read_exec_only == 1 &&
5926 seg_32bit == 0 &&
5927 limit_in_pages == 0 &&
5928 seg_not_present == 1 &&
5929 useable == 0 )) {
5930 entry_1 = 0;
5931 entry_2 = 0;
5932 goto install;
5936 entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
5937 (ldt_info.limit & 0x0ffff);
5938 entry_2 = (ldt_info.base_addr & 0xff000000) |
5939 ((ldt_info.base_addr & 0x00ff0000) >> 16) |
5940 (ldt_info.limit & 0xf0000) |
5941 ((read_exec_only ^ 1) << 9) |
5942 (contents << 10) |
5943 ((seg_not_present ^ 1) << 15) |
5944 (seg_32bit << 22) |
5945 (limit_in_pages << 23) |
5946 (useable << 20) |
5947 (lm << 21) |
5948 0x7000;
5950 /* Install the new entry ... */
5951 install:
5952 lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
5953 lp[0] = tswap32(entry_1);
5954 lp[1] = tswap32(entry_2);
5955 return 0;
5958 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
5960 struct target_modify_ldt_ldt_s *target_ldt_info;
5961 uint64_t *gdt_table = g2h(env->gdt.base);
5962 uint32_t base_addr, limit, flags;
5963 int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
5964 int seg_not_present, useable, lm;
5965 uint32_t *lp, entry_1, entry_2;
5967 lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
5968 if (!target_ldt_info)
5969 return -TARGET_EFAULT;
5970 idx = tswap32(target_ldt_info->entry_number);
5971 if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
5972 idx > TARGET_GDT_ENTRY_TLS_MAX) {
5973 unlock_user_struct(target_ldt_info, ptr, 1);
5974 return -TARGET_EINVAL;
5976 lp = (uint32_t *)(gdt_table + idx);
5977 entry_1 = tswap32(lp[0]);
5978 entry_2 = tswap32(lp[1]);
5980 read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
5981 contents = (entry_2 >> 10) & 3;
5982 seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
5983 seg_32bit = (entry_2 >> 22) & 1;
5984 limit_in_pages = (entry_2 >> 23) & 1;
5985 useable = (entry_2 >> 20) & 1;
5986 #ifdef TARGET_ABI32
5987 lm = 0;
5988 #else
5989 lm = (entry_2 >> 21) & 1;
5990 #endif
5991 flags = (seg_32bit << 0) | (contents << 1) |
5992 (read_exec_only << 3) | (limit_in_pages << 4) |
5993 (seg_not_present << 5) | (useable << 6) | (lm << 7);
5994 limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
5995 base_addr = (entry_1 >> 16) |
5996 (entry_2 & 0xff000000) |
5997 ((entry_2 & 0xff) << 16);
5998 target_ldt_info->base_addr = tswapal(base_addr);
5999 target_ldt_info->limit = tswap32(limit);
6000 target_ldt_info->flags = tswap32(flags);
6001 unlock_user_struct(target_ldt_info, ptr, 1);
6002 return 0;
6004 #endif /* TARGET_I386 && TARGET_ABI32 */
6006 #ifndef TARGET_ABI32
6007 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6009 abi_long ret = 0;
6010 abi_ulong val;
6011 int idx;
6013 switch(code) {
6014 case TARGET_ARCH_SET_GS:
6015 case TARGET_ARCH_SET_FS:
6016 if (code == TARGET_ARCH_SET_GS)
6017 idx = R_GS;
6018 else
6019 idx = R_FS;
6020 cpu_x86_load_seg(env, idx, 0);
6021 env->segs[idx].base = addr;
6022 break;
6023 case TARGET_ARCH_GET_GS:
6024 case TARGET_ARCH_GET_FS:
6025 if (code == TARGET_ARCH_GET_GS)
6026 idx = R_GS;
6027 else
6028 idx = R_FS;
6029 val = env->segs[idx].base;
6030 if (put_user(val, addr, abi_ulong))
6031 ret = -TARGET_EFAULT;
6032 break;
6033 default:
6034 ret = -TARGET_EINVAL;
6035 break;
6037 return ret;
6039 #endif
6041 #endif /* defined(TARGET_I386) */
6043 #define NEW_STACK_SIZE 0x40000
6046 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6047 typedef struct {
6048 CPUArchState *env;
6049 pthread_mutex_t mutex;
6050 pthread_cond_t cond;
6051 pthread_t thread;
6052 uint32_t tid;
6053 abi_ulong child_tidptr;
6054 abi_ulong parent_tidptr;
6055 sigset_t sigmask;
6056 } new_thread_info;
6058 static void *clone_func(void *arg)
6060 new_thread_info *info = arg;
6061 CPUArchState *env;
6062 CPUState *cpu;
6063 TaskState *ts;
6065 rcu_register_thread();
6066 env = info->env;
6067 cpu = ENV_GET_CPU(env);
6068 thread_cpu = cpu;
6069 ts = (TaskState *)cpu->opaque;
6070 info->tid = gettid();
6071 cpu->host_tid = info->tid;
6072 task_settid(ts);
6073 if (info->child_tidptr)
6074 put_user_u32(info->tid, info->child_tidptr);
6075 if (info->parent_tidptr)
6076 put_user_u32(info->tid, info->parent_tidptr);
6077 /* Enable signals. */
6078 sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6079 /* Signal to the parent that we're ready. */
6080 pthread_mutex_lock(&info->mutex);
6081 pthread_cond_broadcast(&info->cond);
6082 pthread_mutex_unlock(&info->mutex);
6083 /* Wait until the parent has finshed initializing the tls state. */
6084 pthread_mutex_lock(&clone_lock);
6085 pthread_mutex_unlock(&clone_lock);
6086 cpu_loop(env);
6087 /* never exits */
6088 return NULL;
6091 /* do_fork() Must return host values and target errnos (unlike most
6092 do_*() functions). */
6093 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6094 abi_ulong parent_tidptr, target_ulong newtls,
6095 abi_ulong child_tidptr)
6097 CPUState *cpu = ENV_GET_CPU(env);
6098 int ret;
6099 TaskState *ts;
6100 CPUState *new_cpu;
6101 CPUArchState *new_env;
6102 sigset_t sigmask;
6104 flags &= ~CLONE_IGNORED_FLAGS;
6106 /* Emulate vfork() with fork() */
6107 if (flags & CLONE_VFORK)
6108 flags &= ~(CLONE_VFORK | CLONE_VM);
6110 if (flags & CLONE_VM) {
6111 TaskState *parent_ts = (TaskState *)cpu->opaque;
6112 new_thread_info info;
6113 pthread_attr_t attr;
6115 if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6116 (flags & CLONE_INVALID_THREAD_FLAGS)) {
6117 return -TARGET_EINVAL;
6120 ts = g_new0(TaskState, 1);
6121 init_task_state(ts);
6122 /* we create a new CPU instance. */
6123 new_env = cpu_copy(env);
6124 /* Init regs that differ from the parent. */
6125 cpu_clone_regs(new_env, newsp);
6126 new_cpu = ENV_GET_CPU(new_env);
6127 new_cpu->opaque = ts;
6128 ts->bprm = parent_ts->bprm;
6129 ts->info = parent_ts->info;
6130 ts->signal_mask = parent_ts->signal_mask;
6132 if (flags & CLONE_CHILD_CLEARTID) {
6133 ts->child_tidptr = child_tidptr;
6136 if (flags & CLONE_SETTLS) {
6137 cpu_set_tls (new_env, newtls);
6140 /* Grab a mutex so that thread setup appears atomic. */
6141 pthread_mutex_lock(&clone_lock);
6143 memset(&info, 0, sizeof(info));
6144 pthread_mutex_init(&info.mutex, NULL);
6145 pthread_mutex_lock(&info.mutex);
6146 pthread_cond_init(&info.cond, NULL);
6147 info.env = new_env;
6148 if (flags & CLONE_CHILD_SETTID) {
6149 info.child_tidptr = child_tidptr;
6151 if (flags & CLONE_PARENT_SETTID) {
6152 info.parent_tidptr = parent_tidptr;
6155 ret = pthread_attr_init(&attr);
6156 ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6157 ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6158 /* It is not safe to deliver signals until the child has finished
6159 initializing, so temporarily block all signals. */
6160 sigfillset(&sigmask);
6161 sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6163 ret = pthread_create(&info.thread, &attr, clone_func, &info);
6164 /* TODO: Free new CPU state if thread creation failed. */
6166 sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6167 pthread_attr_destroy(&attr);
6168 if (ret == 0) {
6169 /* Wait for the child to initialize. */
6170 pthread_cond_wait(&info.cond, &info.mutex);
6171 ret = info.tid;
6172 } else {
6173 ret = -1;
6175 pthread_mutex_unlock(&info.mutex);
6176 pthread_cond_destroy(&info.cond);
6177 pthread_mutex_destroy(&info.mutex);
6178 pthread_mutex_unlock(&clone_lock);
6179 } else {
6180 /* if no CLONE_VM, we consider it is a fork */
6181 if (flags & CLONE_INVALID_FORK_FLAGS) {
6182 return -TARGET_EINVAL;
6185 /* We can't support custom termination signals */
6186 if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6187 return -TARGET_EINVAL;
6190 if (block_signals()) {
6191 return -TARGET_ERESTARTSYS;
6194 fork_start();
6195 ret = fork();
6196 if (ret == 0) {
6197 /* Child Process. */
6198 rcu_after_fork();
6199 cpu_clone_regs(env, newsp);
6200 fork_end(1);
6201 /* There is a race condition here. The parent process could
6202 theoretically read the TID in the child process before the child
6203 tid is set. This would require using either ptrace
6204 (not implemented) or having *_tidptr to point at a shared memory
6205 mapping. We can't repeat the spinlock hack used above because
6206 the child process gets its own copy of the lock. */
6207 if (flags & CLONE_CHILD_SETTID)
6208 put_user_u32(gettid(), child_tidptr);
6209 if (flags & CLONE_PARENT_SETTID)
6210 put_user_u32(gettid(), parent_tidptr);
6211 ts = (TaskState *)cpu->opaque;
6212 if (flags & CLONE_SETTLS)
6213 cpu_set_tls (env, newtls);
6214 if (flags & CLONE_CHILD_CLEARTID)
6215 ts->child_tidptr = child_tidptr;
6216 } else {
6217 fork_end(0);
6220 return ret;
6223 /* warning : doesn't handle linux specific flags... */
6224 static int target_to_host_fcntl_cmd(int cmd)
6226 switch(cmd) {
6227 case TARGET_F_DUPFD:
6228 case TARGET_F_GETFD:
6229 case TARGET_F_SETFD:
6230 case TARGET_F_GETFL:
6231 case TARGET_F_SETFL:
6232 return cmd;
6233 case TARGET_F_GETLK:
6234 return F_GETLK64;
6235 case TARGET_F_SETLK:
6236 return F_SETLK64;
6237 case TARGET_F_SETLKW:
6238 return F_SETLKW64;
6239 case TARGET_F_GETOWN:
6240 return F_GETOWN;
6241 case TARGET_F_SETOWN:
6242 return F_SETOWN;
6243 case TARGET_F_GETSIG:
6244 return F_GETSIG;
6245 case TARGET_F_SETSIG:
6246 return F_SETSIG;
6247 #if TARGET_ABI_BITS == 32
6248 case TARGET_F_GETLK64:
6249 return F_GETLK64;
6250 case TARGET_F_SETLK64:
6251 return F_SETLK64;
6252 case TARGET_F_SETLKW64:
6253 return F_SETLKW64;
6254 #endif
6255 case TARGET_F_SETLEASE:
6256 return F_SETLEASE;
6257 case TARGET_F_GETLEASE:
6258 return F_GETLEASE;
6259 #ifdef F_DUPFD_CLOEXEC
6260 case TARGET_F_DUPFD_CLOEXEC:
6261 return F_DUPFD_CLOEXEC;
6262 #endif
6263 case TARGET_F_NOTIFY:
6264 return F_NOTIFY;
6265 #ifdef F_GETOWN_EX
6266 case TARGET_F_GETOWN_EX:
6267 return F_GETOWN_EX;
6268 #endif
6269 #ifdef F_SETOWN_EX
6270 case TARGET_F_SETOWN_EX:
6271 return F_SETOWN_EX;
6272 #endif
6273 #ifdef F_SETPIPE_SZ
6274 case TARGET_F_SETPIPE_SZ:
6275 return F_SETPIPE_SZ;
6276 case TARGET_F_GETPIPE_SZ:
6277 return F_GETPIPE_SZ;
6278 #endif
6279 default:
6280 return -TARGET_EINVAL;
6282 return -TARGET_EINVAL;
6285 #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
6286 static const bitmask_transtbl flock_tbl[] = {
6287 TRANSTBL_CONVERT(F_RDLCK),
6288 TRANSTBL_CONVERT(F_WRLCK),
6289 TRANSTBL_CONVERT(F_UNLCK),
6290 TRANSTBL_CONVERT(F_EXLCK),
6291 TRANSTBL_CONVERT(F_SHLCK),
6292 { 0, 0, 0, 0 }
6295 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6296 abi_ulong target_flock_addr)
6298 struct target_flock *target_fl;
6299 short l_type;
6301 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6302 return -TARGET_EFAULT;
6305 __get_user(l_type, &target_fl->l_type);
6306 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6307 __get_user(fl->l_whence, &target_fl->l_whence);
6308 __get_user(fl->l_start, &target_fl->l_start);
6309 __get_user(fl->l_len, &target_fl->l_len);
6310 __get_user(fl->l_pid, &target_fl->l_pid);
6311 unlock_user_struct(target_fl, target_flock_addr, 0);
6312 return 0;
6315 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6316 const struct flock64 *fl)
6318 struct target_flock *target_fl;
6319 short l_type;
6321 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6322 return -TARGET_EFAULT;
6325 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6326 __put_user(l_type, &target_fl->l_type);
6327 __put_user(fl->l_whence, &target_fl->l_whence);
6328 __put_user(fl->l_start, &target_fl->l_start);
6329 __put_user(fl->l_len, &target_fl->l_len);
6330 __put_user(fl->l_pid, &target_fl->l_pid);
6331 unlock_user_struct(target_fl, target_flock_addr, 1);
6332 return 0;
6335 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6336 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6338 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6339 static inline abi_long copy_from_user_eabi_flock64(struct flock64 *fl,
6340 abi_ulong target_flock_addr)
6342 struct target_eabi_flock64 *target_fl;
6343 short l_type;
6345 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6346 return -TARGET_EFAULT;
6349 __get_user(l_type, &target_fl->l_type);
6350 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6351 __get_user(fl->l_whence, &target_fl->l_whence);
6352 __get_user(fl->l_start, &target_fl->l_start);
6353 __get_user(fl->l_len, &target_fl->l_len);
6354 __get_user(fl->l_pid, &target_fl->l_pid);
6355 unlock_user_struct(target_fl, target_flock_addr, 0);
6356 return 0;
6359 static inline abi_long copy_to_user_eabi_flock64(abi_ulong target_flock_addr,
6360 const struct flock64 *fl)
6362 struct target_eabi_flock64 *target_fl;
6363 short l_type;
6365 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6366 return -TARGET_EFAULT;
6369 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6370 __put_user(l_type, &target_fl->l_type);
6371 __put_user(fl->l_whence, &target_fl->l_whence);
6372 __put_user(fl->l_start, &target_fl->l_start);
6373 __put_user(fl->l_len, &target_fl->l_len);
6374 __put_user(fl->l_pid, &target_fl->l_pid);
6375 unlock_user_struct(target_fl, target_flock_addr, 1);
6376 return 0;
6378 #endif
6380 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6381 abi_ulong target_flock_addr)
6383 struct target_flock64 *target_fl;
6384 short l_type;
6386 if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6387 return -TARGET_EFAULT;
6390 __get_user(l_type, &target_fl->l_type);
6391 fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
6392 __get_user(fl->l_whence, &target_fl->l_whence);
6393 __get_user(fl->l_start, &target_fl->l_start);
6394 __get_user(fl->l_len, &target_fl->l_len);
6395 __get_user(fl->l_pid, &target_fl->l_pid);
6396 unlock_user_struct(target_fl, target_flock_addr, 0);
6397 return 0;
6400 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6401 const struct flock64 *fl)
6403 struct target_flock64 *target_fl;
6404 short l_type;
6406 if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6407 return -TARGET_EFAULT;
6410 l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
6411 __put_user(l_type, &target_fl->l_type);
6412 __put_user(fl->l_whence, &target_fl->l_whence);
6413 __put_user(fl->l_start, &target_fl->l_start);
6414 __put_user(fl->l_len, &target_fl->l_len);
6415 __put_user(fl->l_pid, &target_fl->l_pid);
6416 unlock_user_struct(target_fl, target_flock_addr, 1);
6417 return 0;
6420 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6422 struct flock64 fl64;
6423 #ifdef F_GETOWN_EX
6424 struct f_owner_ex fox;
6425 struct target_f_owner_ex *target_fox;
6426 #endif
6427 abi_long ret;
6428 int host_cmd = target_to_host_fcntl_cmd(cmd);
6430 if (host_cmd == -TARGET_EINVAL)
6431 return host_cmd;
6433 switch(cmd) {
6434 case TARGET_F_GETLK:
6435 ret = copy_from_user_flock(&fl64, arg);
6436 if (ret) {
6437 return ret;
6439 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6440 if (ret == 0) {
6441 ret = copy_to_user_flock(arg, &fl64);
6443 break;
6445 case TARGET_F_SETLK:
6446 case TARGET_F_SETLKW:
6447 ret = copy_from_user_flock(&fl64, arg);
6448 if (ret) {
6449 return ret;
6451 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6452 break;
6454 case TARGET_F_GETLK64:
6455 ret = copy_from_user_flock64(&fl64, arg);
6456 if (ret) {
6457 return ret;
6459 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6460 if (ret == 0) {
6461 ret = copy_to_user_flock64(arg, &fl64);
6463 break;
6464 case TARGET_F_SETLK64:
6465 case TARGET_F_SETLKW64:
6466 ret = copy_from_user_flock64(&fl64, arg);
6467 if (ret) {
6468 return ret;
6470 ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6471 break;
6473 case TARGET_F_GETFL:
6474 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6475 if (ret >= 0) {
6476 ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6478 break;
6480 case TARGET_F_SETFL:
6481 ret = get_errno(safe_fcntl(fd, host_cmd,
6482 target_to_host_bitmask(arg,
6483 fcntl_flags_tbl)));
6484 break;
6486 #ifdef F_GETOWN_EX
6487 case TARGET_F_GETOWN_EX:
6488 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6489 if (ret >= 0) {
6490 if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6491 return -TARGET_EFAULT;
6492 target_fox->type = tswap32(fox.type);
6493 target_fox->pid = tswap32(fox.pid);
6494 unlock_user_struct(target_fox, arg, 1);
6496 break;
6497 #endif
6499 #ifdef F_SETOWN_EX
6500 case TARGET_F_SETOWN_EX:
6501 if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6502 return -TARGET_EFAULT;
6503 fox.type = tswap32(target_fox->type);
6504 fox.pid = tswap32(target_fox->pid);
6505 unlock_user_struct(target_fox, arg, 0);
6506 ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6507 break;
6508 #endif
6510 case TARGET_F_SETOWN:
6511 case TARGET_F_GETOWN:
6512 case TARGET_F_SETSIG:
6513 case TARGET_F_GETSIG:
6514 case TARGET_F_SETLEASE:
6515 case TARGET_F_GETLEASE:
6516 case TARGET_F_SETPIPE_SZ:
6517 case TARGET_F_GETPIPE_SZ:
6518 ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6519 break;
6521 default:
6522 ret = get_errno(safe_fcntl(fd, cmd, arg));
6523 break;
6525 return ret;
#ifdef USE_UID16

/* Clamp a 32-bit uid to the 16-bit ABI range; ids that do not fit are
 * reported as the kernel's "overflow" uid 65534. */
static inline int high2lowuid(int uid)
{
    if (uid > 65535)
        return 65534;
    else
        return uid;
}

/* Same clamping for gids. */
static inline int high2lowgid(int gid)
{
    if (gid > 65535)
        return 65534;
    else
        return gid;
}

/* Widen a 16-bit uid from the guest; preserve -1 ("no change" for the
 * set*uid family) through sign extension. */
static inline int low2highuid(int uid)
{
    if ((int16_t)uid == -1)
        return -1;
    else
        return uid;
}

/* Same widening for gids. */
static inline int low2highgid(int gid)
{
    if ((int16_t)gid == -1)
        return -1;
    else
        return gid;
}

/* Byte-swap an id at the width the guest ABI uses (16-bit here). */
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)

#else /* !USE_UID16 */

/* 32-bit uid ABIs: all conversions are identity except byte order. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)

#endif /* USE_UID16 */
/* We must do direct syscalls for setting UID/GID, because we want to
 * implement the Linux system call semantics of "change only for this thread",
 * not the libc/POSIX semantics of "change for all threads in process".
 * (See http://ewontfix.com/17/ for more details.)
 * We use the 32-bit version of the syscalls if present; if it is not
 * then either the host architecture supports 32-bit UIDs natively with
 * the standard syscall, or the 16-bit UID is the best we can do.
 */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

/* Raw per-thread id setters, invoked via the _syscall* stubs so libc's
 * process-wide wrappers are bypassed. */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
/* One-time initialization of the syscall layer: register the struct
 * thunks used for ioctl argument conversion, build the reverse errno
 * table, and patch target ioctl numbers whose size field is computed
 * from the thunk-described argument type. */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);

/* Expand syscall_types.h once, registering every described struct. */
#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            /* All-ones size is the "compute me" marker; it requires a
             * pointer argument whose pointee size the thunk can give us. */
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
#if TARGET_ABI_BITS == 32
/* Reassemble a 64-bit offset from the two 32-bit registers a 32-bit
 * guest ABI splits it across; which register carries the high half
 * depends on the target's endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* 64-bit ABIs pass the whole offset in one register; word1 is unused. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* truncate64: rebuild the 64-bit length from the guest register pair.
 * ABIs that align 64-bit arguments to even register pairs insert a pad
 * register, shifting both halves one argument slot later. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    abi_long word0 = arg2;
    abi_long word1 = arg3;

    if (regpairs_aligned(cpu_env)) {
        word0 = arg3;
        word1 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(word0, word1)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* ftruncate64: same register-pair reassembly as target_truncate64,
 * but operating on an already-open file descriptor. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    abi_long word0 = arg2;
    abi_long word1 = arg3;

    if (regpairs_aligned(cpu_env)) {
        word0 = arg3;
        word1 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(word0, word1)));
}
#endif
/* Copy a struct timespec from guest memory into *host_ts, converting
 * field endianness.  Returns 0, or -TARGET_EFAULT on a bad pointer. */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
        return -TARGET_EFAULT;
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
/* Copy *host_ts out to a struct timespec in guest memory, converting
 * field endianness.  Returns 0, or -TARGET_EFAULT on a bad pointer. */
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
6750 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6751 abi_ulong target_addr)
6753 struct target_itimerspec *target_itspec;
6755 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6756 return -TARGET_EFAULT;
6759 host_itspec->it_interval.tv_sec =
6760 tswapal(target_itspec->it_interval.tv_sec);
6761 host_itspec->it_interval.tv_nsec =
6762 tswapal(target_itspec->it_interval.tv_nsec);
6763 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6764 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6766 unlock_user_struct(target_itspec, target_addr, 1);
6767 return 0;
6770 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6771 struct itimerspec *host_its)
6773 struct target_itimerspec *target_itspec;
6775 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6776 return -TARGET_EFAULT;
6779 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6780 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6782 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6783 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6785 unlock_user_struct(target_itspec, target_addr, 0);
6786 return 0;
/* Copy a struct timex (adjtimex/clock_adjtime argument) from guest
 * memory into *host_tx, converting every field's endianness.
 * Returns 0, or -TARGET_EFAULT on a bad guest pointer. */
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
/* Copy *host_tx back out to a struct timex in guest memory, converting
 * every field's endianness (mirror of target_to_host_timex).
 * Returns 0, or -TARGET_EFAULT on a bad guest pointer. */
static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
6860 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6861 abi_ulong target_addr)
6863 struct target_sigevent *target_sevp;
6865 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6866 return -TARGET_EFAULT;
6869 /* This union is awkward on 64 bit systems because it has a 32 bit
6870 * integer and a pointer in it; we follow the conversion approach
6871 * used for handling sigval types in signal.c so the guest should get
6872 * the correct value back even if we did a 64 bit byteswap and it's
6873 * using the 32 bit integer.
6875 host_sevp->sigev_value.sival_ptr =
6876 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6877 host_sevp->sigev_signo =
6878 target_to_host_signal(tswap32(target_sevp->sigev_signo));
6879 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6880 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6882 unlock_user_struct(target_sevp, target_addr, 1);
6883 return 0;
#if defined(TARGET_NR_mlockall)
/* Translate the guest's mlockall() flag bits into the host's MCL_*
 * bits; unknown guest bits are dropped. */
static inline int target_to_host_mlockall_arg(int arg)
{
    int host_arg = 0;

    host_arg |= (arg & TARGET_MLOCKALL_MCL_CURRENT) ? MCL_CURRENT : 0;
    host_arg |= (arg & TARGET_MLOCKALL_MCL_FUTURE) ? MCL_FUTURE : 0;
    return host_arg;
}
#endif
/* Write *host_st out to guest memory in the target's stat64 layout
 * (or plain stat where the target has no stat64); ARM EABI guests use
 * their own dedicated layout.  Returns 0, or -TARGET_EFAULT on a bad
 * guest pointer. */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        /* Zero first so padding/unused fields are deterministic. */
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        /* Some layouts carry the inode twice; fill both copies. */
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
6964 /* ??? Using host futex calls even when target atomic operations
6965 are not really atomic probably breaks things. However implementing
6966 futexes locally would make futexes shared between multiple processes
6967 tricky. However they're probably useless because guest atomic
6968 operations won't work either. */
6969 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6970 target_ulong uaddr2, int val3)
6972 struct timespec ts, *pts;
6973 int base_op;
6975 /* ??? We assume FUTEX_* constants are the same on both host
6976 and target. */
6977 #ifdef FUTEX_CMD_MASK
6978 base_op = op & FUTEX_CMD_MASK;
6979 #else
6980 base_op = op;
6981 #endif
6982 switch (base_op) {
6983 case FUTEX_WAIT:
6984 case FUTEX_WAIT_BITSET:
6985 if (timeout) {
6986 pts = &ts;
6987 target_to_host_timespec(pts, timeout);
6988 } else {
6989 pts = NULL;
6991 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
6992 pts, NULL, val3));
6993 case FUTEX_WAKE:
6994 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6995 case FUTEX_FD:
6996 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
6997 case FUTEX_REQUEUE:
6998 case FUTEX_CMP_REQUEUE:
6999 case FUTEX_WAKE_OP:
7000 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7001 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7002 But the prototype takes a `struct timespec *'; insert casts
7003 to satisfy the compiler. We do not need to tswap TIMEOUT
7004 since it's not compared to guest memory. */
7005 pts = (struct timespec *)(uintptr_t) timeout;
7006 return get_errno(safe_futex(g2h(uaddr), op, val, pts,
7007 g2h(uaddr2),
7008 (base_op == FUTEX_CMP_REQUEUE
7009 ? tswap32(val3)
7010 : val3)));
7011 default:
7012 return -TARGET_ENOSYS;
7015 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Implement name_to_handle_at(2) for the guest: read the guest's
 * handle_bytes, run the host syscall into a scratch file_handle, then
 * copy the (opaque) handle plus mount id back to guest memory.
 * Returns the host result, or -TARGET_EFAULT on bad guest pointers. */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* First field of the guest struct is handle_bytes (u32). */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    /* Opaque payload is copied raw; only the two header fields are
     * byte-swapped for the guest. */
    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
7067 #endif
7069 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Implement open_by_handle_at(2) for the guest: copy the guest's
 * file_handle (opaque payload raw, header fields byte-swapped), run the
 * host syscall with translated open flags, and return the new fd or a
 * target errno. */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* First field of the guest struct is handle_bytes (u32). */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
7101 #endif
7103 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
/* signalfd siginfo conversion */

/* Convert a host signalfd_siginfo record to target byte order and
 * target signal numbering.  NOTE: the only caller invokes this in
 * place (tinfo == info), so the order of operations matters: host-
 * endian values must be examined before the aliased destination field
 * is overwritten. */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    /* Fields are still host-endian here (in-place call), so comparing
     * against the host SIGBUS/BUS_MCEERR_* constants is correct. */
    if (tinfo->ssi_signo == SIGBUS &&
        (tinfo->ssi_code == BUS_MCEERR_AR ||
         tinfo->ssi_code == BUS_MCEERR_AO)) {
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    /* Read from the const source, not the destination being written;
     * equivalent for the in-place call, but honours the prototype
     * (the original read tinfo->ssi_errno here). */
    tinfo->ssi_errno = tswap32(info->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
/* fd-translator callback: byte-swap every signalfd_siginfo record in a
 * buffer read() from a signalfd.  The conversion is done in place;
 * len is expected to be a multiple of the record size. */
static abi_long host_to_target_data_signalfd(void *buf, size_t len)
{
    int i;

    for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
        host_to_target_signalfd_siginfo(buf + i, buf + i);
    }

    return len;
}

/* Translator registered on every signalfd so read() data is converted. */
static TargetFdTrans target_signalfd_trans = {
    .host_to_target_data = host_to_target_data_signalfd,
};
/* Implement signalfd4(2): validate flags, convert the guest sigset and
 * flags to host format, create the signalfd, and register the siginfo
 * translator on the new fd so reads are converted for the guest. */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    /* Kernel accepts only these two flags; anything else is EINVAL. */
    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
7187 #endif
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same. */
int host_to_target_waitstatus(int status)
{
    int target_status = status;

    if (WIFSIGNALED(status)) {
        /* Terminating signal occupies the low 7 bits. */
        target_status = host_to_target_signal(WTERMSIG(status))
                        | (status & ~0x7f);
    } else if (WIFSTOPPED(status)) {
        /* Stop signal occupies bits 8-15. */
        target_status = (host_to_target_signal(WSTOPSIG(status)) << 8)
                        | (status & 0xff);
    }
    return target_status;
}
/* Fake /proc/self/cmdline for the guest: stream the host's cmdline
 * into fd, dropping the first NUL-terminated word (the qemu binary
 * path) so the guest sees its own argv[0] first.  Returns 0 on
 * success, -1 with errno set on read/write failure. */
static int open_self_cmdline(void *cpu_env, int fd)
{
    int fd_orig = -1;
    bool word_skipped = false;

    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;
    }

    while (true) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            /* Preserve read()'s errno across the close(). */
            int e = errno;
            fd_orig = close(fd_orig);
            errno = e;
            return -1;
        } else if (nb_read == 0) {
            break;
        }

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command. */
            cp_buf = memchr(buf, 0, nb_read);
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;
            }
        }

        if (word_skipped) {
            if (write(fd, cp_buf, nb_read) != nb_read) {
                int e = errno;
                close(fd_orig);
                errno = e;
                return -1;
            }
        }
    }

    return close(fd_orig);
}
7253 static int open_self_maps(void *cpu_env, int fd)
7255 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7256 TaskState *ts = cpu->opaque;
7257 FILE *fp;
7258 char *line = NULL;
7259 size_t len = 0;
7260 ssize_t read;
7262 fp = fopen("/proc/self/maps", "r");
7263 if (fp == NULL) {
7264 return -1;
7267 while ((read = getline(&line, &len, fp)) != -1) {
7268 int fields, dev_maj, dev_min, inode;
7269 uint64_t min, max, offset;
7270 char flag_r, flag_w, flag_x, flag_p;
7271 char path[512] = "";
7272 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
7273 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
7274 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
7276 if ((fields < 10) || (fields > 11)) {
7277 continue;
7279 if (h2g_valid(min)) {
7280 int flags = page_get_flags(h2g(min));
7281 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
7282 if (page_check_range(h2g(min), max - min, flags) == -1) {
7283 continue;
7285 if (h2g(min) == ts->info->stack_limit) {
7286 pstrcpy(path, sizeof(path), " [stack]");
7288 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
7289 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7290 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7291 flag_x, flag_p, offset, dev_maj, dev_min, inode,
7292 path[0] ? " " : "", path);
7296 free(line);
7297 fclose(fp);
7299 return 0;
/* Fake /proc/self/stat for the guest: emit the 44 space-separated
 * fields, filling in only pid (0), comm (1) and startstack (27) with
 * real values; every other field is reported as 0.  Returns 0 on
 * success, -1 on a short write. */
static int open_self_stat(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong start_stack = ts->info->start_stack;
    int i;

    for (i = 0; i < 44; i++) {
        char buf[128];
        int len;
        uint64_t val = 0;

        if (i == 0) {
            /* pid */
            val = getpid();
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else if (i == 1) {
            /* app name */
            snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
        } else if (i == 27) {
            /* stack bottom */
            val = start_stack;
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else {
            /* for the rest, there is MasterCard */
            snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
        }

        len = strlen(buf);
        if (write(fd, buf, len) != len) {
            return -1;
        }
    }

    return 0;
}
/* Fake /proc/self/auxv for the guest: copy the auxiliary vector saved
 * at exec time from the guest stack into fd.  Always returns 0; a
 * failed lock or short write leaves the file partially written. */
static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        /* NOTE(review): ptr and len have been advanced/decremented by
         * the loop, so this does not unlock with the values returned
         * by lock_user — harmless for a read-only lock, but verify
         * against the DEBUG_REMAP bounce-buffer path. */
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
/* Return 1 if filename names the given entry of this process's /proc
 * directory — "/proc/self/<entry>" or "/proc/<own-pid>/<entry>" —
 * and 0 otherwise. */
static int is_proc_myself(const char *filename, const char *entry)
{
    if (strncmp(filename, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    filename += strlen("/proc/");

    if (strncmp(filename, "self/", strlen("self/")) == 0) {
        filename += strlen("self/");
    } else if (*filename >= '1' && *filename <= '9') {
        /* Numeric directory: accept only our own pid. */
        char myself[80];
        snprintf(myself, sizeof(myself), "%d/", getpid());
        if (strncmp(filename, myself, strlen(myself)) != 0) {
            return 0;
        }
        filename += strlen(myself);
    } else {
        return 0;
    }

    return strcmp(filename, entry) == 0;
}
7393 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-match comparator used by the fake-open table for absolute
 * /proc paths (as opposed to the per-process is_proc_myself). */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
7399 static int open_net_route(void *cpu_env, int fd)
7401 FILE *fp;
7402 char *line = NULL;
7403 size_t len = 0;
7404 ssize_t read;
7406 fp = fopen("/proc/net/route", "r");
7407 if (fp == NULL) {
7408 return -1;
7411 /* read header */
7413 read = getline(&line, &len, fp);
7414 dprintf(fd, "%s", line);
7416 /* read routes */
7418 while ((read = getline(&line, &len, fp)) != -1) {
7419 char iface[16];
7420 uint32_t dest, gw, mask;
7421 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7422 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7423 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7424 &mask, &mtu, &window, &irtt);
7425 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7426 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7427 metric, tswap32(mask), mtu, window, irtt);
7430 free(line);
7431 fclose(fp);
7433 return 0;
7435 #endif
/* Common open/openat path for the guest.  Intercepts a small set of
 * /proc files whose contents must be faked (host values would leak
 * through) by synthesizing them into an unlinked temp file; everything
 * else falls through to the host openat(). */
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;                       /* entry or full path */
        int (*fill)(void *cpu_env, int fd);         /* writes fake contents */
        int (*cmp)(const char *s1, const char *s2); /* path matcher */
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    if (is_proc_myself(pathname, "exe")) {
        /* Opening our own executable: reuse the fd the kernel gave us
         * at exec time if available, else open the recorded exec path. */
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* Unlink immediately: the fd keeps the anonymous file alive. */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        /* Rewind so the guest reads from the start. */
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
/* Guest-visible timer ids carry this magic tag in their upper 16 bits
 * so stale/forged ids can be rejected. */
#define TIMER_MAGIC 0x0caf0000
#define TIMER_MAGIC_MASK 0xffff0000

/* Convert QEMU provided timer ID back to internal 16bit index format */
static target_timer_t get_timer_id(abi_long arg)
{
    target_timer_t timerid = arg;

    /* Reject ids whose upper half is not the QEMU magic tag. */
    if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
        return -TARGET_EINVAL;
    }

    timerid &= 0xffff;

    /* Index must lie inside the static g_posix_timers[] table. */
    if (timerid >= ARRAY_SIZE(g_posix_timers)) {
        return -TARGET_EINVAL;
    }

    return timerid;
}
7518 /* do_syscall() should always have a single exit point at the end so
7519 that actions, such as logging of syscall results, can be performed.
7520 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7521 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
7522 abi_long arg2, abi_long arg3, abi_long arg4,
7523 abi_long arg5, abi_long arg6, abi_long arg7,
7524 abi_long arg8)
7526 CPUState *cpu = ENV_GET_CPU(cpu_env);
7527 abi_long ret;
7528 struct stat st;
7529 struct statfs stfs;
7530 void *p;
7532 #if defined(DEBUG_ERESTARTSYS)
7533 /* Debug-only code for exercising the syscall-restart code paths
7534 * in the per-architecture cpu main loops: restart every syscall
7535 * the guest makes once before letting it through.
7538 static int flag;
7540 flag = !flag;
7541 if (flag) {
7542 return -TARGET_ERESTARTSYS;
7545 #endif
7547 #ifdef DEBUG
7548 gemu_log("syscall %d", num);
7549 #endif
7550 trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
7551 if(do_strace)
7552 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
7554 switch(num) {
7555 case TARGET_NR_exit:
7556 /* In old applications this may be used to implement _exit(2).
7557 However in threaded applictions it is used for thread termination,
7558 and _exit_group is used for application termination.
7559 Do thread termination if we have more then one thread. */
7561 if (block_signals()) {
7562 ret = -TARGET_ERESTARTSYS;
7563 break;
7566 cpu_list_lock();
7568 if (CPU_NEXT(first_cpu)) {
7569 TaskState *ts;
7571 /* Remove the CPU from the list. */
7572 QTAILQ_REMOVE(&cpus, cpu, node);
7574 cpu_list_unlock();
7576 ts = cpu->opaque;
7577 if (ts->child_tidptr) {
7578 put_user_u32(0, ts->child_tidptr);
7579 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7580 NULL, NULL, 0);
7582 thread_cpu = NULL;
7583 object_unref(OBJECT(cpu));
7584 g_free(ts);
7585 rcu_unregister_thread();
7586 pthread_exit(NULL);
7589 cpu_list_unlock();
7590 #ifdef TARGET_GPROF
7591 _mcleanup();
7592 #endif
7593 gdb_exit(cpu_env, arg1);
7594 _exit(arg1);
7595 ret = 0; /* avoid warning */
7596 break;
7597 case TARGET_NR_read:
7598 if (arg3 == 0)
7599 ret = 0;
7600 else {
7601 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7602 goto efault;
7603 ret = get_errno(safe_read(arg1, p, arg3));
7604 if (ret >= 0 &&
7605 fd_trans_host_to_target_data(arg1)) {
7606 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7608 unlock_user(p, arg2, ret);
7610 break;
7611 case TARGET_NR_write:
7612 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7613 goto efault;
7614 ret = get_errno(safe_write(arg1, p, arg3));
7615 unlock_user(p, arg2, 0);
7616 break;
7617 #ifdef TARGET_NR_open
7618 case TARGET_NR_open:
7619 if (!(p = lock_user_string(arg1)))
7620 goto efault;
7621 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7622 target_to_host_bitmask(arg2, fcntl_flags_tbl),
7623 arg3));
7624 fd_trans_unregister(ret);
7625 unlock_user(p, arg1, 0);
7626 break;
7627 #endif
7628 case TARGET_NR_openat:
7629 if (!(p = lock_user_string(arg2)))
7630 goto efault;
7631 ret = get_errno(do_openat(cpu_env, arg1, p,
7632 target_to_host_bitmask(arg3, fcntl_flags_tbl),
7633 arg4));
7634 fd_trans_unregister(ret);
7635 unlock_user(p, arg2, 0);
7636 break;
7637 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7638 case TARGET_NR_name_to_handle_at:
7639 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7640 break;
7641 #endif
7642 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7643 case TARGET_NR_open_by_handle_at:
7644 ret = do_open_by_handle_at(arg1, arg2, arg3);
7645 fd_trans_unregister(ret);
7646 break;
7647 #endif
7648 case TARGET_NR_close:
7649 fd_trans_unregister(arg1);
7650 ret = get_errno(close(arg1));
7651 break;
7652 case TARGET_NR_brk:
7653 ret = do_brk(arg1);
7654 break;
7655 #ifdef TARGET_NR_fork
7656 case TARGET_NR_fork:
7657 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
7658 break;
7659 #endif
7660 #ifdef TARGET_NR_waitpid
7661 case TARGET_NR_waitpid:
7663 int status;
7664 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7665 if (!is_error(ret) && arg2 && ret
7666 && put_user_s32(host_to_target_waitstatus(status), arg2))
7667 goto efault;
7669 break;
7670 #endif
7671 #ifdef TARGET_NR_waitid
7672 case TARGET_NR_waitid:
7674 siginfo_t info;
7675 info.si_pid = 0;
7676 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7677 if (!is_error(ret) && arg3 && info.si_pid != 0) {
7678 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7679 goto efault;
7680 host_to_target_siginfo(p, &info);
7681 unlock_user(p, arg3, sizeof(target_siginfo_t));
7684 break;
7685 #endif
7686 #ifdef TARGET_NR_creat /* not on alpha */
7687 case TARGET_NR_creat:
7688 if (!(p = lock_user_string(arg1)))
7689 goto efault;
7690 ret = get_errno(creat(p, arg2));
7691 fd_trans_unregister(ret);
7692 unlock_user(p, arg1, 0);
7693 break;
7694 #endif
7695 #ifdef TARGET_NR_link
7696 case TARGET_NR_link:
7698 void * p2;
7699 p = lock_user_string(arg1);
7700 p2 = lock_user_string(arg2);
7701 if (!p || !p2)
7702 ret = -TARGET_EFAULT;
7703 else
7704 ret = get_errno(link(p, p2));
7705 unlock_user(p2, arg2, 0);
7706 unlock_user(p, arg1, 0);
7708 break;
7709 #endif
7710 #if defined(TARGET_NR_linkat)
7711 case TARGET_NR_linkat:
7713 void * p2 = NULL;
7714 if (!arg2 || !arg4)
7715 goto efault;
7716 p = lock_user_string(arg2);
7717 p2 = lock_user_string(arg4);
7718 if (!p || !p2)
7719 ret = -TARGET_EFAULT;
7720 else
7721 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7722 unlock_user(p, arg2, 0);
7723 unlock_user(p2, arg4, 0);
7725 break;
7726 #endif
7727 #ifdef TARGET_NR_unlink
7728 case TARGET_NR_unlink:
7729 if (!(p = lock_user_string(arg1)))
7730 goto efault;
7731 ret = get_errno(unlink(p));
7732 unlock_user(p, arg1, 0);
7733 break;
7734 #endif
7735 #if defined(TARGET_NR_unlinkat)
7736 case TARGET_NR_unlinkat:
7737 if (!(p = lock_user_string(arg2)))
7738 goto efault;
7739 ret = get_errno(unlinkat(arg1, p, arg3));
7740 unlock_user(p, arg2, 0);
7741 break;
7742 #endif
7743 case TARGET_NR_execve:
7745 char **argp, **envp;
7746 int argc, envc;
7747 abi_ulong gp;
7748 abi_ulong guest_argp;
7749 abi_ulong guest_envp;
7750 abi_ulong addr;
7751 char **q;
7752 int total_size = 0;
7754 argc = 0;
7755 guest_argp = arg2;
7756 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7757 if (get_user_ual(addr, gp))
7758 goto efault;
7759 if (!addr)
7760 break;
7761 argc++;
7763 envc = 0;
7764 guest_envp = arg3;
7765 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7766 if (get_user_ual(addr, gp))
7767 goto efault;
7768 if (!addr)
7769 break;
7770 envc++;
7773 argp = alloca((argc + 1) * sizeof(void *));
7774 envp = alloca((envc + 1) * sizeof(void *));
7776 for (gp = guest_argp, q = argp; gp;
7777 gp += sizeof(abi_ulong), q++) {
7778 if (get_user_ual(addr, gp))
7779 goto execve_efault;
7780 if (!addr)
7781 break;
7782 if (!(*q = lock_user_string(addr)))
7783 goto execve_efault;
7784 total_size += strlen(*q) + 1;
7786 *q = NULL;
7788 for (gp = guest_envp, q = envp; gp;
7789 gp += sizeof(abi_ulong), q++) {
7790 if (get_user_ual(addr, gp))
7791 goto execve_efault;
7792 if (!addr)
7793 break;
7794 if (!(*q = lock_user_string(addr)))
7795 goto execve_efault;
7796 total_size += strlen(*q) + 1;
7798 *q = NULL;
7800 if (!(p = lock_user_string(arg1)))
7801 goto execve_efault;
7802 /* Although execve() is not an interruptible syscall it is
7803 * a special case where we must use the safe_syscall wrapper:
7804 * if we allow a signal to happen before we make the host
7805 * syscall then we will 'lose' it, because at the point of
7806 * execve the process leaves QEMU's control. So we use the
7807 * safe syscall wrapper to ensure that we either take the
7808 * signal as a guest signal, or else it does not happen
7809 * before the execve completes and makes it the other
7810 * program's problem.
7812 ret = get_errno(safe_execve(p, argp, envp));
7813 unlock_user(p, arg1, 0);
7815 goto execve_end;
7817 execve_efault:
7818 ret = -TARGET_EFAULT;
7820 execve_end:
7821 for (gp = guest_argp, q = argp; *q;
7822 gp += sizeof(abi_ulong), q++) {
7823 if (get_user_ual(addr, gp)
7824 || !addr)
7825 break;
7826 unlock_user(*q, addr, 0);
7828 for (gp = guest_envp, q = envp; *q;
7829 gp += sizeof(abi_ulong), q++) {
7830 if (get_user_ual(addr, gp)
7831 || !addr)
7832 break;
7833 unlock_user(*q, addr, 0);
7836 break;
7837 case TARGET_NR_chdir:
7838 if (!(p = lock_user_string(arg1)))
7839 goto efault;
7840 ret = get_errno(chdir(p));
7841 unlock_user(p, arg1, 0);
7842 break;
7843 #ifdef TARGET_NR_time
7844 case TARGET_NR_time:
7846 time_t host_time;
7847 ret = get_errno(time(&host_time));
7848 if (!is_error(ret)
7849 && arg1
7850 && put_user_sal(host_time, arg1))
7851 goto efault;
7853 break;
7854 #endif
7855 #ifdef TARGET_NR_mknod
7856 case TARGET_NR_mknod:
7857 if (!(p = lock_user_string(arg1)))
7858 goto efault;
7859 ret = get_errno(mknod(p, arg2, arg3));
7860 unlock_user(p, arg1, 0);
7861 break;
7862 #endif
7863 #if defined(TARGET_NR_mknodat)
7864 case TARGET_NR_mknodat:
7865 if (!(p = lock_user_string(arg2)))
7866 goto efault;
7867 ret = get_errno(mknodat(arg1, p, arg3, arg4));
7868 unlock_user(p, arg2, 0);
7869 break;
7870 #endif
7871 #ifdef TARGET_NR_chmod
7872 case TARGET_NR_chmod:
7873 if (!(p = lock_user_string(arg1)))
7874 goto efault;
7875 ret = get_errno(chmod(p, arg2));
7876 unlock_user(p, arg1, 0);
7877 break;
7878 #endif
7879 #ifdef TARGET_NR_break
7880 case TARGET_NR_break:
7881 goto unimplemented;
7882 #endif
7883 #ifdef TARGET_NR_oldstat
7884 case TARGET_NR_oldstat:
7885 goto unimplemented;
7886 #endif
7887 case TARGET_NR_lseek:
7888 ret = get_errno(lseek(arg1, arg2, arg3));
7889 break;
7890 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7891 /* Alpha specific */
7892 case TARGET_NR_getxpid:
7893 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7894 ret = get_errno(getpid());
7895 break;
7896 #endif
7897 #ifdef TARGET_NR_getpid
7898 case TARGET_NR_getpid:
7899 ret = get_errno(getpid());
7900 break;
7901 #endif
7902 case TARGET_NR_mount:
7904 /* need to look at the data field */
7905 void *p2, *p3;
7907 if (arg1) {
7908 p = lock_user_string(arg1);
7909 if (!p) {
7910 goto efault;
7912 } else {
7913 p = NULL;
7916 p2 = lock_user_string(arg2);
7917 if (!p2) {
7918 if (arg1) {
7919 unlock_user(p, arg1, 0);
7921 goto efault;
7924 if (arg3) {
7925 p3 = lock_user_string(arg3);
7926 if (!p3) {
7927 if (arg1) {
7928 unlock_user(p, arg1, 0);
7930 unlock_user(p2, arg2, 0);
7931 goto efault;
7933 } else {
7934 p3 = NULL;
7937 /* FIXME - arg5 should be locked, but it isn't clear how to
7938 * do that since it's not guaranteed to be a NULL-terminated
7939 * string.
7941 if (!arg5) {
7942 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7943 } else {
7944 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7946 ret = get_errno(ret);
7948 if (arg1) {
7949 unlock_user(p, arg1, 0);
7951 unlock_user(p2, arg2, 0);
7952 if (arg3) {
7953 unlock_user(p3, arg3, 0);
7956 break;
7957 #ifdef TARGET_NR_umount
7958 case TARGET_NR_umount:
7959 if (!(p = lock_user_string(arg1)))
7960 goto efault;
7961 ret = get_errno(umount(p));
7962 unlock_user(p, arg1, 0);
7963 break;
7964 #endif
7965 #ifdef TARGET_NR_stime /* not on alpha */
7966 case TARGET_NR_stime:
7968 time_t host_time;
7969 if (get_user_sal(host_time, arg1))
7970 goto efault;
7971 ret = get_errno(stime(&host_time));
7973 break;
7974 #endif
7975 case TARGET_NR_ptrace:
7976 goto unimplemented;
7977 #ifdef TARGET_NR_alarm /* not on alpha */
7978 case TARGET_NR_alarm:
7979 ret = alarm(arg1);
7980 break;
7981 #endif
7982 #ifdef TARGET_NR_oldfstat
7983 case TARGET_NR_oldfstat:
7984 goto unimplemented;
7985 #endif
7986 #ifdef TARGET_NR_pause /* not on alpha */
7987 case TARGET_NR_pause:
7988 if (!block_signals()) {
7989 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
7991 ret = -TARGET_EINTR;
7992 break;
7993 #endif
7994 #ifdef TARGET_NR_utime
7995 case TARGET_NR_utime:
7997 struct utimbuf tbuf, *host_tbuf;
7998 struct target_utimbuf *target_tbuf;
7999 if (arg2) {
8000 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8001 goto efault;
8002 tbuf.actime = tswapal(target_tbuf->actime);
8003 tbuf.modtime = tswapal(target_tbuf->modtime);
8004 unlock_user_struct(target_tbuf, arg2, 0);
8005 host_tbuf = &tbuf;
8006 } else {
8007 host_tbuf = NULL;
8009 if (!(p = lock_user_string(arg1)))
8010 goto efault;
8011 ret = get_errno(utime(p, host_tbuf));
8012 unlock_user(p, arg1, 0);
8014 break;
8015 #endif
8016 #ifdef TARGET_NR_utimes
8017 case TARGET_NR_utimes:
8019 struct timeval *tvp, tv[2];
8020 if (arg2) {
8021 if (copy_from_user_timeval(&tv[0], arg2)
8022 || copy_from_user_timeval(&tv[1],
8023 arg2 + sizeof(struct target_timeval)))
8024 goto efault;
8025 tvp = tv;
8026 } else {
8027 tvp = NULL;
8029 if (!(p = lock_user_string(arg1)))
8030 goto efault;
8031 ret = get_errno(utimes(p, tvp));
8032 unlock_user(p, arg1, 0);
8034 break;
8035 #endif
8036 #if defined(TARGET_NR_futimesat)
8037 case TARGET_NR_futimesat:
8039 struct timeval *tvp, tv[2];
8040 if (arg3) {
8041 if (copy_from_user_timeval(&tv[0], arg3)
8042 || copy_from_user_timeval(&tv[1],
8043 arg3 + sizeof(struct target_timeval)))
8044 goto efault;
8045 tvp = tv;
8046 } else {
8047 tvp = NULL;
8049 if (!(p = lock_user_string(arg2)))
8050 goto efault;
8051 ret = get_errno(futimesat(arg1, path(p), tvp));
8052 unlock_user(p, arg2, 0);
8054 break;
8055 #endif
8056 #ifdef TARGET_NR_stty
8057 case TARGET_NR_stty:
8058 goto unimplemented;
8059 #endif
8060 #ifdef TARGET_NR_gtty
8061 case TARGET_NR_gtty:
8062 goto unimplemented;
8063 #endif
8064 #ifdef TARGET_NR_access
8065 case TARGET_NR_access:
8066 if (!(p = lock_user_string(arg1)))
8067 goto efault;
8068 ret = get_errno(access(path(p), arg2));
8069 unlock_user(p, arg1, 0);
8070 break;
8071 #endif
8072 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8073 case TARGET_NR_faccessat:
8074 if (!(p = lock_user_string(arg2)))
8075 goto efault;
8076 ret = get_errno(faccessat(arg1, p, arg3, 0));
8077 unlock_user(p, arg2, 0);
8078 break;
8079 #endif
8080 #ifdef TARGET_NR_nice /* not on alpha */
8081 case TARGET_NR_nice:
8082 ret = get_errno(nice(arg1));
8083 break;
8084 #endif
8085 #ifdef TARGET_NR_ftime
8086 case TARGET_NR_ftime:
8087 goto unimplemented;
8088 #endif
8089 case TARGET_NR_sync:
8090 sync();
8091 ret = 0;
8092 break;
8093 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8094 case TARGET_NR_syncfs:
8095 ret = get_errno(syncfs(arg1));
8096 break;
8097 #endif
8098 case TARGET_NR_kill:
8099 ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8100 break;
8101 #ifdef TARGET_NR_rename
8102 case TARGET_NR_rename:
8104 void *p2;
8105 p = lock_user_string(arg1);
8106 p2 = lock_user_string(arg2);
8107 if (!p || !p2)
8108 ret = -TARGET_EFAULT;
8109 else
8110 ret = get_errno(rename(p, p2));
8111 unlock_user(p2, arg2, 0);
8112 unlock_user(p, arg1, 0);
8114 break;
8115 #endif
8116 #if defined(TARGET_NR_renameat)
8117 case TARGET_NR_renameat:
8119 void *p2;
8120 p = lock_user_string(arg2);
8121 p2 = lock_user_string(arg4);
8122 if (!p || !p2)
8123 ret = -TARGET_EFAULT;
8124 else
8125 ret = get_errno(renameat(arg1, p, arg3, p2));
8126 unlock_user(p2, arg4, 0);
8127 unlock_user(p, arg2, 0);
8129 break;
8130 #endif
8131 #ifdef TARGET_NR_mkdir
8132 case TARGET_NR_mkdir:
8133 if (!(p = lock_user_string(arg1)))
8134 goto efault;
8135 ret = get_errno(mkdir(p, arg2));
8136 unlock_user(p, arg1, 0);
8137 break;
8138 #endif
8139 #if defined(TARGET_NR_mkdirat)
8140 case TARGET_NR_mkdirat:
8141 if (!(p = lock_user_string(arg2)))
8142 goto efault;
8143 ret = get_errno(mkdirat(arg1, p, arg3));
8144 unlock_user(p, arg2, 0);
8145 break;
8146 #endif
8147 #ifdef TARGET_NR_rmdir
8148 case TARGET_NR_rmdir:
8149 if (!(p = lock_user_string(arg1)))
8150 goto efault;
8151 ret = get_errno(rmdir(p));
8152 unlock_user(p, arg1, 0);
8153 break;
8154 #endif
8155 case TARGET_NR_dup:
8156 ret = get_errno(dup(arg1));
8157 if (ret >= 0) {
8158 fd_trans_dup(arg1, ret);
8160 break;
8161 #ifdef TARGET_NR_pipe
8162 case TARGET_NR_pipe:
8163 ret = do_pipe(cpu_env, arg1, 0, 0);
8164 break;
8165 #endif
8166 #ifdef TARGET_NR_pipe2
8167 case TARGET_NR_pipe2:
8168 ret = do_pipe(cpu_env, arg1,
8169 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8170 break;
8171 #endif
8172 case TARGET_NR_times:
8174 struct target_tms *tmsp;
8175 struct tms tms;
8176 ret = get_errno(times(&tms));
8177 if (arg1) {
8178 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8179 if (!tmsp)
8180 goto efault;
8181 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8182 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8183 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8184 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8186 if (!is_error(ret))
8187 ret = host_to_target_clock_t(ret);
8189 break;
8190 #ifdef TARGET_NR_prof
8191 case TARGET_NR_prof:
8192 goto unimplemented;
8193 #endif
8194 #ifdef TARGET_NR_signal
8195 case TARGET_NR_signal:
8196 goto unimplemented;
8197 #endif
8198 case TARGET_NR_acct:
8199 if (arg1 == 0) {
8200 ret = get_errno(acct(NULL));
8201 } else {
8202 if (!(p = lock_user_string(arg1)))
8203 goto efault;
8204 ret = get_errno(acct(path(p)));
8205 unlock_user(p, arg1, 0);
8207 break;
8208 #ifdef TARGET_NR_umount2
8209 case TARGET_NR_umount2:
8210 if (!(p = lock_user_string(arg1)))
8211 goto efault;
8212 ret = get_errno(umount2(p, arg2));
8213 unlock_user(p, arg1, 0);
8214 break;
8215 #endif
8216 #ifdef TARGET_NR_lock
8217 case TARGET_NR_lock:
8218 goto unimplemented;
8219 #endif
8220 case TARGET_NR_ioctl:
8221 ret = do_ioctl(arg1, arg2, arg3);
8222 break;
8223 case TARGET_NR_fcntl:
8224 ret = do_fcntl(arg1, arg2, arg3);
8225 break;
8226 #ifdef TARGET_NR_mpx
8227 case TARGET_NR_mpx:
8228 goto unimplemented;
8229 #endif
8230 case TARGET_NR_setpgid:
8231 ret = get_errno(setpgid(arg1, arg2));
8232 break;
8233 #ifdef TARGET_NR_ulimit
8234 case TARGET_NR_ulimit:
8235 goto unimplemented;
8236 #endif
8237 #ifdef TARGET_NR_oldolduname
8238 case TARGET_NR_oldolduname:
8239 goto unimplemented;
8240 #endif
8241 case TARGET_NR_umask:
8242 ret = get_errno(umask(arg1));
8243 break;
8244 case TARGET_NR_chroot:
8245 if (!(p = lock_user_string(arg1)))
8246 goto efault;
8247 ret = get_errno(chroot(p));
8248 unlock_user(p, arg1, 0);
8249 break;
8250 #ifdef TARGET_NR_ustat
8251 case TARGET_NR_ustat:
8252 goto unimplemented;
8253 #endif
8254 #ifdef TARGET_NR_dup2
8255 case TARGET_NR_dup2:
8256 ret = get_errno(dup2(arg1, arg2));
8257 if (ret >= 0) {
8258 fd_trans_dup(arg1, arg2);
8260 break;
8261 #endif
8262 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8263 case TARGET_NR_dup3:
8264 ret = get_errno(dup3(arg1, arg2, arg3));
8265 if (ret >= 0) {
8266 fd_trans_dup(arg1, arg2);
8268 break;
8269 #endif
8270 #ifdef TARGET_NR_getppid /* not on alpha */
8271 case TARGET_NR_getppid:
8272 ret = get_errno(getppid());
8273 break;
8274 #endif
8275 #ifdef TARGET_NR_getpgrp
8276 case TARGET_NR_getpgrp:
8277 ret = get_errno(getpgrp());
8278 break;
8279 #endif
8280 case TARGET_NR_setsid:
8281 ret = get_errno(setsid());
8282 break;
8283 #ifdef TARGET_NR_sigaction
8284 case TARGET_NR_sigaction:
8286 #if defined(TARGET_ALPHA)
8287 struct target_sigaction act, oact, *pact = 0;
8288 struct target_old_sigaction *old_act;
8289 if (arg2) {
8290 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8291 goto efault;
8292 act._sa_handler = old_act->_sa_handler;
8293 target_siginitset(&act.sa_mask, old_act->sa_mask);
8294 act.sa_flags = old_act->sa_flags;
8295 act.sa_restorer = 0;
8296 unlock_user_struct(old_act, arg2, 0);
8297 pact = &act;
8299 ret = get_errno(do_sigaction(arg1, pact, &oact));
8300 if (!is_error(ret) && arg3) {
8301 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8302 goto efault;
8303 old_act->_sa_handler = oact._sa_handler;
8304 old_act->sa_mask = oact.sa_mask.sig[0];
8305 old_act->sa_flags = oact.sa_flags;
8306 unlock_user_struct(old_act, arg3, 1);
8308 #elif defined(TARGET_MIPS)
8309 struct target_sigaction act, oact, *pact, *old_act;
8311 if (arg2) {
8312 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8313 goto efault;
8314 act._sa_handler = old_act->_sa_handler;
8315 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8316 act.sa_flags = old_act->sa_flags;
8317 unlock_user_struct(old_act, arg2, 0);
8318 pact = &act;
8319 } else {
8320 pact = NULL;
8323 ret = get_errno(do_sigaction(arg1, pact, &oact));
8325 if (!is_error(ret) && arg3) {
8326 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8327 goto efault;
8328 old_act->_sa_handler = oact._sa_handler;
8329 old_act->sa_flags = oact.sa_flags;
8330 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8331 old_act->sa_mask.sig[1] = 0;
8332 old_act->sa_mask.sig[2] = 0;
8333 old_act->sa_mask.sig[3] = 0;
8334 unlock_user_struct(old_act, arg3, 1);
8336 #else
8337 struct target_old_sigaction *old_act;
8338 struct target_sigaction act, oact, *pact;
8339 if (arg2) {
8340 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8341 goto efault;
8342 act._sa_handler = old_act->_sa_handler;
8343 target_siginitset(&act.sa_mask, old_act->sa_mask);
8344 act.sa_flags = old_act->sa_flags;
8345 act.sa_restorer = old_act->sa_restorer;
8346 unlock_user_struct(old_act, arg2, 0);
8347 pact = &act;
8348 } else {
8349 pact = NULL;
8351 ret = get_errno(do_sigaction(arg1, pact, &oact));
8352 if (!is_error(ret) && arg3) {
8353 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8354 goto efault;
8355 old_act->_sa_handler = oact._sa_handler;
8356 old_act->sa_mask = oact.sa_mask.sig[0];
8357 old_act->sa_flags = oact.sa_flags;
8358 old_act->sa_restorer = oact.sa_restorer;
8359 unlock_user_struct(old_act, arg3, 1);
8361 #endif
8363 break;
8364 #endif
8365 case TARGET_NR_rt_sigaction:
8367 #if defined(TARGET_ALPHA)
8368 struct target_sigaction act, oact, *pact = 0;
8369 struct target_rt_sigaction *rt_act;
8371 if (arg4 != sizeof(target_sigset_t)) {
8372 ret = -TARGET_EINVAL;
8373 break;
8375 if (arg2) {
8376 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8377 goto efault;
8378 act._sa_handler = rt_act->_sa_handler;
8379 act.sa_mask = rt_act->sa_mask;
8380 act.sa_flags = rt_act->sa_flags;
8381 act.sa_restorer = arg5;
8382 unlock_user_struct(rt_act, arg2, 0);
8383 pact = &act;
8385 ret = get_errno(do_sigaction(arg1, pact, &oact));
8386 if (!is_error(ret) && arg3) {
8387 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8388 goto efault;
8389 rt_act->_sa_handler = oact._sa_handler;
8390 rt_act->sa_mask = oact.sa_mask;
8391 rt_act->sa_flags = oact.sa_flags;
8392 unlock_user_struct(rt_act, arg3, 1);
8394 #else
8395 struct target_sigaction *act;
8396 struct target_sigaction *oact;
8398 if (arg4 != sizeof(target_sigset_t)) {
8399 ret = -TARGET_EINVAL;
8400 break;
8402 if (arg2) {
8403 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
8404 goto efault;
8405 } else
8406 act = NULL;
8407 if (arg3) {
8408 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8409 ret = -TARGET_EFAULT;
8410 goto rt_sigaction_fail;
8412 } else
8413 oact = NULL;
8414 ret = get_errno(do_sigaction(arg1, act, oact));
8415 rt_sigaction_fail:
8416 if (act)
8417 unlock_user_struct(act, arg2, 0);
8418 if (oact)
8419 unlock_user_struct(oact, arg3, 1);
8420 #endif
8422 break;
8423 #ifdef TARGET_NR_sgetmask /* not on alpha */
8424 case TARGET_NR_sgetmask:
8426 sigset_t cur_set;
8427 abi_ulong target_set;
8428 ret = do_sigprocmask(0, NULL, &cur_set);
8429 if (!ret) {
8430 host_to_target_old_sigset(&target_set, &cur_set);
8431 ret = target_set;
8434 break;
8435 #endif
8436 #ifdef TARGET_NR_ssetmask /* not on alpha */
8437 case TARGET_NR_ssetmask:
8439 sigset_t set, oset, cur_set;
8440 abi_ulong target_set = arg1;
8441 /* We only have one word of the new mask so we must read
8442 * the rest of it with do_sigprocmask() and OR in this word.
8443 * We are guaranteed that a do_sigprocmask() that only queries
8444 * the signal mask will not fail.
8446 ret = do_sigprocmask(0, NULL, &cur_set);
8447 assert(!ret);
8448 target_to_host_old_sigset(&set, &target_set);
8449 sigorset(&set, &set, &cur_set);
8450 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8451 if (!ret) {
8452 host_to_target_old_sigset(&target_set, &oset);
8453 ret = target_set;
8456 break;
8457 #endif
8458 #ifdef TARGET_NR_sigprocmask
8459 case TARGET_NR_sigprocmask:
8461 #if defined(TARGET_ALPHA)
8462 sigset_t set, oldset;
8463 abi_ulong mask;
8464 int how;
8466 switch (arg1) {
8467 case TARGET_SIG_BLOCK:
8468 how = SIG_BLOCK;
8469 break;
8470 case TARGET_SIG_UNBLOCK:
8471 how = SIG_UNBLOCK;
8472 break;
8473 case TARGET_SIG_SETMASK:
8474 how = SIG_SETMASK;
8475 break;
8476 default:
8477 ret = -TARGET_EINVAL;
8478 goto fail;
8480 mask = arg2;
8481 target_to_host_old_sigset(&set, &mask);
8483 ret = do_sigprocmask(how, &set, &oldset);
8484 if (!is_error(ret)) {
8485 host_to_target_old_sigset(&mask, &oldset);
8486 ret = mask;
8487 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8489 #else
8490 sigset_t set, oldset, *set_ptr;
8491 int how;
8493 if (arg2) {
8494 switch (arg1) {
8495 case TARGET_SIG_BLOCK:
8496 how = SIG_BLOCK;
8497 break;
8498 case TARGET_SIG_UNBLOCK:
8499 how = SIG_UNBLOCK;
8500 break;
8501 case TARGET_SIG_SETMASK:
8502 how = SIG_SETMASK;
8503 break;
8504 default:
8505 ret = -TARGET_EINVAL;
8506 goto fail;
8508 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8509 goto efault;
8510 target_to_host_old_sigset(&set, p);
8511 unlock_user(p, arg2, 0);
8512 set_ptr = &set;
8513 } else {
8514 how = 0;
8515 set_ptr = NULL;
8517 ret = do_sigprocmask(how, set_ptr, &oldset);
8518 if (!is_error(ret) && arg3) {
8519 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8520 goto efault;
8521 host_to_target_old_sigset(p, &oldset);
8522 unlock_user(p, arg3, sizeof(target_sigset_t));
8524 #endif
8526 break;
8527 #endif
8528 case TARGET_NR_rt_sigprocmask:
8530 int how = arg1;
8531 sigset_t set, oldset, *set_ptr;
8533 if (arg4 != sizeof(target_sigset_t)) {
8534 ret = -TARGET_EINVAL;
8535 break;
8538 if (arg2) {
8539 switch(how) {
8540 case TARGET_SIG_BLOCK:
8541 how = SIG_BLOCK;
8542 break;
8543 case TARGET_SIG_UNBLOCK:
8544 how = SIG_UNBLOCK;
8545 break;
8546 case TARGET_SIG_SETMASK:
8547 how = SIG_SETMASK;
8548 break;
8549 default:
8550 ret = -TARGET_EINVAL;
8551 goto fail;
8553 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8554 goto efault;
8555 target_to_host_sigset(&set, p);
8556 unlock_user(p, arg2, 0);
8557 set_ptr = &set;
8558 } else {
8559 how = 0;
8560 set_ptr = NULL;
8562 ret = do_sigprocmask(how, set_ptr, &oldset);
8563 if (!is_error(ret) && arg3) {
8564 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8565 goto efault;
8566 host_to_target_sigset(p, &oldset);
8567 unlock_user(p, arg3, sizeof(target_sigset_t));
8570 break;
8571 #ifdef TARGET_NR_sigpending
8572 case TARGET_NR_sigpending:
8574 sigset_t set;
8575 ret = get_errno(sigpending(&set));
8576 if (!is_error(ret)) {
8577 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8578 goto efault;
8579 host_to_target_old_sigset(p, &set);
8580 unlock_user(p, arg1, sizeof(target_sigset_t));
8583 break;
8584 #endif
8585 case TARGET_NR_rt_sigpending:
8587 sigset_t set;
8589 /* Yes, this check is >, not != like most. We follow the kernel's
8590 * logic and it does it like this because it implements
8591 * NR_sigpending through the same code path, and in that case
8592 * the old_sigset_t is smaller in size.
8594 if (arg2 > sizeof(target_sigset_t)) {
8595 ret = -TARGET_EINVAL;
8596 break;
8599 ret = get_errno(sigpending(&set));
8600 if (!is_error(ret)) {
8601 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8602 goto efault;
8603 host_to_target_sigset(p, &set);
8604 unlock_user(p, arg1, sizeof(target_sigset_t));
8607 break;
8608 #ifdef TARGET_NR_sigsuspend
8609 case TARGET_NR_sigsuspend:
8611 TaskState *ts = cpu->opaque;
8612 #if defined(TARGET_ALPHA)
8613 abi_ulong mask = arg1;
8614 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8615 #else
8616 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8617 goto efault;
8618 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8619 unlock_user(p, arg1, 0);
8620 #endif
8621 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8622 SIGSET_T_SIZE));
8623 if (ret != -TARGET_ERESTARTSYS) {
8624 ts->in_sigsuspend = 1;
8627 break;
8628 #endif
8629 case TARGET_NR_rt_sigsuspend:
8631 TaskState *ts = cpu->opaque;
8633 if (arg2 != sizeof(target_sigset_t)) {
8634 ret = -TARGET_EINVAL;
8635 break;
8637 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8638 goto efault;
8639 target_to_host_sigset(&ts->sigsuspend_mask, p);
8640 unlock_user(p, arg1, 0);
8641 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8642 SIGSET_T_SIZE));
8643 if (ret != -TARGET_ERESTARTSYS) {
8644 ts->in_sigsuspend = 1;
8647 break;
8648 case TARGET_NR_rt_sigtimedwait:
8650 sigset_t set;
8651 struct timespec uts, *puts;
8652 siginfo_t uinfo;
8654 if (arg4 != sizeof(target_sigset_t)) {
8655 ret = -TARGET_EINVAL;
8656 break;
8659 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8660 goto efault;
8661 target_to_host_sigset(&set, p);
8662 unlock_user(p, arg1, 0);
8663 if (arg3) {
8664 puts = &uts;
8665 target_to_host_timespec(puts, arg3);
8666 } else {
8667 puts = NULL;
8669 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8670 SIGSET_T_SIZE));
8671 if (!is_error(ret)) {
8672 if (arg2) {
8673 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8675 if (!p) {
8676 goto efault;
8678 host_to_target_siginfo(p, &uinfo);
8679 unlock_user(p, arg2, sizeof(target_siginfo_t));
8681 ret = host_to_target_signal(ret);
8684 break;
8685 case TARGET_NR_rt_sigqueueinfo:
8687 siginfo_t uinfo;
8689 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8690 if (!p) {
8691 goto efault;
8693 target_to_host_siginfo(&uinfo, p);
8694 unlock_user(p, arg1, 0);
8695 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8697 break;
8698 #ifdef TARGET_NR_sigreturn
8699 case TARGET_NR_sigreturn:
8700 if (block_signals()) {
8701 ret = -TARGET_ERESTARTSYS;
8702 } else {
8703 ret = do_sigreturn(cpu_env);
8705 break;
8706 #endif
8707 case TARGET_NR_rt_sigreturn:
8708 if (block_signals()) {
8709 ret = -TARGET_ERESTARTSYS;
8710 } else {
8711 ret = do_rt_sigreturn(cpu_env);
8713 break;
8714 case TARGET_NR_sethostname:
8715 if (!(p = lock_user_string(arg1)))
8716 goto efault;
8717 ret = get_errno(sethostname(p, arg2));
8718 unlock_user(p, arg1, 0);
8719 break;
8720 case TARGET_NR_setrlimit:
8722 int resource = target_to_host_resource(arg1);
8723 struct target_rlimit *target_rlim;
8724 struct rlimit rlim;
8725 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8726 goto efault;
8727 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8728 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8729 unlock_user_struct(target_rlim, arg2, 0);
8730 ret = get_errno(setrlimit(resource, &rlim));
8732 break;
8733 case TARGET_NR_getrlimit:
8735 int resource = target_to_host_resource(arg1);
8736 struct target_rlimit *target_rlim;
8737 struct rlimit rlim;
8739 ret = get_errno(getrlimit(resource, &rlim));
8740 if (!is_error(ret)) {
8741 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8742 goto efault;
8743 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8744 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8745 unlock_user_struct(target_rlim, arg2, 1);
8748 break;
8749 case TARGET_NR_getrusage:
8751 struct rusage rusage;
8752 ret = get_errno(getrusage(arg1, &rusage));
8753 if (!is_error(ret)) {
8754 ret = host_to_target_rusage(arg2, &rusage);
8757 break;
8758 case TARGET_NR_gettimeofday:
8760 struct timeval tv;
8761 ret = get_errno(gettimeofday(&tv, NULL));
8762 if (!is_error(ret)) {
8763 if (copy_to_user_timeval(arg1, &tv))
8764 goto efault;
8767 break;
8768 case TARGET_NR_settimeofday:
8770 struct timeval tv, *ptv = NULL;
8771 struct timezone tz, *ptz = NULL;
8773 if (arg1) {
8774 if (copy_from_user_timeval(&tv, arg1)) {
8775 goto efault;
8777 ptv = &tv;
8780 if (arg2) {
8781 if (copy_from_user_timezone(&tz, arg2)) {
8782 goto efault;
8784 ptz = &tz;
8787 ret = get_errno(settimeofday(ptv, ptz));
8789 break;
8790 #if defined(TARGET_NR_select)
8791 case TARGET_NR_select:
8792 #if defined(TARGET_WANT_NI_OLD_SELECT)
8793 /* some architectures used to have old_select here
8794 * but now ENOSYS it.
8796 ret = -TARGET_ENOSYS;
8797 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8798 ret = do_old_select(arg1);
8799 #else
8800 ret = do_select(arg1, arg2, arg3, arg4, arg5);
8801 #endif
8802 break;
8803 #endif
8804 #ifdef TARGET_NR_pselect6
8805 case TARGET_NR_pselect6:
8807 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8808 fd_set rfds, wfds, efds;
8809 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8810 struct timespec ts, *ts_ptr;
8813 * The 6th arg is actually two args smashed together,
8814 * so we cannot use the C library.
8816 sigset_t set;
8817 struct {
8818 sigset_t *set;
8819 size_t size;
8820 } sig, *sig_ptr;
8822 abi_ulong arg_sigset, arg_sigsize, *arg7;
8823 target_sigset_t *target_sigset;
8825 n = arg1;
8826 rfd_addr = arg2;
8827 wfd_addr = arg3;
8828 efd_addr = arg4;
8829 ts_addr = arg5;
8831 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8832 if (ret) {
8833 goto fail;
8835 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8836 if (ret) {
8837 goto fail;
8839 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8840 if (ret) {
8841 goto fail;
8845 * This takes a timespec, and not a timeval, so we cannot
8846 * use the do_select() helper ...
8848 if (ts_addr) {
8849 if (target_to_host_timespec(&ts, ts_addr)) {
8850 goto efault;
8852 ts_ptr = &ts;
8853 } else {
8854 ts_ptr = NULL;
8857 /* Extract the two packed args for the sigset */
8858 if (arg6) {
8859 sig_ptr = &sig;
8860 sig.size = SIGSET_T_SIZE;
8862 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8863 if (!arg7) {
8864 goto efault;
8866 arg_sigset = tswapal(arg7[0]);
8867 arg_sigsize = tswapal(arg7[1]);
8868 unlock_user(arg7, arg6, 0);
8870 if (arg_sigset) {
8871 sig.set = &set;
8872 if (arg_sigsize != sizeof(*target_sigset)) {
8873 /* Like the kernel, we enforce correct size sigsets */
8874 ret = -TARGET_EINVAL;
8875 goto fail;
8877 target_sigset = lock_user(VERIFY_READ, arg_sigset,
8878 sizeof(*target_sigset), 1);
8879 if (!target_sigset) {
8880 goto efault;
8882 target_to_host_sigset(&set, target_sigset);
8883 unlock_user(target_sigset, arg_sigset, 0);
8884 } else {
8885 sig.set = NULL;
8887 } else {
8888 sig_ptr = NULL;
8891 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8892 ts_ptr, sig_ptr));
8894 if (!is_error(ret)) {
8895 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8896 goto efault;
8897 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8898 goto efault;
8899 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8900 goto efault;
8902 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8903 goto efault;
8906 break;
8907 #endif
8908 #ifdef TARGET_NR_symlink
8909 case TARGET_NR_symlink:
8911 void *p2;
8912 p = lock_user_string(arg1);
8913 p2 = lock_user_string(arg2);
8914 if (!p || !p2)
8915 ret = -TARGET_EFAULT;
8916 else
8917 ret = get_errno(symlink(p, p2));
8918 unlock_user(p2, arg2, 0);
8919 unlock_user(p, arg1, 0);
8921 break;
8922 #endif
8923 #if defined(TARGET_NR_symlinkat)
8924 case TARGET_NR_symlinkat:
8926 void *p2;
8927 p = lock_user_string(arg1);
8928 p2 = lock_user_string(arg3);
8929 if (!p || !p2)
8930 ret = -TARGET_EFAULT;
8931 else
8932 ret = get_errno(symlinkat(p, arg2, p2));
8933 unlock_user(p2, arg3, 0);
8934 unlock_user(p, arg1, 0);
8936 break;
8937 #endif
8938 #ifdef TARGET_NR_oldlstat
8939 case TARGET_NR_oldlstat:
8940 goto unimplemented;
8941 #endif
8942 #ifdef TARGET_NR_readlink
8943 case TARGET_NR_readlink:
8945 void *p2;
8946 p = lock_user_string(arg1);
8947 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8948 if (!p || !p2) {
8949 ret = -TARGET_EFAULT;
8950 } else if (!arg3) {
8951 /* Short circuit this for the magic exe check. */
8952 ret = -TARGET_EINVAL;
8953 } else if (is_proc_myself((const char *)p, "exe")) {
8954 char real[PATH_MAX], *temp;
8955 temp = realpath(exec_path, real);
8956 /* Return value is # of bytes that we wrote to the buffer. */
8957 if (temp == NULL) {
8958 ret = get_errno(-1);
8959 } else {
8960 /* Don't worry about sign mismatch as earlier mapping
8961 * logic would have thrown a bad address error. */
8962 ret = MIN(strlen(real), arg3);
8963 /* We cannot NUL terminate the string. */
8964 memcpy(p2, real, ret);
8966 } else {
8967 ret = get_errno(readlink(path(p), p2, arg3));
8969 unlock_user(p2, arg2, ret);
8970 unlock_user(p, arg1, 0);
8972 break;
8973 #endif
8974 #if defined(TARGET_NR_readlinkat)
8975 case TARGET_NR_readlinkat:
8977 void *p2;
8978 p = lock_user_string(arg2);
8979 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8980 if (!p || !p2) {
8981 ret = -TARGET_EFAULT;
8982 } else if (is_proc_myself((const char *)p, "exe")) {
8983 char real[PATH_MAX], *temp;
8984 temp = realpath(exec_path, real);
8985 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
8986 snprintf((char *)p2, arg4, "%s", real);
8987 } else {
8988 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
8990 unlock_user(p2, arg3, ret);
8991 unlock_user(p, arg2, 0);
8993 break;
8994 #endif
8995 #ifdef TARGET_NR_uselib
8996 case TARGET_NR_uselib:
8997 goto unimplemented;
8998 #endif
8999 #ifdef TARGET_NR_swapon
9000 case TARGET_NR_swapon:
9001 if (!(p = lock_user_string(arg1)))
9002 goto efault;
9003 ret = get_errno(swapon(p, arg2));
9004 unlock_user(p, arg1, 0);
9005 break;
9006 #endif
9007 case TARGET_NR_reboot:
9008 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9009 /* arg4 must be ignored in all other cases */
9010 p = lock_user_string(arg4);
9011 if (!p) {
9012 goto efault;
9014 ret = get_errno(reboot(arg1, arg2, arg3, p));
9015 unlock_user(p, arg4, 0);
9016 } else {
9017 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9019 break;
9020 #ifdef TARGET_NR_readdir
9021 case TARGET_NR_readdir:
9022 goto unimplemented;
9023 #endif
9024 #ifdef TARGET_NR_mmap
9025 case TARGET_NR_mmap:
9026 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9027 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9028 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9029 || defined(TARGET_S390X)
9031 abi_ulong *v;
9032 abi_ulong v1, v2, v3, v4, v5, v6;
9033 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9034 goto efault;
9035 v1 = tswapal(v[0]);
9036 v2 = tswapal(v[1]);
9037 v3 = tswapal(v[2]);
9038 v4 = tswapal(v[3]);
9039 v5 = tswapal(v[4]);
9040 v6 = tswapal(v[5]);
9041 unlock_user(v, arg1, 0);
9042 ret = get_errno(target_mmap(v1, v2, v3,
9043 target_to_host_bitmask(v4, mmap_flags_tbl),
9044 v5, v6));
9046 #else
9047 ret = get_errno(target_mmap(arg1, arg2, arg3,
9048 target_to_host_bitmask(arg4, mmap_flags_tbl),
9049 arg5,
9050 arg6));
9051 #endif
9052 break;
9053 #endif
9054 #ifdef TARGET_NR_mmap2
9055 case TARGET_NR_mmap2:
9056 #ifndef MMAP_SHIFT
9057 #define MMAP_SHIFT 12
9058 #endif
9059 ret = get_errno(target_mmap(arg1, arg2, arg3,
9060 target_to_host_bitmask(arg4, mmap_flags_tbl),
9061 arg5,
9062 arg6 << MMAP_SHIFT));
9063 break;
9064 #endif
9065 case TARGET_NR_munmap:
9066 ret = get_errno(target_munmap(arg1, arg2));
9067 break;
9068 case TARGET_NR_mprotect:
9070 TaskState *ts = cpu->opaque;
9071 /* Special hack to detect libc making the stack executable. */
9072 if ((arg3 & PROT_GROWSDOWN)
9073 && arg1 >= ts->info->stack_limit
9074 && arg1 <= ts->info->start_stack) {
9075 arg3 &= ~PROT_GROWSDOWN;
9076 arg2 = arg2 + arg1 - ts->info->stack_limit;
9077 arg1 = ts->info->stack_limit;
9080 ret = get_errno(target_mprotect(arg1, arg2, arg3));
9081 break;
9082 #ifdef TARGET_NR_mremap
9083 case TARGET_NR_mremap:
9084 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9085 break;
9086 #endif
9087 /* ??? msync/mlock/munlock are broken for softmmu. */
9088 #ifdef TARGET_NR_msync
9089 case TARGET_NR_msync:
9090 ret = get_errno(msync(g2h(arg1), arg2, arg3));
9091 break;
9092 #endif
9093 #ifdef TARGET_NR_mlock
9094 case TARGET_NR_mlock:
9095 ret = get_errno(mlock(g2h(arg1), arg2));
9096 break;
9097 #endif
9098 #ifdef TARGET_NR_munlock
9099 case TARGET_NR_munlock:
9100 ret = get_errno(munlock(g2h(arg1), arg2));
9101 break;
9102 #endif
9103 #ifdef TARGET_NR_mlockall
9104 case TARGET_NR_mlockall:
9105 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9106 break;
9107 #endif
9108 #ifdef TARGET_NR_munlockall
9109 case TARGET_NR_munlockall:
9110 ret = get_errno(munlockall());
9111 break;
9112 #endif
9113 case TARGET_NR_truncate:
9114 if (!(p = lock_user_string(arg1)))
9115 goto efault;
9116 ret = get_errno(truncate(p, arg2));
9117 unlock_user(p, arg1, 0);
9118 break;
9119 case TARGET_NR_ftruncate:
9120 ret = get_errno(ftruncate(arg1, arg2));
9121 break;
9122 case TARGET_NR_fchmod:
9123 ret = get_errno(fchmod(arg1, arg2));
9124 break;
9125 #if defined(TARGET_NR_fchmodat)
9126 case TARGET_NR_fchmodat:
9127 if (!(p = lock_user_string(arg2)))
9128 goto efault;
9129 ret = get_errno(fchmodat(arg1, p, arg3, 0));
9130 unlock_user(p, arg2, 0);
9131 break;
9132 #endif
9133 case TARGET_NR_getpriority:
9134 /* Note that negative values are valid for getpriority, so we must
9135 differentiate based on errno settings. */
9136 errno = 0;
9137 ret = getpriority(arg1, arg2);
9138 if (ret == -1 && errno != 0) {
9139 ret = -host_to_target_errno(errno);
9140 break;
9142 #ifdef TARGET_ALPHA
9143 /* Return value is the unbiased priority. Signal no error. */
9144 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9145 #else
9146 /* Return value is a biased priority to avoid negative numbers. */
9147 ret = 20 - ret;
9148 #endif
9149 break;
9150 case TARGET_NR_setpriority:
9151 ret = get_errno(setpriority(arg1, arg2, arg3));
9152 break;
9153 #ifdef TARGET_NR_profil
9154 case TARGET_NR_profil:
9155 goto unimplemented;
9156 #endif
9157 case TARGET_NR_statfs:
9158 if (!(p = lock_user_string(arg1)))
9159 goto efault;
9160 ret = get_errno(statfs(path(p), &stfs));
9161 unlock_user(p, arg1, 0);
9162 convert_statfs:
9163 if (!is_error(ret)) {
9164 struct target_statfs *target_stfs;
9166 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9167 goto efault;
9168 __put_user(stfs.f_type, &target_stfs->f_type);
9169 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9170 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9171 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9172 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9173 __put_user(stfs.f_files, &target_stfs->f_files);
9174 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9175 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9176 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9177 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9178 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9179 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9180 unlock_user_struct(target_stfs, arg2, 1);
9182 break;
9183 case TARGET_NR_fstatfs:
9184 ret = get_errno(fstatfs(arg1, &stfs));
9185 goto convert_statfs;
9186 #ifdef TARGET_NR_statfs64
9187 case TARGET_NR_statfs64:
9188 if (!(p = lock_user_string(arg1)))
9189 goto efault;
9190 ret = get_errno(statfs(path(p), &stfs));
9191 unlock_user(p, arg1, 0);
9192 convert_statfs64:
9193 if (!is_error(ret)) {
9194 struct target_statfs64 *target_stfs;
9196 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9197 goto efault;
9198 __put_user(stfs.f_type, &target_stfs->f_type);
9199 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9200 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9201 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9202 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9203 __put_user(stfs.f_files, &target_stfs->f_files);
9204 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9205 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9206 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9207 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9208 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9209 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9210 unlock_user_struct(target_stfs, arg3, 1);
9212 break;
9213 case TARGET_NR_fstatfs64:
9214 ret = get_errno(fstatfs(arg1, &stfs));
9215 goto convert_statfs64;
9216 #endif
9217 #ifdef TARGET_NR_ioperm
9218 case TARGET_NR_ioperm:
9219 goto unimplemented;
9220 #endif
9221 #ifdef TARGET_NR_socketcall
9222 case TARGET_NR_socketcall:
9223 ret = do_socketcall(arg1, arg2);
9224 break;
9225 #endif
9226 #ifdef TARGET_NR_accept
9227 case TARGET_NR_accept:
9228 ret = do_accept4(arg1, arg2, arg3, 0);
9229 break;
9230 #endif
9231 #ifdef TARGET_NR_accept4
9232 case TARGET_NR_accept4:
9233 ret = do_accept4(arg1, arg2, arg3, arg4);
9234 break;
9235 #endif
9236 #ifdef TARGET_NR_bind
9237 case TARGET_NR_bind:
9238 ret = do_bind(arg1, arg2, arg3);
9239 break;
9240 #endif
9241 #ifdef TARGET_NR_connect
9242 case TARGET_NR_connect:
9243 ret = do_connect(arg1, arg2, arg3);
9244 break;
9245 #endif
9246 #ifdef TARGET_NR_getpeername
9247 case TARGET_NR_getpeername:
9248 ret = do_getpeername(arg1, arg2, arg3);
9249 break;
9250 #endif
9251 #ifdef TARGET_NR_getsockname
9252 case TARGET_NR_getsockname:
9253 ret = do_getsockname(arg1, arg2, arg3);
9254 break;
9255 #endif
9256 #ifdef TARGET_NR_getsockopt
9257 case TARGET_NR_getsockopt:
9258 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9259 break;
9260 #endif
9261 #ifdef TARGET_NR_listen
9262 case TARGET_NR_listen:
9263 ret = get_errno(listen(arg1, arg2));
9264 break;
9265 #endif
9266 #ifdef TARGET_NR_recv
9267 case TARGET_NR_recv:
9268 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9269 break;
9270 #endif
9271 #ifdef TARGET_NR_recvfrom
9272 case TARGET_NR_recvfrom:
9273 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9274 break;
9275 #endif
9276 #ifdef TARGET_NR_recvmsg
9277 case TARGET_NR_recvmsg:
9278 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
9279 break;
9280 #endif
9281 #ifdef TARGET_NR_send
9282 case TARGET_NR_send:
9283 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9284 break;
9285 #endif
9286 #ifdef TARGET_NR_sendmsg
9287 case TARGET_NR_sendmsg:
9288 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
9289 break;
9290 #endif
9291 #ifdef TARGET_NR_sendmmsg
9292 case TARGET_NR_sendmmsg:
9293 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9294 break;
9295 case TARGET_NR_recvmmsg:
9296 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9297 break;
9298 #endif
9299 #ifdef TARGET_NR_sendto
9300 case TARGET_NR_sendto:
9301 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9302 break;
9303 #endif
9304 #ifdef TARGET_NR_shutdown
9305 case TARGET_NR_shutdown:
9306 ret = get_errno(shutdown(arg1, arg2));
9307 break;
9308 #endif
9309 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9310 case TARGET_NR_getrandom:
9311 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9312 if (!p) {
9313 goto efault;
9315 ret = get_errno(getrandom(p, arg2, arg3));
9316 unlock_user(p, arg1, ret);
9317 break;
9318 #endif
9319 #ifdef TARGET_NR_socket
9320 case TARGET_NR_socket:
9321 ret = do_socket(arg1, arg2, arg3);
9322 fd_trans_unregister(ret);
9323 break;
9324 #endif
9325 #ifdef TARGET_NR_socketpair
9326 case TARGET_NR_socketpair:
9327 ret = do_socketpair(arg1, arg2, arg3, arg4);
9328 break;
9329 #endif
9330 #ifdef TARGET_NR_setsockopt
9331 case TARGET_NR_setsockopt:
9332 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9333 break;
9334 #endif
9335 #if defined(TARGET_NR_syslog)
9336 case TARGET_NR_syslog:
9338 int len = arg2;
9340 switch (arg1) {
9341 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
9342 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
9343 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
9344 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
9345 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
9346 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9347 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
9348 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
9350 ret = get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9352 break;
9353 case TARGET_SYSLOG_ACTION_READ: /* Read from log */
9354 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
9355 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
9357 ret = -TARGET_EINVAL;
9358 if (len < 0) {
9359 goto fail;
9361 ret = 0;
9362 if (len == 0) {
9363 break;
9365 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9366 if (!p) {
9367 ret = -TARGET_EFAULT;
9368 goto fail;
9370 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9371 unlock_user(p, arg2, arg3);
9373 break;
9374 default:
9375 ret = -EINVAL;
9376 break;
9379 break;
9380 #endif
9381 case TARGET_NR_setitimer:
9383 struct itimerval value, ovalue, *pvalue;
9385 if (arg2) {
9386 pvalue = &value;
9387 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9388 || copy_from_user_timeval(&pvalue->it_value,
9389 arg2 + sizeof(struct target_timeval)))
9390 goto efault;
9391 } else {
9392 pvalue = NULL;
9394 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9395 if (!is_error(ret) && arg3) {
9396 if (copy_to_user_timeval(arg3,
9397 &ovalue.it_interval)
9398 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9399 &ovalue.it_value))
9400 goto efault;
9403 break;
9404 case TARGET_NR_getitimer:
9406 struct itimerval value;
9408 ret = get_errno(getitimer(arg1, &value));
9409 if (!is_error(ret) && arg2) {
9410 if (copy_to_user_timeval(arg2,
9411 &value.it_interval)
9412 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9413 &value.it_value))
9414 goto efault;
9417 break;
9418 #ifdef TARGET_NR_stat
9419 case TARGET_NR_stat:
9420 if (!(p = lock_user_string(arg1)))
9421 goto efault;
9422 ret = get_errno(stat(path(p), &st));
9423 unlock_user(p, arg1, 0);
9424 goto do_stat;
9425 #endif
9426 #ifdef TARGET_NR_lstat
9427 case TARGET_NR_lstat:
9428 if (!(p = lock_user_string(arg1)))
9429 goto efault;
9430 ret = get_errno(lstat(path(p), &st));
9431 unlock_user(p, arg1, 0);
9432 goto do_stat;
9433 #endif
9434 case TARGET_NR_fstat:
9436 ret = get_errno(fstat(arg1, &st));
9437 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9438 do_stat:
9439 #endif
9440 if (!is_error(ret)) {
9441 struct target_stat *target_st;
9443 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9444 goto efault;
9445 memset(target_st, 0, sizeof(*target_st));
9446 __put_user(st.st_dev, &target_st->st_dev);
9447 __put_user(st.st_ino, &target_st->st_ino);
9448 __put_user(st.st_mode, &target_st->st_mode);
9449 __put_user(st.st_uid, &target_st->st_uid);
9450 __put_user(st.st_gid, &target_st->st_gid);
9451 __put_user(st.st_nlink, &target_st->st_nlink);
9452 __put_user(st.st_rdev, &target_st->st_rdev);
9453 __put_user(st.st_size, &target_st->st_size);
9454 __put_user(st.st_blksize, &target_st->st_blksize);
9455 __put_user(st.st_blocks, &target_st->st_blocks);
9456 __put_user(st.st_atime, &target_st->target_st_atime);
9457 __put_user(st.st_mtime, &target_st->target_st_mtime);
9458 __put_user(st.st_ctime, &target_st->target_st_ctime);
9459 unlock_user_struct(target_st, arg2, 1);
9462 break;
9463 #ifdef TARGET_NR_olduname
9464 case TARGET_NR_olduname:
9465 goto unimplemented;
9466 #endif
9467 #ifdef TARGET_NR_iopl
9468 case TARGET_NR_iopl:
9469 goto unimplemented;
9470 #endif
9471 case TARGET_NR_vhangup:
9472 ret = get_errno(vhangup());
9473 break;
9474 #ifdef TARGET_NR_idle
9475 case TARGET_NR_idle:
9476 goto unimplemented;
9477 #endif
9478 #ifdef TARGET_NR_syscall
9479 case TARGET_NR_syscall:
9480 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9481 arg6, arg7, arg8, 0);
9482 break;
9483 #endif
9484 case TARGET_NR_wait4:
9486 int status;
9487 abi_long status_ptr = arg2;
9488 struct rusage rusage, *rusage_ptr;
9489 abi_ulong target_rusage = arg4;
9490 abi_long rusage_err;
9491 if (target_rusage)
9492 rusage_ptr = &rusage;
9493 else
9494 rusage_ptr = NULL;
9495 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9496 if (!is_error(ret)) {
9497 if (status_ptr && ret) {
9498 status = host_to_target_waitstatus(status);
9499 if (put_user_s32(status, status_ptr))
9500 goto efault;
9502 if (target_rusage) {
9503 rusage_err = host_to_target_rusage(target_rusage, &rusage);
9504 if (rusage_err) {
9505 ret = rusage_err;
9510 break;
9511 #ifdef TARGET_NR_swapoff
9512 case TARGET_NR_swapoff:
9513 if (!(p = lock_user_string(arg1)))
9514 goto efault;
9515 ret = get_errno(swapoff(p));
9516 unlock_user(p, arg1, 0);
9517 break;
9518 #endif
9519 case TARGET_NR_sysinfo:
9521 struct target_sysinfo *target_value;
9522 struct sysinfo value;
9523 ret = get_errno(sysinfo(&value));
9524 if (!is_error(ret) && arg1)
9526 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9527 goto efault;
9528 __put_user(value.uptime, &target_value->uptime);
9529 __put_user(value.loads[0], &target_value->loads[0]);
9530 __put_user(value.loads[1], &target_value->loads[1]);
9531 __put_user(value.loads[2], &target_value->loads[2]);
9532 __put_user(value.totalram, &target_value->totalram);
9533 __put_user(value.freeram, &target_value->freeram);
9534 __put_user(value.sharedram, &target_value->sharedram);
9535 __put_user(value.bufferram, &target_value->bufferram);
9536 __put_user(value.totalswap, &target_value->totalswap);
9537 __put_user(value.freeswap, &target_value->freeswap);
9538 __put_user(value.procs, &target_value->procs);
9539 __put_user(value.totalhigh, &target_value->totalhigh);
9540 __put_user(value.freehigh, &target_value->freehigh);
9541 __put_user(value.mem_unit, &target_value->mem_unit);
9542 unlock_user_struct(target_value, arg1, 1);
9545 break;
9546 #ifdef TARGET_NR_ipc
9547 case TARGET_NR_ipc:
9548 ret = do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9549 break;
9550 #endif
9551 #ifdef TARGET_NR_semget
9552 case TARGET_NR_semget:
9553 ret = get_errno(semget(arg1, arg2, arg3));
9554 break;
9555 #endif
9556 #ifdef TARGET_NR_semop
9557 case TARGET_NR_semop:
9558 ret = do_semop(arg1, arg2, arg3);
9559 break;
9560 #endif
9561 #ifdef TARGET_NR_semctl
9562 case TARGET_NR_semctl:
9563 ret = do_semctl(arg1, arg2, arg3, arg4);
9564 break;
9565 #endif
9566 #ifdef TARGET_NR_msgctl
9567 case TARGET_NR_msgctl:
9568 ret = do_msgctl(arg1, arg2, arg3);
9569 break;
9570 #endif
9571 #ifdef TARGET_NR_msgget
9572 case TARGET_NR_msgget:
9573 ret = get_errno(msgget(arg1, arg2));
9574 break;
9575 #endif
9576 #ifdef TARGET_NR_msgrcv
9577 case TARGET_NR_msgrcv:
9578 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9579 break;
9580 #endif
9581 #ifdef TARGET_NR_msgsnd
9582 case TARGET_NR_msgsnd:
9583 ret = do_msgsnd(arg1, arg2, arg3, arg4);
9584 break;
9585 #endif
9586 #ifdef TARGET_NR_shmget
9587 case TARGET_NR_shmget:
9588 ret = get_errno(shmget(arg1, arg2, arg3));
9589 break;
9590 #endif
9591 #ifdef TARGET_NR_shmctl
9592 case TARGET_NR_shmctl:
9593 ret = do_shmctl(arg1, arg2, arg3);
9594 break;
9595 #endif
9596 #ifdef TARGET_NR_shmat
9597 case TARGET_NR_shmat:
9598 ret = do_shmat(cpu_env, arg1, arg2, arg3);
9599 break;
9600 #endif
9601 #ifdef TARGET_NR_shmdt
9602 case TARGET_NR_shmdt:
9603 ret = do_shmdt(arg1);
9604 break;
9605 #endif
9606 case TARGET_NR_fsync:
9607 ret = get_errno(fsync(arg1));
9608 break;
9609 case TARGET_NR_clone:
9610 /* Linux manages to have three different orderings for its
9611 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9612 * match the kernel's CONFIG_CLONE_* settings.
9613 * Microblaze is further special in that it uses a sixth
9614 * implicit argument to clone for the TLS pointer.
9616 #if defined(TARGET_MICROBLAZE)
9617 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9618 #elif defined(TARGET_CLONE_BACKWARDS)
9619 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9620 #elif defined(TARGET_CLONE_BACKWARDS2)
9621 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9622 #else
9623 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9624 #endif
9625 break;
9626 #ifdef __NR_exit_group
9627 /* new thread calls */
9628 case TARGET_NR_exit_group:
9629 #ifdef TARGET_GPROF
9630 _mcleanup();
9631 #endif
9632 gdb_exit(cpu_env, arg1);
9633 ret = get_errno(exit_group(arg1));
9634 break;
9635 #endif
9636 case TARGET_NR_setdomainname:
9637 if (!(p = lock_user_string(arg1)))
9638 goto efault;
9639 ret = get_errno(setdomainname(p, arg2));
9640 unlock_user(p, arg1, 0);
9641 break;
9642 case TARGET_NR_uname:
9643 /* no need to transcode because we use the linux syscall */
9645 struct new_utsname * buf;
9647 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9648 goto efault;
9649 ret = get_errno(sys_uname(buf));
9650 if (!is_error(ret)) {
9651 /* Overwrite the native machine name with whatever is being
9652 emulated. */
9653 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
9654 /* Allow the user to override the reported release. */
9655 if (qemu_uname_release && *qemu_uname_release) {
9656 g_strlcpy(buf->release, qemu_uname_release,
9657 sizeof(buf->release));
9660 unlock_user_struct(buf, arg1, 1);
9662 break;
9663 #ifdef TARGET_I386
9664 case TARGET_NR_modify_ldt:
9665 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
9666 break;
9667 #if !defined(TARGET_X86_64)
9668 case TARGET_NR_vm86old:
9669 goto unimplemented;
9670 case TARGET_NR_vm86:
9671 ret = do_vm86(cpu_env, arg1, arg2);
9672 break;
9673 #endif
9674 #endif
9675 case TARGET_NR_adjtimex:
9677 struct timex host_buf;
9679 if (target_to_host_timex(&host_buf, arg1) != 0) {
9680 goto efault;
9682 ret = get_errno(adjtimex(&host_buf));
9683 if (!is_error(ret)) {
9684 if (host_to_target_timex(arg1, &host_buf) != 0) {
9685 goto efault;
9689 break;
9690 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9691 case TARGET_NR_clock_adjtime:
9693 struct timex htx, *phtx = &htx;
9695 if (target_to_host_timex(phtx, arg2) != 0) {
9696 goto efault;
9698 ret = get_errno(clock_adjtime(arg1, phtx));
9699 if (!is_error(ret) && phtx) {
9700 if (host_to_target_timex(arg2, phtx) != 0) {
9701 goto efault;
9705 break;
9706 #endif
9707 #ifdef TARGET_NR_create_module
9708 case TARGET_NR_create_module:
9709 #endif
9710 case TARGET_NR_init_module:
9711 case TARGET_NR_delete_module:
9712 #ifdef TARGET_NR_get_kernel_syms
9713 case TARGET_NR_get_kernel_syms:
9714 #endif
9715 goto unimplemented;
9716 case TARGET_NR_quotactl:
9717 goto unimplemented;
9718 case TARGET_NR_getpgid:
9719 ret = get_errno(getpgid(arg1));
9720 break;
9721 case TARGET_NR_fchdir:
9722 ret = get_errno(fchdir(arg1));
9723 break;
9724 #ifdef TARGET_NR_bdflush /* not on x86_64 */
9725 case TARGET_NR_bdflush:
9726 goto unimplemented;
9727 #endif
9728 #ifdef TARGET_NR_sysfs
9729 case TARGET_NR_sysfs:
9730 goto unimplemented;
9731 #endif
9732 case TARGET_NR_personality:
9733 ret = get_errno(personality(arg1));
9734 break;
9735 #ifdef TARGET_NR_afs_syscall
9736 case TARGET_NR_afs_syscall:
9737 goto unimplemented;
9738 #endif
9739 #ifdef TARGET_NR__llseek /* Not on alpha */
9740 case TARGET_NR__llseek:
9742 int64_t res;
9743 #if !defined(__NR_llseek)
9744 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9745 if (res == -1) {
9746 ret = get_errno(res);
9747 } else {
9748 ret = 0;
9750 #else
9751 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9752 #endif
9753 if ((ret == 0) && put_user_s64(res, arg4)) {
9754 goto efault;
9757 break;
9758 #endif
9759 #ifdef TARGET_NR_getdents
9760 case TARGET_NR_getdents:
9761 #ifdef __NR_getdents
9762 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9764 struct target_dirent *target_dirp;
9765 struct linux_dirent *dirp;
9766 abi_long count = arg3;
9768 dirp = g_try_malloc(count);
9769 if (!dirp) {
9770 ret = -TARGET_ENOMEM;
9771 goto fail;
9774 ret = get_errno(sys_getdents(arg1, dirp, count));
9775 if (!is_error(ret)) {
9776 struct linux_dirent *de;
9777 struct target_dirent *tde;
9778 int len = ret;
9779 int reclen, treclen;
9780 int count1, tnamelen;
9782 count1 = 0;
9783 de = dirp;
9784 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9785 goto efault;
9786 tde = target_dirp;
9787 while (len > 0) {
9788 reclen = de->d_reclen;
9789 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9790 assert(tnamelen >= 0);
9791 treclen = tnamelen + offsetof(struct target_dirent, d_name);
9792 assert(count1 + treclen <= count);
9793 tde->d_reclen = tswap16(treclen);
9794 tde->d_ino = tswapal(de->d_ino);
9795 tde->d_off = tswapal(de->d_off);
9796 memcpy(tde->d_name, de->d_name, tnamelen);
9797 de = (struct linux_dirent *)((char *)de + reclen);
9798 len -= reclen;
9799 tde = (struct target_dirent *)((char *)tde + treclen);
9800 count1 += treclen;
9802 ret = count1;
9803 unlock_user(target_dirp, arg2, ret);
9805 g_free(dirp);
9807 #else
9809 struct linux_dirent *dirp;
9810 abi_long count = arg3;
9812 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9813 goto efault;
9814 ret = get_errno(sys_getdents(arg1, dirp, count));
9815 if (!is_error(ret)) {
9816 struct linux_dirent *de;
9817 int len = ret;
9818 int reclen;
9819 de = dirp;
9820 while (len > 0) {
9821 reclen = de->d_reclen;
9822 if (reclen > len)
9823 break;
9824 de->d_reclen = tswap16(reclen);
9825 tswapls(&de->d_ino);
9826 tswapls(&de->d_off);
9827 de = (struct linux_dirent *)((char *)de + reclen);
9828 len -= reclen;
9831 unlock_user(dirp, arg2, ret);
9833 #endif
9834 #else
9835 /* Implement getdents in terms of getdents64 */
9837 struct linux_dirent64 *dirp;
9838 abi_long count = arg3;
9840 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9841 if (!dirp) {
9842 goto efault;
9844 ret = get_errno(sys_getdents64(arg1, dirp, count));
9845 if (!is_error(ret)) {
9846 /* Convert the dirent64 structs to target dirent. We do this
9847 * in-place, since we can guarantee that a target_dirent is no
9848 * larger than a dirent64; however this means we have to be
9849 * careful to read everything before writing in the new format.
9851 struct linux_dirent64 *de;
9852 struct target_dirent *tde;
9853 int len = ret;
9854 int tlen = 0;
9856 de = dirp;
9857 tde = (struct target_dirent *)dirp;
9858 while (len > 0) {
9859 int namelen, treclen;
9860 int reclen = de->d_reclen;
9861 uint64_t ino = de->d_ino;
9862 int64_t off = de->d_off;
9863 uint8_t type = de->d_type;
9865 namelen = strlen(de->d_name);
9866 treclen = offsetof(struct target_dirent, d_name)
9867 + namelen + 2;
9868 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9870 memmove(tde->d_name, de->d_name, namelen + 1);
9871 tde->d_ino = tswapal(ino);
9872 tde->d_off = tswapal(off);
9873 tde->d_reclen = tswap16(treclen);
9874 /* The target_dirent type is in what was formerly a padding
9875 * byte at the end of the structure:
9877 *(((char *)tde) + treclen - 1) = type;
9879 de = (struct linux_dirent64 *)((char *)de + reclen);
9880 tde = (struct target_dirent *)((char *)tde + treclen);
9881 len -= reclen;
9882 tlen += treclen;
9884 ret = tlen;
9886 unlock_user(dirp, arg2, ret);
9888 #endif
9889 break;
9890 #endif /* TARGET_NR_getdents */
9891 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9892 case TARGET_NR_getdents64:
9894 struct linux_dirent64 *dirp;
9895 abi_long count = arg3;
9896 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9897 goto efault;
9898 ret = get_errno(sys_getdents64(arg1, dirp, count));
9899 if (!is_error(ret)) {
9900 struct linux_dirent64 *de;
9901 int len = ret;
9902 int reclen;
9903 de = dirp;
9904 while (len > 0) {
9905 reclen = de->d_reclen;
9906 if (reclen > len)
9907 break;
9908 de->d_reclen = tswap16(reclen);
9909 tswap64s((uint64_t *)&de->d_ino);
9910 tswap64s((uint64_t *)&de->d_off);
9911 de = (struct linux_dirent64 *)((char *)de + reclen);
9912 len -= reclen;
9915 unlock_user(dirp, arg2, ret);
9917 break;
9918 #endif /* TARGET_NR_getdents64 */
9919 #if defined(TARGET_NR__newselect)
9920 case TARGET_NR__newselect:
9921 ret = do_select(arg1, arg2, arg3, arg4, arg5);
9922 break;
9923 #endif
9924 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9925 # ifdef TARGET_NR_poll
9926 case TARGET_NR_poll:
9927 # endif
9928 # ifdef TARGET_NR_ppoll
9929 case TARGET_NR_ppoll:
9930 # endif
9932 struct target_pollfd *target_pfd;
9933 unsigned int nfds = arg2;
9934 struct pollfd *pfd;
9935 unsigned int i;
9937 pfd = NULL;
9938 target_pfd = NULL;
9939 if (nfds) {
9940 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9941 ret = -TARGET_EINVAL;
9942 break;
9945 target_pfd = lock_user(VERIFY_WRITE, arg1,
9946 sizeof(struct target_pollfd) * nfds, 1);
9947 if (!target_pfd) {
9948 goto efault;
9951 pfd = alloca(sizeof(struct pollfd) * nfds);
9952 for (i = 0; i < nfds; i++) {
9953 pfd[i].fd = tswap32(target_pfd[i].fd);
9954 pfd[i].events = tswap16(target_pfd[i].events);
9958 switch (num) {
9959 # ifdef TARGET_NR_ppoll
9960 case TARGET_NR_ppoll:
9962 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9963 target_sigset_t *target_set;
9964 sigset_t _set, *set = &_set;
9966 if (arg3) {
9967 if (target_to_host_timespec(timeout_ts, arg3)) {
9968 unlock_user(target_pfd, arg1, 0);
9969 goto efault;
9971 } else {
9972 timeout_ts = NULL;
9975 if (arg4) {
9976 if (arg5 != sizeof(target_sigset_t)) {
9977 unlock_user(target_pfd, arg1, 0);
9978 ret = -TARGET_EINVAL;
9979 break;
9982 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
9983 if (!target_set) {
9984 unlock_user(target_pfd, arg1, 0);
9985 goto efault;
9987 target_to_host_sigset(set, target_set);
9988 } else {
9989 set = NULL;
9992 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
9993 set, SIGSET_T_SIZE));
9995 if (!is_error(ret) && arg3) {
9996 host_to_target_timespec(arg3, timeout_ts);
9998 if (arg4) {
9999 unlock_user(target_set, arg4, 0);
10001 break;
10003 # endif
10004 # ifdef TARGET_NR_poll
10005 case TARGET_NR_poll:
10007 struct timespec ts, *pts;
10009 if (arg3 >= 0) {
10010 /* Convert ms to secs, ns */
10011 ts.tv_sec = arg3 / 1000;
10012 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
10013 pts = &ts;
10014 } else {
10015 /* -ve poll() timeout means "infinite" */
10016 pts = NULL;
10018 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
10019 break;
10021 # endif
10022 default:
10023 g_assert_not_reached();
10026 if (!is_error(ret)) {
10027 for(i = 0; i < nfds; i++) {
10028 target_pfd[i].revents = tswap16(pfd[i].revents);
10031 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
10033 break;
10034 #endif
10035 case TARGET_NR_flock:
10036 /* NOTE: the flock constant seems to be the same for every
10037 Linux platform */
10038 ret = get_errno(safe_flock(arg1, arg2));
10039 break;
10040 case TARGET_NR_readv:
10042 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10043 if (vec != NULL) {
10044 ret = get_errno(safe_readv(arg1, vec, arg3));
10045 unlock_iovec(vec, arg2, arg3, 1);
10046 } else {
10047 ret = -host_to_target_errno(errno);
10050 break;
10051 case TARGET_NR_writev:
10053 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10054 if (vec != NULL) {
10055 ret = get_errno(safe_writev(arg1, vec, arg3));
10056 unlock_iovec(vec, arg2, arg3, 0);
10057 } else {
10058 ret = -host_to_target_errno(errno);
10061 break;
10062 case TARGET_NR_getsid:
10063 ret = get_errno(getsid(arg1));
10064 break;
10065 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10066 case TARGET_NR_fdatasync:
10067 ret = get_errno(fdatasync(arg1));
10068 break;
10069 #endif
10070 #ifdef TARGET_NR__sysctl
10071 case TARGET_NR__sysctl:
10072 /* We don't implement this, but ENOTDIR is always a safe
10073 return value. */
10074 ret = -TARGET_ENOTDIR;
10075 break;
10076 #endif
10077 case TARGET_NR_sched_getaffinity:
10079 unsigned int mask_size;
10080 unsigned long *mask;
10083 * sched_getaffinity needs multiples of ulong, so need to take
10084 * care of mismatches between target ulong and host ulong sizes.
10086 if (arg2 & (sizeof(abi_ulong) - 1)) {
10087 ret = -TARGET_EINVAL;
10088 break;
10090 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10092 mask = alloca(mask_size);
10093 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10095 if (!is_error(ret)) {
10096 if (ret > arg2) {
10097 /* More data returned than the caller's buffer will fit.
10098 * This only happens if sizeof(abi_long) < sizeof(long)
10099 * and the caller passed us a buffer holding an odd number
10100 * of abi_longs. If the host kernel is actually using the
10101 * extra 4 bytes then fail EINVAL; otherwise we can just
10102 * ignore them and only copy the interesting part.
10104 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10105 if (numcpus > arg2 * 8) {
10106 ret = -TARGET_EINVAL;
10107 break;
10109 ret = arg2;
10112 if (copy_to_user(arg3, mask, ret)) {
10113 goto efault;
10117 break;
10118 case TARGET_NR_sched_setaffinity:
10120 unsigned int mask_size;
10121 unsigned long *mask;
10124 * sched_setaffinity needs multiples of ulong, so need to take
10125 * care of mismatches between target ulong and host ulong sizes.
10127 if (arg2 & (sizeof(abi_ulong) - 1)) {
10128 ret = -TARGET_EINVAL;
10129 break;
10131 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10133 mask = alloca(mask_size);
10134 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
10135 goto efault;
10137 memcpy(mask, p, arg2);
10138 unlock_user_struct(p, arg2, 0);
10140 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10142 break;
10143 case TARGET_NR_sched_setparam:
10145 struct sched_param *target_schp;
10146 struct sched_param schp;
10148 if (arg2 == 0) {
10149 return -TARGET_EINVAL;
10151 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10152 goto efault;
10153 schp.sched_priority = tswap32(target_schp->sched_priority);
10154 unlock_user_struct(target_schp, arg2, 0);
10155 ret = get_errno(sched_setparam(arg1, &schp));
10157 break;
10158 case TARGET_NR_sched_getparam:
10160 struct sched_param *target_schp;
10161 struct sched_param schp;
10163 if (arg2 == 0) {
10164 return -TARGET_EINVAL;
10166 ret = get_errno(sched_getparam(arg1, &schp));
10167 if (!is_error(ret)) {
10168 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10169 goto efault;
10170 target_schp->sched_priority = tswap32(schp.sched_priority);
10171 unlock_user_struct(target_schp, arg2, 1);
10174 break;
10175 case TARGET_NR_sched_setscheduler:
10177 struct sched_param *target_schp;
10178 struct sched_param schp;
10179 if (arg3 == 0) {
10180 return -TARGET_EINVAL;
10182 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10183 goto efault;
10184 schp.sched_priority = tswap32(target_schp->sched_priority);
10185 unlock_user_struct(target_schp, arg3, 0);
10186 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
10188 break;
10189 case TARGET_NR_sched_getscheduler:
10190 ret = get_errno(sched_getscheduler(arg1));
10191 break;
10192 case TARGET_NR_sched_yield:
10193 ret = get_errno(sched_yield());
10194 break;
10195 case TARGET_NR_sched_get_priority_max:
10196 ret = get_errno(sched_get_priority_max(arg1));
10197 break;
10198 case TARGET_NR_sched_get_priority_min:
10199 ret = get_errno(sched_get_priority_min(arg1));
10200 break;
10201 case TARGET_NR_sched_rr_get_interval:
10203 struct timespec ts;
10204 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10205 if (!is_error(ret)) {
10206 ret = host_to_target_timespec(arg2, &ts);
10209 break;
10210 case TARGET_NR_nanosleep:
10212 struct timespec req, rem;
10213 target_to_host_timespec(&req, arg1);
10214 ret = get_errno(safe_nanosleep(&req, &rem));
10215 if (is_error(ret) && arg2) {
10216 host_to_target_timespec(arg2, &rem);
10219 break;
10220 #ifdef TARGET_NR_query_module
10221 case TARGET_NR_query_module:
10222 goto unimplemented;
10223 #endif
10224 #ifdef TARGET_NR_nfsservctl
10225 case TARGET_NR_nfsservctl:
10226 goto unimplemented;
10227 #endif
10228 case TARGET_NR_prctl:
10229 switch (arg1) {
10230 case PR_GET_PDEATHSIG:
10232 int deathsig;
10233 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10234 if (!is_error(ret) && arg2
10235 && put_user_ual(deathsig, arg2)) {
10236 goto efault;
10238 break;
10240 #ifdef PR_GET_NAME
10241 case PR_GET_NAME:
10243 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10244 if (!name) {
10245 goto efault;
10247 ret = get_errno(prctl(arg1, (unsigned long)name,
10248 arg3, arg4, arg5));
10249 unlock_user(name, arg2, 16);
10250 break;
10252 case PR_SET_NAME:
10254 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10255 if (!name) {
10256 goto efault;
10258 ret = get_errno(prctl(arg1, (unsigned long)name,
10259 arg3, arg4, arg5));
10260 unlock_user(name, arg2, 0);
10261 break;
10263 #endif
10264 default:
10265 /* Most prctl options have no pointer arguments */
10266 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10267 break;
10269 break;
10270 #ifdef TARGET_NR_arch_prctl
10271 case TARGET_NR_arch_prctl:
10272 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10273 ret = do_arch_prctl(cpu_env, arg1, arg2);
10274 break;
10275 #else
10276 goto unimplemented;
10277 #endif
10278 #endif
10279 #ifdef TARGET_NR_pread64
10280 case TARGET_NR_pread64:
10281 if (regpairs_aligned(cpu_env)) {
10282 arg4 = arg5;
10283 arg5 = arg6;
10285 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
10286 goto efault;
10287 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10288 unlock_user(p, arg2, ret);
10289 break;
10290 case TARGET_NR_pwrite64:
10291 if (regpairs_aligned(cpu_env)) {
10292 arg4 = arg5;
10293 arg5 = arg6;
10295 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
10296 goto efault;
10297 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10298 unlock_user(p, arg2, 0);
10299 break;
10300 #endif
10301 case TARGET_NR_getcwd:
10302 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10303 goto efault;
10304 ret = get_errno(sys_getcwd1(p, arg2));
10305 unlock_user(p, arg1, ret);
10306 break;
10307 case TARGET_NR_capget:
10308 case TARGET_NR_capset:
10310 struct target_user_cap_header *target_header;
10311 struct target_user_cap_data *target_data = NULL;
10312 struct __user_cap_header_struct header;
10313 struct __user_cap_data_struct data[2];
10314 struct __user_cap_data_struct *dataptr = NULL;
10315 int i, target_datalen;
10316 int data_items = 1;
10318 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10319 goto efault;
10321 header.version = tswap32(target_header->version);
10322 header.pid = tswap32(target_header->pid);
10324 if (header.version != _LINUX_CAPABILITY_VERSION) {
10325 /* Version 2 and up takes pointer to two user_data structs */
10326 data_items = 2;
10329 target_datalen = sizeof(*target_data) * data_items;
10331 if (arg2) {
10332 if (num == TARGET_NR_capget) {
10333 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10334 } else {
10335 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10337 if (!target_data) {
10338 unlock_user_struct(target_header, arg1, 0);
10339 goto efault;
10342 if (num == TARGET_NR_capset) {
10343 for (i = 0; i < data_items; i++) {
10344 data[i].effective = tswap32(target_data[i].effective);
10345 data[i].permitted = tswap32(target_data[i].permitted);
10346 data[i].inheritable = tswap32(target_data[i].inheritable);
10350 dataptr = data;
10353 if (num == TARGET_NR_capget) {
10354 ret = get_errno(capget(&header, dataptr));
10355 } else {
10356 ret = get_errno(capset(&header, dataptr));
10359 /* The kernel always updates version for both capget and capset */
10360 target_header->version = tswap32(header.version);
10361 unlock_user_struct(target_header, arg1, 1);
10363 if (arg2) {
10364 if (num == TARGET_NR_capget) {
10365 for (i = 0; i < data_items; i++) {
10366 target_data[i].effective = tswap32(data[i].effective);
10367 target_data[i].permitted = tswap32(data[i].permitted);
10368 target_data[i].inheritable = tswap32(data[i].inheritable);
10370 unlock_user(target_data, arg2, target_datalen);
10371 } else {
10372 unlock_user(target_data, arg2, 0);
10375 break;
10377 case TARGET_NR_sigaltstack:
10378 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
10379 break;
10381 #ifdef CONFIG_SENDFILE
10382 case TARGET_NR_sendfile:
10384 off_t *offp = NULL;
10385 off_t off;
10386 if (arg3) {
10387 ret = get_user_sal(off, arg3);
10388 if (is_error(ret)) {
10389 break;
10391 offp = &off;
10393 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10394 if (!is_error(ret) && arg3) {
10395 abi_long ret2 = put_user_sal(off, arg3);
10396 if (is_error(ret2)) {
10397 ret = ret2;
10400 break;
10402 #ifdef TARGET_NR_sendfile64
10403 case TARGET_NR_sendfile64:
10405 off_t *offp = NULL;
10406 off_t off;
10407 if (arg3) {
10408 ret = get_user_s64(off, arg3);
10409 if (is_error(ret)) {
10410 break;
10412 offp = &off;
10414 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10415 if (!is_error(ret) && arg3) {
10416 abi_long ret2 = put_user_s64(off, arg3);
10417 if (is_error(ret2)) {
10418 ret = ret2;
10421 break;
10423 #endif
10424 #else
10425 case TARGET_NR_sendfile:
10426 #ifdef TARGET_NR_sendfile64
10427 case TARGET_NR_sendfile64:
10428 #endif
10429 goto unimplemented;
10430 #endif
10432 #ifdef TARGET_NR_getpmsg
10433 case TARGET_NR_getpmsg:
10434 goto unimplemented;
10435 #endif
10436 #ifdef TARGET_NR_putpmsg
10437 case TARGET_NR_putpmsg:
10438 goto unimplemented;
10439 #endif
10440 #ifdef TARGET_NR_vfork
10441 case TARGET_NR_vfork:
10442 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
10443 0, 0, 0, 0));
10444 break;
10445 #endif
10446 #ifdef TARGET_NR_ugetrlimit
10447 case TARGET_NR_ugetrlimit:
10449 struct rlimit rlim;
10450 int resource = target_to_host_resource(arg1);
10451 ret = get_errno(getrlimit(resource, &rlim));
10452 if (!is_error(ret)) {
10453 struct target_rlimit *target_rlim;
10454 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10455 goto efault;
10456 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10457 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10458 unlock_user_struct(target_rlim, arg2, 1);
10460 break;
10462 #endif
10463 #ifdef TARGET_NR_truncate64
10464 case TARGET_NR_truncate64:
10465 if (!(p = lock_user_string(arg1)))
10466 goto efault;
10467 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10468 unlock_user(p, arg1, 0);
10469 break;
10470 #endif
10471 #ifdef TARGET_NR_ftruncate64
10472 case TARGET_NR_ftruncate64:
10473 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10474 break;
10475 #endif
10476 #ifdef TARGET_NR_stat64
10477 case TARGET_NR_stat64:
10478 if (!(p = lock_user_string(arg1)))
10479 goto efault;
10480 ret = get_errno(stat(path(p), &st));
10481 unlock_user(p, arg1, 0);
10482 if (!is_error(ret))
10483 ret = host_to_target_stat64(cpu_env, arg2, &st);
10484 break;
10485 #endif
10486 #ifdef TARGET_NR_lstat64
10487 case TARGET_NR_lstat64:
10488 if (!(p = lock_user_string(arg1)))
10489 goto efault;
10490 ret = get_errno(lstat(path(p), &st));
10491 unlock_user(p, arg1, 0);
10492 if (!is_error(ret))
10493 ret = host_to_target_stat64(cpu_env, arg2, &st);
10494 break;
10495 #endif
10496 #ifdef TARGET_NR_fstat64
10497 case TARGET_NR_fstat64:
10498 ret = get_errno(fstat(arg1, &st));
10499 if (!is_error(ret))
10500 ret = host_to_target_stat64(cpu_env, arg2, &st);
10501 break;
10502 #endif
10503 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10504 #ifdef TARGET_NR_fstatat64
10505 case TARGET_NR_fstatat64:
10506 #endif
10507 #ifdef TARGET_NR_newfstatat
10508 case TARGET_NR_newfstatat:
10509 #endif
10510 if (!(p = lock_user_string(arg2)))
10511 goto efault;
10512 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10513 if (!is_error(ret))
10514 ret = host_to_target_stat64(cpu_env, arg3, &st);
10515 break;
10516 #endif
10517 #ifdef TARGET_NR_lchown
10518 case TARGET_NR_lchown:
10519 if (!(p = lock_user_string(arg1)))
10520 goto efault;
10521 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10522 unlock_user(p, arg1, 0);
10523 break;
10524 #endif
10525 #ifdef TARGET_NR_getuid
10526 case TARGET_NR_getuid:
10527 ret = get_errno(high2lowuid(getuid()));
10528 break;
10529 #endif
10530 #ifdef TARGET_NR_getgid
10531 case TARGET_NR_getgid:
10532 ret = get_errno(high2lowgid(getgid()));
10533 break;
10534 #endif
10535 #ifdef TARGET_NR_geteuid
10536 case TARGET_NR_geteuid:
10537 ret = get_errno(high2lowuid(geteuid()));
10538 break;
10539 #endif
10540 #ifdef TARGET_NR_getegid
10541 case TARGET_NR_getegid:
10542 ret = get_errno(high2lowgid(getegid()));
10543 break;
10544 #endif
10545 case TARGET_NR_setreuid:
10546 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10547 break;
10548 case TARGET_NR_setregid:
10549 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10550 break;
10551 case TARGET_NR_getgroups:
10553 int gidsetsize = arg1;
10554 target_id *target_grouplist;
10555 gid_t *grouplist;
10556 int i;
10558 grouplist = alloca(gidsetsize * sizeof(gid_t));
10559 ret = get_errno(getgroups(gidsetsize, grouplist));
10560 if (gidsetsize == 0)
10561 break;
10562 if (!is_error(ret)) {
10563 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10564 if (!target_grouplist)
10565 goto efault;
10566 for(i = 0;i < ret; i++)
10567 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10568 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10571 break;
10572 case TARGET_NR_setgroups:
10574 int gidsetsize = arg1;
10575 target_id *target_grouplist;
10576 gid_t *grouplist = NULL;
10577 int i;
10578 if (gidsetsize) {
10579 grouplist = alloca(gidsetsize * sizeof(gid_t));
10580 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10581 if (!target_grouplist) {
10582 ret = -TARGET_EFAULT;
10583 goto fail;
10585 for (i = 0; i < gidsetsize; i++) {
10586 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10588 unlock_user(target_grouplist, arg2, 0);
10590 ret = get_errno(setgroups(gidsetsize, grouplist));
10592 break;
10593 case TARGET_NR_fchown:
10594 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10595 break;
10596 #if defined(TARGET_NR_fchownat)
10597 case TARGET_NR_fchownat:
10598 if (!(p = lock_user_string(arg2)))
10599 goto efault;
10600 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10601 low2highgid(arg4), arg5));
10602 unlock_user(p, arg2, 0);
10603 break;
10604 #endif
10605 #ifdef TARGET_NR_setresuid
10606 case TARGET_NR_setresuid:
10607 ret = get_errno(sys_setresuid(low2highuid(arg1),
10608 low2highuid(arg2),
10609 low2highuid(arg3)));
10610 break;
10611 #endif
10612 #ifdef TARGET_NR_getresuid
10613 case TARGET_NR_getresuid:
10615 uid_t ruid, euid, suid;
10616 ret = get_errno(getresuid(&ruid, &euid, &suid));
10617 if (!is_error(ret)) {
10618 if (put_user_id(high2lowuid(ruid), arg1)
10619 || put_user_id(high2lowuid(euid), arg2)
10620 || put_user_id(high2lowuid(suid), arg3))
10621 goto efault;
10624 break;
10625 #endif
10626 #ifdef TARGET_NR_getresgid
10627 case TARGET_NR_setresgid:
10628 ret = get_errno(sys_setresgid(low2highgid(arg1),
10629 low2highgid(arg2),
10630 low2highgid(arg3)));
10631 break;
10632 #endif
10633 #ifdef TARGET_NR_getresgid
10634 case TARGET_NR_getresgid:
10636 gid_t rgid, egid, sgid;
10637 ret = get_errno(getresgid(&rgid, &egid, &sgid));
10638 if (!is_error(ret)) {
10639 if (put_user_id(high2lowgid(rgid), arg1)
10640 || put_user_id(high2lowgid(egid), arg2)
10641 || put_user_id(high2lowgid(sgid), arg3))
10642 goto efault;
10645 break;
10646 #endif
10647 #ifdef TARGET_NR_chown
10648 case TARGET_NR_chown:
10649 if (!(p = lock_user_string(arg1)))
10650 goto efault;
10651 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10652 unlock_user(p, arg1, 0);
10653 break;
10654 #endif
10655 case TARGET_NR_setuid:
10656 ret = get_errno(sys_setuid(low2highuid(arg1)));
10657 break;
10658 case TARGET_NR_setgid:
10659 ret = get_errno(sys_setgid(low2highgid(arg1)));
10660 break;
10661 case TARGET_NR_setfsuid:
10662 ret = get_errno(setfsuid(arg1));
10663 break;
10664 case TARGET_NR_setfsgid:
10665 ret = get_errno(setfsgid(arg1));
10666 break;
10668 #ifdef TARGET_NR_lchown32
10669 case TARGET_NR_lchown32:
10670 if (!(p = lock_user_string(arg1)))
10671 goto efault;
10672 ret = get_errno(lchown(p, arg2, arg3));
10673 unlock_user(p, arg1, 0);
10674 break;
10675 #endif
10676 #ifdef TARGET_NR_getuid32
10677 case TARGET_NR_getuid32:
10678 ret = get_errno(getuid());
10679 break;
10680 #endif
10682 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10683 /* Alpha specific */
10684 case TARGET_NR_getxuid:
10686 uid_t euid;
10687 euid=geteuid();
10688 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
10690 ret = get_errno(getuid());
10691 break;
10692 #endif
10693 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10694 /* Alpha specific */
10695 case TARGET_NR_getxgid:
10697 uid_t egid;
10698 egid=getegid();
10699 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
10701 ret = get_errno(getgid());
10702 break;
10703 #endif
10704 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10705 /* Alpha specific */
10706 case TARGET_NR_osf_getsysinfo:
10707 ret = -TARGET_EOPNOTSUPP;
10708 switch (arg1) {
10709 case TARGET_GSI_IEEE_FP_CONTROL:
10711 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
10713 /* Copied from linux ieee_fpcr_to_swcr. */
10714 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
10715 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
10716 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
10717 | SWCR_TRAP_ENABLE_DZE
10718 | SWCR_TRAP_ENABLE_OVF);
10719 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
10720 | SWCR_TRAP_ENABLE_INE);
10721 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
10722 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
10724 if (put_user_u64 (swcr, arg2))
10725 goto efault;
10726 ret = 0;
10728 break;
10730 /* case GSI_IEEE_STATE_AT_SIGNAL:
10731 -- Not implemented in linux kernel.
10732 case GSI_UACPROC:
10733 -- Retrieves current unaligned access state; not much used.
10734 case GSI_PROC_TYPE:
10735 -- Retrieves implver information; surely not used.
10736 case GSI_GET_HWRPB:
10737 -- Grabs a copy of the HWRPB; surely not used.
10740 break;
10741 #endif
10742 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10743 /* Alpha specific */
10744 case TARGET_NR_osf_setsysinfo:
10745 ret = -TARGET_EOPNOTSUPP;
10746 switch (arg1) {
10747 case TARGET_SSI_IEEE_FP_CONTROL:
10749 uint64_t swcr, fpcr, orig_fpcr;
10751 if (get_user_u64 (swcr, arg2)) {
10752 goto efault;
10754 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10755 fpcr = orig_fpcr & FPCR_DYN_MASK;
10757 /* Copied from linux ieee_swcr_to_fpcr. */
10758 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
10759 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
10760 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
10761 | SWCR_TRAP_ENABLE_DZE
10762 | SWCR_TRAP_ENABLE_OVF)) << 48;
10763 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
10764 | SWCR_TRAP_ENABLE_INE)) << 57;
10765 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
10766 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
10768 cpu_alpha_store_fpcr(cpu_env, fpcr);
10769 ret = 0;
10771 break;
10773 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10775 uint64_t exc, fpcr, orig_fpcr;
10776 int si_code;
10778 if (get_user_u64(exc, arg2)) {
10779 goto efault;
10782 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10784 /* We only add to the exception status here. */
10785 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
10787 cpu_alpha_store_fpcr(cpu_env, fpcr);
10788 ret = 0;
10790 /* Old exceptions are not signaled. */
10791 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
10793 /* If any exceptions set by this call,
10794 and are unmasked, send a signal. */
10795 si_code = 0;
10796 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
10797 si_code = TARGET_FPE_FLTRES;
10799 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
10800 si_code = TARGET_FPE_FLTUND;
10802 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
10803 si_code = TARGET_FPE_FLTOVF;
10805 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
10806 si_code = TARGET_FPE_FLTDIV;
10808 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
10809 si_code = TARGET_FPE_FLTINV;
10811 if (si_code != 0) {
10812 target_siginfo_t info;
10813 info.si_signo = SIGFPE;
10814 info.si_errno = 0;
10815 info.si_code = si_code;
10816 info._sifields._sigfault._addr
10817 = ((CPUArchState *)cpu_env)->pc;
10818 queue_signal((CPUArchState *)cpu_env, info.si_signo,
10819 QEMU_SI_FAULT, &info);
10822 break;
10824 /* case SSI_NVPAIRS:
10825 -- Used with SSIN_UACPROC to enable unaligned accesses.
10826 case SSI_IEEE_STATE_AT_SIGNAL:
10827 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10828 -- Not implemented in linux kernel
10831 break;
10832 #endif
10833 #ifdef TARGET_NR_osf_sigprocmask
10834 /* Alpha specific. */
10835 case TARGET_NR_osf_sigprocmask:
10837 abi_ulong mask;
10838 int how;
10839 sigset_t set, oldset;
10841 switch(arg1) {
10842 case TARGET_SIG_BLOCK:
10843 how = SIG_BLOCK;
10844 break;
10845 case TARGET_SIG_UNBLOCK:
10846 how = SIG_UNBLOCK;
10847 break;
10848 case TARGET_SIG_SETMASK:
10849 how = SIG_SETMASK;
10850 break;
10851 default:
10852 ret = -TARGET_EINVAL;
10853 goto fail;
10855 mask = arg2;
10856 target_to_host_old_sigset(&set, &mask);
10857 ret = do_sigprocmask(how, &set, &oldset);
10858 if (!ret) {
10859 host_to_target_old_sigset(&mask, &oldset);
10860 ret = mask;
10863 break;
10864 #endif
10866 #ifdef TARGET_NR_getgid32
10867 case TARGET_NR_getgid32:
10868 ret = get_errno(getgid());
10869 break;
10870 #endif
10871 #ifdef TARGET_NR_geteuid32
10872 case TARGET_NR_geteuid32:
10873 ret = get_errno(geteuid());
10874 break;
10875 #endif
10876 #ifdef TARGET_NR_getegid32
10877 case TARGET_NR_getegid32:
10878 ret = get_errno(getegid());
10879 break;
10880 #endif
10881 #ifdef TARGET_NR_setreuid32
10882 case TARGET_NR_setreuid32:
10883 ret = get_errno(setreuid(arg1, arg2));
10884 break;
10885 #endif
10886 #ifdef TARGET_NR_setregid32
10887 case TARGET_NR_setregid32:
10888 ret = get_errno(setregid(arg1, arg2));
10889 break;
10890 #endif
10891 #ifdef TARGET_NR_getgroups32
10892 case TARGET_NR_getgroups32:
10894 int gidsetsize = arg1;
10895 uint32_t *target_grouplist;
10896 gid_t *grouplist;
10897 int i;
10899 grouplist = alloca(gidsetsize * sizeof(gid_t));
10900 ret = get_errno(getgroups(gidsetsize, grouplist));
10901 if (gidsetsize == 0)
10902 break;
10903 if (!is_error(ret)) {
10904 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10905 if (!target_grouplist) {
10906 ret = -TARGET_EFAULT;
10907 goto fail;
10909 for(i = 0;i < ret; i++)
10910 target_grouplist[i] = tswap32(grouplist[i]);
10911 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10914 break;
10915 #endif
10916 #ifdef TARGET_NR_setgroups32
10917 case TARGET_NR_setgroups32:
10919 int gidsetsize = arg1;
10920 uint32_t *target_grouplist;
10921 gid_t *grouplist;
10922 int i;
10924 grouplist = alloca(gidsetsize * sizeof(gid_t));
10925 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10926 if (!target_grouplist) {
10927 ret = -TARGET_EFAULT;
10928 goto fail;
10930 for(i = 0;i < gidsetsize; i++)
10931 grouplist[i] = tswap32(target_grouplist[i]);
10932 unlock_user(target_grouplist, arg2, 0);
10933 ret = get_errno(setgroups(gidsetsize, grouplist));
10935 break;
10936 #endif
10937 #ifdef TARGET_NR_fchown32
10938 case TARGET_NR_fchown32:
10939 ret = get_errno(fchown(arg1, arg2, arg3));
10940 break;
10941 #endif
10942 #ifdef TARGET_NR_setresuid32
10943 case TARGET_NR_setresuid32:
10944 ret = get_errno(sys_setresuid(arg1, arg2, arg3));
10945 break;
10946 #endif
10947 #ifdef TARGET_NR_getresuid32
10948 case TARGET_NR_getresuid32:
10950 uid_t ruid, euid, suid;
10951 ret = get_errno(getresuid(&ruid, &euid, &suid));
10952 if (!is_error(ret)) {
10953 if (put_user_u32(ruid, arg1)
10954 || put_user_u32(euid, arg2)
10955 || put_user_u32(suid, arg3))
10956 goto efault;
10959 break;
10960 #endif
10961 #ifdef TARGET_NR_setresgid32
10962 case TARGET_NR_setresgid32:
10963 ret = get_errno(sys_setresgid(arg1, arg2, arg3));
10964 break;
10965 #endif
10966 #ifdef TARGET_NR_getresgid32
10967 case TARGET_NR_getresgid32:
10969 gid_t rgid, egid, sgid;
10970 ret = get_errno(getresgid(&rgid, &egid, &sgid));
10971 if (!is_error(ret)) {
10972 if (put_user_u32(rgid, arg1)
10973 || put_user_u32(egid, arg2)
10974 || put_user_u32(sgid, arg3))
10975 goto efault;
10978 break;
10979 #endif
10980 #ifdef TARGET_NR_chown32
10981 case TARGET_NR_chown32:
10982 if (!(p = lock_user_string(arg1)))
10983 goto efault;
10984 ret = get_errno(chown(p, arg2, arg3));
10985 unlock_user(p, arg1, 0);
10986 break;
10987 #endif
10988 #ifdef TARGET_NR_setuid32
10989 case TARGET_NR_setuid32:
10990 ret = get_errno(sys_setuid(arg1));
10991 break;
10992 #endif
10993 #ifdef TARGET_NR_setgid32
10994 case TARGET_NR_setgid32:
10995 ret = get_errno(sys_setgid(arg1));
10996 break;
10997 #endif
10998 #ifdef TARGET_NR_setfsuid32
10999 case TARGET_NR_setfsuid32:
11000 ret = get_errno(setfsuid(arg1));
11001 break;
11002 #endif
11003 #ifdef TARGET_NR_setfsgid32
11004 case TARGET_NR_setfsgid32:
11005 ret = get_errno(setfsgid(arg1));
11006 break;
11007 #endif
11009 case TARGET_NR_pivot_root:
11010 goto unimplemented;
11011 #ifdef TARGET_NR_mincore
11012 case TARGET_NR_mincore:
11014 void *a;
11015 ret = -TARGET_EFAULT;
11016 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
11017 goto efault;
11018 if (!(p = lock_user_string(arg3)))
11019 goto mincore_fail;
11020 ret = get_errno(mincore(a, arg2, p));
11021 unlock_user(p, arg3, ret);
11022 mincore_fail:
11023 unlock_user(a, arg1, 0);
11025 break;
11026 #endif
11027 #ifdef TARGET_NR_arm_fadvise64_64
11028 case TARGET_NR_arm_fadvise64_64:
11029 /* arm_fadvise64_64 looks like fadvise64_64 but
11030 * with different argument order: fd, advice, offset, len
11031 * rather than the usual fd, offset, len, advice.
11032 * Note that offset and len are both 64-bit so appear as
11033 * pairs of 32-bit registers.
11035 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11036 target_offset64(arg5, arg6), arg2);
11037 ret = -host_to_target_errno(ret);
11038 break;
11039 #endif
11041 #if TARGET_ABI_BITS == 32
11043 #ifdef TARGET_NR_fadvise64_64
11044 case TARGET_NR_fadvise64_64:
11045 /* 6 args: fd, offset (high, low), len (high, low), advice */
11046 if (regpairs_aligned(cpu_env)) {
11047 /* offset is in (3,4), len in (5,6) and advice in 7 */
11048 arg2 = arg3;
11049 arg3 = arg4;
11050 arg4 = arg5;
11051 arg5 = arg6;
11052 arg6 = arg7;
11054 ret = -host_to_target_errno(posix_fadvise(arg1,
11055 target_offset64(arg2, arg3),
11056 target_offset64(arg4, arg5),
11057 arg6));
11058 break;
11059 #endif
11061 #ifdef TARGET_NR_fadvise64
11062 case TARGET_NR_fadvise64:
11063 /* 5 args: fd, offset (high, low), len, advice */
11064 if (regpairs_aligned(cpu_env)) {
11065 /* offset is in (3,4), len in 5 and advice in 6 */
11066 arg2 = arg3;
11067 arg3 = arg4;
11068 arg4 = arg5;
11069 arg5 = arg6;
11071 ret = -host_to_target_errno(posix_fadvise(arg1,
11072 target_offset64(arg2, arg3),
11073 arg4, arg5));
11074 break;
11075 #endif
11077 #else /* not a 32-bit ABI */
11078 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11079 #ifdef TARGET_NR_fadvise64_64
11080 case TARGET_NR_fadvise64_64:
11081 #endif
11082 #ifdef TARGET_NR_fadvise64
11083 case TARGET_NR_fadvise64:
11084 #endif
11085 #ifdef TARGET_S390X
11086 switch (arg4) {
11087 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11088 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11089 case 6: arg4 = POSIX_FADV_DONTNEED; break;
11090 case 7: arg4 = POSIX_FADV_NOREUSE; break;
11091 default: break;
11093 #endif
11094 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11095 break;
11096 #endif
11097 #endif /* end of 64-bit ABI fadvise handling */
11099 #ifdef TARGET_NR_madvise
11100 case TARGET_NR_madvise:
11101 /* A straight passthrough may not be safe because qemu sometimes
11102 turns private file-backed mappings into anonymous mappings.
11103 This will break MADV_DONTNEED.
11104 This is a hint, so ignoring and returning success is ok. */
11105 ret = get_errno(0);
11106 break;
11107 #endif
11108 #if TARGET_ABI_BITS == 32
11109 case TARGET_NR_fcntl64:
11111 int cmd;
11112 struct flock64 fl;
11113 from_flock64_fn *copyfrom = copy_from_user_flock64;
11114 to_flock64_fn *copyto = copy_to_user_flock64;
11116 #ifdef TARGET_ARM
11117 if (((CPUARMState *)cpu_env)->eabi) {
11118 copyfrom = copy_from_user_eabi_flock64;
11119 copyto = copy_to_user_eabi_flock64;
11121 #endif
11123 cmd = target_to_host_fcntl_cmd(arg2);
11124 if (cmd == -TARGET_EINVAL) {
11125 ret = cmd;
11126 break;
11129 switch(arg2) {
11130 case TARGET_F_GETLK64:
11131 ret = copyfrom(&fl, arg3);
11132 if (ret) {
11133 break;
11135 ret = get_errno(fcntl(arg1, cmd, &fl));
11136 if (ret == 0) {
11137 ret = copyto(arg3, &fl);
11139 break;
11141 case TARGET_F_SETLK64:
11142 case TARGET_F_SETLKW64:
11143 ret = copyfrom(&fl, arg3);
11144 if (ret) {
11145 break;
11147 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11148 break;
11149 default:
11150 ret = do_fcntl(arg1, arg2, arg3);
11151 break;
11153 break;
11155 #endif
11156 #ifdef TARGET_NR_cacheflush
11157 case TARGET_NR_cacheflush:
11158 /* self-modifying code is handled automatically, so nothing needed */
11159 ret = 0;
11160 break;
11161 #endif
11162 #ifdef TARGET_NR_security
11163 case TARGET_NR_security:
11164 goto unimplemented;
11165 #endif
11166 #ifdef TARGET_NR_getpagesize
11167 case TARGET_NR_getpagesize:
11168 ret = TARGET_PAGE_SIZE;
11169 break;
11170 #endif
11171 case TARGET_NR_gettid:
11172 ret = get_errno(gettid());
11173 break;
11174 #ifdef TARGET_NR_readahead
11175 case TARGET_NR_readahead:
11176 #if TARGET_ABI_BITS == 32
11177 if (regpairs_aligned(cpu_env)) {
11178 arg2 = arg3;
11179 arg3 = arg4;
11180 arg4 = arg5;
11182 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
11183 #else
11184 ret = get_errno(readahead(arg1, arg2, arg3));
11185 #endif
11186 break;
11187 #endif
11188 #ifdef CONFIG_ATTR
11189 #ifdef TARGET_NR_setxattr
11190 case TARGET_NR_listxattr:
11191 case TARGET_NR_llistxattr:
11193 void *p, *b = 0;
11194 if (arg2) {
11195 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11196 if (!b) {
11197 ret = -TARGET_EFAULT;
11198 break;
11201 p = lock_user_string(arg1);
11202 if (p) {
11203 if (num == TARGET_NR_listxattr) {
11204 ret = get_errno(listxattr(p, b, arg3));
11205 } else {
11206 ret = get_errno(llistxattr(p, b, arg3));
11208 } else {
11209 ret = -TARGET_EFAULT;
11211 unlock_user(p, arg1, 0);
11212 unlock_user(b, arg2, arg3);
11213 break;
11215 case TARGET_NR_flistxattr:
11217 void *b = 0;
11218 if (arg2) {
11219 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11220 if (!b) {
11221 ret = -TARGET_EFAULT;
11222 break;
11225 ret = get_errno(flistxattr(arg1, b, arg3));
11226 unlock_user(b, arg2, arg3);
11227 break;
11229 case TARGET_NR_setxattr:
11230 case TARGET_NR_lsetxattr:
11232 void *p, *n, *v = 0;
11233 if (arg3) {
11234 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11235 if (!v) {
11236 ret = -TARGET_EFAULT;
11237 break;
11240 p = lock_user_string(arg1);
11241 n = lock_user_string(arg2);
11242 if (p && n) {
11243 if (num == TARGET_NR_setxattr) {
11244 ret = get_errno(setxattr(p, n, v, arg4, arg5));
11245 } else {
11246 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11248 } else {
11249 ret = -TARGET_EFAULT;
11251 unlock_user(p, arg1, 0);
11252 unlock_user(n, arg2, 0);
11253 unlock_user(v, arg3, 0);
11255 break;
11256 case TARGET_NR_fsetxattr:
11258 void *n, *v = 0;
11259 if (arg3) {
11260 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11261 if (!v) {
11262 ret = -TARGET_EFAULT;
11263 break;
11266 n = lock_user_string(arg2);
11267 if (n) {
11268 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11269 } else {
11270 ret = -TARGET_EFAULT;
11272 unlock_user(n, arg2, 0);
11273 unlock_user(v, arg3, 0);
11275 break;
11276 case TARGET_NR_getxattr:
11277 case TARGET_NR_lgetxattr:
11279 void *p, *n, *v = 0;
11280 if (arg3) {
11281 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11282 if (!v) {
11283 ret = -TARGET_EFAULT;
11284 break;
11287 p = lock_user_string(arg1);
11288 n = lock_user_string(arg2);
11289 if (p && n) {
11290 if (num == TARGET_NR_getxattr) {
11291 ret = get_errno(getxattr(p, n, v, arg4));
11292 } else {
11293 ret = get_errno(lgetxattr(p, n, v, arg4));
11295 } else {
11296 ret = -TARGET_EFAULT;
11298 unlock_user(p, arg1, 0);
11299 unlock_user(n, arg2, 0);
11300 unlock_user(v, arg3, arg4);
11302 break;
11303 case TARGET_NR_fgetxattr:
11305 void *n, *v = 0;
11306 if (arg3) {
11307 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11308 if (!v) {
11309 ret = -TARGET_EFAULT;
11310 break;
11313 n = lock_user_string(arg2);
11314 if (n) {
11315 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11316 } else {
11317 ret = -TARGET_EFAULT;
11319 unlock_user(n, arg2, 0);
11320 unlock_user(v, arg3, arg4);
11322 break;
11323 case TARGET_NR_removexattr:
11324 case TARGET_NR_lremovexattr:
11326 void *p, *n;
11327 p = lock_user_string(arg1);
11328 n = lock_user_string(arg2);
11329 if (p && n) {
11330 if (num == TARGET_NR_removexattr) {
11331 ret = get_errno(removexattr(p, n));
11332 } else {
11333 ret = get_errno(lremovexattr(p, n));
11335 } else {
11336 ret = -TARGET_EFAULT;
11338 unlock_user(p, arg1, 0);
11339 unlock_user(n, arg2, 0);
11341 break;
11342 case TARGET_NR_fremovexattr:
11344 void *n;
11345 n = lock_user_string(arg2);
11346 if (n) {
11347 ret = get_errno(fremovexattr(arg1, n));
11348 } else {
11349 ret = -TARGET_EFAULT;
11351 unlock_user(n, arg2, 0);
11353 break;
11354 #endif
11355 #endif /* CONFIG_ATTR */
11356 #ifdef TARGET_NR_set_thread_area
11357 case TARGET_NR_set_thread_area:
11358 #if defined(TARGET_MIPS)
11359 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11360 ret = 0;
11361 break;
11362 #elif defined(TARGET_CRIS)
11363 if (arg1 & 0xff)
11364 ret = -TARGET_EINVAL;
11365 else {
11366 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11367 ret = 0;
11369 break;
11370 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11371 ret = do_set_thread_area(cpu_env, arg1);
11372 break;
11373 #elif defined(TARGET_M68K)
11375 TaskState *ts = cpu->opaque;
11376 ts->tp_value = arg1;
11377 ret = 0;
11378 break;
11380 #else
11381 goto unimplemented_nowarn;
11382 #endif
11383 #endif
11384 #ifdef TARGET_NR_get_thread_area
11385 case TARGET_NR_get_thread_area:
11386 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11387 ret = do_get_thread_area(cpu_env, arg1);
11388 break;
11389 #elif defined(TARGET_M68K)
11391 TaskState *ts = cpu->opaque;
11392 ret = ts->tp_value;
11393 break;
11395 #else
11396 goto unimplemented_nowarn;
11397 #endif
11398 #endif
11399 #ifdef TARGET_NR_getdomainname
11400 case TARGET_NR_getdomainname:
11401 goto unimplemented_nowarn;
11402 #endif
11404 #ifdef TARGET_NR_clock_gettime
11405 case TARGET_NR_clock_gettime:
11407 struct timespec ts;
11408 ret = get_errno(clock_gettime(arg1, &ts));
11409 if (!is_error(ret)) {
11410 host_to_target_timespec(arg2, &ts);
11412 break;
11414 #endif
11415 #ifdef TARGET_NR_clock_getres
11416 case TARGET_NR_clock_getres:
11418 struct timespec ts;
11419 ret = get_errno(clock_getres(arg1, &ts));
11420 if (!is_error(ret)) {
11421 host_to_target_timespec(arg2, &ts);
11423 break;
11425 #endif
11426 #ifdef TARGET_NR_clock_nanosleep
11427 case TARGET_NR_clock_nanosleep:
11429 struct timespec ts;
11430 target_to_host_timespec(&ts, arg3);
11431 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11432 &ts, arg4 ? &ts : NULL));
11433 if (arg4)
11434 host_to_target_timespec(arg4, &ts);
11436 #if defined(TARGET_PPC)
11437 /* clock_nanosleep is odd in that it returns positive errno values.
11438 * On PPC, CR0 bit 3 should be set in such a situation. */
11439 if (ret && ret != -TARGET_ERESTARTSYS) {
11440 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11442 #endif
11443 break;
11445 #endif
11447 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11448 case TARGET_NR_set_tid_address:
11449 ret = get_errno(set_tid_address((int *)g2h(arg1)));
11450 break;
11451 #endif
11453 case TARGET_NR_tkill:
11454 ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11455 break;
11457 case TARGET_NR_tgkill:
11458 ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
11459 target_to_host_signal(arg3)));
11460 break;
11462 #ifdef TARGET_NR_set_robust_list
11463 case TARGET_NR_set_robust_list:
11464 case TARGET_NR_get_robust_list:
11465 /* The ABI for supporting robust futexes has userspace pass
11466 * the kernel a pointer to a linked list which is updated by
11467 * userspace after the syscall; the list is walked by the kernel
11468 * when the thread exits. Since the linked list in QEMU guest
11469 * memory isn't a valid linked list for the host and we have
11470 * no way to reliably intercept the thread-death event, we can't
11471 * support these. Silently return ENOSYS so that guest userspace
11472 * falls back to a non-robust futex implementation (which should
11473 * be OK except in the corner case of the guest crashing while
11474 * holding a mutex that is shared with another process via
11475 * shared memory).
11477 goto unimplemented_nowarn;
11478 #endif
11480 #if defined(TARGET_NR_utimensat)
11481 case TARGET_NR_utimensat:
11483 struct timespec *tsp, ts[2];
11484 if (!arg3) {
11485 tsp = NULL;
11486 } else {
11487 target_to_host_timespec(ts, arg3);
11488 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11489 tsp = ts;
11491 if (!arg2)
11492 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11493 else {
11494 if (!(p = lock_user_string(arg2))) {
11495 ret = -TARGET_EFAULT;
11496 goto fail;
11498 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11499 unlock_user(p, arg2, 0);
11502 break;
11503 #endif
11504 case TARGET_NR_futex:
11505 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11506 break;
11507 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11508 case TARGET_NR_inotify_init:
11509 ret = get_errno(sys_inotify_init());
11510 break;
11511 #endif
11512 #ifdef CONFIG_INOTIFY1
11513 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11514 case TARGET_NR_inotify_init1:
11515 ret = get_errno(sys_inotify_init1(arg1));
11516 break;
11517 #endif
11518 #endif
11519 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11520 case TARGET_NR_inotify_add_watch:
11521 p = lock_user_string(arg2);
11522 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11523 unlock_user(p, arg2, 0);
11524 break;
11525 #endif
11526 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11527 case TARGET_NR_inotify_rm_watch:
11528 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
11529 break;
11530 #endif
11532 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11533 case TARGET_NR_mq_open:
11535 struct mq_attr posix_mq_attr;
11536 int host_flags;
11538 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11539 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11540 goto efault;
11542 p = lock_user_string(arg1 - 1);
11543 if (!p) {
11544 goto efault;
11546 ret = get_errno(mq_open(p, host_flags, arg3, &posix_mq_attr));
11547 unlock_user (p, arg1, 0);
11549 break;
11551 case TARGET_NR_mq_unlink:
11552 p = lock_user_string(arg1 - 1);
11553 if (!p) {
11554 ret = -TARGET_EFAULT;
11555 break;
11557 ret = get_errno(mq_unlink(p));
11558 unlock_user (p, arg1, 0);
11559 break;
11561 case TARGET_NR_mq_timedsend:
11563 struct timespec ts;
11565 p = lock_user (VERIFY_READ, arg2, arg3, 1);
11566 if (arg5 != 0) {
11567 target_to_host_timespec(&ts, arg5);
11568 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11569 host_to_target_timespec(arg5, &ts);
11570 } else {
11571 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11573 unlock_user (p, arg2, arg3);
11575 break;
11577 case TARGET_NR_mq_timedreceive:
11579 struct timespec ts;
11580 unsigned int prio;
11582 p = lock_user (VERIFY_READ, arg2, arg3, 1);
11583 if (arg5 != 0) {
11584 target_to_host_timespec(&ts, arg5);
11585 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11586 &prio, &ts));
11587 host_to_target_timespec(arg5, &ts);
11588 } else {
11589 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11590 &prio, NULL));
11592 unlock_user (p, arg2, arg3);
11593 if (arg4 != 0)
11594 put_user_u32(prio, arg4);
11596 break;
11598 /* Not implemented for now... */
11599 /* case TARGET_NR_mq_notify: */
11600 /* break; */
11602 case TARGET_NR_mq_getsetattr:
11604 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11605 ret = 0;
11606 if (arg3 != 0) {
11607 ret = mq_getattr(arg1, &posix_mq_attr_out);
11608 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11610 if (arg2 != 0) {
11611 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11612 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
11616 break;
11617 #endif
11619 #ifdef CONFIG_SPLICE
11620 #ifdef TARGET_NR_tee
11621 case TARGET_NR_tee:
11623 ret = get_errno(tee(arg1,arg2,arg3,arg4));
11625 break;
11626 #endif
11627 #ifdef TARGET_NR_splice
11628 case TARGET_NR_splice:
11630 loff_t loff_in, loff_out;
11631 loff_t *ploff_in = NULL, *ploff_out = NULL;
11632 if (arg2) {
11633 if (get_user_u64(loff_in, arg2)) {
11634 goto efault;
11636 ploff_in = &loff_in;
11638 if (arg4) {
11639 if (get_user_u64(loff_out, arg4)) {
11640 goto efault;
11642 ploff_out = &loff_out;
11644 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11645 if (arg2) {
11646 if (put_user_u64(loff_in, arg2)) {
11647 goto efault;
11650 if (arg4) {
11651 if (put_user_u64(loff_out, arg4)) {
11652 goto efault;
11656 break;
11657 #endif
11658 #ifdef TARGET_NR_vmsplice
11659 case TARGET_NR_vmsplice:
11661 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11662 if (vec != NULL) {
11663 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11664 unlock_iovec(vec, arg2, arg3, 0);
11665 } else {
11666 ret = -host_to_target_errno(errno);
11669 break;
11670 #endif
11671 #endif /* CONFIG_SPLICE */
11672 #ifdef CONFIG_EVENTFD
11673 #if defined(TARGET_NR_eventfd)
11674 case TARGET_NR_eventfd:
11675 ret = get_errno(eventfd(arg1, 0));
11676 fd_trans_unregister(ret);
11677 break;
11678 #endif
11679 #if defined(TARGET_NR_eventfd2)
11680 case TARGET_NR_eventfd2:
11682 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11683 if (arg2 & TARGET_O_NONBLOCK) {
11684 host_flags |= O_NONBLOCK;
11686 if (arg2 & TARGET_O_CLOEXEC) {
11687 host_flags |= O_CLOEXEC;
11689 ret = get_errno(eventfd(arg1, host_flags));
11690 fd_trans_unregister(ret);
11691 break;
11693 #endif
11694 #endif /* CONFIG_EVENTFD */
11695 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11696 case TARGET_NR_fallocate:
11697 #if TARGET_ABI_BITS == 32
11698 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11699 target_offset64(arg5, arg6)));
11700 #else
11701 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11702 #endif
11703 break;
11704 #endif
11705 #if defined(CONFIG_SYNC_FILE_RANGE)
11706 #if defined(TARGET_NR_sync_file_range)
11707 case TARGET_NR_sync_file_range:
11708 #if TARGET_ABI_BITS == 32
11709 #if defined(TARGET_MIPS)
11710 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11711 target_offset64(arg5, arg6), arg7));
11712 #else
11713 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11714 target_offset64(arg4, arg5), arg6));
11715 #endif /* !TARGET_MIPS */
11716 #else
11717 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11718 #endif
11719 break;
11720 #endif
11721 #if defined(TARGET_NR_sync_file_range2)
11722 case TARGET_NR_sync_file_range2:
11723 /* This is like sync_file_range but the arguments are reordered */
11724 #if TARGET_ABI_BITS == 32
11725 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11726 target_offset64(arg5, arg6), arg2));
11727 #else
11728 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11729 #endif
11730 break;
11731 #endif
11732 #endif
11733 #if defined(TARGET_NR_signalfd4)
11734 case TARGET_NR_signalfd4:
11735 ret = do_signalfd4(arg1, arg2, arg4);
11736 break;
11737 #endif
11738 #if defined(TARGET_NR_signalfd)
11739 case TARGET_NR_signalfd:
11740 ret = do_signalfd4(arg1, arg2, 0);
11741 break;
11742 #endif
11743 #if defined(CONFIG_EPOLL)
11744 #if defined(TARGET_NR_epoll_create)
11745 case TARGET_NR_epoll_create:
11746 ret = get_errno(epoll_create(arg1));
11747 break;
11748 #endif
11749 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11750 case TARGET_NR_epoll_create1:
11751 ret = get_errno(epoll_create1(arg1));
11752 break;
11753 #endif
11754 #if defined(TARGET_NR_epoll_ctl)
11755 case TARGET_NR_epoll_ctl:
11757 struct epoll_event ep;
11758 struct epoll_event *epp = 0;
11759 if (arg4) {
11760 struct target_epoll_event *target_ep;
11761 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11762 goto efault;
11764 ep.events = tswap32(target_ep->events);
11765 /* The epoll_data_t union is just opaque data to the kernel,
11766 * so we transfer all 64 bits across and need not worry what
11767 * actual data type it is.
11769 ep.data.u64 = tswap64(target_ep->data.u64);
11770 unlock_user_struct(target_ep, arg4, 0);
11771 epp = &ep;
11773 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11774 break;
11776 #endif
11778 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11779 #if defined(TARGET_NR_epoll_wait)
11780 case TARGET_NR_epoll_wait:
11781 #endif
11782 #if defined(TARGET_NR_epoll_pwait)
11783 case TARGET_NR_epoll_pwait:
11784 #endif
11786 struct target_epoll_event *target_ep;
11787 struct epoll_event *ep;
11788 int epfd = arg1;
11789 int maxevents = arg3;
11790 int timeout = arg4;
11792 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11793 ret = -TARGET_EINVAL;
11794 break;
11797 target_ep = lock_user(VERIFY_WRITE, arg2,
11798 maxevents * sizeof(struct target_epoll_event), 1);
11799 if (!target_ep) {
11800 goto efault;
11803 ep = g_try_new(struct epoll_event, maxevents);
11804 if (!ep) {
11805 unlock_user(target_ep, arg2, 0);
11806 ret = -TARGET_ENOMEM;
11807 break;
11810 switch (num) {
11811 #if defined(TARGET_NR_epoll_pwait)
11812 case TARGET_NR_epoll_pwait:
11814 target_sigset_t *target_set;
11815 sigset_t _set, *set = &_set;
11817 if (arg5) {
11818 if (arg6 != sizeof(target_sigset_t)) {
11819 ret = -TARGET_EINVAL;
11820 break;
11823 target_set = lock_user(VERIFY_READ, arg5,
11824 sizeof(target_sigset_t), 1);
11825 if (!target_set) {
11826 ret = -TARGET_EFAULT;
11827 break;
11829 target_to_host_sigset(set, target_set);
11830 unlock_user(target_set, arg5, 0);
11831 } else {
11832 set = NULL;
11835 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11836 set, SIGSET_T_SIZE));
11837 break;
11839 #endif
11840 #if defined(TARGET_NR_epoll_wait)
11841 case TARGET_NR_epoll_wait:
11842 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11843 NULL, 0));
11844 break;
11845 #endif
11846 default:
11847 ret = -TARGET_ENOSYS;
11849 if (!is_error(ret)) {
11850 int i;
11851 for (i = 0; i < ret; i++) {
11852 target_ep[i].events = tswap32(ep[i].events);
11853 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11855 unlock_user(target_ep, arg2,
11856 ret * sizeof(struct target_epoll_event));
11857 } else {
11858 unlock_user(target_ep, arg2, 0);
11860 g_free(ep);
11861 break;
11863 #endif
11864 #endif
11865 #ifdef TARGET_NR_prlimit64
11866 case TARGET_NR_prlimit64:
11868 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11869 struct target_rlimit64 *target_rnew, *target_rold;
11870 struct host_rlimit64 rnew, rold, *rnewp = 0;
11871 int resource = target_to_host_resource(arg2);
11872 if (arg3) {
11873 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11874 goto efault;
11876 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11877 rnew.rlim_max = tswap64(target_rnew->rlim_max);
11878 unlock_user_struct(target_rnew, arg3, 0);
11879 rnewp = &rnew;
11882 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11883 if (!is_error(ret) && arg4) {
11884 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11885 goto efault;
11887 target_rold->rlim_cur = tswap64(rold.rlim_cur);
11888 target_rold->rlim_max = tswap64(rold.rlim_max);
11889 unlock_user_struct(target_rold, arg4, 1);
11891 break;
11893 #endif
11894 #ifdef TARGET_NR_gethostname
11895 case TARGET_NR_gethostname:
11897 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11898 if (name) {
11899 ret = get_errno(gethostname(name, arg2));
11900 unlock_user(name, arg1, arg2);
11901 } else {
11902 ret = -TARGET_EFAULT;
11904 break;
11906 #endif
11907 #ifdef TARGET_NR_atomic_cmpxchg_32
11908 case TARGET_NR_atomic_cmpxchg_32:
11910 /* should use start_exclusive from main.c */
11911 abi_ulong mem_value;
11912 if (get_user_u32(mem_value, arg6)) {
11913 target_siginfo_t info;
11914 info.si_signo = SIGSEGV;
11915 info.si_errno = 0;
11916 info.si_code = TARGET_SEGV_MAPERR;
11917 info._sifields._sigfault._addr = arg6;
11918 queue_signal((CPUArchState *)cpu_env, info.si_signo,
11919 QEMU_SI_FAULT, &info);
11920 ret = 0xdeadbeef;
11923 if (mem_value == arg2)
11924 put_user_u32(arg1, arg6);
11925 ret = mem_value;
11926 break;
11928 #endif
11929 #ifdef TARGET_NR_atomic_barrier
11930 case TARGET_NR_atomic_barrier:
11932 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
11933 ret = 0;
11934 break;
11936 #endif
11938 #ifdef TARGET_NR_timer_create
11939 case TARGET_NR_timer_create:
11941 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11943 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
11945 int clkid = arg1;
11946 int timer_index = next_free_host_timer();
11948 if (timer_index < 0) {
11949 ret = -TARGET_EAGAIN;
11950 } else {
11951 timer_t *phtimer = g_posix_timers + timer_index;
11953 if (arg2) {
11954 phost_sevp = &host_sevp;
11955 ret = target_to_host_sigevent(phost_sevp, arg2);
11956 if (ret != 0) {
11957 break;
11961 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
11962 if (ret) {
11963 phtimer = NULL;
11964 } else {
11965 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
11966 goto efault;
11970 break;
11972 #endif
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            /* new_value is mandatory for timer_settime() */
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            target_to_host_itimerspec(&hspec_new, arg3);
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            /* Bug fix: the previous setting must be copied back through
             * old_value (arg4), not through the flags argument (arg2),
             * and only when the guest actually supplied a buffer. */
            if (arg4) {
                host_to_target_itimerspec(arg4, &hspec_old);
            }
        }
        break;
    }
#endif
11998 #ifdef TARGET_NR_timer_gettime
11999 case TARGET_NR_timer_gettime:
12001 /* args: timer_t timerid, struct itimerspec *curr_value */
12002 target_timer_t timerid = get_timer_id(arg1);
12004 if (timerid < 0) {
12005 ret = timerid;
12006 } else if (!arg2) {
12007 ret = -TARGET_EFAULT;
12008 } else {
12009 timer_t htimer = g_posix_timers[timerid];
12010 struct itimerspec hspec;
12011 ret = get_errno(timer_gettime(htimer, &hspec));
12013 if (host_to_target_itimerspec(arg2, &hspec)) {
12014 ret = -TARGET_EFAULT;
12017 break;
12019 #endif
12021 #ifdef TARGET_NR_timer_getoverrun
12022 case TARGET_NR_timer_getoverrun:
12024 /* args: timer_t timerid */
12025 target_timer_t timerid = get_timer_id(arg1);
12027 if (timerid < 0) {
12028 ret = timerid;
12029 } else {
12030 timer_t htimer = g_posix_timers[timerid];
12031 ret = get_errno(timer_getoverrun(htimer));
12033 fd_trans_unregister(ret);
12034 break;
12036 #endif
12038 #ifdef TARGET_NR_timer_delete
12039 case TARGET_NR_timer_delete:
12041 /* args: timer_t timerid */
12042 target_timer_t timerid = get_timer_id(arg1);
12044 if (timerid < 0) {
12045 ret = timerid;
12046 } else {
12047 timer_t htimer = g_posix_timers[timerid];
12048 ret = get_errno(timer_delete(htimer));
12049 g_posix_timers[timerid] = 0;
12051 break;
12053 #endif
12055 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12056 case TARGET_NR_timerfd_create:
12057 ret = get_errno(timerfd_create(arg1,
12058 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12059 break;
12060 #endif
12062 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12063 case TARGET_NR_timerfd_gettime:
12065 struct itimerspec its_curr;
12067 ret = get_errno(timerfd_gettime(arg1, &its_curr));
12069 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12070 goto efault;
12073 break;
12074 #endif
12076 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12077 case TARGET_NR_timerfd_settime:
12079 struct itimerspec its_new, its_old, *p_new;
12081 if (arg3) {
12082 if (target_to_host_itimerspec(&its_new, arg3)) {
12083 goto efault;
12085 p_new = &its_new;
12086 } else {
12087 p_new = NULL;
12090 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12092 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12093 goto efault;
12096 break;
12097 #endif
12099 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12100 case TARGET_NR_ioprio_get:
12101 ret = get_errno(ioprio_get(arg1, arg2));
12102 break;
12103 #endif
12105 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12106 case TARGET_NR_ioprio_set:
12107 ret = get_errno(ioprio_set(arg1, arg2, arg3));
12108 break;
12109 #endif
12111 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12112 case TARGET_NR_setns:
12113 ret = get_errno(setns(arg1, arg2));
12114 break;
12115 #endif
12116 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12117 case TARGET_NR_unshare:
12118 ret = get_errno(unshare(arg1));
12119 break;
12120 #endif
12121 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12122 case TARGET_NR_kcmp:
12123 ret = get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12124 break;
12125 #endif
12127 default:
12128 unimplemented:
12129 gemu_log("qemu: Unsupported syscall: %d\n", num);
12130 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
12131 unimplemented_nowarn:
12132 #endif
12133 ret = -TARGET_ENOSYS;
12134 break;
12136 fail:
12137 #ifdef DEBUG
12138 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
12139 #endif
12140 if(do_strace)
12141 print_syscall_ret(num, ret);
12142 trace_guest_user_syscall_ret(cpu, num, ret);
12143 return ret;
12144 efault:
12145 ret = -TARGET_EFAULT;
12146 goto fail;