/*
 * linux-user: Handle TIOCSTART and TIOCSTOP
 * Source: qemu (armbru.git) / linux-user / syscall.c
 * blob acb004f035cfe3cea35889de529dec7414d01f0d
 */
/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include <elf.h>
24 #include <endian.h>
25 #include <grp.h>
26 #include <sys/ipc.h>
27 #include <sys/msg.h>
28 #include <sys/wait.h>
29 #include <sys/mount.h>
30 #include <sys/file.h>
31 #include <sys/fsuid.h>
32 #include <sys/personality.h>
33 #include <sys/prctl.h>
34 #include <sys/resource.h>
35 #include <sys/swap.h>
36 #include <linux/capability.h>
37 #include <sched.h>
38 #include <sys/timex.h>
39 #ifdef __ia64__
40 int __clone2(int (*fn)(void *), void *child_stack_base,
41 size_t stack_size, int flags, void *arg, ...);
42 #endif
43 #include <sys/socket.h>
44 #include <sys/un.h>
45 #include <sys/uio.h>
46 #include <poll.h>
47 #include <sys/times.h>
48 #include <sys/shm.h>
49 #include <sys/sem.h>
50 #include <sys/statfs.h>
51 #include <time.h>
52 #include <utime.h>
53 #include <sys/sysinfo.h>
54 #include <sys/signalfd.h>
55 //#include <sys/user.h>
56 #include <netinet/ip.h>
57 #include <netinet/tcp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include "qemu-common.h"
61 #ifdef CONFIG_TIMERFD
62 #include <sys/timerfd.h>
63 #endif
64 #ifdef TARGET_GPROF
65 #include <sys/gmon.h>
66 #endif
67 #ifdef CONFIG_EVENTFD
68 #include <sys/eventfd.h>
69 #endif
70 #ifdef CONFIG_EPOLL
71 #include <sys/epoll.h>
72 #endif
73 #ifdef CONFIG_ATTR
74 #include "qemu/xattr.h"
75 #endif
76 #ifdef CONFIG_SENDFILE
77 #include <sys/sendfile.h>
78 #endif
80 #define termios host_termios
81 #define winsize host_winsize
82 #define termio host_termio
83 #define sgttyb host_sgttyb /* same as target */
84 #define tchars host_tchars /* same as target */
85 #define ltchars host_ltchars /* same as target */
87 #include <linux/termios.h>
88 #include <linux/unistd.h>
89 #include <linux/cdrom.h>
90 #include <linux/hdreg.h>
91 #include <linux/soundcard.h>
92 #include <linux/kd.h>
93 #include <linux/mtio.h>
94 #include <linux/fs.h>
95 #if defined(CONFIG_FIEMAP)
96 #include <linux/fiemap.h>
97 #endif
98 #include <linux/fb.h>
99 #include <linux/vt.h>
100 #include <linux/dm-ioctl.h>
101 #include <linux/reboot.h>
102 #include <linux/route.h>
103 #include <linux/filter.h>
104 #include <linux/blkpg.h>
105 #include <netpacket/packet.h>
106 #include <linux/netlink.h>
107 #ifdef CONFIG_RTNETLINK
108 #include <linux/rtnetlink.h>
109 #include <linux/if_bridge.h>
110 #endif
111 #include <linux/audit.h>
112 #include "linux_loop.h"
113 #include "uname.h"
115 #include "qemu.h"
117 #ifndef CLONE_IO
118 #define CLONE_IO 0x80000000 /* Clone io context */
119 #endif
121 /* We can't directly call the host clone syscall, because this will
122 * badly confuse libc (breaking mutexes, for example). So we must
123 * divide clone flags into:
124 * * flag combinations that look like pthread_create()
125 * * flag combinations that look like fork()
126 * * flags we can implement within QEMU itself
127 * * flags we can't support and will return an error for
129 /* For thread creation, all these flags must be present; for
130 * fork, none must be present.
132 #define CLONE_THREAD_FLAGS \
133 (CLONE_VM | CLONE_FS | CLONE_FILES | \
134 CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
136 /* These flags are ignored:
137 * CLONE_DETACHED is now ignored by the kernel;
138 * CLONE_IO is just an optimisation hint to the I/O scheduler
140 #define CLONE_IGNORED_FLAGS \
141 (CLONE_DETACHED | CLONE_IO)
143 /* Flags for fork which we can implement within QEMU itself */
144 #define CLONE_OPTIONAL_FORK_FLAGS \
145 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
146 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
148 /* Flags for thread creation which we can implement within QEMU itself */
149 #define CLONE_OPTIONAL_THREAD_FLAGS \
150 (CLONE_SETTLS | CLONE_PARENT_SETTID | \
151 CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
153 #define CLONE_INVALID_FORK_FLAGS \
154 (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
156 #define CLONE_INVALID_THREAD_FLAGS \
157 (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
158 CLONE_IGNORED_FLAGS))
160 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
161 * have almost all been allocated. We cannot support any of
162 * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
163 * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
164 * The checks against the invalid thread masks above will catch these.
165 * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
168 //#define DEBUG
169 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
170 * once. This exercises the codepaths for restart.
172 //#define DEBUG_ERESTARTSYS
174 //#include <linux/msdos_fs.h>
175 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct linux_dirent [2])
176 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])
178 #undef _syscall0
179 #undef _syscall1
180 #undef _syscall2
181 #undef _syscall3
182 #undef _syscall4
183 #undef _syscall5
184 #undef _syscall6
186 #define _syscall0(type,name) \
187 static type name (void) \
189 return syscall(__NR_##name); \
192 #define _syscall1(type,name,type1,arg1) \
193 static type name (type1 arg1) \
195 return syscall(__NR_##name, arg1); \
198 #define _syscall2(type,name,type1,arg1,type2,arg2) \
199 static type name (type1 arg1,type2 arg2) \
201 return syscall(__NR_##name, arg1, arg2); \
204 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
205 static type name (type1 arg1,type2 arg2,type3 arg3) \
207 return syscall(__NR_##name, arg1, arg2, arg3); \
210 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
211 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
213 return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
216 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
217 type5,arg5) \
218 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
220 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
224 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
225 type5,arg5,type6,arg6) \
226 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
227 type6 arg6) \
229 return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
233 #define __NR_sys_uname __NR_uname
234 #define __NR_sys_getcwd1 __NR_getcwd
235 #define __NR_sys_getdents __NR_getdents
236 #define __NR_sys_getdents64 __NR_getdents64
237 #define __NR_sys_getpriority __NR_getpriority
238 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
239 #define __NR_sys_syslog __NR_syslog
240 #define __NR_sys_futex __NR_futex
241 #define __NR_sys_inotify_init __NR_inotify_init
242 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
243 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
245 #if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
246 defined(__s390x__)
247 #define __NR__llseek __NR_lseek
248 #endif
250 /* Newer kernel ports have llseek() instead of _llseek() */
251 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
252 #define TARGET_NR__llseek TARGET_NR_llseek
253 #endif
#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
 * errno, not a target one.  -ENOSYS is returned directly (rather than
 * -1 with errno set) because callers treat the result as a raw syscall
 * return value.
 */
static int gettid(void) {
    return -ENOSYS;
}
#endif
264 #if defined(TARGET_NR_getdents) && defined(__NR_getdents)
265 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
266 #endif
267 #if !defined(__NR_getdents) || \
268 (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
269 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
270 #endif
271 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
272 _syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
273 loff_t *, res, uint, wh);
274 #endif
275 _syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
276 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
277 #ifdef __NR_exit_group
278 _syscall1(int,exit_group,int,error_code)
279 #endif
280 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
281 _syscall1(int,set_tid_address,int *,tidptr)
282 #endif
283 #if defined(TARGET_NR_futex) && defined(__NR_futex)
284 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
285 const struct timespec *,timeout,int *,uaddr2,int,val3)
286 #endif
287 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
288 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
289 unsigned long *, user_mask_ptr);
290 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
291 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
292 unsigned long *, user_mask_ptr);
293 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
294 void *, arg);
295 _syscall2(int, capget, struct __user_cap_header_struct *, header,
296 struct __user_cap_data_struct *, data);
297 _syscall2(int, capset, struct __user_cap_header_struct *, header,
298 struct __user_cap_data_struct *, data);
299 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
300 _syscall2(int, ioprio_get, int, which, int, who)
301 #endif
302 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
303 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
304 #endif
305 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
306 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
307 #endif
309 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
310 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
311 unsigned long, idx1, unsigned long, idx2)
312 #endif
314 static bitmask_transtbl fcntl_flags_tbl[] = {
315 { TARGET_O_ACCMODE, TARGET_O_WRONLY, O_ACCMODE, O_WRONLY, },
316 { TARGET_O_ACCMODE, TARGET_O_RDWR, O_ACCMODE, O_RDWR, },
317 { TARGET_O_CREAT, TARGET_O_CREAT, O_CREAT, O_CREAT, },
318 { TARGET_O_EXCL, TARGET_O_EXCL, O_EXCL, O_EXCL, },
319 { TARGET_O_NOCTTY, TARGET_O_NOCTTY, O_NOCTTY, O_NOCTTY, },
320 { TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
321 { TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
322 { TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
323 { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
324 { TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
325 { TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
326 { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
327 { TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
328 #if defined(O_DIRECT)
329 { TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
330 #endif
331 #if defined(O_NOATIME)
332 { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
333 #endif
334 #if defined(O_CLOEXEC)
335 { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
336 #endif
337 #if defined(O_PATH)
338 { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
339 #endif
340 /* Don't terminate the list prematurely on 64-bit host+guest. */
341 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
342 { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
343 #endif
344 { 0, 0, 0, 0 }
347 enum {
348 QEMU_IFLA_BR_UNSPEC,
349 QEMU_IFLA_BR_FORWARD_DELAY,
350 QEMU_IFLA_BR_HELLO_TIME,
351 QEMU_IFLA_BR_MAX_AGE,
352 QEMU_IFLA_BR_AGEING_TIME,
353 QEMU_IFLA_BR_STP_STATE,
354 QEMU_IFLA_BR_PRIORITY,
355 QEMU_IFLA_BR_VLAN_FILTERING,
356 QEMU_IFLA_BR_VLAN_PROTOCOL,
357 QEMU_IFLA_BR_GROUP_FWD_MASK,
358 QEMU_IFLA_BR_ROOT_ID,
359 QEMU_IFLA_BR_BRIDGE_ID,
360 QEMU_IFLA_BR_ROOT_PORT,
361 QEMU_IFLA_BR_ROOT_PATH_COST,
362 QEMU_IFLA_BR_TOPOLOGY_CHANGE,
363 QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
364 QEMU_IFLA_BR_HELLO_TIMER,
365 QEMU_IFLA_BR_TCN_TIMER,
366 QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER,
367 QEMU_IFLA_BR_GC_TIMER,
368 QEMU_IFLA_BR_GROUP_ADDR,
369 QEMU_IFLA_BR_FDB_FLUSH,
370 QEMU_IFLA_BR_MCAST_ROUTER,
371 QEMU_IFLA_BR_MCAST_SNOOPING,
372 QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR,
373 QEMU_IFLA_BR_MCAST_QUERIER,
374 QEMU_IFLA_BR_MCAST_HASH_ELASTICITY,
375 QEMU_IFLA_BR_MCAST_HASH_MAX,
376 QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT,
377 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT,
378 QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL,
379 QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL,
380 QEMU_IFLA_BR_MCAST_QUERIER_INTVL,
381 QEMU_IFLA_BR_MCAST_QUERY_INTVL,
382 QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL,
383 QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL,
384 QEMU_IFLA_BR_NF_CALL_IPTABLES,
385 QEMU_IFLA_BR_NF_CALL_IP6TABLES,
386 QEMU_IFLA_BR_NF_CALL_ARPTABLES,
387 QEMU_IFLA_BR_VLAN_DEFAULT_PVID,
388 QEMU_IFLA_BR_PAD,
389 QEMU_IFLA_BR_VLAN_STATS_ENABLED,
390 QEMU_IFLA_BR_MCAST_STATS_ENABLED,
391 QEMU___IFLA_BR_MAX,
394 enum {
395 QEMU_IFLA_UNSPEC,
396 QEMU_IFLA_ADDRESS,
397 QEMU_IFLA_BROADCAST,
398 QEMU_IFLA_IFNAME,
399 QEMU_IFLA_MTU,
400 QEMU_IFLA_LINK,
401 QEMU_IFLA_QDISC,
402 QEMU_IFLA_STATS,
403 QEMU_IFLA_COST,
404 QEMU_IFLA_PRIORITY,
405 QEMU_IFLA_MASTER,
406 QEMU_IFLA_WIRELESS,
407 QEMU_IFLA_PROTINFO,
408 QEMU_IFLA_TXQLEN,
409 QEMU_IFLA_MAP,
410 QEMU_IFLA_WEIGHT,
411 QEMU_IFLA_OPERSTATE,
412 QEMU_IFLA_LINKMODE,
413 QEMU_IFLA_LINKINFO,
414 QEMU_IFLA_NET_NS_PID,
415 QEMU_IFLA_IFALIAS,
416 QEMU_IFLA_NUM_VF,
417 QEMU_IFLA_VFINFO_LIST,
418 QEMU_IFLA_STATS64,
419 QEMU_IFLA_VF_PORTS,
420 QEMU_IFLA_PORT_SELF,
421 QEMU_IFLA_AF_SPEC,
422 QEMU_IFLA_GROUP,
423 QEMU_IFLA_NET_NS_FD,
424 QEMU_IFLA_EXT_MASK,
425 QEMU_IFLA_PROMISCUITY,
426 QEMU_IFLA_NUM_TX_QUEUES,
427 QEMU_IFLA_NUM_RX_QUEUES,
428 QEMU_IFLA_CARRIER,
429 QEMU_IFLA_PHYS_PORT_ID,
430 QEMU_IFLA_CARRIER_CHANGES,
431 QEMU_IFLA_PHYS_SWITCH_ID,
432 QEMU_IFLA_LINK_NETNSID,
433 QEMU_IFLA_PHYS_PORT_NAME,
434 QEMU_IFLA_PROTO_DOWN,
435 QEMU_IFLA_GSO_MAX_SEGS,
436 QEMU_IFLA_GSO_MAX_SIZE,
437 QEMU_IFLA_PAD,
438 QEMU_IFLA_XDP,
439 QEMU___IFLA_MAX
442 enum {
443 QEMU_IFLA_BRPORT_UNSPEC,
444 QEMU_IFLA_BRPORT_STATE,
445 QEMU_IFLA_BRPORT_PRIORITY,
446 QEMU_IFLA_BRPORT_COST,
447 QEMU_IFLA_BRPORT_MODE,
448 QEMU_IFLA_BRPORT_GUARD,
449 QEMU_IFLA_BRPORT_PROTECT,
450 QEMU_IFLA_BRPORT_FAST_LEAVE,
451 QEMU_IFLA_BRPORT_LEARNING,
452 QEMU_IFLA_BRPORT_UNICAST_FLOOD,
453 QEMU_IFLA_BRPORT_PROXYARP,
454 QEMU_IFLA_BRPORT_LEARNING_SYNC,
455 QEMU_IFLA_BRPORT_PROXYARP_WIFI,
456 QEMU_IFLA_BRPORT_ROOT_ID,
457 QEMU_IFLA_BRPORT_BRIDGE_ID,
458 QEMU_IFLA_BRPORT_DESIGNATED_PORT,
459 QEMU_IFLA_BRPORT_DESIGNATED_COST,
460 QEMU_IFLA_BRPORT_ID,
461 QEMU_IFLA_BRPORT_NO,
462 QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
463 QEMU_IFLA_BRPORT_CONFIG_PENDING,
464 QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER,
465 QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER,
466 QEMU_IFLA_BRPORT_HOLD_TIMER,
467 QEMU_IFLA_BRPORT_FLUSH,
468 QEMU_IFLA_BRPORT_MULTICAST_ROUTER,
469 QEMU_IFLA_BRPORT_PAD,
470 QEMU___IFLA_BRPORT_MAX
473 enum {
474 QEMU_IFLA_INFO_UNSPEC,
475 QEMU_IFLA_INFO_KIND,
476 QEMU_IFLA_INFO_DATA,
477 QEMU_IFLA_INFO_XSTATS,
478 QEMU_IFLA_INFO_SLAVE_KIND,
479 QEMU_IFLA_INFO_SLAVE_DATA,
480 QEMU___IFLA_INFO_MAX,
483 enum {
484 QEMU_IFLA_INET_UNSPEC,
485 QEMU_IFLA_INET_CONF,
486 QEMU___IFLA_INET_MAX,
489 enum {
490 QEMU_IFLA_INET6_UNSPEC,
491 QEMU_IFLA_INET6_FLAGS,
492 QEMU_IFLA_INET6_CONF,
493 QEMU_IFLA_INET6_STATS,
494 QEMU_IFLA_INET6_MCAST,
495 QEMU_IFLA_INET6_CACHEINFO,
496 QEMU_IFLA_INET6_ICMP6STATS,
497 QEMU_IFLA_INET6_TOKEN,
498 QEMU_IFLA_INET6_ADDR_GEN_MODE,
499 QEMU___IFLA_INET6_MAX
502 typedef abi_long (*TargetFdDataFunc)(void *, size_t);
503 typedef abi_long (*TargetFdAddrFunc)(void *, abi_ulong, socklen_t);
504 typedef struct TargetFdTrans {
505 TargetFdDataFunc host_to_target_data;
506 TargetFdDataFunc target_to_host_data;
507 TargetFdAddrFunc target_to_host_addr;
508 } TargetFdTrans;
510 static TargetFdTrans **target_fd_trans;
512 static unsigned int target_fd_max;
514 static TargetFdDataFunc fd_trans_target_to_host_data(int fd)
516 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
517 return target_fd_trans[fd]->target_to_host_data;
519 return NULL;
522 static TargetFdDataFunc fd_trans_host_to_target_data(int fd)
524 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
525 return target_fd_trans[fd]->host_to_target_data;
527 return NULL;
530 static TargetFdAddrFunc fd_trans_target_to_host_addr(int fd)
532 if (fd >= 0 && fd < target_fd_max && target_fd_trans[fd]) {
533 return target_fd_trans[fd]->target_to_host_addr;
535 return NULL;
538 static void fd_trans_register(int fd, TargetFdTrans *trans)
540 unsigned int oldmax;
542 if (fd >= target_fd_max) {
543 oldmax = target_fd_max;
544 target_fd_max = ((fd >> 6) + 1) << 6; /* by slice of 64 entries */
545 target_fd_trans = g_renew(TargetFdTrans *,
546 target_fd_trans, target_fd_max);
547 memset((void *)(target_fd_trans + oldmax), 0,
548 (target_fd_max - oldmax) * sizeof(TargetFdTrans *));
550 target_fd_trans[fd] = trans;
553 static void fd_trans_unregister(int fd)
555 if (fd >= 0 && fd < target_fd_max) {
556 target_fd_trans[fd] = NULL;
560 static void fd_trans_dup(int oldfd, int newfd)
562 fd_trans_unregister(newfd);
563 if (oldfd < target_fd_max && target_fd_trans[oldfd]) {
564 fd_trans_register(newfd, target_fd_trans[oldfd]);
/* getcwd() wrapper with kernel-style return semantics: on success
 * return the number of bytes written to buf including the trailing
 * NUL; on failure return -1 with errno set by getcwd().
 */
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return (-1);
    }
    return strlen(buf)+1;
}
#ifdef TARGET_NR_utimensat
#if defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
/* Host kernel has no utimensat; fail with ENOSYS so the caller's
 * get_errno() converts it to the target errno.
 */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
592 #ifdef CONFIG_INOTIFY
593 #include <sys/inotify.h>
595 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
596 static int sys_inotify_init(void)
598 return (inotify_init());
600 #endif
601 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
602 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
604 return (inotify_add_watch(fd, pathname, mask));
606 #endif
607 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
608 static int sys_inotify_rm_watch(int fd, int32_t wd)
610 return (inotify_rm_watch(fd, wd));
612 #endif
613 #ifdef CONFIG_INOTIFY1
614 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
615 static int sys_inotify_init1(int flags)
617 return (inotify_init1(flags));
619 #endif
620 #endif
621 #else
622 /* Userspace can usually survive runtime without inotify */
623 #undef TARGET_NR_inotify_init
624 #undef TARGET_NR_inotify_init1
625 #undef TARGET_NR_inotify_add_watch
626 #undef TARGET_NR_inotify_rm_watch
627 #endif /* CONFIG_INOTIFY */
629 #if defined(TARGET_NR_prlimit64)
630 #ifndef __NR_prlimit64
631 # define __NR_prlimit64 -1
632 #endif
633 #define __NR_sys_prlimit64 __NR_prlimit64
634 /* The glibc rlimit structure may not be that used by the underlying syscall */
635 struct host_rlimit64 {
636 uint64_t rlim_cur;
637 uint64_t rlim_max;
639 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
640 const struct host_rlimit64 *, new_limit,
641 struct host_rlimit64 *, old_limit)
642 #endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

/* Claim the first free slot in g_posix_timers (marking it in-use with
 * the sentinel value 1) and return its index, or -1 if all slots are
 * taken.
 */
static inline int next_free_host_timer(void)
{
    int k;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
/* ARM EABI and MIPS expect 64bit types aligned even on pairs or registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    /* Only ARM EABI aligns 64-bit arguments to even register pairs. */
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS) && (TARGET_ABI_BITS == 32)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV AVI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
679 #define ERRNO_TABLE_SIZE 1200
681 /* target_to_host_errno_table[] is initialized from
682 * host_to_target_errno_table[] in syscall_init(). */
683 static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
687 * This list is the union of errno values overridden in asm-<arch>/errno.h
688 * minus the errnos that are not actually generic to all archs.
690 static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
691 [EAGAIN] = TARGET_EAGAIN,
692 [EIDRM] = TARGET_EIDRM,
693 [ECHRNG] = TARGET_ECHRNG,
694 [EL2NSYNC] = TARGET_EL2NSYNC,
695 [EL3HLT] = TARGET_EL3HLT,
696 [EL3RST] = TARGET_EL3RST,
697 [ELNRNG] = TARGET_ELNRNG,
698 [EUNATCH] = TARGET_EUNATCH,
699 [ENOCSI] = TARGET_ENOCSI,
700 [EL2HLT] = TARGET_EL2HLT,
701 [EDEADLK] = TARGET_EDEADLK,
702 [ENOLCK] = TARGET_ENOLCK,
703 [EBADE] = TARGET_EBADE,
704 [EBADR] = TARGET_EBADR,
705 [EXFULL] = TARGET_EXFULL,
706 [ENOANO] = TARGET_ENOANO,
707 [EBADRQC] = TARGET_EBADRQC,
708 [EBADSLT] = TARGET_EBADSLT,
709 [EBFONT] = TARGET_EBFONT,
710 [ENOSTR] = TARGET_ENOSTR,
711 [ENODATA] = TARGET_ENODATA,
712 [ETIME] = TARGET_ETIME,
713 [ENOSR] = TARGET_ENOSR,
714 [ENONET] = TARGET_ENONET,
715 [ENOPKG] = TARGET_ENOPKG,
716 [EREMOTE] = TARGET_EREMOTE,
717 [ENOLINK] = TARGET_ENOLINK,
718 [EADV] = TARGET_EADV,
719 [ESRMNT] = TARGET_ESRMNT,
720 [ECOMM] = TARGET_ECOMM,
721 [EPROTO] = TARGET_EPROTO,
722 [EDOTDOT] = TARGET_EDOTDOT,
723 [EMULTIHOP] = TARGET_EMULTIHOP,
724 [EBADMSG] = TARGET_EBADMSG,
725 [ENAMETOOLONG] = TARGET_ENAMETOOLONG,
726 [EOVERFLOW] = TARGET_EOVERFLOW,
727 [ENOTUNIQ] = TARGET_ENOTUNIQ,
728 [EBADFD] = TARGET_EBADFD,
729 [EREMCHG] = TARGET_EREMCHG,
730 [ELIBACC] = TARGET_ELIBACC,
731 [ELIBBAD] = TARGET_ELIBBAD,
732 [ELIBSCN] = TARGET_ELIBSCN,
733 [ELIBMAX] = TARGET_ELIBMAX,
734 [ELIBEXEC] = TARGET_ELIBEXEC,
735 [EILSEQ] = TARGET_EILSEQ,
736 [ENOSYS] = TARGET_ENOSYS,
737 [ELOOP] = TARGET_ELOOP,
738 [ERESTART] = TARGET_ERESTART,
739 [ESTRPIPE] = TARGET_ESTRPIPE,
740 [ENOTEMPTY] = TARGET_ENOTEMPTY,
741 [EUSERS] = TARGET_EUSERS,
742 [ENOTSOCK] = TARGET_ENOTSOCK,
743 [EDESTADDRREQ] = TARGET_EDESTADDRREQ,
744 [EMSGSIZE] = TARGET_EMSGSIZE,
745 [EPROTOTYPE] = TARGET_EPROTOTYPE,
746 [ENOPROTOOPT] = TARGET_ENOPROTOOPT,
747 [EPROTONOSUPPORT] = TARGET_EPROTONOSUPPORT,
748 [ESOCKTNOSUPPORT] = TARGET_ESOCKTNOSUPPORT,
749 [EOPNOTSUPP] = TARGET_EOPNOTSUPP,
750 [EPFNOSUPPORT] = TARGET_EPFNOSUPPORT,
751 [EAFNOSUPPORT] = TARGET_EAFNOSUPPORT,
752 [EADDRINUSE] = TARGET_EADDRINUSE,
753 [EADDRNOTAVAIL] = TARGET_EADDRNOTAVAIL,
754 [ENETDOWN] = TARGET_ENETDOWN,
755 [ENETUNREACH] = TARGET_ENETUNREACH,
756 [ENETRESET] = TARGET_ENETRESET,
757 [ECONNABORTED] = TARGET_ECONNABORTED,
758 [ECONNRESET] = TARGET_ECONNRESET,
759 [ENOBUFS] = TARGET_ENOBUFS,
760 [EISCONN] = TARGET_EISCONN,
761 [ENOTCONN] = TARGET_ENOTCONN,
762 [EUCLEAN] = TARGET_EUCLEAN,
763 [ENOTNAM] = TARGET_ENOTNAM,
764 [ENAVAIL] = TARGET_ENAVAIL,
765 [EISNAM] = TARGET_EISNAM,
766 [EREMOTEIO] = TARGET_EREMOTEIO,
767 [EDQUOT] = TARGET_EDQUOT,
768 [ESHUTDOWN] = TARGET_ESHUTDOWN,
769 [ETOOMANYREFS] = TARGET_ETOOMANYREFS,
770 [ETIMEDOUT] = TARGET_ETIMEDOUT,
771 [ECONNREFUSED] = TARGET_ECONNREFUSED,
772 [EHOSTDOWN] = TARGET_EHOSTDOWN,
773 [EHOSTUNREACH] = TARGET_EHOSTUNREACH,
774 [EALREADY] = TARGET_EALREADY,
775 [EINPROGRESS] = TARGET_EINPROGRESS,
776 [ESTALE] = TARGET_ESTALE,
777 [ECANCELED] = TARGET_ECANCELED,
778 [ENOMEDIUM] = TARGET_ENOMEDIUM,
779 [EMEDIUMTYPE] = TARGET_EMEDIUMTYPE,
780 #ifdef ENOKEY
781 [ENOKEY] = TARGET_ENOKEY,
782 #endif
783 #ifdef EKEYEXPIRED
784 [EKEYEXPIRED] = TARGET_EKEYEXPIRED,
785 #endif
786 #ifdef EKEYREVOKED
787 [EKEYREVOKED] = TARGET_EKEYREVOKED,
788 #endif
789 #ifdef EKEYREJECTED
790 [EKEYREJECTED] = TARGET_EKEYREJECTED,
791 #endif
792 #ifdef EOWNERDEAD
793 [EOWNERDEAD] = TARGET_EOWNERDEAD,
794 #endif
795 #ifdef ENOTRECOVERABLE
796 [ENOTRECOVERABLE] = TARGET_ENOTRECOVERABLE,
797 #endif
798 #ifdef ENOMSG
799 [ENOMSG] = TARGET_ENOMSG,
800 #endif
803 static inline int host_to_target_errno(int err)
805 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
806 host_to_target_errno_table[err]) {
807 return host_to_target_errno_table[err];
809 return err;
812 static inline int target_to_host_errno(int err)
814 if (err >= 0 && err < ERRNO_TABLE_SIZE &&
815 target_to_host_errno_table[err]) {
816 return target_to_host_errno_table[err];
818 return err;
821 static inline abi_long get_errno(abi_long ret)
823 if (ret == -1)
824 return -host_to_target_errno(errno);
825 else
826 return ret;
829 static inline int is_error(abi_long ret)
831 return (abi_ulong)ret >= (abi_ulong)(-4096);
834 const char *target_strerror(int err)
836 if (err == TARGET_ERESTARTSYS) {
837 return "To be restarted";
839 if (err == TARGET_QEMU_ESIGRETURN) {
840 return "Successful exit from sigreturn";
843 if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
844 return NULL;
846 return strerror(target_to_host_errno(err));
849 #define safe_syscall0(type, name) \
850 static type safe_##name(void) \
852 return safe_syscall(__NR_##name); \
855 #define safe_syscall1(type, name, type1, arg1) \
856 static type safe_##name(type1 arg1) \
858 return safe_syscall(__NR_##name, arg1); \
861 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
862 static type safe_##name(type1 arg1, type2 arg2) \
864 return safe_syscall(__NR_##name, arg1, arg2); \
867 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
868 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
870 return safe_syscall(__NR_##name, arg1, arg2, arg3); \
873 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
874 type4, arg4) \
875 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
877 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
880 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
881 type4, arg4, type5, arg5) \
882 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
883 type5 arg5) \
885 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
888 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
889 type4, arg4, type5, arg5, type6, arg6) \
890 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
891 type5 arg5, type6 arg6) \
893 return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
896 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
897 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
898 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
899 int, flags, mode_t, mode)
900 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
901 struct rusage *, rusage)
902 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
903 int, options, struct rusage *, rusage)
904 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
905 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
906 fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
907 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
908 struct timespec *, tsp, const sigset_t *, sigmask,
909 size_t, sigsetsize)
910 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
911 int, maxevents, int, timeout, const sigset_t *, sigmask,
912 size_t, sigsetsize)
913 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
914 const struct timespec *,timeout,int *,uaddr2,int,val3)
915 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
916 safe_syscall2(int, kill, pid_t, pid, int, sig)
917 safe_syscall2(int, tkill, int, tid, int, sig)
918 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
919 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
920 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
921 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
922 unsigned long, pos_l, unsigned long, pos_h)
923 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
924 unsigned long, pos_l, unsigned long, pos_h)
925 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
926 socklen_t, addrlen)
927 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
928 int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
929 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
930 int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
931 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
932 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
933 safe_syscall2(int, flock, int, fd, int, operation)
934 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
935 const struct timespec *, uts, size_t, sigsetsize)
/* Interruption-safe wrappers for host syscalls, generated by the
 * safe_syscallN() macros.  Each expands to a safe_<name>() function. */
safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
              int, flags)
safe_syscall2(int, nanosleep, const struct timespec *, req,
              struct timespec *, rem)
#ifdef TARGET_NR_clock_nanosleep
safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
              const struct timespec *, req, struct timespec *, rem)
#endif
#ifdef __NR_msgsnd
safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
              int, flags)
safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
              long, msgtype, int, flags)
safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
              unsigned, nsops, const struct timespec *, timeout)
#else
/* This host kernel architecture uses a single ipc syscall; fake up
 * wrappers for the sub-operations to hide this implementation detail.
 * Annoyingly we can't include linux/ipc.h to get the constant definitions
 * for the call parameter because some structs in there conflict with the
 * sys/ipc.h ones. So we just define them here, and rely on them being
 * the same for all host architectures.
 */
#define Q_SEMTIMEDOP 4
#define Q_MSGSND 11
#define Q_MSGRCV 12
/* Encode the ipc() "call" argument: version in the top 16 bits. */
#define Q_IPCCALL(VERSION, OP) ((VERSION) << 16 | (OP))

safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
              void *, ptr, long, fifth)

/* msgsnd via the multiplexed ipc syscall. */
static int safe_msgsnd(int msgid, const void *msgp, size_t sz, int flags)
{
    return safe_ipc(Q_IPCCALL(0, Q_MSGSND), msgid, sz, flags, (void *)msgp, 0);
}
/* msgrcv via the multiplexed ipc syscall; version 1 passes msgtype in "fifth". */
static int safe_msgrcv(int msgid, void *msgp, size_t sz, long type, int flags)
{
    return safe_ipc(Q_IPCCALL(1, Q_MSGRCV), msgid, sz, flags, msgp, type);
}
/* semtimedop via the multiplexed ipc syscall; the timeout rides in "fifth". */
static int safe_semtimedop(int semid, struct sembuf *tsops, unsigned nsops,
                           const struct timespec *timeout)
{
    return safe_ipc(Q_IPCCALL(0, Q_SEMTIMEDOP), semid, nsops, 0, tsops,
                    (long)timeout);
}
#endif
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
              size_t, len, unsigned, prio, const struct timespec *, timeout)
safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
              size_t, len, unsigned *, prio, const struct timespec *, timeout)
#endif
/* We do ioctl like this rather than via safe_syscall3 to preserve the
 * "third argument might be integer or pointer or not present" behaviour of
 * the libc function.
 */
#define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
/* Similarly for fcntl. Note that callers must always:
 *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
 *  use the flock64 struct rather than unsuffixed flock
 * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
 */
#ifdef __NR_fcntl64
#define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
#else
#define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
#endif
1003 static inline int host_to_target_sock_type(int host_type)
1005 int target_type;
1007 switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
1008 case SOCK_DGRAM:
1009 target_type = TARGET_SOCK_DGRAM;
1010 break;
1011 case SOCK_STREAM:
1012 target_type = TARGET_SOCK_STREAM;
1013 break;
1014 default:
1015 target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
1016 break;
1019 #if defined(SOCK_CLOEXEC)
1020 if (host_type & SOCK_CLOEXEC) {
1021 target_type |= TARGET_SOCK_CLOEXEC;
1023 #endif
1025 #if defined(SOCK_NONBLOCK)
1026 if (host_type & SOCK_NONBLOCK) {
1027 target_type |= TARGET_SOCK_NONBLOCK;
1029 #endif
1031 return target_type;
/* Guest heap state used by do_brk() below. */
static abi_ulong target_brk;          /* current guest program break */
static abi_ulong target_original_brk; /* initial break; do_brk() never goes below */
static abi_ulong brk_page;            /* end of host pages reserved for the heap */
1038 void target_set_brk(abi_ulong new_brk)
1040 target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
1041 brk_page = HOST_PAGE_ALIGN(target_brk);
/* Swap which definition is commented out to get verbose brk tracing. */
//#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
#define DEBUGF_BRK(message, args...)
/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    /* brk(0) is a query: report the current break unchanged. */
    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    /* Never shrink below the initial break; just report the current value. */
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done... */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages. */
        if (new_brk > target_brk) {
            memset(g2h(target_brk), 0, new_brk - target_brk);
        }
        target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contains garbage data due to a previous heap usage (grown
         * then shrunken). */
        memset(g2h(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
                   target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value. */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
1125 static inline abi_long copy_from_user_fdset(fd_set *fds,
1126 abi_ulong target_fds_addr,
1127 int n)
1129 int i, nw, j, k;
1130 abi_ulong b, *target_fds;
1132 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1133 if (!(target_fds = lock_user(VERIFY_READ,
1134 target_fds_addr,
1135 sizeof(abi_ulong) * nw,
1136 1)))
1137 return -TARGET_EFAULT;
1139 FD_ZERO(fds);
1140 k = 0;
1141 for (i = 0; i < nw; i++) {
1142 /* grab the abi_ulong */
1143 __get_user(b, &target_fds[i]);
1144 for (j = 0; j < TARGET_ABI_BITS; j++) {
1145 /* check the bit inside the abi_ulong */
1146 if ((b >> j) & 1)
1147 FD_SET(k, fds);
1148 k++;
1152 unlock_user(target_fds, target_fds_addr, 0);
1154 return 0;
1157 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
1158 abi_ulong target_fds_addr,
1159 int n)
1161 if (target_fds_addr) {
1162 if (copy_from_user_fdset(fds, target_fds_addr, n))
1163 return -TARGET_EFAULT;
1164 *fds_ptr = fds;
1165 } else {
1166 *fds_ptr = NULL;
1168 return 0;
1171 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
1172 const fd_set *fds,
1173 int n)
1175 int i, nw, j, k;
1176 abi_long v;
1177 abi_ulong *target_fds;
1179 nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
1180 if (!(target_fds = lock_user(VERIFY_WRITE,
1181 target_fds_addr,
1182 sizeof(abi_ulong) * nw,
1183 0)))
1184 return -TARGET_EFAULT;
1186 k = 0;
1187 for (i = 0; i < nw; i++) {
1188 v = 0;
1189 for (j = 0; j < TARGET_ABI_BITS; j++) {
1190 v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
1191 k++;
1193 __put_user(v, &target_fds[i]);
1196 unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
1198 return 0;
/* Tick rate of the host kernel's clock_t values. */
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

/* Rescale a host clock_t tick count into target ticks. */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    /* Widen to 64 bits before multiplying to avoid overflow. */
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
/* Copy a host struct rusage out to guest memory at target_addr,
 * byte-swapping every field.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
1246 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1248 abi_ulong target_rlim_swap;
1249 rlim_t result;
1251 target_rlim_swap = tswapal(target_rlim);
1252 if (target_rlim_swap == TARGET_RLIM_INFINITY)
1253 return RLIM_INFINITY;
1255 result = target_rlim_swap;
1256 if (target_rlim_swap != (rlim_t)result)
1257 return RLIM_INFINITY;
1259 return result;
1262 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1264 abi_ulong target_rlim_swap;
1265 abi_ulong result;
1267 if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1268 target_rlim_swap = TARGET_RLIM_INFINITY;
1269 else
1270 target_rlim_swap = rlim;
1271 result = tswapal(target_rlim_swap);
1273 return result;
/* Map a target RLIMIT_* constant to the host's value.  Unknown codes
 * are passed through unchanged. */
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
1314 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1315 abi_ulong target_tv_addr)
1317 struct target_timeval *target_tv;
1319 if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
1320 return -TARGET_EFAULT;
1322 __get_user(tv->tv_sec, &target_tv->tv_sec);
1323 __get_user(tv->tv_usec, &target_tv->tv_usec);
1325 unlock_user_struct(target_tv, target_tv_addr, 0);
1327 return 0;
1330 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1331 const struct timeval *tv)
1333 struct target_timeval *target_tv;
1335 if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
1336 return -TARGET_EFAULT;
1338 __put_user(tv->tv_sec, &target_tv->tv_sec);
1339 __put_user(tv->tv_usec, &target_tv->tv_usec);
1341 unlock_user_struct(target_tv, target_tv_addr, 1);
1343 return 0;
1346 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1347 abi_ulong target_tz_addr)
1349 struct target_timezone *target_tz;
1351 if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1352 return -TARGET_EFAULT;
1355 __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1356 __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1358 unlock_user_struct(target_tz, target_tz_addr, 0);
1360 return 0;
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
#include <mqueue.h>

/* Read a target struct mq_attr from guest memory into *attr.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}

/* Write a host struct mq_attr out to guest memory.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
#endif
#if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /* Pull the three fd sets in from guest memory (NULL address -> NULL set). */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /* Convert the guest timeval to the timespec pselect6 wants. */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        /* Write the (possibly modified) sets and remaining time back. */
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
#if defined(TARGET_WANT_OLD_SYS_SELECT)
/* The old-style select syscall takes a single guest pointer to a block
 * holding all five arguments; unpack it and forward to do_select(). */
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
#endif
/* Create a pipe with flags; only possible when the host has pipe2(). */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}
/* Implement the guest pipe/pipe2 syscalls: create the host pipe and
 * deliver both descriptors to the guest, honouring the per-target
 * calling conventions of the original pipe syscall. */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        /* Second descriptor is returned in a register, not memory. */
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    /* Default convention: store both descriptors at the guest address. */
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
1530 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1531 abi_ulong target_addr,
1532 socklen_t len)
1534 struct target_ip_mreqn *target_smreqn;
1536 target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1537 if (!target_smreqn)
1538 return -TARGET_EFAULT;
1539 mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1540 mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1541 if (len == sizeof(struct target_ip_mreqn))
1542 mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1543 unlock_user(target_smreqn, target_addr, 0);
1545 return 0;
/* Copy a guest sockaddr of size len into host form, fixing up the
 * family field, AF_UNIX path termination and the byte order of
 * AF_NETLINK / AF_PACKET fields.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    /* Some fd types install their own address translator; defer to it. */
    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* Grow len by one byte to cover the missing terminator. */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
/* Copy a host sockaddr of size len out to guest memory, byte-swapping
 * the family and AF_NETLINK / AF_PACKET fields when the buffer is big
 * enough to contain them.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    /* Only touch sa_family if the caller's buffer actually covers it. */
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK && len >= sizeof(struct sockaddr_nl)) {
        struct sockaddr_nl *target_nl = (struct sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
/* Convert the control (ancillary) data of a guest msghdr into host
 * form, walking the guest and host cmsg chains in lockstep.  Converts
 * SCM_RIGHTS fd arrays and SCM_CREDENTIALS; other payloads are copied
 * verbatim with a warning.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length = total guest cmsg length minus its header. */
        int len = tswapal(target_cmsg->cmsg_len)
            - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            gemu_log("Host cmsg overflow\n");
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* File descriptors: swap each 32-bit entry. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
/* Convert the control (ancillary) data of a received host msghdr back
 * into guest form, truncating (and setting MSG_CTRUNC) when the guest
 * buffer is too small.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        tgt_len = TARGET_CMSG_LEN(len);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            /* fallthrough (harmless: only tgt_len was adjusted) */
        default:
            break;
        }

        if (msg_controllen < tgt_len) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen;
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            gemu_log("Unsupported ancillary data: %d/%d\n",
                     cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(tgt_len);
        tgt_space = TARGET_CMSG_SPACE(len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
/*
 * Byte-swap every field of a netlink message header in place.
 * The swaps are independent, so order does not matter.
 */
static void tswap_nlmsghdr(struct nlmsghdr *nlh)
{
    nlh->nlmsg_pid = tswap32(nlh->nlmsg_pid);
    nlh->nlmsg_seq = tswap32(nlh->nlmsg_seq);
    nlh->nlmsg_flags = tswap16(nlh->nlmsg_flags);
    nlh->nlmsg_type = tswap16(nlh->nlmsg_type);
    nlh->nlmsg_len = tswap32(nlh->nlmsg_len);
}
/* Walk a buffer of host netlink messages, applying the given payload
 * converter to each and byte-swapping the headers for the guest.
 * Stops at NLMSG_DONE / NLMSG_ERROR or a malformed length. */
static abi_long host_to_target_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*host_to_target_nlmsg)
                                              (struct nlmsghdr *))
{
    uint32_t nlmsg_len;
    abi_long ret;

    while (len > sizeof(struct nlmsghdr)) {

        nlmsg_len = nlh->nlmsg_len;
        /* Bail out on a header that is shorter than a header or that
         * claims more bytes than remain in the buffer. */
        if (nlmsg_len < sizeof(struct nlmsghdr) ||
            nlmsg_len > len) {
            break;
        }

        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            tswap_nlmsghdr(nlh);
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            tswap_nlmsghdr(&e->msg);
            tswap_nlmsghdr(nlh);
            return 0;
        }
        default:
            ret = host_to_target_nlmsg(nlh);
            if (ret < 0) {
                /* Still swap the header so the guest sees sane values. */
                tswap_nlmsghdr(nlh);
                return ret;
            }
            break;
        }
        tswap_nlmsghdr(nlh);
        len -= NLMSG_ALIGN(nlmsg_len);
        nlh = (struct nlmsghdr *)(((char*)nlh) + NLMSG_ALIGN(nlmsg_len));
    }
    return 0;
}
/* Walk a buffer of guest netlink messages, byte-swapping each header
 * to host order and applying the given payload converter.  The length
 * check happens before the swap, hence the tswap32() in the test. */
static abi_long target_to_host_for_each_nlmsg(struct nlmsghdr *nlh,
                                              size_t len,
                                              abi_long (*target_to_host_nlmsg)
                                              (struct nlmsghdr *))
{
    int ret;

    while (len > sizeof(struct nlmsghdr)) {
        if (tswap32(nlh->nlmsg_len) < sizeof(struct nlmsghdr) ||
            tswap32(nlh->nlmsg_len) > len) {
            break;
        }
        tswap_nlmsghdr(nlh);
        switch (nlh->nlmsg_type) {
        case NLMSG_DONE:
            return 0;
        case NLMSG_NOOP:
            break;
        case NLMSG_ERROR:
        {
            struct nlmsgerr *e = NLMSG_DATA(nlh);
            e->error = tswap32(e->error);
            tswap_nlmsghdr(&e->msg);
            return 0;
        }
        default:
            ret = target_to_host_nlmsg(nlh);
            if (ret < 0) {
                return ret;
            }
        }
        /* nlmsg_len is host order now, after tswap_nlmsghdr() above. */
        len -= NLMSG_ALIGN(nlh->nlmsg_len);
        nlh = (struct nlmsghdr *)(((char *)nlh) + NLMSG_ALIGN(nlh->nlmsg_len));
    }
    return 0;
}
#ifdef CONFIG_RTNETLINK
/* Walk a buffer of host netlink attributes: convert each payload via
 * the callback (with an opaque context), then byte-swap the attribute
 * header for the guest.  Stops on a malformed length. */
static abi_long host_to_target_for_each_nlattr(struct nlattr *nlattr,
                                               size_t len, void *context,
                                               abi_long (*host_to_target_nlattr)
                                               (struct nlattr *,
                                                void *context))
{
    unsigned short nla_len;
    abi_long ret;

    while (len > sizeof(struct nlattr)) {
        /* Save the host-order length before the header gets swapped. */
        nla_len = nlattr->nla_len;
        if (nla_len < sizeof(struct nlattr) ||
            nla_len > len) {
            break;
        }
        ret = host_to_target_nlattr(nlattr, context);
        nlattr->nla_len = tswap16(nlattr->nla_len);
        nlattr->nla_type = tswap16(nlattr->nla_type);
        if (ret < 0) {
            return ret;
        }
        len -= NLA_ALIGN(nla_len);
        nlattr = (struct nlattr *)(((char *)nlattr) + NLA_ALIGN(nla_len));
    }
    return 0;
}
1982 static abi_long host_to_target_for_each_rtattr(struct rtattr *rtattr,
1983 size_t len,
1984 abi_long (*host_to_target_rtattr)
1985 (struct rtattr *))
1987 unsigned short rta_len;
1988 abi_long ret;
1990 while (len > sizeof(struct rtattr)) {
1991 rta_len = rtattr->rta_len;
1992 if (rta_len < sizeof(struct rtattr) ||
1993 rta_len > len) {
1994 break;
1996 ret = host_to_target_rtattr(rtattr);
1997 rtattr->rta_len = tswap16(rtattr->rta_len);
1998 rtattr->rta_type = tswap16(rtattr->rta_type);
1999 if (ret < 0) {
2000 return ret;
2002 len -= RTA_ALIGN(rta_len);
2003 rtattr = (struct rtattr *)(((char *)rtattr) + RTA_ALIGN(rta_len));
2005 return 0;
/* Address of a netlink attribute's payload (just past its header). */
#define NLA_DATA(nla) ((void *)((char *)(nla)) + NLA_HDRLEN)

/* Byte-swap the payload of one IFLA_BR_* bridge attribute in place,
 * according to the width implied by its type.  Attributes with no
 * multi-byte integer payload are left untouched; unknown types are
 * logged and also left untouched. */
static abi_long host_to_target_data_bridge_nlattr(struct nlattr *nlattr,
                                                  void *context)
{
    uint16_t *u16;
    uint32_t *u32;
    uint64_t *u64;

    switch (nlattr->nla_type) {
    /* no data */
    case QEMU_IFLA_BR_FDB_FLUSH:
        break;
    /* binary */
    case QEMU_IFLA_BR_GROUP_ADDR:
        break;
    /* uint8_t */
    case QEMU_IFLA_BR_VLAN_FILTERING:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE_DETECTED:
    case QEMU_IFLA_BR_MCAST_ROUTER:
    case QEMU_IFLA_BR_MCAST_SNOOPING:
    case QEMU_IFLA_BR_MCAST_QUERY_USE_IFADDR:
    case QEMU_IFLA_BR_MCAST_QUERIER:
    case QEMU_IFLA_BR_NF_CALL_IPTABLES:
    case QEMU_IFLA_BR_NF_CALL_IP6TABLES:
    case QEMU_IFLA_BR_NF_CALL_ARPTABLES:
        break;
    /* uint16_t */
    case QEMU_IFLA_BR_PRIORITY:
    case QEMU_IFLA_BR_VLAN_PROTOCOL:
    case QEMU_IFLA_BR_GROUP_FWD_MASK:
    case QEMU_IFLA_BR_ROOT_PORT:
    case QEMU_IFLA_BR_VLAN_DEFAULT_PVID:
        u16 = NLA_DATA(nlattr);
        *u16 = tswap16(*u16);
        break;
    /* uint32_t */
    case QEMU_IFLA_BR_FORWARD_DELAY:
    case QEMU_IFLA_BR_HELLO_TIME:
    case QEMU_IFLA_BR_MAX_AGE:
    case QEMU_IFLA_BR_AGEING_TIME:
    case QEMU_IFLA_BR_STP_STATE:
    case QEMU_IFLA_BR_ROOT_PATH_COST:
    case QEMU_IFLA_BR_MCAST_HASH_ELASTICITY:
    case QEMU_IFLA_BR_MCAST_HASH_MAX:
    case QEMU_IFLA_BR_MCAST_LAST_MEMBER_CNT:
    case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_CNT:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint64_t */
    case QEMU_IFLA_BR_HELLO_TIMER:
    case QEMU_IFLA_BR_TCN_TIMER:
    case QEMU_IFLA_BR_GC_TIMER:
    case QEMU_IFLA_BR_TOPOLOGY_CHANGE_TIMER:
    case QEMU_IFLA_BR_MCAST_LAST_MEMBER_INTVL:
    case QEMU_IFLA_BR_MCAST_MEMBERSHIP_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERIER_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERY_INTVL:
    case QEMU_IFLA_BR_MCAST_QUERY_RESPONSE_INTVL:
    case QEMU_IFLA_BR_MCAST_STARTUP_QUERY_INTVL:
        u64 = NLA_DATA(nlattr);
        *u64 = tswap64(*u64);
        break;
    /* ifla_bridge_id: uin8_t[] */
    case QEMU_IFLA_BR_ROOT_ID:
    case QEMU_IFLA_BR_BRIDGE_ID:
        break;
    default:
        gemu_log("Unknown QEMU_IFLA_BR type %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}
/* Byte-swap the payload of one IFLA_BRPORT_* bridge-port attribute in
 * place, according to the width implied by its type.  Unknown types
 * are logged and left untouched. */
static abi_long host_to_target_slave_data_bridge_nlattr(struct nlattr *nlattr,
                                                        void *context)
{
    uint16_t *u16;
    uint32_t *u32;
    uint64_t *u64;

    switch (nlattr->nla_type) {
    /* uint8_t */
    case QEMU_IFLA_BRPORT_STATE:
    case QEMU_IFLA_BRPORT_MODE:
    case QEMU_IFLA_BRPORT_GUARD:
    case QEMU_IFLA_BRPORT_PROTECT:
    case QEMU_IFLA_BRPORT_FAST_LEAVE:
    case QEMU_IFLA_BRPORT_LEARNING:
    case QEMU_IFLA_BRPORT_UNICAST_FLOOD:
    case QEMU_IFLA_BRPORT_PROXYARP:
    case QEMU_IFLA_BRPORT_LEARNING_SYNC:
    case QEMU_IFLA_BRPORT_PROXYARP_WIFI:
    case QEMU_IFLA_BRPORT_TOPOLOGY_CHANGE_ACK:
    case QEMU_IFLA_BRPORT_CONFIG_PENDING:
    case QEMU_IFLA_BRPORT_MULTICAST_ROUTER:
        break;
    /* uint16_t */
    case QEMU_IFLA_BRPORT_PRIORITY:
    case QEMU_IFLA_BRPORT_DESIGNATED_PORT:
    case QEMU_IFLA_BRPORT_DESIGNATED_COST:
    case QEMU_IFLA_BRPORT_ID:
    case QEMU_IFLA_BRPORT_NO:
        u16 = NLA_DATA(nlattr);
        *u16 = tswap16(*u16);
        break;
    /* uin32_t */
    case QEMU_IFLA_BRPORT_COST:
        u32 = NLA_DATA(nlattr);
        *u32 = tswap32(*u32);
        break;
    /* uint64_t */
    case QEMU_IFLA_BRPORT_MESSAGE_AGE_TIMER:
    case QEMU_IFLA_BRPORT_FORWARD_DELAY_TIMER:
    case QEMU_IFLA_BRPORT_HOLD_TIMER:
        u64 = NLA_DATA(nlattr);
        *u64 = tswap64(*u64);
        break;
    /* ifla_bridge_id: uint8_t[] */
    case QEMU_IFLA_BRPORT_ROOT_ID:
    case QEMU_IFLA_BRPORT_BRIDGE_ID:
        break;
    default:
        gemu_log("Unknown QEMU_IFLA_BRPORT type %d\n", nlattr->nla_type);
        break;
    }
    return 0;
}
/*
 * Carries the link "kind" strings seen while walking an IFLA_LINKINFO
 * nest, so that IFLA_INFO_DATA / IFLA_INFO_SLAVE_DATA can be decoded
 * with the matching per-kind converter.
 */
2139 struct linkinfo_context {
/* length of 'name' payload (attribute length minus NLA_HDRLEN) */
2140 int len;
/* IFLA_INFO_KIND string, pointing into the attribute buffer */
2141 char *name;
/* length of 'slave_name' payload */
2142 int slave_len;
/* IFLA_INFO_SLAVE_KIND string, pointing into the attribute buffer */
2143 char *slave_name;
/*
 * Convert (host -> target) one IFLA_LINKINFO sub-attribute.
 *
 * Records the KIND / SLAVE_KIND strings in the shared linkinfo_context,
 * and uses them to pick the converter for the nested DATA / SLAVE_DATA
 * attributes.  Only "bridge" data is currently handled.
 *
 * NOTE(review): this assumes the kernel emits IFLA_INFO_KIND before
 * IFLA_INFO_DATA (li_context->name is read in the DATA case) — confirm
 * against the kernel's rtnetlink ordering before relying on it.
 */
2146 static abi_long host_to_target_data_linkinfo_nlattr(struct nlattr *nlattr,
2147 void *context)
2149 struct linkinfo_context *li_context = context;
2151 switch (nlattr->nla_type) {
2152 /* string */
2153 case QEMU_IFLA_INFO_KIND:
2154 li_context->name = NLA_DATA(nlattr);
2155 li_context->len = nlattr->nla_len - NLA_HDRLEN;
2156 break;
2157 case QEMU_IFLA_INFO_SLAVE_KIND:
2158 li_context->slave_name = NLA_DATA(nlattr);
2159 li_context->slave_len = nlattr->nla_len - NLA_HDRLEN;
2160 break;
2161 /* stats */
2162 case QEMU_IFLA_INFO_XSTATS:
2163 /* FIXME: only used by CAN */
2164 break;
2165 /* nested */
2166 case QEMU_IFLA_INFO_DATA:
2167 if (strncmp(li_context->name, "bridge",
2168 li_context->len) == 0) {
2169 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2170 nlattr->nla_len,
2171 NULL,
2172 host_to_target_data_bridge_nlattr)
2173 } else {
2174 gemu_log("Unknown QEMU_IFLA_INFO_KIND %s\n", li_context->name);
2176 break;
2177 case QEMU_IFLA_INFO_SLAVE_DATA:
2178 if (strncmp(li_context->slave_name, "bridge",
2179 li_context->slave_len) == 0) {
2180 return host_to_target_for_each_nlattr(NLA_DATA(nlattr),
2181 nlattr->nla_len,
2182 NULL,
2183 host_to_target_slave_data_bridge_nlattr);
2184 } else {
2185 gemu_log("Unknown QEMU_IFLA_INFO_SLAVE_KIND %s\n",
2186 li_context->slave_name);
2188 break;
2189 default:
2190 gemu_log("Unknown host QEMU_IFLA_INFO type: %d\n", nlattr->nla_type);
2191 break;
2194 return 0;
2197 static abi_long host_to_target_data_inet_nlattr(struct nlattr *nlattr,
2198 void *context)
2200 uint32_t *u32;
2201 int i;
2203 switch (nlattr->nla_type) {
2204 case QEMU_IFLA_INET_CONF:
2205 u32 = NLA_DATA(nlattr);
2206 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2207 i++) {
2208 u32[i] = tswap32(u32[i]);
2210 break;
2211 default:
2212 gemu_log("Unknown host AF_INET type: %d\n", nlattr->nla_type);
2214 return 0;
2217 static abi_long host_to_target_data_inet6_nlattr(struct nlattr *nlattr,
2218 void *context)
2220 uint32_t *u32;
2221 uint64_t *u64;
2222 struct ifla_cacheinfo *ci;
2223 int i;
2225 switch (nlattr->nla_type) {
2226 /* binaries */
2227 case QEMU_IFLA_INET6_TOKEN:
2228 break;
2229 /* uint8_t */
2230 case QEMU_IFLA_INET6_ADDR_GEN_MODE:
2231 break;
2232 /* uint32_t */
2233 case QEMU_IFLA_INET6_FLAGS:
2234 u32 = NLA_DATA(nlattr);
2235 *u32 = tswap32(*u32);
2236 break;
2237 /* uint32_t[] */
2238 case QEMU_IFLA_INET6_CONF:
2239 u32 = NLA_DATA(nlattr);
2240 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u32);
2241 i++) {
2242 u32[i] = tswap32(u32[i]);
2244 break;
2245 /* ifla_cacheinfo */
2246 case QEMU_IFLA_INET6_CACHEINFO:
2247 ci = NLA_DATA(nlattr);
2248 ci->max_reasm_len = tswap32(ci->max_reasm_len);
2249 ci->tstamp = tswap32(ci->tstamp);
2250 ci->reachable_time = tswap32(ci->reachable_time);
2251 ci->retrans_time = tswap32(ci->retrans_time);
2252 break;
2253 /* uint64_t[] */
2254 case QEMU_IFLA_INET6_STATS:
2255 case QEMU_IFLA_INET6_ICMP6STATS:
2256 u64 = NLA_DATA(nlattr);
2257 for (i = 0; i < (nlattr->nla_len - NLA_HDRLEN) / sizeof(*u64);
2258 i++) {
2259 u64[i] = tswap64(u64[i]);
2261 break;
2262 default:
2263 gemu_log("Unknown host AF_INET6 type: %d\n", nlattr->nla_type);
2265 return 0;
2268 static abi_long host_to_target_data_spec_nlattr(struct nlattr *nlattr,
2269 void *context)
2271 switch (nlattr->nla_type) {
2272 case AF_INET:
2273 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2274 NULL,
2275 host_to_target_data_inet_nlattr);
2276 case AF_INET6:
2277 return host_to_target_for_each_nlattr(NLA_DATA(nlattr), nlattr->nla_len,
2278 NULL,
2279 host_to_target_data_inet6_nlattr);
2280 default:
2281 gemu_log("Unknown host AF_SPEC type: %d\n", nlattr->nla_type);
2282 break;
2284 return 0;
/*
 * Byte-swap (host -> target) one IFLA_* attribute of an RTM_*LINK
 * message in place.  Nested LINKINFO / AF_SPEC attributes recurse via
 * host_to_target_for_each_nlattr.
 *
 * Returns 0 (or the nested walker's result); unknown types are logged
 * and left untouched.
 */
2287 static abi_long host_to_target_data_link_rtattr(struct rtattr *rtattr)
2289 uint32_t *u32;
2290 struct rtnl_link_stats *st;
2291 struct rtnl_link_stats64 *st64;
2292 struct rtnl_link_ifmap *map;
2293 struct linkinfo_context li_context;
2295 switch (rtattr->rta_type) {
/* raw bytes and NUL-terminated strings: endian-neutral */
2296 /* binary stream */
2297 case QEMU_IFLA_ADDRESS:
2298 case QEMU_IFLA_BROADCAST:
2299 /* string */
2300 case QEMU_IFLA_IFNAME:
2301 case QEMU_IFLA_QDISC:
2302 break;
2303 /* uin8_t */
2304 case QEMU_IFLA_OPERSTATE:
2305 case QEMU_IFLA_LINKMODE:
2306 case QEMU_IFLA_CARRIER:
2307 case QEMU_IFLA_PROTO_DOWN:
2308 break;
2309 /* uint32_t */
2310 case QEMU_IFLA_MTU:
2311 case QEMU_IFLA_LINK:
2312 case QEMU_IFLA_WEIGHT:
2313 case QEMU_IFLA_TXQLEN:
2314 case QEMU_IFLA_CARRIER_CHANGES:
2315 case QEMU_IFLA_NUM_RX_QUEUES:
2316 case QEMU_IFLA_NUM_TX_QUEUES:
2317 case QEMU_IFLA_PROMISCUITY:
2318 case QEMU_IFLA_EXT_MASK:
2319 case QEMU_IFLA_LINK_NETNSID:
2320 case QEMU_IFLA_GROUP:
2321 case QEMU_IFLA_MASTER:
2322 case QEMU_IFLA_NUM_VF:
2323 u32 = RTA_DATA(rtattr);
2324 *u32 = tswap32(*u32);
2325 break;
/* every field of the 32-bit stats block is swapped individually */
2326 /* struct rtnl_link_stats */
2327 case QEMU_IFLA_STATS:
2328 st = RTA_DATA(rtattr);
2329 st->rx_packets = tswap32(st->rx_packets);
2330 st->tx_packets = tswap32(st->tx_packets);
2331 st->rx_bytes = tswap32(st->rx_bytes);
2332 st->tx_bytes = tswap32(st->tx_bytes);
2333 st->rx_errors = tswap32(st->rx_errors);
2334 st->tx_errors = tswap32(st->tx_errors);
2335 st->rx_dropped = tswap32(st->rx_dropped);
2336 st->tx_dropped = tswap32(st->tx_dropped);
2337 st->multicast = tswap32(st->multicast);
2338 st->collisions = tswap32(st->collisions);
2340 /* detailed rx_errors: */
2341 st->rx_length_errors = tswap32(st->rx_length_errors);
2342 st->rx_over_errors = tswap32(st->rx_over_errors);
2343 st->rx_crc_errors = tswap32(st->rx_crc_errors);
2344 st->rx_frame_errors = tswap32(st->rx_frame_errors);
2345 st->rx_fifo_errors = tswap32(st->rx_fifo_errors);
2346 st->rx_missed_errors = tswap32(st->rx_missed_errors);
2348 /* detailed tx_errors */
2349 st->tx_aborted_errors = tswap32(st->tx_aborted_errors);
2350 st->tx_carrier_errors = tswap32(st->tx_carrier_errors);
2351 st->tx_fifo_errors = tswap32(st->tx_fifo_errors);
2352 st->tx_heartbeat_errors = tswap32(st->tx_heartbeat_errors);
2353 st->tx_window_errors = tswap32(st->tx_window_errors);
2355 /* for cslip etc */
2356 st->rx_compressed = tswap32(st->rx_compressed);
2357 st->tx_compressed = tswap32(st->tx_compressed);
2358 break;
2359 /* struct rtnl_link_stats64 */
2360 case QEMU_IFLA_STATS64:
2361 st64 = RTA_DATA(rtattr);
2362 st64->rx_packets = tswap64(st64->rx_packets);
2363 st64->tx_packets = tswap64(st64->tx_packets);
2364 st64->rx_bytes = tswap64(st64->rx_bytes);
2365 st64->tx_bytes = tswap64(st64->tx_bytes);
2366 st64->rx_errors = tswap64(st64->rx_errors);
2367 st64->tx_errors = tswap64(st64->tx_errors);
2368 st64->rx_dropped = tswap64(st64->rx_dropped);
2369 st64->tx_dropped = tswap64(st64->tx_dropped);
2370 st64->multicast = tswap64(st64->multicast);
2371 st64->collisions = tswap64(st64->collisions);
2373 /* detailed rx_errors: */
2374 st64->rx_length_errors = tswap64(st64->rx_length_errors);
2375 st64->rx_over_errors = tswap64(st64->rx_over_errors);
2376 st64->rx_crc_errors = tswap64(st64->rx_crc_errors);
2377 st64->rx_frame_errors = tswap64(st64->rx_frame_errors);
2378 st64->rx_fifo_errors = tswap64(st64->rx_fifo_errors);
2379 st64->rx_missed_errors = tswap64(st64->rx_missed_errors);
2381 /* detailed tx_errors */
2382 st64->tx_aborted_errors = tswap64(st64->tx_aborted_errors);
2383 st64->tx_carrier_errors = tswap64(st64->tx_carrier_errors);
2384 st64->tx_fifo_errors = tswap64(st64->tx_fifo_errors);
2385 st64->tx_heartbeat_errors = tswap64(st64->tx_heartbeat_errors);
2386 st64->tx_window_errors = tswap64(st64->tx_window_errors);
2388 /* for cslip etc */
2389 st64->rx_compressed = tswap64(st64->rx_compressed);
2390 st64->tx_compressed = tswap64(st64->tx_compressed);
2391 break;
/* rtnl_link_ifmap: three 64-bit addresses, then a 16-bit irq */
2392 /* struct rtnl_link_ifmap */
2393 case QEMU_IFLA_MAP:
2394 map = RTA_DATA(rtattr);
2395 map->mem_start = tswap64(map->mem_start);
2396 map->mem_end = tswap64(map->mem_end);
2397 map->base_addr = tswap64(map->base_addr);
2398 map->irq = tswap16(map->irq);
2399 break;
2400 /* nested */
2401 case QEMU_IFLA_LINKINFO:
2402 memset(&li_context, 0, sizeof(li_context));
2403 return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2404 &li_context,
2405 host_to_target_data_linkinfo_nlattr);
2406 case QEMU_IFLA_AF_SPEC:
2407 return host_to_target_for_each_nlattr(RTA_DATA(rtattr), rtattr->rta_len,
2408 NULL,
2409 host_to_target_data_spec_nlattr);
2410 default:
2411 gemu_log("Unknown host QEMU_IFLA type: %d\n", rtattr->rta_type);
2412 break;
2414 return 0;
/*
 * Byte-swap (host -> target) one IFA_* attribute of an RTM_*ADDR
 * message in place.  Returns 0; unknown types are logged and left
 * untouched.
 */
2417 static abi_long host_to_target_data_addr_rtattr(struct rtattr *rtattr)
2419 uint32_t *u32;
2420 struct ifa_cacheinfo *ci;
2422 switch (rtattr->rta_type) {
2423 /* binary: depends on family type */
2424 case IFA_ADDRESS:
2425 case IFA_LOCAL:
2426 break;
2427 /* string */
2428 case IFA_LABEL:
2429 break;
2430 /* u32 */
2431 case IFA_FLAGS:
2432 case IFA_BROADCAST:
2433 u32 = RTA_DATA(rtattr);
2434 *u32 = tswap32(*u32);
2435 break;
/* ifa_cacheinfo: four 32-bit lifetime/timestamp fields */
2436 /* struct ifa_cacheinfo */
2437 case IFA_CACHEINFO:
2438 ci = RTA_DATA(rtattr);
2439 ci->ifa_prefered = tswap32(ci->ifa_prefered);
2440 ci->ifa_valid = tswap32(ci->ifa_valid);
2441 ci->cstamp = tswap32(ci->cstamp);
2442 ci->tstamp = tswap32(ci->tstamp);
2443 break;
2444 default:
2445 gemu_log("Unknown host IFA type: %d\n", rtattr->rta_type);
2446 break;
2448 return 0;
2451 static abi_long host_to_target_data_route_rtattr(struct rtattr *rtattr)
2453 uint32_t *u32;
2454 switch (rtattr->rta_type) {
2455 /* binary: depends on family type */
2456 case RTA_GATEWAY:
2457 case RTA_DST:
2458 case RTA_PREFSRC:
2459 break;
2460 /* u32 */
2461 case RTA_PRIORITY:
2462 case RTA_TABLE:
2463 case RTA_OIF:
2464 u32 = RTA_DATA(rtattr);
2465 *u32 = tswap32(*u32);
2466 break;
2467 default:
2468 gemu_log("Unknown host RTA type: %d\n", rtattr->rta_type);
2469 break;
2471 return 0;
/* Walk all IFLA_* attributes of an RTM_*LINK message, converting each. */
2474 static abi_long host_to_target_link_rtattr(struct rtattr *rtattr,
2475 uint32_t rtattr_len)
2477 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2478 host_to_target_data_link_rtattr);
/* Walk all IFA_* attributes of an RTM_*ADDR message, converting each. */
2481 static abi_long host_to_target_addr_rtattr(struct rtattr *rtattr,
2482 uint32_t rtattr_len)
2484 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2485 host_to_target_data_addr_rtattr);
/* Walk all RTA_* attributes of an RTM_*ROUTE message, converting each. */
2488 static abi_long host_to_target_route_rtattr(struct rtattr *rtattr,
2489 uint32_t rtattr_len)
2491 return host_to_target_for_each_rtattr(rtattr, rtattr_len,
2492 host_to_target_data_route_rtattr);
/*
 * Convert (host -> target) one rtnetlink message: byte-swap the
 * fixed-size family header, then walk the trailing attributes.
 *
 * nlmsg_len is captured before any swapping so the attribute-length
 * computation uses the host-order value.  Returns 0 on success,
 * -TARGET_EINVAL for unhandled message types.
 */
2495 static abi_long host_to_target_data_route(struct nlmsghdr *nlh)
2497 uint32_t nlmsg_len;
2498 struct ifinfomsg *ifi;
2499 struct ifaddrmsg *ifa;
2500 struct rtmsg *rtm;
2502 nlmsg_len = nlh->nlmsg_len;
2503 switch (nlh->nlmsg_type) {
2504 case RTM_NEWLINK:
2505 case RTM_DELLINK:
2506 case RTM_GETLINK:
/* only touch the payload if the message really carries the header */
2507 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2508 ifi = NLMSG_DATA(nlh);
2509 ifi->ifi_type = tswap16(ifi->ifi_type);
2510 ifi->ifi_index = tswap32(ifi->ifi_index);
2511 ifi->ifi_flags = tswap32(ifi->ifi_flags);
2512 ifi->ifi_change = tswap32(ifi->ifi_change);
2513 host_to_target_link_rtattr(IFLA_RTA(ifi),
2514 nlmsg_len - NLMSG_LENGTH(sizeof(*ifi)));
2516 break;
2517 case RTM_NEWADDR:
2518 case RTM_DELADDR:
2519 case RTM_GETADDR:
2520 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2521 ifa = NLMSG_DATA(nlh);
2522 ifa->ifa_index = tswap32(ifa->ifa_index);
2523 host_to_target_addr_rtattr(IFA_RTA(ifa),
2524 nlmsg_len - NLMSG_LENGTH(sizeof(*ifa)));
2526 break;
2527 case RTM_NEWROUTE:
2528 case RTM_DELROUTE:
2529 case RTM_GETROUTE:
2530 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2531 rtm = NLMSG_DATA(nlh);
2532 rtm->rtm_flags = tswap32(rtm->rtm_flags);
2533 host_to_target_route_rtattr(RTM_RTA(rtm),
2534 nlmsg_len - NLMSG_LENGTH(sizeof(*rtm)));
2536 break;
2537 default:
2538 return -TARGET_EINVAL;
2540 return 0;
/* Convert every rtnetlink message in a host buffer of 'len' bytes. */
2543 static inline abi_long host_to_target_nlmsg_route(struct nlmsghdr *nlh,
2544 size_t len)
2546 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_route);
/*
 * Walk a target-order rtattr chain, converting each header to host
 * order and invoking the per-attribute callback.
 *
 * The length/type are validated on the *swapped* value before the
 * in-place swap, and RTA_ALIGN on the already-host-order rta_len
 * advances to the next attribute; this ordering is load-bearing.
 * Stops early (returning 0) on a malformed length; propagates the
 * callback's error otherwise.
 */
2549 static abi_long target_to_host_for_each_rtattr(struct rtattr *rtattr,
2550 size_t len,
2551 abi_long (*target_to_host_rtattr)
2552 (struct rtattr *))
2554 abi_long ret;
2556 while (len >= sizeof(struct rtattr)) {
2557 if (tswap16(rtattr->rta_len) < sizeof(struct rtattr) ||
2558 tswap16(rtattr->rta_len) > len) {
2559 break;
2561 rtattr->rta_len = tswap16(rtattr->rta_len);
2562 rtattr->rta_type = tswap16(rtattr->rta_type);
2563 ret = target_to_host_rtattr(rtattr);
2564 if (ret < 0) {
2565 return ret;
2567 len -= RTA_ALIGN(rtattr->rta_len);
2568 rtattr = (struct rtattr *)(((char *)rtattr) +
2569 RTA_ALIGN(rtattr->rta_len));
2571 return 0;
2574 static abi_long target_to_host_data_link_rtattr(struct rtattr *rtattr)
2576 switch (rtattr->rta_type) {
2577 default:
2578 gemu_log("Unknown target QEMU_IFLA type: %d\n", rtattr->rta_type);
2579 break;
2581 return 0;
2584 static abi_long target_to_host_data_addr_rtattr(struct rtattr *rtattr)
2586 switch (rtattr->rta_type) {
2587 /* binary: depends on family type */
2588 case IFA_LOCAL:
2589 case IFA_ADDRESS:
2590 break;
2591 default:
2592 gemu_log("Unknown target IFA type: %d\n", rtattr->rta_type);
2593 break;
2595 return 0;
2598 static abi_long target_to_host_data_route_rtattr(struct rtattr *rtattr)
2600 uint32_t *u32;
2601 switch (rtattr->rta_type) {
2602 /* binary: depends on family type */
2603 case RTA_DST:
2604 case RTA_SRC:
2605 case RTA_GATEWAY:
2606 break;
2607 /* u32 */
2608 case RTA_PRIORITY:
2609 case RTA_OIF:
2610 u32 = RTA_DATA(rtattr);
2611 *u32 = tswap32(*u32);
2612 break;
2613 default:
2614 gemu_log("Unknown target RTA type: %d\n", rtattr->rta_type);
2615 break;
2617 return 0;
/* Walk all IFLA_* attributes of a target RTM_*LINK request. */
2620 static void target_to_host_link_rtattr(struct rtattr *rtattr,
2621 uint32_t rtattr_len)
2623 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2624 target_to_host_data_link_rtattr);
/* Walk all IFA_* attributes of a target RTM_*ADDR request. */
2627 static void target_to_host_addr_rtattr(struct rtattr *rtattr,
2628 uint32_t rtattr_len)
2630 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2631 target_to_host_data_addr_rtattr);
/* Walk all RTA_* attributes of a target RTM_*ROUTE request. */
2634 static void target_to_host_route_rtattr(struct rtattr *rtattr,
2635 uint32_t rtattr_len)
2637 target_to_host_for_each_rtattr(rtattr, rtattr_len,
2638 target_to_host_data_route_rtattr);
/*
 * Convert (target -> host) one rtnetlink request: byte-swap the
 * fixed-size family header in place, then walk the trailing
 * attributes.  GET requests carry no payload to convert.
 *
 * Returns 0 on success, -TARGET_EOPNOTSUPP for unhandled types.
 */
2641 static abi_long target_to_host_data_route(struct nlmsghdr *nlh)
2643 struct ifinfomsg *ifi;
2644 struct ifaddrmsg *ifa;
2645 struct rtmsg *rtm;
2647 switch (nlh->nlmsg_type) {
2648 case RTM_GETLINK:
2649 break;
2650 case RTM_NEWLINK:
2651 case RTM_DELLINK:
/* only touch the payload if the message really carries the header */
2652 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifi))) {
2653 ifi = NLMSG_DATA(nlh);
2654 ifi->ifi_type = tswap16(ifi->ifi_type);
2655 ifi->ifi_index = tswap32(ifi->ifi_index);
2656 ifi->ifi_flags = tswap32(ifi->ifi_flags);
2657 ifi->ifi_change = tswap32(ifi->ifi_change);
2658 target_to_host_link_rtattr(IFLA_RTA(ifi), nlh->nlmsg_len -
2659 NLMSG_LENGTH(sizeof(*ifi)));
2661 break;
2662 case RTM_GETADDR:
2663 case RTM_NEWADDR:
2664 case RTM_DELADDR:
2665 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*ifa))) {
2666 ifa = NLMSG_DATA(nlh);
2667 ifa->ifa_index = tswap32(ifa->ifa_index);
2668 target_to_host_addr_rtattr(IFA_RTA(ifa), nlh->nlmsg_len -
2669 NLMSG_LENGTH(sizeof(*ifa)));
2671 break;
2672 case RTM_GETROUTE:
2673 break;
2674 case RTM_NEWROUTE:
2675 case RTM_DELROUTE:
2676 if (nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(*rtm))) {
2677 rtm = NLMSG_DATA(nlh);
2678 rtm->rtm_flags = tswap32(rtm->rtm_flags);
2679 target_to_host_route_rtattr(RTM_RTA(rtm), nlh->nlmsg_len -
2680 NLMSG_LENGTH(sizeof(*rtm)));
2682 break;
2683 default:
2684 return -TARGET_EOPNOTSUPP;
2686 return 0;
/* Convert every rtnetlink request in a target buffer of 'len' bytes. */
2689 static abi_long target_to_host_nlmsg_route(struct nlmsghdr *nlh, size_t len)
2691 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_route);
2693 #endif /* CONFIG_RTNETLINK */
2695 static abi_long host_to_target_data_audit(struct nlmsghdr *nlh)
2697 switch (nlh->nlmsg_type) {
2698 default:
2699 gemu_log("Unknown host audit message type %d\n",
2700 nlh->nlmsg_type);
2701 return -TARGET_EINVAL;
2703 return 0;
/* Convert every audit message in a host buffer of 'len' bytes. */
2706 static inline abi_long host_to_target_nlmsg_audit(struct nlmsghdr *nlh,
2707 size_t len)
2709 return host_to_target_for_each_nlmsg(nlh, len, host_to_target_data_audit);
2712 static abi_long target_to_host_data_audit(struct nlmsghdr *nlh)
2714 switch (nlh->nlmsg_type) {
2715 case AUDIT_USER:
2716 case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
2717 case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
2718 break;
2719 default:
2720 gemu_log("Unknown target audit message type %d\n",
2721 nlh->nlmsg_type);
2722 return -TARGET_EINVAL;
2725 return 0;
/* Convert every audit request in a target buffer of 'len' bytes. */
2728 static abi_long target_to_host_nlmsg_audit(struct nlmsghdr *nlh, size_t len)
2730 return target_to_host_for_each_nlmsg(nlh, len, target_to_host_data_audit);
2733 /* do_setsockopt() Must return target values and target errnos. */
2734 static abi_long do_setsockopt(int sockfd, int level, int optname,
2735 abi_ulong optval_addr, socklen_t optlen)
2737 abi_long ret;
2738 int val;
2739 struct ip_mreqn *ip_mreq;
2740 struct ip_mreq_source *ip_mreq_source;
2742 switch(level) {
2743 case SOL_TCP:
2744 /* TCP options all take an 'int' value. */
2745 if (optlen < sizeof(uint32_t))
2746 return -TARGET_EINVAL;
2748 if (get_user_u32(val, optval_addr))
2749 return -TARGET_EFAULT;
2750 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2751 break;
2752 case SOL_IP:
2753 switch(optname) {
2754 case IP_TOS:
2755 case IP_TTL:
2756 case IP_HDRINCL:
2757 case IP_ROUTER_ALERT:
2758 case IP_RECVOPTS:
2759 case IP_RETOPTS:
2760 case IP_PKTINFO:
2761 case IP_MTU_DISCOVER:
2762 case IP_RECVERR:
2763 case IP_RECVTOS:
2764 #ifdef IP_FREEBIND
2765 case IP_FREEBIND:
2766 #endif
2767 case IP_MULTICAST_TTL:
2768 case IP_MULTICAST_LOOP:
2769 val = 0;
2770 if (optlen >= sizeof(uint32_t)) {
2771 if (get_user_u32(val, optval_addr))
2772 return -TARGET_EFAULT;
2773 } else if (optlen >= 1) {
2774 if (get_user_u8(val, optval_addr))
2775 return -TARGET_EFAULT;
2777 ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2778 break;
2779 case IP_ADD_MEMBERSHIP:
2780 case IP_DROP_MEMBERSHIP:
2781 if (optlen < sizeof (struct target_ip_mreq) ||
2782 optlen > sizeof (struct target_ip_mreqn))
2783 return -TARGET_EINVAL;
2785 ip_mreq = (struct ip_mreqn *) alloca(optlen);
2786 target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2787 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2788 break;
2790 case IP_BLOCK_SOURCE:
2791 case IP_UNBLOCK_SOURCE:
2792 case IP_ADD_SOURCE_MEMBERSHIP:
2793 case IP_DROP_SOURCE_MEMBERSHIP:
2794 if (optlen != sizeof (struct target_ip_mreq_source))
2795 return -TARGET_EINVAL;
2797 ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2798 ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2799 unlock_user (ip_mreq_source, optval_addr, 0);
2800 break;
2802 default:
2803 goto unimplemented;
2805 break;
2806 case SOL_IPV6:
2807 switch (optname) {
2808 case IPV6_MTU_DISCOVER:
2809 case IPV6_MTU:
2810 case IPV6_V6ONLY:
2811 case IPV6_RECVPKTINFO:
2812 val = 0;
2813 if (optlen < sizeof(uint32_t)) {
2814 return -TARGET_EINVAL;
2816 if (get_user_u32(val, optval_addr)) {
2817 return -TARGET_EFAULT;
2819 ret = get_errno(setsockopt(sockfd, level, optname,
2820 &val, sizeof(val)));
2821 break;
2822 default:
2823 goto unimplemented;
2825 break;
2826 case SOL_RAW:
2827 switch (optname) {
2828 case ICMP_FILTER:
2829 /* struct icmp_filter takes an u32 value */
2830 if (optlen < sizeof(uint32_t)) {
2831 return -TARGET_EINVAL;
2834 if (get_user_u32(val, optval_addr)) {
2835 return -TARGET_EFAULT;
2837 ret = get_errno(setsockopt(sockfd, level, optname,
2838 &val, sizeof(val)));
2839 break;
2841 default:
2842 goto unimplemented;
2844 break;
2845 case TARGET_SOL_SOCKET:
2846 switch (optname) {
2847 case TARGET_SO_RCVTIMEO:
2849 struct timeval tv;
2851 optname = SO_RCVTIMEO;
2853 set_timeout:
2854 if (optlen != sizeof(struct target_timeval)) {
2855 return -TARGET_EINVAL;
2858 if (copy_from_user_timeval(&tv, optval_addr)) {
2859 return -TARGET_EFAULT;
2862 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2863 &tv, sizeof(tv)));
2864 return ret;
2866 case TARGET_SO_SNDTIMEO:
2867 optname = SO_SNDTIMEO;
2868 goto set_timeout;
2869 case TARGET_SO_ATTACH_FILTER:
2871 struct target_sock_fprog *tfprog;
2872 struct target_sock_filter *tfilter;
2873 struct sock_fprog fprog;
2874 struct sock_filter *filter;
2875 int i;
2877 if (optlen != sizeof(*tfprog)) {
2878 return -TARGET_EINVAL;
2880 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2881 return -TARGET_EFAULT;
2883 if (!lock_user_struct(VERIFY_READ, tfilter,
2884 tswapal(tfprog->filter), 0)) {
2885 unlock_user_struct(tfprog, optval_addr, 1);
2886 return -TARGET_EFAULT;
2889 fprog.len = tswap16(tfprog->len);
2890 filter = g_try_new(struct sock_filter, fprog.len);
2891 if (filter == NULL) {
2892 unlock_user_struct(tfilter, tfprog->filter, 1);
2893 unlock_user_struct(tfprog, optval_addr, 1);
2894 return -TARGET_ENOMEM;
2896 for (i = 0; i < fprog.len; i++) {
2897 filter[i].code = tswap16(tfilter[i].code);
2898 filter[i].jt = tfilter[i].jt;
2899 filter[i].jf = tfilter[i].jf;
2900 filter[i].k = tswap32(tfilter[i].k);
2902 fprog.filter = filter;
2904 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2905 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2906 g_free(filter);
2908 unlock_user_struct(tfilter, tfprog->filter, 1);
2909 unlock_user_struct(tfprog, optval_addr, 1);
2910 return ret;
2912 case TARGET_SO_BINDTODEVICE:
2914 char *dev_ifname, *addr_ifname;
2916 if (optlen > IFNAMSIZ - 1) {
2917 optlen = IFNAMSIZ - 1;
2919 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2920 if (!dev_ifname) {
2921 return -TARGET_EFAULT;
2923 optname = SO_BINDTODEVICE;
2924 addr_ifname = alloca(IFNAMSIZ);
2925 memcpy(addr_ifname, dev_ifname, optlen);
2926 addr_ifname[optlen] = 0;
2927 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2928 addr_ifname, optlen));
2929 unlock_user (dev_ifname, optval_addr, 0);
2930 return ret;
2932 /* Options with 'int' argument. */
2933 case TARGET_SO_DEBUG:
2934 optname = SO_DEBUG;
2935 break;
2936 case TARGET_SO_REUSEADDR:
2937 optname = SO_REUSEADDR;
2938 break;
2939 case TARGET_SO_TYPE:
2940 optname = SO_TYPE;
2941 break;
2942 case TARGET_SO_ERROR:
2943 optname = SO_ERROR;
2944 break;
2945 case TARGET_SO_DONTROUTE:
2946 optname = SO_DONTROUTE;
2947 break;
2948 case TARGET_SO_BROADCAST:
2949 optname = SO_BROADCAST;
2950 break;
2951 case TARGET_SO_SNDBUF:
2952 optname = SO_SNDBUF;
2953 break;
2954 case TARGET_SO_SNDBUFFORCE:
2955 optname = SO_SNDBUFFORCE;
2956 break;
2957 case TARGET_SO_RCVBUF:
2958 optname = SO_RCVBUF;
2959 break;
2960 case TARGET_SO_RCVBUFFORCE:
2961 optname = SO_RCVBUFFORCE;
2962 break;
2963 case TARGET_SO_KEEPALIVE:
2964 optname = SO_KEEPALIVE;
2965 break;
2966 case TARGET_SO_OOBINLINE:
2967 optname = SO_OOBINLINE;
2968 break;
2969 case TARGET_SO_NO_CHECK:
2970 optname = SO_NO_CHECK;
2971 break;
2972 case TARGET_SO_PRIORITY:
2973 optname = SO_PRIORITY;
2974 break;
2975 #ifdef SO_BSDCOMPAT
2976 case TARGET_SO_BSDCOMPAT:
2977 optname = SO_BSDCOMPAT;
2978 break;
2979 #endif
2980 case TARGET_SO_PASSCRED:
2981 optname = SO_PASSCRED;
2982 break;
2983 case TARGET_SO_PASSSEC:
2984 optname = SO_PASSSEC;
2985 break;
2986 case TARGET_SO_TIMESTAMP:
2987 optname = SO_TIMESTAMP;
2988 break;
2989 case TARGET_SO_RCVLOWAT:
2990 optname = SO_RCVLOWAT;
2991 break;
2992 break;
2993 default:
2994 goto unimplemented;
2996 if (optlen < sizeof(uint32_t))
2997 return -TARGET_EINVAL;
2999 if (get_user_u32(val, optval_addr))
3000 return -TARGET_EFAULT;
3001 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
3002 break;
3003 default:
3004 unimplemented:
3005 gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
3006 ret = -TARGET_ENOPROTOOPT;
3008 return ret;
/*
 * Translate a guest getsockopt(2): convert the target level/optname,
 * call the host getsockopt, and write the result (and its length) back
 * to guest memory with the appropriate byte-swapping and truncation.
 */
3011 /* do_getsockopt() Must return target values and target errnos. */
3012 static abi_long do_getsockopt(int sockfd, int level, int optname,
3013 abi_ulong optval_addr, abi_ulong optlen)
3015 abi_long ret;
3016 int len, val;
3017 socklen_t lv;
3019 switch(level) {
3020 case TARGET_SOL_SOCKET:
3021 level = SOL_SOCKET;
3022 switch (optname) {
3023 /* These don't just return a single integer */
3024 case TARGET_SO_LINGER:
3025 case TARGET_SO_RCVTIMEO:
3026 case TARGET_SO_SNDTIMEO:
3027 case TARGET_SO_PEERNAME:
3028 goto unimplemented;
3029 case TARGET_SO_PEERCRED: {
3030 struct ucred cr;
3031 socklen_t crlen;
3032 struct target_ucred *tcr;
3034 if (get_user_u32(len, optlen)) {
3035 return -TARGET_EFAULT;
3037 if (len < 0) {
3038 return -TARGET_EINVAL;
3041 crlen = sizeof(cr);
3042 ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
3043 &cr, &crlen));
3044 if (ret < 0) {
3045 return ret;
3047 if (len > crlen) {
3048 len = crlen;
3050 if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
3051 return -TARGET_EFAULT;
3053 __put_user(cr.pid, &tcr->pid);
3054 __put_user(cr.uid, &tcr->uid);
3055 __put_user(cr.gid, &tcr->gid);
3056 unlock_user_struct(tcr, optval_addr, 1);
3057 if (put_user_u32(len, optlen)) {
3058 return -TARGET_EFAULT;
3060 break;
3062 /* Options with 'int' argument. */
3063 case TARGET_SO_DEBUG:
3064 optname = SO_DEBUG;
3065 goto int_case;
3066 case TARGET_SO_REUSEADDR:
3067 optname = SO_REUSEADDR;
3068 goto int_case;
3069 case TARGET_SO_TYPE:
3070 optname = SO_TYPE;
3071 goto int_case;
3072 case TARGET_SO_ERROR:
3073 optname = SO_ERROR;
3074 goto int_case;
3075 case TARGET_SO_DONTROUTE:
3076 optname = SO_DONTROUTE;
3077 goto int_case;
3078 case TARGET_SO_BROADCAST:
3079 optname = SO_BROADCAST;
3080 goto int_case;
3081 case TARGET_SO_SNDBUF:
3082 optname = SO_SNDBUF;
3083 goto int_case;
3084 case TARGET_SO_RCVBUF:
3085 optname = SO_RCVBUF;
3086 goto int_case;
3087 case TARGET_SO_KEEPALIVE:
3088 optname = SO_KEEPALIVE;
3089 goto int_case;
3090 case TARGET_SO_OOBINLINE:
3091 optname = SO_OOBINLINE;
3092 goto int_case;
3093 case TARGET_SO_NO_CHECK:
3094 optname = SO_NO_CHECK;
3095 goto int_case;
3096 case TARGET_SO_PRIORITY:
3097 optname = SO_PRIORITY;
3098 goto int_case;
3099 #ifdef SO_BSDCOMPAT
3100 case TARGET_SO_BSDCOMPAT:
3101 optname = SO_BSDCOMPAT;
3102 goto int_case;
3103 #endif
3104 case TARGET_SO_PASSCRED:
3105 optname = SO_PASSCRED;
3106 goto int_case;
3107 case TARGET_SO_TIMESTAMP:
3108 optname = SO_TIMESTAMP;
3109 goto int_case;
3110 case TARGET_SO_RCVLOWAT:
3111 optname = SO_RCVLOWAT;
3112 goto int_case;
3113 case TARGET_SO_ACCEPTCONN:
3114 optname = SO_ACCEPTCONN;
3115 goto int_case;
3116 default:
3117 goto int_case;
3119 break;
3120 case SOL_TCP:
3121 /* TCP options all take an 'int' value. */
3122 int_case:
3123 if (get_user_u32(len, optlen))
3124 return -TARGET_EFAULT;
3125 if (len < 0)
3126 return -TARGET_EINVAL;
/* NOTE(review): 'lv' is seeded with sizeof(lv); sizeof(val) would read
 * clearer since 'val' is the buffer passed to getsockopt.  Both are 4
 * bytes here, so behavior is identical — confirm before changing. */
3127 lv = sizeof(lv);
3128 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
3129 if (ret < 0)
3130 return ret;
3131 if (optname == SO_TYPE) {
3132 val = host_to_target_sock_type(val);
3134 if (len > lv)
3135 len = lv;
3136 if (len == 4) {
3137 if (put_user_u32(val, optval_addr))
3138 return -TARGET_EFAULT;
3139 } else {
3140 if (put_user_u8(val, optval_addr))
3141 return -TARGET_EFAULT;
3143 if (put_user_u32(len, optlen))
3144 return -TARGET_EFAULT;
3145 break;
3146 case SOL_IP:
3147 switch(optname) {
3148 case IP_TOS:
3149 case IP_TTL:
3150 case IP_HDRINCL:
3151 case IP_ROUTER_ALERT:
3152 case IP_RECVOPTS:
3153 case IP_RETOPTS:
3154 case IP_PKTINFO:
3155 case IP_MTU_DISCOVER:
3156 case IP_RECVERR:
3157 case IP_RECVTOS:
3158 #ifdef IP_FREEBIND
3159 case IP_FREEBIND:
3160 #endif
3161 case IP_MULTICAST_TTL:
3162 case IP_MULTICAST_LOOP:
3163 if (get_user_u32(len, optlen))
3164 return -TARGET_EFAULT;
3165 if (len < 0)
3166 return -TARGET_EINVAL;
3167 lv = sizeof(lv);
3168 ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
3169 if (ret < 0)
3170 return ret;
/* small byte-range values may be written back as a single byte */
3171 if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
3172 len = 1;
3173 if (put_user_u32(len, optlen)
3174 || put_user_u8(val, optval_addr))
3175 return -TARGET_EFAULT;
3176 } else {
3177 if (len > sizeof(int))
3178 len = sizeof(int);
3179 if (put_user_u32(len, optlen)
3180 || put_user_u32(val, optval_addr))
3181 return -TARGET_EFAULT;
3183 break;
3184 default:
3185 ret = -TARGET_ENOPROTOOPT;
3186 break;
3188 break;
3189 default:
3190 unimplemented:
3191 gemu_log("getsockopt level=%d optname=%d not yet supported\n",
3192 level, optname);
3193 ret = -TARGET_EOPNOTSUPP;
3194 break;
3196 return ret;
/*
 * Build a host iovec array from a guest target_iovec array, locking
 * each guest buffer into host memory.
 *
 * On success returns the iovec array (caller frees via unlock_iovec);
 * on failure returns NULL with errno set.  A bad buffer address after
 * the first entry is not an error: remaining entries get NULL/0 so the
 * syscall performs a partial transfer, matching kernel behavior.
 */
3199 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
3200 abi_ulong count, int copy)
3202 struct target_iovec *target_vec;
3203 struct iovec *vec;
3204 abi_ulong total_len, max_len;
3205 int i;
3206 int err = 0;
3207 bool bad_address = false;
3209 if (count == 0) {
3210 errno = 0;
3211 return NULL;
3213 if (count > IOV_MAX) {
3214 errno = EINVAL;
3215 return NULL;
3218 vec = g_try_new0(struct iovec, count);
3219 if (vec == NULL) {
3220 errno = ENOMEM;
3221 return NULL;
3224 target_vec = lock_user(VERIFY_READ, target_addr,
3225 count * sizeof(struct target_iovec), 1);
3226 if (target_vec == NULL) {
3227 err = EFAULT;
3228 goto fail2;
3231 /* ??? If host page size > target page size, this will result in a
3232 value larger than what we can actually support. */
3233 max_len = 0x7fffffff & TARGET_PAGE_MASK;
3234 total_len = 0;
3236 for (i = 0; i < count; i++) {
3237 abi_ulong base = tswapal(target_vec[i].iov_base);
3238 abi_long len = tswapal(target_vec[i].iov_len);
3240 if (len < 0) {
3241 err = EINVAL;
3242 goto fail;
3243 } else if (len == 0) {
3244 /* Zero length pointer is ignored. */
3245 vec[i].iov_base = 0;
3246 } else {
3247 vec[i].iov_base = lock_user(type, base, len, copy);
3248 /* If the first buffer pointer is bad, this is a fault. But
3249 * subsequent bad buffers will result in a partial write; this
3250 * is realized by filling the vector with null pointers and
3251 * zero lengths. */
3252 if (!vec[i].iov_base) {
3253 if (i == 0) {
3254 err = EFAULT;
3255 goto fail;
3256 } else {
3257 bad_address = true;
3260 if (bad_address) {
3261 len = 0;
/* clamp so the running total never exceeds max_len */
3263 if (len > max_len - total_len) {
3264 len = max_len - total_len;
3267 vec[i].iov_len = len;
3268 total_len += len;
3271 unlock_user(target_vec, target_addr, 0);
3272 return vec;
/* unwind: unlock every buffer locked so far, then free the array */
3274 fail:
3275 while (--i >= 0) {
3276 if (tswapal(target_vec[i].iov_len) > 0) {
3277 unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3280 unlock_user(target_vec, target_addr, 0);
3281 fail2:
3282 g_free(vec);
3283 errno = err;
3284 return NULL;
/*
 * Release a host iovec created by lock_iovec: unlock each guest buffer
 * (writing data back when 'copy' is set) and free the array.  Stops at
 * the first negative length, mirroring lock_iovec's validation.
 */
3287 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3288 abi_ulong count, int copy)
3290 struct target_iovec *target_vec;
3291 int i;
3293 target_vec = lock_user(VERIFY_READ, target_addr,
3294 count * sizeof(struct target_iovec), 1);
3295 if (target_vec) {
3296 for (i = 0; i < count; i++) {
3297 abi_ulong base = tswapal(target_vec[i].iov_base);
3298 abi_long len = tswapal(target_vec[i].iov_len);
3299 if (len < 0) {
3300 break;
3302 unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3304 unlock_user(target_vec, target_addr, 0);
3307 g_free(vec);
/* Translate a target socket type value (base type in the low bits plus
 * TARGET_SOCK_CLOEXEC / TARGET_SOCK_NONBLOCK flags) into the host
 * encoding, in place.  Returns 0 on success, or -TARGET_EINVAL if a
 * requested flag cannot be honoured on this host.
 */
3310 static inline int target_to_host_sock_type(int *type)
3312 int host_type = 0;
3313 int target_type = *type;
3315 switch (target_type & TARGET_SOCK_TYPE_MASK) {
3316 case TARGET_SOCK_DGRAM:
3317 host_type = SOCK_DGRAM;
3318 break;
3319 case TARGET_SOCK_STREAM:
3320 host_type = SOCK_STREAM;
3321 break;
3322 default:
/* Other type values are assumed to match between target and host. */
3323 host_type = target_type & TARGET_SOCK_TYPE_MASK;
3324 break;
3326 if (target_type & TARGET_SOCK_CLOEXEC) {
3327 #if defined(SOCK_CLOEXEC)
3328 host_type |= SOCK_CLOEXEC;
3329 #else
3330 return -TARGET_EINVAL;
3331 #endif
3333 if (target_type & TARGET_SOCK_NONBLOCK) {
3334 #if defined(SOCK_NONBLOCK)
3335 host_type |= SOCK_NONBLOCK;
/* With O_NONBLOCK but no SOCK_NONBLOCK, sock_flags_fixup() applies
 * the flag after socket creation instead. */
3336 #elif !defined(O_NONBLOCK)
3337 return -TARGET_EINVAL;
3338 #endif
3340 *type = host_type;
3341 return 0;
3344 /* Try to emulate socket type flags after socket creation. */
3345 static int sock_flags_fixup(int fd, int target_type)
3347 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3348 if (target_type & TARGET_SOCK_NONBLOCK) {
3349 int flags = fcntl(fd, F_GETFL);
3350 if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3351 close(fd);
3352 return -TARGET_EINVAL;
3355 #endif
3356 return fd;
/* Convert a target sockaddr for an obsolete AF_INET/SOCK_PACKET socket.
 * The structure is copied verbatim and only sa_family is byte-swapped;
 * the protocol field (spkt_protocol) stays untouched because it is
 * big-endian on both sides.
 */
3359 static abi_long packet_target_to_host_sockaddr(void *host_addr,
3360 abi_ulong target_addr,
3361 socklen_t len)
3363 struct sockaddr *addr = host_addr;
3364 struct target_sockaddr *target_saddr;
3366 target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
3367 if (!target_saddr) {
3368 return -TARGET_EFAULT;
3371 memcpy(addr, target_saddr, len);
3372 addr->sa_family = tswap16(target_saddr->sa_family);
3373 /* spkt_protocol is big-endian */
3375 unlock_user(target_saddr, target_addr, 0);
3376 return 0;
/* fd translator for SOCK_PACKET fds: only the sockaddr needs converting. */
3379 static TargetFdTrans target_packet_trans = {
3380 .target_to_host_addr = packet_target_to_host_sockaddr,
3383 #ifdef CONFIG_RTNETLINK
/* Convert an outgoing NETLINK_ROUTE payload from target to host byte
 * order.  On success the (unchanged) buffer length is returned so the
 * caller can pass it straight to the host syscall.
 */
3384 static abi_long netlink_route_target_to_host(void *buf, size_t len)
3386 abi_long ret;
3388 ret = target_to_host_nlmsg_route(buf, len);
3389 if (ret < 0) {
3390 return ret;
3393 return len;
/* Convert an incoming NETLINK_ROUTE payload from host to target byte
 * order; returns the buffer length on success.
 */
3396 static abi_long netlink_route_host_to_target(void *buf, size_t len)
3398 abi_long ret;
3400 ret = host_to_target_nlmsg_route(buf, len);
3401 if (ret < 0) {
3402 return ret;
3405 return len;
/* fd translator registered for NETLINK_ROUTE sockets by do_socket(). */
3408 static TargetFdTrans target_netlink_route_trans = {
3409 .target_to_host_data = netlink_route_target_to_host,
3410 .host_to_target_data = netlink_route_host_to_target,
3412 #endif /* CONFIG_RTNETLINK */
/* Convert an outgoing NETLINK_AUDIT payload from target to host byte
 * order; returns the (unchanged) buffer length on success.
 */
3414 static abi_long netlink_audit_target_to_host(void *buf, size_t len)
3416 abi_long ret;
3418 ret = target_to_host_nlmsg_audit(buf, len);
3419 if (ret < 0) {
3420 return ret;
3423 return len;
/* Convert an incoming NETLINK_AUDIT payload from host to target byte
 * order; returns the buffer length on success.
 */
3426 static abi_long netlink_audit_host_to_target(void *buf, size_t len)
3428 abi_long ret;
3430 ret = host_to_target_nlmsg_audit(buf, len);
3431 if (ret < 0) {
3432 return ret;
3435 return len;
/* fd translator registered for NETLINK_AUDIT sockets by do_socket(). */
3438 static TargetFdTrans target_netlink_audit_trans = {
3439 .target_to_host_data = netlink_audit_target_to_host,
3440 .host_to_target_data = netlink_audit_host_to_target,
3443 /* do_socket() Must return target values and target errnos. */
3444 static abi_long do_socket(int domain, int type, int protocol)
3446 int target_type = type;
3447 int ret;
3449 ret = target_to_host_sock_type(&type);
3450 if (ret) {
3451 return ret;
/* Only netlink protocols we know how to byte-swap are allowed; others
 * would silently corrupt messages on cross-endian setups. */
3454 if (domain == PF_NETLINK && !(
3455 #ifdef CONFIG_RTNETLINK
3456 protocol == NETLINK_ROUTE ||
3457 #endif
3458 protocol == NETLINK_KOBJECT_UEVENT ||
3459 protocol == NETLINK_AUDIT)) {
/* NOTE(review): this is the host errno constant, not
 * -TARGET_EPFNOSUPPORT — the values can differ per target; confirm. */
3460 return -EPFNOSUPPORT;
/* Packet-socket protocol numbers are big-endian network values. */
3463 if (domain == AF_PACKET ||
3464 (domain == AF_INET && type == SOCK_PACKET)) {
3465 protocol = tswap16(protocol);
3468 ret = get_errno(socket(domain, type, protocol));
3469 if (ret >= 0) {
3470 ret = sock_flags_fixup(ret, target_type);
3471 if (type == SOCK_PACKET) {
3472 /* Manage an obsolete case :
3473 * if socket type is SOCK_PACKET, bind by name
3475 fd_trans_register(ret, &target_packet_trans);
3476 } else if (domain == PF_NETLINK) {
3477 switch (protocol) {
3478 #ifdef CONFIG_RTNETLINK
3479 case NETLINK_ROUTE:
3480 fd_trans_register(ret, &target_netlink_route_trans);
3481 break;
3482 #endif
3483 case NETLINK_KOBJECT_UEVENT:
3484 /* nothing to do: messages are strings */
3485 break;
3486 case NETLINK_AUDIT:
3487 fd_trans_register(ret, &target_netlink_audit_trans);
3488 break;
/* Unreachable: the protocol was validated above. */
3489 default:
3490 g_assert_not_reached();
3494 return ret;
3497 /* do_bind() Must return target values and target errnos. */
3498 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3499 socklen_t addrlen)
3501 void *addr;
3502 abi_long ret;
3504 if ((int)addrlen < 0) {
3505 return -TARGET_EINVAL;
3508 addr = alloca(addrlen+1);
3510 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3511 if (ret)
3512 return ret;
3514 return get_errno(bind(sockfd, addr, addrlen));
3517 /* do_connect() Must return target values and target errnos. */
3518 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3519 socklen_t addrlen)
3521 void *addr;
3522 abi_long ret;
3524 if ((int)addrlen < 0) {
3525 return -TARGET_EINVAL;
3528 addr = alloca(addrlen+1);
3530 ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3531 if (ret)
3532 return ret;
3534 return get_errno(safe_connect(sockfd, addr, addrlen));
3537 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
/* Core of sendmsg/recvmsg emulation, operating on an already-locked
 * target_msghdr.  'send' selects direction.  Converts the name, control
 * data and iovec between target and host representations, invokes the
 * safe_ syscall wrapper, and converts results back for the recv path.
 */
3538 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3539 int flags, int send)
3541 abi_long ret, len;
3542 struct msghdr msg;
3543 abi_ulong count;
3544 struct iovec *vec;
3545 abi_ulong target_vec;
3547 if (msgp->msg_name) {
3548 msg.msg_namelen = tswap32(msgp->msg_namelen);
3549 msg.msg_name = alloca(msg.msg_namelen+1);
3550 ret = target_to_host_sockaddr(fd, msg.msg_name,
3551 tswapal(msgp->msg_name),
3552 msg.msg_namelen);
3553 if (ret == -TARGET_EFAULT) {
3554 /* For connected sockets msg_name and msg_namelen must
3555 * be ignored, so returning EFAULT immediately is wrong.
3556 * Instead, pass a bad msg_name to the host kernel, and
3557 * let it decide whether to return EFAULT or not.
3559 msg.msg_name = (void *)-1;
3560 } else if (ret) {
3561 goto out2;
3563 } else {
3564 msg.msg_name = NULL;
3565 msg.msg_namelen = 0;
/* Double the control buffer: converted host cmsgs can be larger than
 * the target-sized originals. */
3567 msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3568 msg.msg_control = alloca(msg.msg_controllen);
3569 msg.msg_flags = tswap32(msgp->msg_flags);
3571 count = tswapal(msgp->msg_iovlen);
3572 target_vec = tswapal(msgp->msg_iov);
3574 if (count > IOV_MAX) {
3575 /* sendrcvmsg returns a different errno for this condition than
3576 * readv/writev, so we must catch it here before lock_iovec() does.
3578 ret = -TARGET_EMSGSIZE;
3579 goto out2;
3582 vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3583 target_vec, count, send);
3584 if (vec == NULL) {
/* lock_iovec() reports failure via host errno. */
3585 ret = -host_to_target_errno(errno);
3586 goto out2;
3588 msg.msg_iovlen = count;
3589 msg.msg_iov = vec;
3591 if (send) {
3592 if (fd_trans_target_to_host_data(fd)) {
/* The fd translator may rewrite the payload, so work on a private
 * copy of the first iovec element rather than the guest buffer. */
3593 void *host_msg;
3595 host_msg = g_malloc(msg.msg_iov->iov_len);
3596 memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3597 ret = fd_trans_target_to_host_data(fd)(host_msg,
3598 msg.msg_iov->iov_len);
3599 if (ret >= 0) {
3600 msg.msg_iov->iov_base = host_msg;
3601 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3603 g_free(host_msg);
3604 } else {
3605 ret = target_to_host_cmsg(&msg, msgp);
3606 if (ret == 0) {
3607 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3610 } else {
3611 ret = get_errno(safe_recvmsg(fd, &msg, flags));
3612 if (!is_error(ret)) {
3613 len = ret;
3614 if (fd_trans_host_to_target_data(fd)) {
3615 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3616 len);
3617 } else {
3618 ret = host_to_target_cmsg(msgp, &msg);
3620 if (!is_error(ret)) {
3621 msgp->msg_namelen = tswap32(msg.msg_namelen);
/* (void *)-1 marks the deliberately-bad name set above. */
3622 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3623 ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3624 msg.msg_name, msg.msg_namelen);
3625 if (ret) {
3626 goto out;
/* Success: return the byte count received. */
3630 ret = len;
3635 out:
3636 unlock_iovec(vec, target_vec, count, !send);
3637 out2:
3638 return ret;
/* sendmsg/recvmsg entry point: lock the guest msghdr (read-only for
 * send, writable for recv), delegate to do_sendrecvmsg_locked(), and
 * unlock with write-back only on the recv path.
 */
3641 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3642 int flags, int send)
3644 abi_long ret;
3645 struct target_msghdr *msgp;
3647 if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3648 msgp,
3649 target_msg,
3650 send ? 1 : 0)) {
3651 return -TARGET_EFAULT;
3653 ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3654 unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3655 return ret;
3658 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3659 * so it might not have this *mmsg-specific flag either.
3661 #ifndef MSG_WAITFORONE
3662 #define MSG_WAITFORONE 0x10000
3663 #endif
/* Emulate sendmmsg/recvmmsg by looping over do_sendrecvmsg_locked(),
 * one mmsghdr at a time.  Per kernel semantics, if at least one
 * datagram was transferred the count is returned and any later error is
 * dropped; otherwise the error itself is returned.
 */
3665 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3666 unsigned int vlen, unsigned int flags,
3667 int send)
3669 struct target_mmsghdr *mmsgp;
3670 abi_long ret = 0;
3671 int i;
/* The kernel silently clamps the vector length the same way. */
3673 if (vlen > UIO_MAXIOV) {
3674 vlen = UIO_MAXIOV;
3677 mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3678 if (!mmsgp) {
3679 return -TARGET_EFAULT;
3682 for (i = 0; i < vlen; i++) {
3683 ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3684 if (is_error(ret)) {
3685 break;
3687 mmsgp[i].msg_len = tswap32(ret);
3688 /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3689 if (flags & MSG_WAITFORONE) {
3690 flags |= MSG_DONTWAIT;
/* Write back only the entries actually processed. */
3694 unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3696 /* Return number of datagrams sent if we sent any at all;
3697 * otherwise return the error.
3699 if (i) {
3700 return i;
3702 return ret;
3705 /* do_accept4() Must return target values and target errnos. */
/* Emulate accept4(2) (and plain accept via flags==0): translates the
 * flag bits, handles the NULL-address fast path, validates the guest
 * addrlen pointer/value, and converts the peer address back on success.
 */
3706 static abi_long do_accept4(int fd, abi_ulong target_addr,
3707 abi_ulong target_addrlen_addr, int flags)
3709 socklen_t addrlen;
3710 void *addr;
3711 abi_long ret;
3712 int host_flags;
3714 host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3716 if (target_addr == 0) {
3717 return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3720 /* linux returns EINVAL if addrlen pointer is invalid */
3721 if (get_user_u32(addrlen, target_addrlen_addr))
3722 return -TARGET_EINVAL;
3724 if ((int)addrlen < 0) {
3725 return -TARGET_EINVAL;
3728 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3729 return -TARGET_EINVAL;
3731 addr = alloca(addrlen);
3733 ret = get_errno(safe_accept4(fd, addr, &addrlen, host_flags));
3734 if (!is_error(ret)) {
3735 host_to_target_sockaddr(target_addr, addr, addrlen);
3736 if (put_user_u32(addrlen, target_addrlen_addr))
3737 ret = -TARGET_EFAULT;
3739 return ret;
3742 /* do_getpeername() Must return target values and target errnos. */
/* Emulate getpeername(2): read the guest addrlen, call the host
 * syscall into a stack buffer, then copy the converted sockaddr and
 * updated length back to guest memory.
 */
3743 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3744 abi_ulong target_addrlen_addr)
3746 socklen_t addrlen;
3747 void *addr;
3748 abi_long ret;
3750 if (get_user_u32(addrlen, target_addrlen_addr))
3751 return -TARGET_EFAULT;
3753 if ((int)addrlen < 0) {
3754 return -TARGET_EINVAL;
3757 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3758 return -TARGET_EFAULT;
3760 addr = alloca(addrlen);
3762 ret = get_errno(getpeername(fd, addr, &addrlen));
3763 if (!is_error(ret)) {
3764 host_to_target_sockaddr(target_addr, addr, addrlen);
3765 if (put_user_u32(addrlen, target_addrlen_addr))
3766 ret = -TARGET_EFAULT;
3768 return ret;
3771 /* do_getsockname() Must return target values and target errnos. */
/* Emulate getsockname(2): identical structure to do_getpeername(),
 * differing only in the host syscall invoked.
 */
3772 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3773 abi_ulong target_addrlen_addr)
3775 socklen_t addrlen;
3776 void *addr;
3777 abi_long ret;
3779 if (get_user_u32(addrlen, target_addrlen_addr))
3780 return -TARGET_EFAULT;
3782 if ((int)addrlen < 0) {
3783 return -TARGET_EINVAL;
3786 if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
3787 return -TARGET_EFAULT;
3789 addr = alloca(addrlen);
3791 ret = get_errno(getsockname(fd, addr, &addrlen));
3792 if (!is_error(ret)) {
3793 host_to_target_sockaddr(target_addr, addr, addrlen);
3794 if (put_user_u32(addrlen, target_addrlen_addr))
3795 ret = -TARGET_EFAULT;
3797 return ret;
3800 /* do_socketpair() Must return target values and target errnos. */
3801 static abi_long do_socketpair(int domain, int type, int protocol,
3802 abi_ulong target_tab_addr)
3804 int tab[2];
3805 abi_long ret;
3807 target_to_host_sock_type(&type);
3809 ret = get_errno(socketpair(domain, type, protocol, tab));
3810 if (!is_error(ret)) {
3811 if (put_user_s32(tab[0], target_tab_addr)
3812 || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3813 ret = -TARGET_EFAULT;
3815 return ret;
3818 /* do_sendto() Must return target values and target errnos. */
/* Emulate sendto(2)/send(2) (target_addr==0 means no destination).
 * When an fd translator is registered, the payload is first duplicated
 * so the translator can rewrite it without touching guest memory; the
 * 'fail' path restores the original pointer before unlocking.
 */
3819 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3820 abi_ulong target_addr, socklen_t addrlen)
3822 void *addr;
3823 void *host_msg;
3824 void *copy_msg = NULL;
3825 abi_long ret;
3827 if ((int)addrlen < 0) {
3828 return -TARGET_EINVAL;
3831 host_msg = lock_user(VERIFY_READ, msg, len, 1);
3832 if (!host_msg)
3833 return -TARGET_EFAULT;
3834 if (fd_trans_target_to_host_data(fd)) {
/* copy_msg keeps the locked guest buffer; host_msg becomes a
 * mutable private copy for the translator. */
3835 copy_msg = host_msg;
3836 host_msg = g_malloc(len);
3837 memcpy(host_msg, copy_msg, len);
3838 ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3839 if (ret < 0) {
3840 goto fail;
3843 if (target_addr) {
3844 addr = alloca(addrlen+1);
3845 ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3846 if (ret) {
3847 goto fail;
3849 ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3850 } else {
3851 ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3853 fail:
/* Free the translator copy and restore the locked guest pointer so
 * the unlock below releases the right buffer. */
3854 if (copy_msg) {
3855 g_free(host_msg);
3856 host_msg = copy_msg;
3858 unlock_user(host_msg, msg, 0);
3859 return ret;
3862 /* do_recvfrom() Must return target values and target errnos. */
/* Emulate recvfrom(2)/recv(2) (target_addr==0 means the caller does not
 * want the source address).  On success the received data is written
 * back to the guest buffer and, if requested, the converted source
 * address and its length are stored as well.
 */
3863 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3864 abi_ulong target_addr,
3865 abi_ulong target_addrlen)
3867 socklen_t addrlen;
3868 void *addr;
3869 void *host_msg;
3870 abi_long ret;
3872 host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3873 if (!host_msg)
3874 return -TARGET_EFAULT;
3875 if (target_addr) {
3876 if (get_user_u32(addrlen, target_addrlen)) {
3877 ret = -TARGET_EFAULT;
3878 goto fail;
3880 if ((int)addrlen < 0) {
3881 ret = -TARGET_EINVAL;
3882 goto fail;
3884 addr = alloca(addrlen);
3885 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3886 addr, &addrlen));
3887 } else {
3888 addr = NULL; /* To keep compiler quiet. */
3889 ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3891 if (!is_error(ret)) {
/* Give a registered fd translator a chance to convert the payload
 * in place before it is handed back to the guest. */
3892 if (fd_trans_host_to_target_data(fd)) {
3893 ret = fd_trans_host_to_target_data(fd)(host_msg, ret);
3895 if (target_addr) {
3896 host_to_target_sockaddr(target_addr, addr, addrlen);
3897 if (put_user_u32(addrlen, target_addrlen)) {
3898 ret = -TARGET_EFAULT;
3899 goto fail;
/* Success path: unlock with write-back of the received bytes. */
3902 unlock_user(host_msg, msg, len);
3903 } else {
3904 fail:
/* Error path: unlock without copying anything to the guest. */
3905 unlock_user(host_msg, msg, 0);
3907 return ret;
3910 #ifdef TARGET_NR_socketcall
3911 /* do_socketcall() must return target values and target errnos. */
/* Demultiplex the legacy socketcall(2) syscall: fetch nargs[num]
 * abi_long arguments from the guest pointer vptr, then dispatch to the
 * corresponding do_* helper.
 */
3912 static abi_long do_socketcall(int num, abi_ulong vptr)
3914 static const unsigned nargs[] = { /* number of arguments per operation */
3915 [TARGET_SYS_SOCKET] = 3, /* domain, type, protocol */
3916 [TARGET_SYS_BIND] = 3, /* fd, addr, addrlen */
3917 [TARGET_SYS_CONNECT] = 3, /* fd, addr, addrlen */
3918 [TARGET_SYS_LISTEN] = 2, /* fd, backlog */
3919 [TARGET_SYS_ACCEPT] = 3, /* fd, addr, addrlen */
3920 [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3921 [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3922 [TARGET_SYS_SOCKETPAIR] = 4, /* domain, type, protocol, tab */
3923 [TARGET_SYS_SEND] = 4, /* fd, msg, len, flags */
3924 [TARGET_SYS_RECV] = 4, /* fd, msg, len, flags */
3925 [TARGET_SYS_SENDTO] = 6, /* fd, msg, len, flags, addr, addrlen */
3926 [TARGET_SYS_RECVFROM] = 6, /* fd, msg, len, flags, addr, addrlen */
3927 [TARGET_SYS_SHUTDOWN] = 2, /* fd, how */
3928 [TARGET_SYS_SETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3929 [TARGET_SYS_GETSOCKOPT] = 5, /* fd, level, optname, optval, optlen */
3930 [TARGET_SYS_SENDMSG] = 3, /* fd, msg, flags */
3931 [TARGET_SYS_RECVMSG] = 3, /* fd, msg, flags */
3932 [TARGET_SYS_ACCEPT4] = 4, /* fd, addr, addrlen, flags */
3933 [TARGET_SYS_RECVMMSG] = 4, /* fd, msgvec, vlen, flags */
3934 [TARGET_SYS_SENDMMSG] = 4, /* fd, msgvec, vlen, flags */
3936 abi_long a[6]; /* max 6 args */
3937 unsigned i;
3939 /* check the range of the first argument num */
3940 /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3941 if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3942 return -TARGET_EINVAL;
3944 /* ensure we have space for args */
3945 if (nargs[num] > ARRAY_SIZE(a)) {
3946 return -TARGET_EINVAL;
3948 /* collect the arguments in a[] according to nargs[] */
3949 for (i = 0; i < nargs[num]; ++i) {
3950 if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3951 return -TARGET_EFAULT;
3954 /* now when we have the args, invoke the appropriate underlying function */
3955 switch (num) {
3956 case TARGET_SYS_SOCKET: /* domain, type, protocol */
3957 return do_socket(a[0], a[1], a[2]);
3958 case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3959 return do_bind(a[0], a[1], a[2]);
3960 case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3961 return do_connect(a[0], a[1], a[2]);
3962 case TARGET_SYS_LISTEN: /* sockfd, backlog */
3963 return get_errno(listen(a[0], a[1]));
3964 case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3965 return do_accept4(a[0], a[1], a[2], 0);
3966 case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3967 return do_getsockname(a[0], a[1], a[2]);
3968 case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3969 return do_getpeername(a[0], a[1], a[2]);
3970 case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3971 return do_socketpair(a[0], a[1], a[2], a[3]);
3972 case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3973 return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3974 case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3975 return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3976 case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3977 return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3978 case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3979 return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3980 case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3981 return get_errno(shutdown(a[0], a[1]));
3982 case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3983 return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3984 case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3985 return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3986 case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3987 return do_sendrecvmsg(a[0], a[1], a[2], 1);
3988 case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3989 return do_sendrecvmsg(a[0], a[1], a[2], 0);
3990 case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3991 return do_accept4(a[0], a[1], a[2], a[3]);
3992 case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3993 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3994 case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3995 return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3996 default:
3997 gemu_log("Unsupported socketcall: %d\n", num);
3998 return -TARGET_EINVAL;
4001 #endif
4003 #define N_SHM_REGIONS 32
/* Bookkeeping for guest shmat() mappings so shmdt()/shmctl() can find
 * the size of a region given only its start address.
 */
4005 static struct shm_region {
4006 abi_ulong start;
4007 abi_ulong size;
4008 bool in_use;
4009 } shm_regions[N_SHM_REGIONS];
4011 #ifndef TARGET_SEMID64_DS
4012 /* asm-generic version of this struct */
/* Guest-ABI layout of semid64_ds for targets that do not provide their
 * own definition.  On 32-bit ABIs the time fields carry padding words.
 */
4013 struct target_semid64_ds
4015 struct target_ipc_perm sem_perm;
4016 abi_ulong sem_otime;
4017 #if TARGET_ABI_BITS == 32
4018 abi_ulong __unused1;
4019 #endif
4020 abi_ulong sem_ctime;
4021 #if TARGET_ABI_BITS == 32
4022 abi_ulong __unused2;
4023 #endif
4024 abi_ulong sem_nsems;
4025 abi_ulong __unused3;
4026 abi_ulong __unused4;
4028 #endif
/* Copy the ipc_perm embedded in a guest semid64_ds into host form.
 * The mode and __seq field widths are target dependent (32-bit on
 * Alpha/MIPS/PPC, 16-bit elsewhere), hence the per-target swaps.
 */
4030 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
4031 abi_ulong target_addr)
4033 struct target_ipc_perm *target_ip;
4034 struct target_semid64_ds *target_sd;
4036 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4037 return -TARGET_EFAULT;
4038 target_ip = &(target_sd->sem_perm);
4039 host_ip->__key = tswap32(target_ip->__key);
4040 host_ip->uid = tswap32(target_ip->uid);
4041 host_ip->gid = tswap32(target_ip->gid);
4042 host_ip->cuid = tswap32(target_ip->cuid);
4043 host_ip->cgid = tswap32(target_ip->cgid);
4044 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4045 host_ip->mode = tswap32(target_ip->mode);
4046 #else
4047 host_ip->mode = tswap16(target_ip->mode);
4048 #endif
4049 #if defined(TARGET_PPC)
4050 host_ip->__seq = tswap32(target_ip->__seq);
4051 #else
4052 host_ip->__seq = tswap16(target_ip->__seq);
4053 #endif
4054 unlock_user_struct(target_sd, target_addr, 0);
4055 return 0;
/* Mirror of target_to_host_ipc_perm(): write a host ipc_perm back into
 * the guest semid64_ds at target_addr, with the same per-target field
 * width handling for mode and __seq.
 */
4058 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
4059 struct ipc_perm *host_ip)
4061 struct target_ipc_perm *target_ip;
4062 struct target_semid64_ds *target_sd;
4064 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4065 return -TARGET_EFAULT;
4066 target_ip = &(target_sd->sem_perm);
4067 target_ip->__key = tswap32(host_ip->__key);
4068 target_ip->uid = tswap32(host_ip->uid);
4069 target_ip->gid = tswap32(host_ip->gid);
4070 target_ip->cuid = tswap32(host_ip->cuid);
4071 target_ip->cgid = tswap32(host_ip->cgid);
4072 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
4073 target_ip->mode = tswap32(host_ip->mode);
4074 #else
4075 target_ip->mode = tswap16(host_ip->mode);
4076 #endif
4077 #if defined(TARGET_PPC)
4078 target_ip->__seq = tswap32(host_ip->__seq);
4079 #else
4080 target_ip->__seq = tswap16(host_ip->__seq);
4081 #endif
4082 unlock_user_struct(target_sd, target_addr, 1);
4083 return 0;
/* Convert a guest semid64_ds to a host semid_ds: the permission block
 * is handled by target_to_host_ipc_perm(), the rest byte-swapped here.
 */
4086 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
4087 abi_ulong target_addr)
4089 struct target_semid64_ds *target_sd;
4091 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4092 return -TARGET_EFAULT;
4093 if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
4094 return -TARGET_EFAULT;
4095 host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
4096 host_sd->sem_otime = tswapal(target_sd->sem_otime);
4097 host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
4098 unlock_user_struct(target_sd, target_addr, 0);
4099 return 0;
/* Convert a host semid_ds back into the guest semid64_ds layout; the
 * inverse of target_to_host_semid_ds().
 */
4102 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
4103 struct semid_ds *host_sd)
4105 struct target_semid64_ds *target_sd;
4107 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4108 return -TARGET_EFAULT;
4109 if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
4110 return -TARGET_EFAULT;
4111 target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
4112 target_sd->sem_otime = tswapal(host_sd->sem_otime);
4113 target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
4114 unlock_user_struct(target_sd, target_addr, 1);
4115 return 0;
/* Guest-ABI layout of struct seminfo as returned by semctl(IPC_INFO /
 * SEM_INFO); field order matches the host struct.
 */
4118 struct target_seminfo {
4119 int semmap;
4120 int semmni;
4121 int semmns;
4122 int semmnu;
4123 int semmsl;
4124 int semopm;
4125 int semume;
4126 int semusz;
4127 int semvmx;
4128 int semaem;
/* Copy a host seminfo field-by-field into guest memory at target_addr. */
4131 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
4132 struct seminfo *host_seminfo)
4134 struct target_seminfo *target_seminfo;
4135 if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
4136 return -TARGET_EFAULT;
4137 __put_user(host_seminfo->semmap, &target_seminfo->semmap);
4138 __put_user(host_seminfo->semmni, &target_seminfo->semmni);
4139 __put_user(host_seminfo->semmns, &target_seminfo->semmns);
4140 __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
4141 __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
4142 __put_user(host_seminfo->semopm, &target_seminfo->semopm);
4143 __put_user(host_seminfo->semume, &target_seminfo->semume);
4144 __put_user(host_seminfo->semusz, &target_seminfo->semusz);
4145 __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
4146 __put_user(host_seminfo->semaem, &target_seminfo->semaem);
4147 unlock_user_struct(target_seminfo, target_addr, 1);
4148 return 0;
/* Host-side semctl() argument union (glibc does not declare it). */
4151 union semun {
4152 int val;
4153 struct semid_ds *buf;
4154 unsigned short *array;
4155 struct seminfo *__buf;
/* Guest-side equivalent: the pointer members are guest addresses. */
4158 union target_semun {
4159 int val;
4160 abi_ulong buf;
4161 abi_ulong array;
4162 abi_ulong __buf;
/* Read a guest semaphore value array (for SETALL) into a freshly
 * allocated host array.  The array size is obtained from the kernel via
 * IPC_STAT.  On success *host_array is owned by the caller and is later
 * released by host_to_target_semarray().
 */
4165 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
4166 abi_ulong target_addr)
4168 int nsems;
4169 unsigned short *array;
4170 union semun semun;
4171 struct semid_ds semid_ds;
4172 int i, ret;
4174 semun.buf = &semid_ds;
4176 ret = semctl(semid, 0, IPC_STAT, semun);
4177 if (ret == -1)
4178 return get_errno(ret);
4180 nsems = semid_ds.sem_nsems;
4182 *host_array = g_try_new(unsigned short, nsems);
4183 if (!*host_array) {
4184 return -TARGET_ENOMEM;
4186 array = lock_user(VERIFY_READ, target_addr,
4187 nsems*sizeof(unsigned short), 1);
4188 if (!array) {
4189 g_free(*host_array);
4190 return -TARGET_EFAULT;
4193 for(i=0; i<nsems; i++) {
4194 __get_user((*host_array)[i], &array[i]);
4196 unlock_user(array, target_addr, 0);
4198 return 0;
4201 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
4202 unsigned short **host_array)
4204 int nsems;
4205 unsigned short *array;
4206 union semun semun;
4207 struct semid_ds semid_ds;
4208 int i, ret;
4210 semun.buf = &semid_ds;
4212 ret = semctl(semid, 0, IPC_STAT, semun);
4213 if (ret == -1)
4214 return get_errno(ret);
4216 nsems = semid_ds.sem_nsems;
4218 array = lock_user(VERIFY_WRITE, target_addr,
4219 nsems*sizeof(unsigned short), 0);
4220 if (!array)
4221 return -TARGET_EFAULT;
4223 for(i=0; i<nsems; i++) {
4224 __put_user((*host_array)[i], &array[i]);
4226 g_free(*host_array);
4227 unlock_user(array, target_addr, 1);
4229 return 0;
/* Emulate semctl(2): convert the semun argument per command, invoke the
 * host semctl, and convert any output back to the guest.  The command
 * is masked with 0xff to strip the IPC_64 flag some targets pass.
 */
4232 static inline abi_long do_semctl(int semid, int semnum, int cmd,
4233 abi_ulong target_arg)
4235 union target_semun target_su = { .buf = target_arg };
4236 union semun arg;
4237 struct semid_ds dsarg;
4238 unsigned short *array = NULL;
4239 struct seminfo seminfo;
4240 abi_long ret = -TARGET_EINVAL;
4241 abi_long err;
4242 cmd &= 0xff;
4244 switch( cmd ) {
4245 case GETVAL:
4246 case SETVAL:
4247 /* In 64 bit cross-endian situations, we will erroneously pick up
4248 * the wrong half of the union for the "val" element. To rectify
4249 * this, the entire 8-byte structure is byteswapped, followed by
4250 * a swap of the 4 byte val field. In other cases, the data is
4251 * already in proper host byte order. */
4252 if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4253 target_su.buf = tswapal(target_su.buf);
4254 arg.val = tswap32(target_su.val);
4255 } else {
4256 arg.val = target_su.val;
4258 ret = get_errno(semctl(semid, semnum, cmd, arg));
4259 break;
4260 case GETALL:
4261 case SETALL:
/* 'array' is allocated by target_to_host_semarray() and released
 * by host_to_target_semarray() below. */
4262 err = target_to_host_semarray(semid, &array, target_su.array);
4263 if (err)
4264 return err;
4265 arg.array = array;
4266 ret = get_errno(semctl(semid, semnum, cmd, arg));
4267 err = host_to_target_semarray(semid, target_su.array, &array);
4268 if (err)
4269 return err;
4270 break;
4271 case IPC_STAT:
4272 case IPC_SET:
4273 case SEM_STAT:
4274 err = target_to_host_semid_ds(&dsarg, target_su.buf);
4275 if (err)
4276 return err;
4277 arg.buf = &dsarg;
4278 ret = get_errno(semctl(semid, semnum, cmd, arg));
4279 err = host_to_target_semid_ds(target_su.buf, &dsarg);
4280 if (err)
4281 return err;
4282 break;
4283 case IPC_INFO:
4284 case SEM_INFO:
4285 arg.__buf = &seminfo;
4286 ret = get_errno(semctl(semid, semnum, cmd, arg));
4287 err = host_to_target_seminfo(target_su.__buf, &seminfo);
4288 if (err)
4289 return err;
4290 break;
/* Commands that take no semun argument. */
4291 case IPC_RMID:
4292 case GETPID:
4293 case GETNCNT:
4294 case GETZCNT:
4295 ret = get_errno(semctl(semid, semnum, cmd, NULL));
4296 break;
4299 return ret;
/* Guest-ABI layout of struct sembuf (semop operation descriptor). */
4302 struct target_sembuf {
4303 unsigned short sem_num;
4304 short sem_op;
4305 short sem_flg;
/* Copy an array of nsops guest sembuf operations into host form. */
4308 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4309 abi_ulong target_addr,
4310 unsigned nsops)
4312 struct target_sembuf *target_sembuf;
4313 int i;
4315 target_sembuf = lock_user(VERIFY_READ, target_addr,
4316 nsops*sizeof(struct target_sembuf), 1);
4317 if (!target_sembuf)
4318 return -TARGET_EFAULT;
4320 for(i=0; i<nsops; i++) {
4321 __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4322 __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4323 __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4326 unlock_user(target_sembuf, target_addr, 0);
4328 return 0;
/* Emulate semop(2) via the restartable safe_semtimedop() wrapper.
 * NOTE(review): nsops sizes a VLA with no upper bound here — a huge
 * guest-supplied value could overflow the stack before the kernel
 * rejects it; confirm whether a SEMOPM-style cap is enforced upstream.
 */
4331 static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
4333 struct sembuf sops[nsops];
4335 if (target_to_host_sembuf(sops, ptr, nsops))
4336 return -TARGET_EFAULT;
4338 return get_errno(safe_semtimedop(semid, sops, nsops, NULL));
/* Guest-ABI layout of msqid64_ds; 32-bit ABIs carry padding words after
 * each time field, mirroring the asm-generic kernel layout.
 */
4341 struct target_msqid_ds
4343 struct target_ipc_perm msg_perm;
4344 abi_ulong msg_stime;
4345 #if TARGET_ABI_BITS == 32
4346 abi_ulong __unused1;
4347 #endif
4348 abi_ulong msg_rtime;
4349 #if TARGET_ABI_BITS == 32
4350 abi_ulong __unused2;
4351 #endif
4352 abi_ulong msg_ctime;
4353 #if TARGET_ABI_BITS == 32
4354 abi_ulong __unused3;
4355 #endif
4356 abi_ulong __msg_cbytes;
4357 abi_ulong msg_qnum;
4358 abi_ulong msg_qbytes;
4359 abi_ulong msg_lspid;
4360 abi_ulong msg_lrpid;
4361 abi_ulong __unused4;
4362 abi_ulong __unused5;
/* Convert a guest msqid_ds into host form; the permission block is
 * delegated to target_to_host_ipc_perm().
 */
4365 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4366 abi_ulong target_addr)
4368 struct target_msqid_ds *target_md;
4370 if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4371 return -TARGET_EFAULT;
4372 if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4373 return -TARGET_EFAULT;
4374 host_md->msg_stime = tswapal(target_md->msg_stime);
4375 host_md->msg_rtime = tswapal(target_md->msg_rtime);
4376 host_md->msg_ctime = tswapal(target_md->msg_ctime);
4377 host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4378 host_md->msg_qnum = tswapal(target_md->msg_qnum);
4379 host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4380 host_md->msg_lspid = tswapal(target_md->msg_lspid);
4381 host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4382 unlock_user_struct(target_md, target_addr, 0);
4383 return 0;
/* Convert a host msqid_ds back into the guest layout; the inverse of
 * target_to_host_msqid_ds().
 */
4386 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4387 struct msqid_ds *host_md)
4389 struct target_msqid_ds *target_md;
4391 if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4392 return -TARGET_EFAULT;
4393 if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4394 return -TARGET_EFAULT;
4395 target_md->msg_stime = tswapal(host_md->msg_stime);
4396 target_md->msg_rtime = tswapal(host_md->msg_rtime);
4397 target_md->msg_ctime = tswapal(host_md->msg_ctime);
4398 target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4399 target_md->msg_qnum = tswapal(host_md->msg_qnum);
4400 target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4401 target_md->msg_lspid = tswapal(host_md->msg_lspid);
4402 target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4403 unlock_user_struct(target_md, target_addr, 1);
4404 return 0;
/* Guest-ABI layout of struct msginfo (msgctl IPC_INFO / MSG_INFO). */
4407 struct target_msginfo {
4408 int msgpool;
4409 int msgmap;
4410 int msgmax;
4411 int msgmnb;
4412 int msgmni;
4413 int msgssz;
4414 int msgtql;
4415 unsigned short int msgseg;
/* Copy a host msginfo field-by-field into guest memory at target_addr. */
4418 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4419 struct msginfo *host_msginfo)
4421 struct target_msginfo *target_msginfo;
4422 if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4423 return -TARGET_EFAULT;
4424 __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4425 __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4426 __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4427 __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4428 __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4429 __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4430 __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4431 __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4432 unlock_user_struct(target_msginfo, target_addr, 1);
4433 return 0;
4436 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4438 struct msqid_ds dsarg;
4439 struct msginfo msginfo;
4440 abi_long ret = -TARGET_EINVAL;
4442 cmd &= 0xff;
4444 switch (cmd) {
4445 case IPC_STAT:
4446 case IPC_SET:
4447 case MSG_STAT:
4448 if (target_to_host_msqid_ds(&dsarg,ptr))
4449 return -TARGET_EFAULT;
4450 ret = get_errno(msgctl(msgid, cmd, &dsarg));
4451 if (host_to_target_msqid_ds(ptr,&dsarg))
4452 return -TARGET_EFAULT;
4453 break;
4454 case IPC_RMID:
4455 ret = get_errno(msgctl(msgid, cmd, NULL));
4456 break;
4457 case IPC_INFO:
4458 case MSG_INFO:
4459 ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4460 if (host_to_target_msginfo(ptr, &msginfo))
4461 return -TARGET_EFAULT;
4462 break;
4465 return ret;
/* Guest view of struct msgbuf: mtype header followed by the message
 * payload (declared [1] in pre-C99 flexible-array style). */
4468 struct target_msgbuf {
4469 abi_long mtype;
4470 char mtext[1];
4473 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4474 ssize_t msgsz, int msgflg)
4476 struct target_msgbuf *target_mb;
4477 struct msgbuf *host_mb;
4478 abi_long ret = 0;
4480 if (msgsz < 0) {
4481 return -TARGET_EINVAL;
4484 if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4485 return -TARGET_EFAULT;
4486 host_mb = g_try_malloc(msgsz + sizeof(long));
4487 if (!host_mb) {
4488 unlock_user_struct(target_mb, msgp, 0);
4489 return -TARGET_ENOMEM;
4491 host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4492 memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4493 ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4494 g_free(host_mb);
4495 unlock_user_struct(target_mb, msgp, 0);
4497 return ret;
4500 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4501 ssize_t msgsz, abi_long msgtyp,
4502 int msgflg)
4504 struct target_msgbuf *target_mb;
4505 char *target_mtext;
4506 struct msgbuf *host_mb;
4507 abi_long ret = 0;
4509 if (msgsz < 0) {
4510 return -TARGET_EINVAL;
4513 if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4514 return -TARGET_EFAULT;
4516 host_mb = g_try_malloc(msgsz + sizeof(long));
4517 if (!host_mb) {
4518 ret = -TARGET_ENOMEM;
4519 goto end;
4521 ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4523 if (ret > 0) {
4524 abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4525 target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4526 if (!target_mtext) {
4527 ret = -TARGET_EFAULT;
4528 goto end;
4530 memcpy(target_mb->mtext, host_mb->mtext, ret);
4531 unlock_user(target_mtext, target_mtext_addr, ret);
4534 target_mb->mtype = tswapal(host_mb->mtype);
4536 end:
4537 if (target_mb)
4538 unlock_user_struct(target_mb, msgp, 1);
4539 g_free(host_mb);
4540 return ret;
4543 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4544 abi_ulong target_addr)
4546 struct target_shmid_ds *target_sd;
4548 if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4549 return -TARGET_EFAULT;
4550 if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4551 return -TARGET_EFAULT;
4552 __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4553 __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4554 __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4555 __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4556 __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4557 __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4558 __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4559 unlock_user_struct(target_sd, target_addr, 0);
4560 return 0;
4563 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4564 struct shmid_ds *host_sd)
4566 struct target_shmid_ds *target_sd;
4568 if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4569 return -TARGET_EFAULT;
4570 if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4571 return -TARGET_EFAULT;
4572 __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4573 __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4574 __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4575 __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4576 __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4577 __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4578 __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4579 unlock_user_struct(target_sd, target_addr, 1);
4580 return 0;
/* Guest-ABI layout of struct shminfo (shmctl IPC_INFO). */
4583 struct target_shminfo {
4584 abi_ulong shmmax;
4585 abi_ulong shmmin;
4586 abi_ulong shmmni;
4587 abi_ulong shmseg;
4588 abi_ulong shmall;
4591 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4592 struct shminfo *host_shminfo)
4594 struct target_shminfo *target_shminfo;
4595 if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4596 return -TARGET_EFAULT;
4597 __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4598 __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4599 __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4600 __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4601 __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4602 unlock_user_struct(target_shminfo, target_addr, 1);
4603 return 0;
/* Guest-ABI layout of struct shm_info (shmctl SHM_INFO). */
4606 struct target_shm_info {
4607 int used_ids;
4608 abi_ulong shm_tot;
4609 abi_ulong shm_rss;
4610 abi_ulong shm_swp;
4611 abi_ulong swap_attempts;
4612 abi_ulong swap_successes;
4615 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4616 struct shm_info *host_shm_info)
4618 struct target_shm_info *target_shm_info;
4619 if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4620 return -TARGET_EFAULT;
4621 __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4622 __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4623 __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4624 __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4625 __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4626 __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4627 unlock_user_struct(target_shm_info, target_addr, 1);
4628 return 0;
4631 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4633 struct shmid_ds dsarg;
4634 struct shminfo shminfo;
4635 struct shm_info shm_info;
4636 abi_long ret = -TARGET_EINVAL;
4638 cmd &= 0xff;
4640 switch(cmd) {
4641 case IPC_STAT:
4642 case IPC_SET:
4643 case SHM_STAT:
4644 if (target_to_host_shmid_ds(&dsarg, buf))
4645 return -TARGET_EFAULT;
4646 ret = get_errno(shmctl(shmid, cmd, &dsarg));
4647 if (host_to_target_shmid_ds(buf, &dsarg))
4648 return -TARGET_EFAULT;
4649 break;
4650 case IPC_INFO:
4651 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4652 if (host_to_target_shminfo(buf, &shminfo))
4653 return -TARGET_EFAULT;
4654 break;
4655 case SHM_INFO:
4656 ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4657 if (host_to_target_shm_info(buf, &shm_info))
4658 return -TARGET_EFAULT;
4659 break;
4660 case IPC_RMID:
4661 case SHM_LOCK:
4662 case SHM_UNLOCK:
4663 ret = get_errno(shmctl(shmid, cmd, NULL));
4664 break;
4667 return ret;
4670 #ifndef TARGET_FORCE_SHMLBA
4671 /* For most architectures, SHMLBA is the same as the page size;
4672 * some architectures have larger values, in which case they should
4673 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4674 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4675 * and defining its own value for SHMLBA.
4677 * The kernel also permits SHMLBA to be set by the architecture to a
4678 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4679 * this means that addresses are rounded to the large size if
4680 * SHM_RND is set but addresses not aligned to that size are not rejected
4681 * as long as they are at least page-aligned. Since the only architecture
4682 * which uses this is ia64 this code doesn't provide for that oddity.
/* Default: the guest's SHM attach alignment is the guest page size. */
4684 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4686 return TARGET_PAGE_SIZE;
4688 #endif
/*
 * shmat(2) emulation: attach a SysV shared memory segment into the guest
 * address space, update the guest page flags, and record the region in
 * shm_regions[] so do_shmdt() can later undo the mapping.
 * Returns the guest address of the attachment or a target errno.
 */
4690 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4691 int shmid, abi_ulong shmaddr, int shmflg)
4693 abi_long raddr;
4694 void *host_raddr;
4695 struct shmid_ds shm_info;
4696 int i,ret;
4697 abi_ulong shmlba;
4699 /* find out the length of the shared memory segment */
4700 ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4701 if (is_error(ret)) {
4702 /* can't get length, bail out */
4703 return ret;
/* Enforce the guest's SHMLBA alignment: round down with SHM_RND, else fail. */
4706 shmlba = target_shmlba(cpu_env);
4708 if (shmaddr & (shmlba - 1)) {
4709 if (shmflg & SHM_RND) {
4710 shmaddr &= ~(shmlba - 1);
4711 } else {
4712 return -TARGET_EINVAL;
/* We are about to modify the guest address space: take the mmap lock. */
4716 mmap_lock();
4718 if (shmaddr)
4719 host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
4720 else {
4721 abi_ulong mmap_start;
/* No address requested: pick a free guest VMA range ourselves. */
4723 mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
4725 if (mmap_start == -1) {
4726 errno = ENOMEM;
4727 host_raddr = (void *)-1;
4728 } else
/* SHM_REMAP is safe here: mmap_find_vma reserved the range for us. */
4729 host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
4732 if (host_raddr == (void *)-1) {
4733 mmap_unlock();
4734 return get_errno((long)host_raddr);
4736 raddr=h2g((unsigned long)host_raddr);
/* Mark the attached pages valid/readable (and writable unless RDONLY). */
4738 page_set_flags(raddr, raddr + shm_info.shm_segsz,
4739 PAGE_VALID | PAGE_READ |
4740 ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
/* Remember the region so do_shmdt() can clear the page flags again.
 * NOTE(review): if all N_SHM_REGIONS slots are in use the attach is
 * silently not recorded — confirm this is acceptable. */
4742 for (i = 0; i < N_SHM_REGIONS; i++) {
4743 if (!shm_regions[i].in_use) {
4744 shm_regions[i].in_use = true;
4745 shm_regions[i].start = raddr;
4746 shm_regions[i].size = shm_info.shm_segsz;
4747 break;
4751 mmap_unlock();
4752 return raddr;
4756 static inline abi_long do_shmdt(abi_ulong shmaddr)
4758 int i;
4760 for (i = 0; i < N_SHM_REGIONS; ++i) {
4761 if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4762 shm_regions[i].in_use = false;
4763 page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4764 break;
4768 return get_errno(shmdt(g2h(shmaddr)));
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings. */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version = call >> 16;
    abi_long ret = 0;

    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semop(first, ptr, second);
        break;
    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;
    case IPCOP_semctl:
    {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }
    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;
    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;
    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;
    case IPCOP_msgrcv:
        switch (version) {
        case 0:
        {
            /* Old ABI: ptr points at a kludge struct bundling the
             * message pointer and the requested type. */
            struct target_ipc_kludge {
                abi_long msgp;
                abi_long msgtyp;
            } *tmp;

            if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                ret = -TARGET_EFAULT;
                break;
            }
            ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                            tswapal(tmp->msgtyp), third);
            unlock_user_struct(tmp, ptr, 0);
            break;
        }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;
    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr)) {
                return get_errno(raddr);
            }
            /* The attach address is returned through *third. */
            if (put_user_ual(raddr, third)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;
    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;
    /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
4878 /* kernel structure types definitions */
/* First expansion of syscall_types.h: generate an enum tag
 * STRUCT_<name> for every structure description. */
4880 #define STRUCT(name, ...) STRUCT_ ## name,
4881 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4882 enum {
4883 #include "syscall_types.h"
4884 STRUCT_MAX
4886 #undef STRUCT
4887 #undef STRUCT_SPECIAL
/* Second expansion: emit a TYPE_NULL-terminated argtype array
 * (struct_<name>_def) describing each structure's field layout. */
4889 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
4890 #define STRUCT_SPECIAL(name)
4891 #include "syscall_types.h"
4892 #undef STRUCT
4893 #undef STRUCT_SPECIAL
4895 typedef struct IOCTLEntry IOCTLEntry;
/* Handler signature for ioctls that need custom marshalling. */
4897 typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
4898 int fd, int cmd, abi_long arg);
/* One row of the ioctl translation table (populated from ioctls.h). */
4900 struct IOCTLEntry {
4901 int target_cmd;
4902 unsigned int host_cmd;
4903 const char *name;
4904 int access;
4905 do_ioctl_fn *do_ioctl;
4906 const argtype arg_type[5];
/* Direction of the ioctl's pointer argument, relative to the guest. */
4909 #define IOC_R 0x0001
4910 #define IOC_W 0x0002
4911 #define IOC_RW (IOC_R | IOC_W)
/* Upper bound for structures marshalled through the on-stack buffer. */
4913 #define MAX_STRUCT_SIZE 4096
4915 #ifdef CONFIG_FIEMAP
4916 /* So fiemap access checks don't overflow on 32 bit systems.
4917 * This is very slightly smaller than the limit imposed by
4918 * the underlying kernel.
4920 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
4921 / sizeof(struct fiemap_extent))
4923 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4924 int fd, int cmd, abi_long arg)
4926 /* The parameter for this ioctl is a struct fiemap followed
4927 * by an array of struct fiemap_extent whose size is set
4928 * in fiemap->fm_extent_count. The array is filled in by the
4929 * ioctl.
4931 int target_size_in, target_size_out;
4932 struct fiemap *fm;
4933 const argtype *arg_type = ie->arg_type;
4934 const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4935 void *argptr, *p;
4936 abi_long ret;
4937 int i, extent_size = thunk_type_size(extent_arg_type, 0);
4938 uint32_t outbufsz;
4939 int free_fm = 0;
4941 assert(arg_type[0] == TYPE_PTR);
4942 assert(ie->access == IOC_RW);
4943 arg_type++;
/* Fetch and convert the fixed fiemap header from the guest. */
4944 target_size_in = thunk_type_size(arg_type, 0);
4945 argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4946 if (!argptr) {
4947 return -TARGET_EFAULT;
4949 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4950 unlock_user(argptr, arg, 0);
4951 fm = (struct fiemap *)buf_temp;
/* Bound fm_extent_count so the outbufsz computation below can't overflow. */
4952 if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4953 return -TARGET_EINVAL;
4956 outbufsz = sizeof (*fm) +
4957 (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4959 if (outbufsz > MAX_STRUCT_SIZE) {
4960 /* We can't fit all the extents into the fixed size buffer.
4961 * Allocate one that is large enough and use it instead.
4963 fm = g_try_malloc(outbufsz);
4964 if (!fm) {
4965 return -TARGET_ENOMEM;
4967 memcpy(fm, buf_temp, sizeof(struct fiemap));
4968 free_fm = 1;
4970 ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4971 if (!is_error(ret)) {
4972 target_size_out = target_size_in;
4973 /* An extent_count of 0 means we were only counting the extents
4974 * so there are no structs to copy
4976 if (fm->fm_extent_count != 0) {
4977 target_size_out += fm->fm_mapped_extents * extent_size;
4979 argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4980 if (!argptr) {
4981 ret = -TARGET_EFAULT;
4982 } else {
4983 /* Convert the struct fiemap */
4984 thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4985 if (fm->fm_extent_count != 0) {
4986 p = argptr + target_size_in;
4987 /* ...and then all the struct fiemap_extents */
4988 for (i = 0; i < fm->fm_mapped_extents; i++) {
4989 thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4990 THUNK_TARGET);
4991 p += extent_size;
4994 unlock_user(argptr, arg, target_size_out);
4997 if (free_fm) {
4998 g_free(fm);
5000 return ret;
5002 #endif
5004 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
5005 int fd, int cmd, abi_long arg)
5007 const argtype *arg_type = ie->arg_type;
5008 int target_size;
5009 void *argptr;
5010 int ret;
5011 struct ifconf *host_ifconf;
5012 uint32_t outbufsz;
5013 const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
5014 int target_ifreq_size;
5015 int nb_ifreq;
5016 int free_buf = 0;
5017 int i;
5018 int target_ifc_len;
5019 abi_long target_ifc_buf;
5020 int host_ifc_len;
5021 char *host_ifc_buf;
5023 assert(arg_type[0] == TYPE_PTR);
5024 assert(ie->access == IOC_RW);
5026 arg_type++;
5027 target_size = thunk_type_size(arg_type, 0);
5029 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5030 if (!argptr)
5031 return -TARGET_EFAULT;
5032 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5033 unlock_user(argptr, arg, 0);
5035 host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
5036 target_ifc_len = host_ifconf->ifc_len;
5037 target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
5039 target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
5040 nb_ifreq = target_ifc_len / target_ifreq_size;
5041 host_ifc_len = nb_ifreq * sizeof(struct ifreq);
5043 outbufsz = sizeof(*host_ifconf) + host_ifc_len;
5044 if (outbufsz > MAX_STRUCT_SIZE) {
5045 /* We can't fit all the extents into the fixed size buffer.
5046 * Allocate one that is large enough and use it instead.
5048 host_ifconf = malloc(outbufsz);
5049 if (!host_ifconf) {
5050 return -TARGET_ENOMEM;
5052 memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
5053 free_buf = 1;
5055 host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
5057 host_ifconf->ifc_len = host_ifc_len;
5058 host_ifconf->ifc_buf = host_ifc_buf;
5060 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
5061 if (!is_error(ret)) {
5062 /* convert host ifc_len to target ifc_len */
5064 nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
5065 target_ifc_len = nb_ifreq * target_ifreq_size;
5066 host_ifconf->ifc_len = target_ifc_len;
5068 /* restore target ifc_buf */
5070 host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
5072 /* copy struct ifconf to target user */
5074 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5075 if (!argptr)
5076 return -TARGET_EFAULT;
5077 thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
5078 unlock_user(argptr, arg, target_size);
5080 /* copy ifreq[] to target user */
5082 argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
5083 for (i = 0; i < nb_ifreq ; i++) {
5084 thunk_convert(argptr + i * target_ifreq_size,
5085 host_ifc_buf + i * sizeof(struct ifreq),
5086 ifreq_arg_type, THUNK_TARGET);
5088 unlock_user(argptr, target_ifc_buf, target_ifc_len);
5091 if (free_buf) {
5092 free(host_ifconf);
5095 return ret;
/*
 * Device-mapper DM_* ioctls: the argument is a struct dm_ioctl header
 * followed by a command-specific variable-length payload at data_start.
 * Both directions (guest->host input payloads, host->guest results)
 * need per-command conversion, done in the two switches below.
 */
5098 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5099 int cmd, abi_long arg)
5101 void *argptr;
5102 struct dm_ioctl *host_dm;
5103 abi_long guest_data;
5104 uint32_t guest_data_size;
5105 int target_size;
5106 const argtype *arg_type = ie->arg_type;
5107 abi_long ret;
5108 void *big_buf = NULL;
5109 char *host_data;
5111 arg_type++;
5112 target_size = thunk_type_size(arg_type, 0);
5113 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5114 if (!argptr) {
5115 ret = -TARGET_EFAULT;
5116 goto out;
5118 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5119 unlock_user(argptr, arg, 0);
5121 /* buf_temp is too small, so fetch things into a bigger buffer */
5122 big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5123 memcpy(big_buf, buf_temp, target_size);
5124 buf_temp = big_buf;
5125 host_dm = big_buf;
5127 guest_data = arg + host_dm->data_start;
5128 if ((guest_data - arg) < 0) {
5129 ret = -TARGET_EINVAL;
5130 goto out;
5132 guest_data_size = host_dm->data_size - host_dm->data_start;
5133 host_data = (char*)host_dm + host_dm->data_start;
5135 argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5136 if (!argptr) {
5137 ret = -TARGET_EFAULT;
5138 goto out;
/* Convert the command-specific input payload from guest to host layout. */
5141 switch (ie->host_cmd) {
5142 case DM_REMOVE_ALL:
5143 case DM_LIST_DEVICES:
5144 case DM_DEV_CREATE:
5145 case DM_DEV_REMOVE:
5146 case DM_DEV_SUSPEND:
5147 case DM_DEV_STATUS:
5148 case DM_DEV_WAIT:
5149 case DM_TABLE_STATUS:
5150 case DM_TABLE_CLEAR:
5151 case DM_TABLE_DEPS:
5152 case DM_LIST_VERSIONS:
5153 /* no input data */
5154 break;
5155 case DM_DEV_RENAME:
5156 case DM_DEV_SET_GEOMETRY:
5157 /* data contains only strings */
5158 memcpy(host_data, argptr, guest_data_size);
5159 break;
5160 case DM_TARGET_MSG:
/* Payload is a 64-bit sector number followed by the message string. */
5161 memcpy(host_data, argptr, guest_data_size);
5162 *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5163 break;
5164 case DM_TABLE_LOAD:
5166 void *gspec = argptr;
5167 void *cur_data = host_data;
5168 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5169 int spec_size = thunk_type_size(arg_type, 0);
5170 int i;
/* Convert each dm_target_spec plus its trailing parameter string,
 * re-linking the 'next' offsets for the host-side layout. */
5172 for (i = 0; i < host_dm->target_count; i++) {
5173 struct dm_target_spec *spec = cur_data;
5174 uint32_t next;
5175 int slen;
5177 thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5178 slen = strlen((char*)gspec + spec_size) + 1;
5179 next = spec->next;
5180 spec->next = sizeof(*spec) + slen;
5181 strcpy((char*)&spec[1], gspec + spec_size);
5182 gspec += next;
5183 cur_data += spec->next;
5185 break;
5187 default:
5188 ret = -TARGET_EINVAL;
5189 unlock_user(argptr, guest_data, 0);
5190 goto out;
5192 unlock_user(argptr, guest_data, 0);
5194 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5195 if (!is_error(ret)) {
5196 guest_data = arg + host_dm->data_start;
5197 guest_data_size = host_dm->data_size - host_dm->data_start;
/* NOTE(review): this lock_user result is not NULL-checked before the
 * per-command copy-out below dereferences it — verify/fix upstream. */
5198 argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
/* Convert the command-specific result payload from host to guest layout. */
5199 switch (ie->host_cmd) {
5200 case DM_REMOVE_ALL:
5201 case DM_DEV_CREATE:
5202 case DM_DEV_REMOVE:
5203 case DM_DEV_RENAME:
5204 case DM_DEV_SUSPEND:
5205 case DM_DEV_STATUS:
5206 case DM_TABLE_LOAD:
5207 case DM_TABLE_CLEAR:
5208 case DM_TARGET_MSG:
5209 case DM_DEV_SET_GEOMETRY:
5210 /* no return data */
5211 break;
5212 case DM_LIST_DEVICES:
5214 struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5215 uint32_t remaining_data = guest_data_size;
5216 void *cur_data = argptr;
5217 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5218 int nl_size = 12; /* can't use thunk_size due to alignment */
5220 while (1) {
5221 uint32_t next = nl->next;
5222 if (next) {
5223 nl->next = nl_size + (strlen(nl->name) + 1);
5225 if (remaining_data < nl->next) {
5226 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5227 break;
5229 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5230 strcpy(cur_data + nl_size, nl->name);
5231 cur_data += nl->next;
5232 remaining_data -= nl->next;
5233 if (!next) {
5234 break;
5236 nl = (void*)nl + next;
5238 break;
5240 case DM_DEV_WAIT:
5241 case DM_TABLE_STATUS:
5243 struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5244 void *cur_data = argptr;
5245 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5246 int spec_size = thunk_type_size(arg_type, 0);
5247 int i;
5249 for (i = 0; i < host_dm->target_count; i++) {
5250 uint32_t next = spec->next;
5251 int slen = strlen((char*)&spec[1]) + 1;
5252 spec->next = (cur_data - argptr) + spec_size + slen;
5253 if (guest_data_size < spec->next) {
5254 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5255 break;
5257 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5258 strcpy(cur_data + spec_size, (char*)&spec[1]);
5259 cur_data = argptr + spec->next;
5260 spec = (void*)host_dm + host_dm->data_start + next;
5262 break;
5264 case DM_TABLE_DEPS:
5266 void *hdata = (void*)host_dm + host_dm->data_start;
5267 int count = *(uint32_t*)hdata;
5268 uint64_t *hdev = hdata + 8;
5269 uint64_t *gdev = argptr + 8;
5270 int i;
/* Payload: 32-bit device count (at offset 0, devices start at +8),
 * then an array of 64-bit dev_t values. */
5272 *(uint32_t*)argptr = tswap32(count);
5273 for (i = 0; i < count; i++) {
5274 *gdev = tswap64(*hdev);
5275 gdev++;
5276 hdev++;
5278 break;
5280 case DM_LIST_VERSIONS:
5282 struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5283 uint32_t remaining_data = guest_data_size;
5284 void *cur_data = argptr;
5285 const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5286 int vers_size = thunk_type_size(arg_type, 0);
5288 while (1) {
5289 uint32_t next = vers->next;
5290 if (next) {
5291 vers->next = vers_size + (strlen(vers->name) + 1);
5293 if (remaining_data < vers->next) {
5294 host_dm->flags |= DM_BUFFER_FULL_FLAG;
5295 break;
5297 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5298 strcpy(cur_data + vers_size, vers->name);
5299 cur_data += vers->next;
5300 remaining_data -= vers->next;
5301 if (!next) {
5302 break;
5304 vers = (void*)vers + next;
5306 break;
5308 default:
5309 unlock_user(argptr, guest_data, 0);
5310 ret = -TARGET_EINVAL;
5311 goto out;
5313 unlock_user(argptr, guest_data, guest_data_size);
/* Finally copy the (possibly updated) dm_ioctl header back to the guest. */
5315 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5316 if (!argptr) {
5317 ret = -TARGET_EFAULT;
5318 goto out;
5320 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5321 unlock_user(argptr, arg, target_size);
5323 out:
5324 g_free(big_buf);
5325 return ret;
5328 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5329 int cmd, abi_long arg)
5331 void *argptr;
5332 int target_size;
5333 const argtype *arg_type = ie->arg_type;
5334 const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5335 abi_long ret;
5337 struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5338 struct blkpg_partition host_part;
5340 /* Read and convert blkpg */
5341 arg_type++;
5342 target_size = thunk_type_size(arg_type, 0);
5343 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5344 if (!argptr) {
5345 ret = -TARGET_EFAULT;
5346 goto out;
5348 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5349 unlock_user(argptr, arg, 0);
5351 switch (host_blkpg->op) {
5352 case BLKPG_ADD_PARTITION:
5353 case BLKPG_DEL_PARTITION:
5354 /* payload is struct blkpg_partition */
5355 break;
5356 default:
5357 /* Unknown opcode */
5358 ret = -TARGET_EINVAL;
5359 goto out;
5362 /* Read and convert blkpg->data */
5363 arg = (abi_long)(uintptr_t)host_blkpg->data;
5364 target_size = thunk_type_size(part_arg_type, 0);
5365 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5366 if (!argptr) {
5367 ret = -TARGET_EFAULT;
5368 goto out;
5370 thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5371 unlock_user(argptr, arg, 0);
5373 /* Swizzle the data pointer to our local copy and call! */
5374 host_blkpg->data = &host_part;
5375 ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5377 out:
5378 return ret;
5381 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5382 int fd, int cmd, abi_long arg)
5384 const argtype *arg_type = ie->arg_type;
5385 const StructEntry *se;
5386 const argtype *field_types;
5387 const int *dst_offsets, *src_offsets;
5388 int target_size;
5389 void *argptr;
5390 abi_ulong *target_rt_dev_ptr;
5391 unsigned long *host_rt_dev_ptr;
5392 abi_long ret;
5393 int i;
5395 assert(ie->access == IOC_W);
5396 assert(*arg_type == TYPE_PTR);
5397 arg_type++;
5398 assert(*arg_type == TYPE_STRUCT);
5399 target_size = thunk_type_size(arg_type, 0);
5400 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5401 if (!argptr) {
5402 return -TARGET_EFAULT;
5404 arg_type++;
5405 assert(*arg_type == (int)STRUCT_rtentry);
5406 se = struct_entries + *arg_type++;
5407 assert(se->convert[0] == NULL);
5408 /* convert struct here to be able to catch rt_dev string */
5409 field_types = se->field_types;
5410 dst_offsets = se->field_offsets[THUNK_HOST];
5411 src_offsets = se->field_offsets[THUNK_TARGET];
5412 for (i = 0; i < se->nb_fields; i++) {
5413 if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5414 assert(*field_types == TYPE_PTRVOID);
5415 target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5416 host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5417 if (*target_rt_dev_ptr != 0) {
5418 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5419 tswapal(*target_rt_dev_ptr));
5420 if (!*host_rt_dev_ptr) {
5421 unlock_user(argptr, arg, 0);
5422 return -TARGET_EFAULT;
5424 } else {
5425 *host_rt_dev_ptr = 0;
5427 field_types++;
5428 continue;
5430 field_types = thunk_convert(buf_temp + dst_offsets[i],
5431 argptr + src_offsets[i],
5432 field_types, THUNK_HOST);
5434 unlock_user(argptr, arg, 0);
5436 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5437 if (*host_rt_dev_ptr != 0) {
5438 unlock_user((void *)*host_rt_dev_ptr,
5439 *target_rt_dev_ptr, 0);
5441 return ret;
5444 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5445 int fd, int cmd, abi_long arg)
5447 int sig = target_to_host_signal(arg);
5448 return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
/* ioctl translation table, generated from ioctls.h.  IOCTL maps a guest
 * command straight to the host one, IOCTL_SPECIAL routes it through a
 * custom handler, and IOCTL_IGNORE records a known-but-unimplemented
 * command (host_cmd == 0).  A zero target_cmd terminates the table. */
5451 static IOCTLEntry ioctl_entries[] = {
5452 #define IOCTL(cmd, access, ...) \
5453 { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
5454 #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
5455 { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
5456 #define IOCTL_IGNORE(cmd) \
5457 { TARGET_ ## cmd, 0, #cmd },
5458 #include "ioctls.h"
5459 { 0, 0, },
5462 /* ??? Implement proper locking for ioctls. */
5463 /* do_ioctl() Must return target values and target errnos. */
5464 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5466 const IOCTLEntry *ie;
5467 const argtype *arg_type;
5468 abi_long ret;
5469 uint8_t buf_temp[MAX_STRUCT_SIZE];
5470 int target_size;
5471 void *argptr;
5473 ie = ioctl_entries;
5474 for(;;) {
5475 if (ie->target_cmd == 0) {
5476 gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5477 return -TARGET_ENOSYS;
5479 if (ie->target_cmd == cmd)
5480 break;
5481 ie++;
5483 arg_type = ie->arg_type;
5484 #if defined(DEBUG)
5485 gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
5486 #endif
5487 if (ie->do_ioctl) {
5488 return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5489 } else if (!ie->host_cmd) {
5490 /* Some architectures define BSD ioctls in their headers
5491 that are not implemented in Linux. */
5492 return -TARGET_ENOSYS;
5495 switch(arg_type[0]) {
5496 case TYPE_NULL:
5497 /* no argument */
5498 ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5499 break;
5500 case TYPE_PTRVOID:
5501 case TYPE_INT:
5502 ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5503 break;
5504 case TYPE_PTR:
5505 arg_type++;
5506 target_size = thunk_type_size(arg_type, 0);
5507 switch(ie->access) {
5508 case IOC_R:
5509 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5510 if (!is_error(ret)) {
5511 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5512 if (!argptr)
5513 return -TARGET_EFAULT;
5514 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5515 unlock_user(argptr, arg, target_size);
5517 break;
5518 case IOC_W:
5519 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5520 if (!argptr)
5521 return -TARGET_EFAULT;
5522 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5523 unlock_user(argptr, arg, 0);
5524 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5525 break;
5526 default:
5527 case IOC_RW:
5528 argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5529 if (!argptr)
5530 return -TARGET_EFAULT;
5531 thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5532 unlock_user(argptr, arg, 0);
5533 ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5534 if (!is_error(ret)) {
5535 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5536 if (!argptr)
5537 return -TARGET_EFAULT;
5538 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5539 unlock_user(argptr, arg, target_size);
5541 break;
5543 break;
5544 default:
5545 gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5546 (long)cmd, arg_type[0]);
5547 ret = -TARGET_ENOSYS;
5548 break;
5550 return ret;
/* termios c_iflag translation: guest bit/mask pairs on the left, host
 * equivalents on the right.  The all-zero entry terminates the table. */
5553 static const bitmask_transtbl iflag_tbl[] = {
5554 { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5555 { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5556 { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5557 { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5558 { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5559 { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5560 { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5561 { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5562 { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5563 { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5564 { TARGET_IXON, TARGET_IXON, IXON, IXON },
5565 { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5566 { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5567 { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5568 { 0, 0, 0, 0 }
/* termios output-mode (c_oflag) bit translation, guest <-> host.
 * Simple flags map 1:1; the multi-bit delay fields (NLDLY, CRDLY,
 * TABDLY, BSDLY, VTDLY, FFDLY) need one row per possible value so
 * the whole field is masked and re-encoded. */
static const bitmask_transtbl oflag_tbl[] = {
	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
	{ 0, 0, 0, 0 }
};
/* termios control-mode (c_cflag) bit translation, guest <-> host.
 * The CBAUD field (baud rate) and CSIZE field (character size) are
 * multi-bit values, so each legal value gets its own row; the
 * remaining flags map 1:1. */
static const bitmask_transtbl cflag_tbl[] = {
	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
	{ 0, 0, 0, 0 }
};
/* termios local-mode (c_lflag) bit translation, guest <-> host.
 * All flags are single-bit 1:1 mappings. */
static const bitmask_transtbl lflag_tbl[] = {
	{ TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
	{ TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
	{ TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
	{ TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
	{ TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
	{ TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
	{ TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
	{ TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
	{ TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
	{ TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
	{ TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
	{ TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
	{ TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
	{ TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
	{ TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
	{ 0, 0, 0, 0 }
};
/* Convert a guest 'struct target_termios' (*src) into the host
 * layout (*dst).  The four flag words are byte-swapped from guest
 * order (tswap32) and re-encoded via the translation tables above;
 * the control characters are copied slot by slot because the
 * TARGET_V* indices need not match the host V* indices.
 * Host c_cc slots with no guest counterpart are left zeroed. */
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    /* c_line is a single byte, so no byte swap is needed */
    host->c_line = target->c_line;

    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
/* Convert a host 'struct host_termios' (*src) into the guest layout
 * (*dst): the exact inverse of target_to_host_termios().  Flag words
 * are re-encoded via the translation tables and then byte-swapped to
 * guest order; control characters are copied slot by slot.  Guest
 * c_cc slots with no host counterpart are left zeroed. */
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    /* c_line is a single byte, so no byte swap is needed */
    target->c_line = host->c_line;

    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
/* Thunk descriptor for termios ioctl payloads.  Index 0 of each pair
 * is the host->target direction, index 1 target->host, matching the
 * THUNK_TARGET/THUNK_HOST convention used by the thunk layer. */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
};
5729 static bitmask_transtbl mmap_flags_tbl[] = {
5730 { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5731 { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5732 { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5733 { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
5734 { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
5735 { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
5736 { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
5737 { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5738 { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
5739 MAP_NORESERVE },
5740 { 0, 0, 0, 0 }
5743 #if defined(TARGET_I386)
5745 /* NOTE: there is really one LDT for all the threads */
5746 static uint8_t *ldt_table;
/* modify_ldt(func=0): copy up to 'bytecount' bytes of the emulated
 * LDT into guest memory at 'ptr'.  Returns the number of bytes
 * copied (0 if no LDT has been created yet) or -TARGET_EFAULT if
 * the guest buffer is not writable. */
static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
{
    int size;
    void *p;

    if (!ldt_table)
        return 0;
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
    if (!p)
        return -TARGET_EFAULT;
    /* ??? Should this by byteswapped? */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
    return size;
}
/* XXX: add locking support */
/* modify_ldt(func=1 or 0x11): install one LDT entry from the guest
 * 'struct user_desc' at 'ptr'.  'oldmode' selects the legacy flags
 * interpretation (no 'useable' bit, no TLS entries).  The descriptor
 * words are packed exactly as the Linux kernel does it; the backing
 * LDT page is allocated lazily on first use and entries are stored
 * in guest byte order.  Returns 0, -TARGET_EINVAL, -TARGET_EFAULT
 * or -TARGET_ENOMEM. */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the flags word; bit layout matches the kernel's
     * struct user_desc. */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    /* contents == 3 (conforming code) only valid in new mode and only
     * for not-present entries. */
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the two 32-bit words of an x86 segment descriptor. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
/* specific and weird i386 syscalls */
/* modify_ldt(2) dispatcher: func 0 reads the LDT, func 1 writes an
 * entry using the legacy semantics, func 0x11 writes using the new
 * semantics.  Anything else is unimplemented. */
static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
                              unsigned long bytecount)
{
    abi_long ret;

    switch (func) {
    case 0:
        ret = read_ldt(ptr, bytecount);
        break;
    case 1:
        ret = write_ldt(env, ptr, bytecount, 1);
        break;
    case 0x11:
        ret = write_ldt(env, ptr, bytecount, 0);
        break;
    default:
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#if defined(TARGET_I386) && defined(TARGET_ABI32)
/* set_thread_area(2) for 32-bit x86 guests: install a TLS descriptor
 * in the emulated GDT.  If the guest passes entry_number == -1, the
 * first free TLS slot is picked and written back to the guest struct.
 * Descriptor packing mirrors write_ldt() (and the Linux kernel). */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    /* -1 asks the kernel to pick a free TLS slot. */
    if (ldt_info.entry_number == -1) {
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    /* Unpack the flags word; same layout as write_ldt(). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0             &&
             read_exec_only == 1       &&
             seg_32bit == 0            &&
             limit_in_pages == 0       &&
             seg_not_present == 1      &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the two 32-bit descriptor words. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
/* get_thread_area(2) for 32-bit x86 guests: read back a TLS
 * descriptor from the emulated GDT and unpack it into the guest
 * 'struct user_desc' at 'ptr'.  Exact inverse of the packing done
 * by do_set_thread_area(). */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Unpack the descriptor fields (note the present and read/exec
     * bits are stored inverted relative to the user_desc flags). */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
6014 #endif /* TARGET_I386 && TARGET_ABI32 */
6016 #ifndef TARGET_ABI32
/* arch_prctl(2) for 64-bit x86 guests: get/set the FS or GS segment
 * base.  SET loads a null selector and then overrides the base; GET
 * stores the current base to guest memory at 'addr'. */
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    abi_long ret = 0;
    abi_ulong val;
    int idx;

    switch(code) {
    case TARGET_ARCH_SET_GS:
    case TARGET_ARCH_SET_FS:
        if (code == TARGET_ARCH_SET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        cpu_x86_load_seg(env, idx, 0);
        env->segs[idx].base = addr;
        break;
    case TARGET_ARCH_GET_GS:
    case TARGET_ARCH_GET_FS:
        if (code == TARGET_ARCH_GET_GS)
            idx = R_GS;
        else
            idx = R_FS;
        val = env->segs[idx].base;
        if (put_user(val, addr, abi_ulong))
            ret = -TARGET_EFAULT;
        break;
    default:
        ret = -TARGET_EINVAL;
        break;
    }
    return ret;
}
6049 #endif
6051 #endif /* defined(TARGET_I386) */
6053 #define NEW_STACK_SIZE 0x40000
/* Serializes thread creation in do_fork(); the child also briefly
 * takes it so it cannot run before the parent finishes TLS setup. */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;

/* Per-clone handshake data passed from do_fork() to clone_func().
 * Lives on the parent's stack; valid only until the parent is
 * signalled via 'cond' that the child has read everything. */
typedef struct {
    CPUArchState *env;          /* child's CPU state (from cpu_copy) */
    pthread_mutex_t mutex;      /* protects the cond handshake */
    pthread_cond_t cond;        /* child signals "I'm initialized" */
    pthread_t thread;
    uint32_t tid;               /* filled in by the child (gettid) */
    abi_ulong child_tidptr;     /* CLONE_CHILD_SETTID target, or 0 */
    abi_ulong parent_tidptr;    /* CLONE_PARENT_SETTID target, or 0 */
    sigset_t sigmask;           /* signal mask to restore in the child */
} new_thread_info;
/* Entry point of a CLONE_VM child thread.  Publishes the child's tid
 * where requested, restores the signal mask that do_fork() blocked,
 * signals the parent that initialization is done, then waits for the
 * parent to release clone_lock before entering the CPU loop.
 * Never returns. */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    env = info->env;
    cpu = ENV_GET_CPU(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = gettid();
    cpu->host_tid = info->tid;
    task_settid(ts);
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finshed initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
/* Emulate clone(2)/fork(2)/vfork(2).  CLONE_VM requests become a new
 * host pthread running clone_func(); anything else becomes a host
 * fork() (vfork is downgraded to fork).  Returns the child tid/pid
 * on success, -1 or a negative target errno on failure. */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    /* Drop flags we can silently ignore. */
    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        /* Thread-style clones must carry the full canonical flag set
         * and nothing we cannot implement. */
        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);
        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs(new_env, newsp);
        new_cpu = ENV_GET_CPU(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);
        }

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);

        /* If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
         */
        if (!parallel_cpus) {
            parallel_cpus = true;
            tb_flush(cpu);
        }

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            rcu_after_fork();
            cpu_clone_regs(env, newsp);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            fork_end(0);
        }
    }
    return ret;
}
/* warning : doesn't handle linux specific flags... */
/* Map a guest fcntl(2) command constant onto the host constant.
 * Guest F_GETLK/F_SETLK/F_SETLKW are routed to the host *64 variants
 * because we always convert through struct flock64 internally.
 * Returns -TARGET_EINVAL for commands we do not translate. */
static int target_to_host_fcntl_cmd(int cmd)
{
    switch(cmd) {
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
        return cmd;
    case TARGET_F_GETLK:
        return F_GETLK64;
    case TARGET_F_SETLK:
        return F_SETLK64;
    case TARGET_F_SETLKW:
        return F_SETLKW64;
    case TARGET_F_GETOWN:
        return F_GETOWN;
    case TARGET_F_SETOWN:
        return F_SETOWN;
    case TARGET_F_GETSIG:
        return F_GETSIG;
    case TARGET_F_SETSIG:
        return F_SETSIG;
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
        return F_GETLK64;
    case TARGET_F_SETLK64:
        return F_SETLK64;
    case TARGET_F_SETLKW64:
        return F_SETLKW64;
#endif
    case TARGET_F_SETLEASE:
        return F_SETLEASE;
    case TARGET_F_GETLEASE:
        return F_GETLEASE;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        return F_DUPFD_CLOEXEC;
#endif
    case TARGET_F_NOTIFY:
        return F_NOTIFY;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        return F_GETOWN_EX;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        return F_SETOWN_EX;
#endif
#ifdef F_SETPIPE_SZ
    case TARGET_F_SETPIPE_SZ:
        return F_SETPIPE_SZ;
    case TARGET_F_GETPIPE_SZ:
        return F_GETPIPE_SZ;
#endif
    default:
        return -TARGET_EINVAL;
    }
    /* unreachable; keeps compilers happy about a missing return */
    return -TARGET_EINVAL;
}
/* Build a flock l_type translation row: mask -1 (all bits) with the
 * guest/host constant pair for each lock type. */
#define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
static const bitmask_transtbl flock_tbl[] = {
    TRANSTBL_CONVERT(F_RDLCK),
    TRANSTBL_CONVERT(F_WRLCK),
    TRANSTBL_CONVERT(F_UNLCK),
    TRANSTBL_CONVERT(F_EXLCK),
    TRANSTBL_CONVERT(F_SHLCK),
    { 0, 0, 0, 0 }
};
/* Read a guest 'struct flock' (the non-64-bit layout) from guest
 * memory into a host struct flock64, translating l_type through
 * flock_tbl.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_from_user_flock(struct flock64 *fl,
                                            abi_ulong target_flock_addr)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}
/* Write a host struct flock64 back to guest memory in the guest's
 * non-64-bit 'struct flock' layout (inverse of copy_from_user_flock).
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
                                          const struct flock64 *fl)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
6353 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6354 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6356 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/* ARM-EABI variant of copy_from_user_flock64(): identical logic, but
 * uses the EABI struct layout whose 64-bit fields are 8-byte aligned. */
static inline abi_long copy_from_user_eabi_flock64(struct flock64 *fl,
                                                   abi_ulong target_flock_addr)
{
    struct target_eabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}
/* ARM-EABI variant of copy_to_user_flock64(): identical logic, but
 * uses the EABI struct layout whose 64-bit fields are 8-byte aligned. */
static inline abi_long copy_to_user_eabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_eabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
6396 #endif
/* Read a guest 'struct flock64' from guest memory into a host
 * struct flock64, translating l_type through flock_tbl.
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_from_user_flock64(struct flock64 *fl,
                                              abi_ulong target_flock_addr)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(l_type, &target_fl->l_type);
    fl->l_type = target_to_host_bitmask(l_type, flock_tbl);
    __get_user(fl->l_whence, &target_fl->l_whence);
    __get_user(fl->l_start, &target_fl->l_start);
    __get_user(fl->l_len, &target_fl->l_len);
    __get_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 0);
    return 0;
}
/* Write a host struct flock64 back to guest memory as a guest
 * 'struct flock64' (inverse of copy_from_user_flock64).
 * Returns 0 or -TARGET_EFAULT. */
static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
                                            const struct flock64 *fl)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_bitmask(fl->l_type, flock_tbl);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
/* Emulate fcntl(2).  The guest command is mapped to a host command
 * via target_to_host_fcntl_cmd(); struct-carrying commands convert
 * their payload between guest and host layouts, flag-carrying
 * commands translate flag bits, and plain integer commands pass
 * 'arg' straight through.  Unknown commands are forwarded to the
 * host untranslated.  Returns the host result or a -TARGET_* errno. */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
	    return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            /* Kernel fills in the conflicting lock; copy it back. */
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETLK64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            /* Translate O_* style flag bits back to the guest's values. */
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETSIG:
    case TARGET_F_GETSIG:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
        /* Plain integer argument, no conversion needed. */
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
6546 #ifdef USE_UID16
/* Clamp a 32-bit uid into the 16-bit ABI range; values that do not
 * fit are reported as the kernel's "overflow" uid 65534. */
static inline int high2lowuid(int uid)
{
    return (uid > 65535) ? 65534 : uid;
}
/* Clamp a 32-bit gid into the 16-bit ABI range; values that do not
 * fit are reported as the kernel's "overflow" gid 65534. */
static inline int high2lowgid(int gid)
{
    return (gid > 65535) ? 65534 : gid;
}
/* Widen a 16-bit ABI uid: the 16-bit "no change" sentinel 0xffff
 * must become the 32-bit sentinel -1; everything else passes through. */
static inline int low2highuid(int uid)
{
    return ((int16_t)uid == -1) ? -1 : uid;
}
6572 static inline int low2highgid(int gid)
6574 if ((int16_t)gid == -1)
6575 return -1;
6576 else
6577 return gid;
6579 static inline int tswapid(int id)
6581 return tswap16(id);
6584 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6586 #else /* !USE_UID16 */
6587 static inline int high2lowuid(int uid)
6589 return uid;
6591 static inline int high2lowgid(int gid)
6593 return gid;
6595 static inline int low2highuid(int uid)
6597 return uid;
6599 static inline int low2highgid(int gid)
6601 return gid;
6603 static inline int tswapid(int id)
6605 return tswap32(id);
6608 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6610 #endif /* USE_UID16 */
/* We must do direct syscalls for setting UID/GID, because we want to
 * implement the Linux system call semantics of "change only for this thread",
 * not the libc/POSIX semantics of "change for all threads in process".
 * (See http://ewontfix.com/17/ for more details.)
 * We use the 32-bit version of the syscalls if present; if it is not
 * then either the host architecture supports 32-bit UIDs natively with
 * the standard syscall, or the 16-bit UID is the best we can do.
 */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

/* Raw per-thread syscall wrappers (bypass libc; see comment above). */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
/* One-time start-up: register struct layouts with the thunk machinery,
 * build the reverse errno translation table, and patch the size field
 * of ioctl numbers whose size must be computed from the argument type. */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;
    int i;

    thunk_init(STRUCT_MAX);

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* Build target_to_host_errno_table[] table from
     * host_to_target_errno_table[]. */
    for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
        target_to_host_errno_table[host_to_target_errno_table[i]] = i;
    }

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        /* An all-ones size field is the sentinel meaning "compute the
         * size from the thunk type description". */
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
#if TARGET_ABI_BITS == 32
/* Reassemble a 64-bit file offset from the pair of 32-bit registers a
 * 32-bit guest passes it in; which half is high depends on endianness. */
static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)word0 << 32) | word1;
#else
    return ((uint64_t)word1 << 32) | word0;
#endif
}
#else /* TARGET_ABI_BITS == 32 */
/* A 64-bit guest passes the whole offset in one register; the second
 * argument is ignored. */
static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
{
    return word0;
}
#endif /* TARGET_ABI_BITS != 32 */
#ifdef TARGET_NR_truncate64
/* Implement the guest truncate64 syscall.  The 64-bit length arrives
 * as a 32-bit register pair; some ABIs align that pair to an even
 * register, shifting it from arg2/arg3 to arg3/arg4. */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
#ifdef TARGET_NR_ftruncate64
/* Implement the guest ftruncate64 syscall; same register-pair
 * alignment handling as target_truncate64 above. */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
#endif
/* Copy a guest struct timespec at TARGET_ADDR into HOST_TS.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address. */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
        return -TARGET_EFAULT;
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
/* Copy HOST_TS out to the guest struct timespec at TARGET_ADDR.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address. */
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
6768 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
6769 abi_ulong target_addr)
6771 struct target_itimerspec *target_itspec;
6773 if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
6774 return -TARGET_EFAULT;
6777 host_itspec->it_interval.tv_sec =
6778 tswapal(target_itspec->it_interval.tv_sec);
6779 host_itspec->it_interval.tv_nsec =
6780 tswapal(target_itspec->it_interval.tv_nsec);
6781 host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
6782 host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
6784 unlock_user_struct(target_itspec, target_addr, 1);
6785 return 0;
6788 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
6789 struct itimerspec *host_its)
6791 struct target_itimerspec *target_itspec;
6793 if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
6794 return -TARGET_EFAULT;
6797 target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
6798 target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
6800 target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
6801 target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
6803 unlock_user_struct(target_itspec, target_addr, 0);
6804 return 0;
/* Copy a guest struct timex at TARGET_ADDR into HOST_TX (for adjtimex
 * and friends), converting each field individually.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 * Only the fields listed below are transferred; any other members of
 * the host struct are left untouched by this helper. */
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
/* Copy HOST_TX out to the guest struct timex at TARGET_ADDR; the
 * field-by-field mirror of target_to_host_timex above.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address. */
static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
6878 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
6879 abi_ulong target_addr)
6881 struct target_sigevent *target_sevp;
6883 if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
6884 return -TARGET_EFAULT;
6887 /* This union is awkward on 64 bit systems because it has a 32 bit
6888 * integer and a pointer in it; we follow the conversion approach
6889 * used for handling sigval types in signal.c so the guest should get
6890 * the correct value back even if we did a 64 bit byteswap and it's
6891 * using the 32 bit integer.
6893 host_sevp->sigev_value.sival_ptr =
6894 (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
6895 host_sevp->sigev_signo =
6896 target_to_host_signal(tswap32(target_sevp->sigev_signo));
6897 host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
6898 host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
6900 unlock_user_struct(target_sevp, target_addr, 1);
6901 return 0;
#if defined(TARGET_NR_mlockall)
/* Translate the guest's MCL_* mlockall flag bits into host values. */
static inline int target_to_host_mlockall_arg(int arg)
{
    return ((arg & TARGET_MLOCKALL_MCL_CURRENT) ? MCL_CURRENT : 0)
         | ((arg & TARGET_MLOCKALL_MCL_FUTURE) ? MCL_FUTURE : 0);
}
#endif
/* Write HOST_ST out to the guest at TARGET_ADDR in the target's stat64
 * layout (or plain stat where no stat64 exists); ARM EABI guests get a
 * dedicated layout.  Returns 0 on success, -TARGET_EFAULT on a bad
 * guest address. */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
6982 /* ??? Using host futex calls even when target atomic operations
6983 are not really atomic probably breaks things. However implementing
6984 futexes locally would make futexes shared between multiple processes
6985 tricky. However they're probably useless because guest atomic
6986 operations won't work either. */
6987 static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
6988 target_ulong uaddr2, int val3)
6990 struct timespec ts, *pts;
6991 int base_op;
6993 /* ??? We assume FUTEX_* constants are the same on both host
6994 and target. */
6995 #ifdef FUTEX_CMD_MASK
6996 base_op = op & FUTEX_CMD_MASK;
6997 #else
6998 base_op = op;
6999 #endif
7000 switch (base_op) {
7001 case FUTEX_WAIT:
7002 case FUTEX_WAIT_BITSET:
7003 if (timeout) {
7004 pts = &ts;
7005 target_to_host_timespec(pts, timeout);
7006 } else {
7007 pts = NULL;
7009 return get_errno(safe_futex(g2h(uaddr), op, tswap32(val),
7010 pts, NULL, val3));
7011 case FUTEX_WAKE:
7012 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7013 case FUTEX_FD:
7014 return get_errno(safe_futex(g2h(uaddr), op, val, NULL, NULL, 0));
7015 case FUTEX_REQUEUE:
7016 case FUTEX_CMP_REQUEUE:
7017 case FUTEX_WAKE_OP:
7018 /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7019 TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7020 But the prototype takes a `struct timespec *'; insert casts
7021 to satisfy the compiler. We do not need to tswap TIMEOUT
7022 since it's not compared to guest memory. */
7023 pts = (struct timespec *)(uintptr_t) timeout;
7024 return get_errno(safe_futex(g2h(uaddr), op, val, pts,
7025 g2h(uaddr2),
7026 (base_op == FUTEX_CMP_REQUEUE
7027 ? tswap32(val3)
7028 : val3)));
7029 default:
7030 return -TARGET_ENOSYS;
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Implement name_to_handle_at(2): read the guest's handle_bytes, call
 * the host syscall into a scratch file_handle, then copy the opaque
 * handle and the mount id back to the guest. */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* First u32 of struct file_handle is handle_bytes. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    /* NOTE(review): size comes from the guest unchecked, so a huge
     * value can overflow total_size / over-allocate — confirm whether
     * it should be bounded (e.g. against MAX_HANDLE_SZ). */
    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/* Implement open_by_handle_at(2): duplicate the guest's file handle
 * into a host buffer, byteswap its header fields, and open it with the
 * host-converted open flags. */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* First u32 of struct file_handle is handle_bytes. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    /* NOTE(review): as in do_name_to_handle_at, size is guest-supplied
     * and unchecked before this addition — confirm bounding. */
    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                                      target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
#if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)

/* signalfd siginfo conversion */

/* Convert one host signalfd_siginfo record to target byte order.
 * NOTE(review): the only caller in this file passes tinfo == info
 * (conversion in place); that aliasing is why reading
 * tinfo->ssi_signo/ssi_code before they are stored below — and the
 * tswap32(tinfo->ssi_errno), which presumably meant info->ssi_errno —
 * still see the host values.  Confirm before adding non-aliased
 * callers. */
static void
host_to_target_signalfd_siginfo(struct signalfd_siginfo *tinfo,
                                const struct signalfd_siginfo *info)
{
    int sig = host_to_target_signal(info->ssi_signo);

    /* linux/signalfd.h defines a ssi_addr_lsb
     * not defined in sys/signalfd.h but used by some kernels
     */

#ifdef BUS_MCEERR_AO
    /* For machine-check SIGBUS records, a 16-bit addr_lsb value sits
     * immediately after ssi_addr; swap it as well. */
    if (tinfo->ssi_signo == SIGBUS &&
        (tinfo->ssi_code == BUS_MCEERR_AR ||
         tinfo->ssi_code == BUS_MCEERR_AO)) {
        uint16_t *ssi_addr_lsb = (uint16_t *)(&info->ssi_addr + 1);
        uint16_t *tssi_addr_lsb = (uint16_t *)(&tinfo->ssi_addr + 1);
        *tssi_addr_lsb = tswap16(*ssi_addr_lsb);
    }
#endif

    tinfo->ssi_signo = tswap32(sig);
    tinfo->ssi_errno = tswap32(tinfo->ssi_errno);
    tinfo->ssi_code = tswap32(info->ssi_code);
    tinfo->ssi_pid = tswap32(info->ssi_pid);
    tinfo->ssi_uid = tswap32(info->ssi_uid);
    tinfo->ssi_fd = tswap32(info->ssi_fd);
    tinfo->ssi_tid = tswap32(info->ssi_tid);
    tinfo->ssi_band = tswap32(info->ssi_band);
    tinfo->ssi_overrun = tswap32(info->ssi_overrun);
    tinfo->ssi_trapno = tswap32(info->ssi_trapno);
    tinfo->ssi_status = tswap32(info->ssi_status);
    tinfo->ssi_int = tswap32(info->ssi_int);
    tinfo->ssi_ptr = tswap64(info->ssi_ptr);
    tinfo->ssi_utime = tswap64(info->ssi_utime);
    tinfo->ssi_stime = tswap64(info->ssi_stime);
    tinfo->ssi_addr = tswap64(info->ssi_addr);
}
/* fd_trans read hook: convert a buffer of host signalfd_siginfo
 * records (as returned by read(2) on a signalfd) to target format in
 * place; returns the byte length unchanged. */
static abi_long host_to_target_data_signalfd(void *buf, size_t len)
{
    int i;

    for (i = 0; i < len; i += sizeof(struct signalfd_siginfo)) {
        host_to_target_signalfd_siginfo(buf + i, buf + i);
    }

    return len;
}

static TargetFdTrans target_signalfd_trans = {
    .host_to_target_data = host_to_target_data_signalfd,
};
/* Implement signalfd4(2): convert the guest sigset and flags to host
 * form, create the fd, and register a read translator so records the
 * guest reads are byteswapped.  Returns the new fd or -TARGET_Exxx. */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    /* Only NONBLOCK and CLOEXEC are valid signalfd4 flags. */
    if (flags & ~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
#endif
/* Map host to target signal numbers for the wait family of syscalls.
   All other status bits are assumed to be identical on host and
   target. */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        int tsig = host_to_target_signal(WTERMSIG(status));
        return (status & ~0x7f) | tsig;
    }
    if (WIFSTOPPED(status)) {
        int tsig = host_to_target_signal(WSTOPSIG(status));
        return (status & 0xff) | (tsig << 8);
    }
    /* Normal exit / continued: pass through unchanged. */
    return status;
}
/* Synthesize /proc/self/cmdline for the guest: stream the host file
 * into FD, skipping the first NUL-terminated string (the path of the
 * qemu binary) so the guest sees its own argv.  Returns 0 on success,
 * -1 with errno set on read/write failure. */
static int open_self_cmdline(void *cpu_env, int fd)
{
    int fd_orig = -1;
    bool word_skipped = false;

    fd_orig = open("/proc/self/cmdline", O_RDONLY);
    if (fd_orig < 0) {
        return fd_orig;
    }

    while (true) {
        ssize_t nb_read;
        char buf[128];
        char *cp_buf = buf;

        nb_read = read(fd_orig, buf, sizeof(buf));
        if (nb_read < 0) {
            /* Preserve the read errno across close(). */
            int e = errno;
            fd_orig = close(fd_orig);
            errno = e;
            return -1;
        } else if (nb_read == 0) {
            break;
        }

        if (!word_skipped) {
            /* Skip the first string, which is the path to qemu-*-static
               instead of the actual command. */
            cp_buf = memchr(buf, 0, nb_read);
            if (cp_buf) {
                /* Null byte found, skip one string */
                cp_buf++;
                nb_read -= cp_buf - buf;
                word_skipped = true;
            }
            /* No NUL in this chunk: keep skipping into the next one. */
        }

        if (word_skipped) {
            if (write(fd, cp_buf, nb_read) != nb_read) {
                int e = errno;
                close(fd_orig);
                errno = e;
                return -1;
            }
        }
    }

    return close(fd_orig);
}
7271 static int open_self_maps(void *cpu_env, int fd)
7273 CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
7274 TaskState *ts = cpu->opaque;
7275 FILE *fp;
7276 char *line = NULL;
7277 size_t len = 0;
7278 ssize_t read;
7280 fp = fopen("/proc/self/maps", "r");
7281 if (fp == NULL) {
7282 return -1;
7285 while ((read = getline(&line, &len, fp)) != -1) {
7286 int fields, dev_maj, dev_min, inode;
7287 uint64_t min, max, offset;
7288 char flag_r, flag_w, flag_x, flag_p;
7289 char path[512] = "";
7290 fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
7291 " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
7292 &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
7294 if ((fields < 10) || (fields > 11)) {
7295 continue;
7297 if (h2g_valid(min)) {
7298 int flags = page_get_flags(h2g(min));
7299 max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
7300 if (page_check_range(h2g(min), max - min, flags) == -1) {
7301 continue;
7303 if (h2g(min) == ts->info->stack_limit) {
7304 pstrcpy(path, sizeof(path), " [stack]");
7306 dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
7307 " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
7308 h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
7309 flag_x, flag_p, offset, dev_maj, dev_min, inode,
7310 path[0] ? " " : "", path);
7314 free(line);
7315 fclose(fp);
7317 return 0;
/* Synthesize /proc/self/stat for the guest: emit 44 space-separated
 * fields, filling in only pid (0), comm (1) and startstack (27); every
 * other field is written as a literal 0.  Returns 0 on success, -1 on
 * short write. */
static int open_self_stat(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong start_stack = ts->info->start_stack;
    int i;

    for (i = 0; i < 44; i++) {
        char buf[128];
        int len;
        uint64_t val = 0;

        if (i == 0) {
            /* pid */
            val = getpid();
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else if (i == 1) {
            /* app name */
            snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
        } else if (i == 27) {
            /* stack bottom */
            val = start_stack;
            snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
        } else {
            /* for the rest, there is MasterCard */
            snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
        }

        len = strlen(buf);
        if (write(fd, buf, len) != len) {
            return -1;
        }
    }

    return 0;
}
/* Synthesize /proc/self/auxv for the guest: dump the auxiliary vector
 * saved at exec time from the guest stack into FD.  Always returns 0,
 * even on write failure or a bad auxv address. */
static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        /* NOTE(review): ptr/len were advanced by the loop, so this does
         * not unlock exactly the region that was locked; harmless for a
         * read lock without DEBUG_REMAP — confirm before relying on it. */
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
/* Return 1 if FILENAME names ENTRY inside this process's own /proc
 * directory, reached either as "/proc/self/ENTRY" or
 * "/proc/<own-pid>/ENTRY"; 0 otherwise. */
static int is_proc_myself(const char *filename, const char *entry)
{
    const char *rest;

    if (strncmp(filename, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    rest = filename + strlen("/proc/");

    if (strncmp(rest, "self/", strlen("self/")) == 0) {
        rest += strlen("self/");
    } else if (*rest >= '1' && *rest <= '9') {
        /* Numeric directory: only our own pid counts as "myself". */
        char mypid[80];

        snprintf(mypid, sizeof(mypid), "%d/", getpid());
        if (strncmp(rest, mypid, strlen(mypid)) != 0) {
            return 0;
        }
        rest += strlen(mypid);
    } else {
        return 0;
    }

    return strcmp(rest, entry) == 0;
}
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
/* Exact-match comparator used by the fake-open table for absolute
 * /proc paths (cross-endian builds only). */
static int is_proc(const char *filename, const char *entry)
{
    return strcmp(filename, entry) == 0;
}
7417 static int open_net_route(void *cpu_env, int fd)
7419 FILE *fp;
7420 char *line = NULL;
7421 size_t len = 0;
7422 ssize_t read;
7424 fp = fopen("/proc/net/route", "r");
7425 if (fp == NULL) {
7426 return -1;
7429 /* read header */
7431 read = getline(&line, &len, fp);
7432 dprintf(fd, "%s", line);
7434 /* read routes */
7436 while ((read = getline(&line, &len, fp)) != -1) {
7437 char iface[16];
7438 uint32_t dest, gw, mask;
7439 unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7440 sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7441 iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7442 &mask, &mtu, &window, &irtt);
7443 dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7444 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7445 metric, tswap32(mask), mtu, window, irtt);
7448 free(line);
7449 fclose(fp);
7451 return 0;
7453 #endif
/* openat(2) front end: serve synthesized content for a few special
 * /proc files (maps, stat, auxv, cmdline, and cross-endian net/route)
 * via anonymous temp files, redirect /proc/self/exe to the real guest
 * binary, and fall through to the host openat for everything else. */
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;                      /* entry or path to match */
        int (*fill)(void *cpu_env, int fd);        /* writes fake content */
        int (*cmp)(const char *s1, const char *s2); /* path matcher */
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    if (is_proc_myself(pathname, "exe")) {
        /* Hand out the exec fd saved at startup when available. */
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* Unlink immediately: the fd keeps the anonymous file alive. */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
7515 #define TIMER_MAGIC 0x0caf0000
7516 #define TIMER_MAGIC_MASK 0xffff0000
7518 /* Convert QEMU provided timer ID back to internal 16bit index format */
7519 static target_timer_t get_timer_id(abi_long arg)
7521 target_timer_t timerid = arg;
7523 if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
7524 return -TARGET_EINVAL;
7527 timerid &= 0xffff;
7529 if (timerid >= ARRAY_SIZE(g_posix_timers)) {
7530 return -TARGET_EINVAL;
7533 return timerid;
7536 /* do_syscall() should always have a single exit point at the end so
7537 that actions, such as logging of syscall results, can be performed.
7538 All errnos that do_syscall() returns must be -TARGET_<errcode>. */
7539 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
7540 abi_long arg2, abi_long arg3, abi_long arg4,
7541 abi_long arg5, abi_long arg6, abi_long arg7,
7542 abi_long arg8)
7544 CPUState *cpu = ENV_GET_CPU(cpu_env);
7545 abi_long ret;
7546 struct stat st;
7547 struct statfs stfs;
7548 void *p;
7550 #if defined(DEBUG_ERESTARTSYS)
7551 /* Debug-only code for exercising the syscall-restart code paths
7552 * in the per-architecture cpu main loops: restart every syscall
7553 * the guest makes once before letting it through.
7556 static int flag;
7558 flag = !flag;
7559 if (flag) {
7560 return -TARGET_ERESTARTSYS;
7563 #endif
7565 #ifdef DEBUG
7566 gemu_log("syscall %d", num);
7567 #endif
7568 trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
7569 if(do_strace)
7570 print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
7572 switch(num) {
7573 case TARGET_NR_exit:
7574 /* In old applications this may be used to implement _exit(2).
7575 However in threaded applictions it is used for thread termination,
7576 and _exit_group is used for application termination.
7577 Do thread termination if we have more then one thread. */
7579 if (block_signals()) {
7580 ret = -TARGET_ERESTARTSYS;
7581 break;
7584 cpu_list_lock();
7586 if (CPU_NEXT(first_cpu)) {
7587 TaskState *ts;
7589 /* Remove the CPU from the list. */
7590 QTAILQ_REMOVE(&cpus, cpu, node);
7592 cpu_list_unlock();
7594 ts = cpu->opaque;
7595 if (ts->child_tidptr) {
7596 put_user_u32(0, ts->child_tidptr);
7597 sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
7598 NULL, NULL, 0);
7600 thread_cpu = NULL;
7601 object_unref(OBJECT(cpu));
7602 g_free(ts);
7603 rcu_unregister_thread();
7604 pthread_exit(NULL);
7607 cpu_list_unlock();
7608 #ifdef TARGET_GPROF
7609 _mcleanup();
7610 #endif
7611 gdb_exit(cpu_env, arg1);
7612 _exit(arg1);
7613 ret = 0; /* avoid warning */
7614 break;
7615 case TARGET_NR_read:
7616 if (arg3 == 0)
7617 ret = 0;
7618 else {
7619 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
7620 goto efault;
7621 ret = get_errno(safe_read(arg1, p, arg3));
7622 if (ret >= 0 &&
7623 fd_trans_host_to_target_data(arg1)) {
7624 ret = fd_trans_host_to_target_data(arg1)(p, ret);
7626 unlock_user(p, arg2, ret);
7628 break;
7629 case TARGET_NR_write:
7630 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
7631 goto efault;
7632 ret = get_errno(safe_write(arg1, p, arg3));
7633 unlock_user(p, arg2, 0);
7634 break;
7635 #ifdef TARGET_NR_open
7636 case TARGET_NR_open:
7637 if (!(p = lock_user_string(arg1)))
7638 goto efault;
7639 ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
7640 target_to_host_bitmask(arg2, fcntl_flags_tbl),
7641 arg3));
7642 fd_trans_unregister(ret);
7643 unlock_user(p, arg1, 0);
7644 break;
7645 #endif
7646 case TARGET_NR_openat:
7647 if (!(p = lock_user_string(arg2)))
7648 goto efault;
7649 ret = get_errno(do_openat(cpu_env, arg1, p,
7650 target_to_host_bitmask(arg3, fcntl_flags_tbl),
7651 arg4));
7652 fd_trans_unregister(ret);
7653 unlock_user(p, arg2, 0);
7654 break;
7655 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7656 case TARGET_NR_name_to_handle_at:
7657 ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
7658 break;
7659 #endif
7660 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7661 case TARGET_NR_open_by_handle_at:
7662 ret = do_open_by_handle_at(arg1, arg2, arg3);
7663 fd_trans_unregister(ret);
7664 break;
7665 #endif
7666 case TARGET_NR_close:
7667 fd_trans_unregister(arg1);
7668 ret = get_errno(close(arg1));
7669 break;
7670 case TARGET_NR_brk:
7671 ret = do_brk(arg1);
7672 break;
7673 #ifdef TARGET_NR_fork
7674 case TARGET_NR_fork:
7675 ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
7676 break;
7677 #endif
7678 #ifdef TARGET_NR_waitpid
7679 case TARGET_NR_waitpid:
7681 int status;
7682 ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
7683 if (!is_error(ret) && arg2 && ret
7684 && put_user_s32(host_to_target_waitstatus(status), arg2))
7685 goto efault;
7687 break;
7688 #endif
7689 #ifdef TARGET_NR_waitid
7690 case TARGET_NR_waitid:
7692 siginfo_t info;
7693 info.si_pid = 0;
7694 ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
7695 if (!is_error(ret) && arg3 && info.si_pid != 0) {
7696 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
7697 goto efault;
7698 host_to_target_siginfo(p, &info);
7699 unlock_user(p, arg3, sizeof(target_siginfo_t));
7702 break;
7703 #endif
7704 #ifdef TARGET_NR_creat /* not on alpha */
7705 case TARGET_NR_creat:
7706 if (!(p = lock_user_string(arg1)))
7707 goto efault;
7708 ret = get_errno(creat(p, arg2));
7709 fd_trans_unregister(ret);
7710 unlock_user(p, arg1, 0);
7711 break;
7712 #endif
7713 #ifdef TARGET_NR_link
7714 case TARGET_NR_link:
7716 void * p2;
7717 p = lock_user_string(arg1);
7718 p2 = lock_user_string(arg2);
7719 if (!p || !p2)
7720 ret = -TARGET_EFAULT;
7721 else
7722 ret = get_errno(link(p, p2));
7723 unlock_user(p2, arg2, 0);
7724 unlock_user(p, arg1, 0);
7726 break;
7727 #endif
7728 #if defined(TARGET_NR_linkat)
7729 case TARGET_NR_linkat:
7731 void * p2 = NULL;
7732 if (!arg2 || !arg4)
7733 goto efault;
7734 p = lock_user_string(arg2);
7735 p2 = lock_user_string(arg4);
7736 if (!p || !p2)
7737 ret = -TARGET_EFAULT;
7738 else
7739 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
7740 unlock_user(p, arg2, 0);
7741 unlock_user(p2, arg4, 0);
7743 break;
7744 #endif
7745 #ifdef TARGET_NR_unlink
7746 case TARGET_NR_unlink:
7747 if (!(p = lock_user_string(arg1)))
7748 goto efault;
7749 ret = get_errno(unlink(p));
7750 unlock_user(p, arg1, 0);
7751 break;
7752 #endif
7753 #if defined(TARGET_NR_unlinkat)
7754 case TARGET_NR_unlinkat:
7755 if (!(p = lock_user_string(arg2)))
7756 goto efault;
7757 ret = get_errno(unlinkat(arg1, p, arg3));
7758 unlock_user(p, arg2, 0);
7759 break;
7760 #endif
7761 case TARGET_NR_execve:
7763 char **argp, **envp;
7764 int argc, envc;
7765 abi_ulong gp;
7766 abi_ulong guest_argp;
7767 abi_ulong guest_envp;
7768 abi_ulong addr;
7769 char **q;
7770 int total_size = 0;
7772 argc = 0;
7773 guest_argp = arg2;
7774 for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
7775 if (get_user_ual(addr, gp))
7776 goto efault;
7777 if (!addr)
7778 break;
7779 argc++;
7781 envc = 0;
7782 guest_envp = arg3;
7783 for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
7784 if (get_user_ual(addr, gp))
7785 goto efault;
7786 if (!addr)
7787 break;
7788 envc++;
7791 argp = alloca((argc + 1) * sizeof(void *));
7792 envp = alloca((envc + 1) * sizeof(void *));
7794 for (gp = guest_argp, q = argp; gp;
7795 gp += sizeof(abi_ulong), q++) {
7796 if (get_user_ual(addr, gp))
7797 goto execve_efault;
7798 if (!addr)
7799 break;
7800 if (!(*q = lock_user_string(addr)))
7801 goto execve_efault;
7802 total_size += strlen(*q) + 1;
7804 *q = NULL;
7806 for (gp = guest_envp, q = envp; gp;
7807 gp += sizeof(abi_ulong), q++) {
7808 if (get_user_ual(addr, gp))
7809 goto execve_efault;
7810 if (!addr)
7811 break;
7812 if (!(*q = lock_user_string(addr)))
7813 goto execve_efault;
7814 total_size += strlen(*q) + 1;
7816 *q = NULL;
7818 if (!(p = lock_user_string(arg1)))
7819 goto execve_efault;
7820 /* Although execve() is not an interruptible syscall it is
7821 * a special case where we must use the safe_syscall wrapper:
7822 * if we allow a signal to happen before we make the host
7823 * syscall then we will 'lose' it, because at the point of
7824 * execve the process leaves QEMU's control. So we use the
7825 * safe syscall wrapper to ensure that we either take the
7826 * signal as a guest signal, or else it does not happen
7827 * before the execve completes and makes it the other
7828 * program's problem.
7830 ret = get_errno(safe_execve(p, argp, envp));
7831 unlock_user(p, arg1, 0);
7833 goto execve_end;
7835 execve_efault:
7836 ret = -TARGET_EFAULT;
7838 execve_end:
7839 for (gp = guest_argp, q = argp; *q;
7840 gp += sizeof(abi_ulong), q++) {
7841 if (get_user_ual(addr, gp)
7842 || !addr)
7843 break;
7844 unlock_user(*q, addr, 0);
7846 for (gp = guest_envp, q = envp; *q;
7847 gp += sizeof(abi_ulong), q++) {
7848 if (get_user_ual(addr, gp)
7849 || !addr)
7850 break;
7851 unlock_user(*q, addr, 0);
7854 break;
7855 case TARGET_NR_chdir:
7856 if (!(p = lock_user_string(arg1)))
7857 goto efault;
7858 ret = get_errno(chdir(p));
7859 unlock_user(p, arg1, 0);
7860 break;
7861 #ifdef TARGET_NR_time
7862 case TARGET_NR_time:
7864 time_t host_time;
7865 ret = get_errno(time(&host_time));
7866 if (!is_error(ret)
7867 && arg1
7868 && put_user_sal(host_time, arg1))
7869 goto efault;
7871 break;
7872 #endif
7873 #ifdef TARGET_NR_mknod
7874 case TARGET_NR_mknod:
7875 if (!(p = lock_user_string(arg1)))
7876 goto efault;
7877 ret = get_errno(mknod(p, arg2, arg3));
7878 unlock_user(p, arg1, 0);
7879 break;
7880 #endif
7881 #if defined(TARGET_NR_mknodat)
7882 case TARGET_NR_mknodat:
7883 if (!(p = lock_user_string(arg2)))
7884 goto efault;
7885 ret = get_errno(mknodat(arg1, p, arg3, arg4));
7886 unlock_user(p, arg2, 0);
7887 break;
7888 #endif
7889 #ifdef TARGET_NR_chmod
7890 case TARGET_NR_chmod:
7891 if (!(p = lock_user_string(arg1)))
7892 goto efault;
7893 ret = get_errno(chmod(p, arg2));
7894 unlock_user(p, arg1, 0);
7895 break;
7896 #endif
7897 #ifdef TARGET_NR_break
7898 case TARGET_NR_break:
7899 goto unimplemented;
7900 #endif
7901 #ifdef TARGET_NR_oldstat
7902 case TARGET_NR_oldstat:
7903 goto unimplemented;
7904 #endif
7905 case TARGET_NR_lseek:
7906 ret = get_errno(lseek(arg1, arg2, arg3));
7907 break;
7908 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
7909 /* Alpha specific */
7910 case TARGET_NR_getxpid:
7911 ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
7912 ret = get_errno(getpid());
7913 break;
7914 #endif
7915 #ifdef TARGET_NR_getpid
7916 case TARGET_NR_getpid:
7917 ret = get_errno(getpid());
7918 break;
7919 #endif
7920 case TARGET_NR_mount:
7922 /* need to look at the data field */
7923 void *p2, *p3;
7925 if (arg1) {
7926 p = lock_user_string(arg1);
7927 if (!p) {
7928 goto efault;
7930 } else {
7931 p = NULL;
7934 p2 = lock_user_string(arg2);
7935 if (!p2) {
7936 if (arg1) {
7937 unlock_user(p, arg1, 0);
7939 goto efault;
7942 if (arg3) {
7943 p3 = lock_user_string(arg3);
7944 if (!p3) {
7945 if (arg1) {
7946 unlock_user(p, arg1, 0);
7948 unlock_user(p2, arg2, 0);
7949 goto efault;
7951 } else {
7952 p3 = NULL;
7955 /* FIXME - arg5 should be locked, but it isn't clear how to
7956 * do that since it's not guaranteed to be a NULL-terminated
7957 * string.
7959 if (!arg5) {
7960 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
7961 } else {
7962 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
7964 ret = get_errno(ret);
7966 if (arg1) {
7967 unlock_user(p, arg1, 0);
7969 unlock_user(p2, arg2, 0);
7970 if (arg3) {
7971 unlock_user(p3, arg3, 0);
7974 break;
7975 #ifdef TARGET_NR_umount
7976 case TARGET_NR_umount:
7977 if (!(p = lock_user_string(arg1)))
7978 goto efault;
7979 ret = get_errno(umount(p));
7980 unlock_user(p, arg1, 0);
7981 break;
7982 #endif
7983 #ifdef TARGET_NR_stime /* not on alpha */
7984 case TARGET_NR_stime:
7986 time_t host_time;
7987 if (get_user_sal(host_time, arg1))
7988 goto efault;
7989 ret = get_errno(stime(&host_time));
7991 break;
7992 #endif
7993 case TARGET_NR_ptrace:
7994 goto unimplemented;
7995 #ifdef TARGET_NR_alarm /* not on alpha */
7996 case TARGET_NR_alarm:
7997 ret = alarm(arg1);
7998 break;
7999 #endif
8000 #ifdef TARGET_NR_oldfstat
8001 case TARGET_NR_oldfstat:
8002 goto unimplemented;
8003 #endif
8004 #ifdef TARGET_NR_pause /* not on alpha */
8005 case TARGET_NR_pause:
8006 if (!block_signals()) {
8007 sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8009 ret = -TARGET_EINTR;
8010 break;
8011 #endif
8012 #ifdef TARGET_NR_utime
8013 case TARGET_NR_utime:
8015 struct utimbuf tbuf, *host_tbuf;
8016 struct target_utimbuf *target_tbuf;
8017 if (arg2) {
8018 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8019 goto efault;
8020 tbuf.actime = tswapal(target_tbuf->actime);
8021 tbuf.modtime = tswapal(target_tbuf->modtime);
8022 unlock_user_struct(target_tbuf, arg2, 0);
8023 host_tbuf = &tbuf;
8024 } else {
8025 host_tbuf = NULL;
8027 if (!(p = lock_user_string(arg1)))
8028 goto efault;
8029 ret = get_errno(utime(p, host_tbuf));
8030 unlock_user(p, arg1, 0);
8032 break;
8033 #endif
8034 #ifdef TARGET_NR_utimes
8035 case TARGET_NR_utimes:
8037 struct timeval *tvp, tv[2];
8038 if (arg2) {
8039 if (copy_from_user_timeval(&tv[0], arg2)
8040 || copy_from_user_timeval(&tv[1],
8041 arg2 + sizeof(struct target_timeval)))
8042 goto efault;
8043 tvp = tv;
8044 } else {
8045 tvp = NULL;
8047 if (!(p = lock_user_string(arg1)))
8048 goto efault;
8049 ret = get_errno(utimes(p, tvp));
8050 unlock_user(p, arg1, 0);
8052 break;
8053 #endif
8054 #if defined(TARGET_NR_futimesat)
8055 case TARGET_NR_futimesat:
8057 struct timeval *tvp, tv[2];
8058 if (arg3) {
8059 if (copy_from_user_timeval(&tv[0], arg3)
8060 || copy_from_user_timeval(&tv[1],
8061 arg3 + sizeof(struct target_timeval)))
8062 goto efault;
8063 tvp = tv;
8064 } else {
8065 tvp = NULL;
8067 if (!(p = lock_user_string(arg2)))
8068 goto efault;
8069 ret = get_errno(futimesat(arg1, path(p), tvp));
8070 unlock_user(p, arg2, 0);
8072 break;
8073 #endif
8074 #ifdef TARGET_NR_stty
8075 case TARGET_NR_stty:
8076 goto unimplemented;
8077 #endif
8078 #ifdef TARGET_NR_gtty
8079 case TARGET_NR_gtty:
8080 goto unimplemented;
8081 #endif
8082 #ifdef TARGET_NR_access
8083 case TARGET_NR_access:
8084 if (!(p = lock_user_string(arg1)))
8085 goto efault;
8086 ret = get_errno(access(path(p), arg2));
8087 unlock_user(p, arg1, 0);
8088 break;
8089 #endif
8090 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8091 case TARGET_NR_faccessat:
8092 if (!(p = lock_user_string(arg2)))
8093 goto efault;
8094 ret = get_errno(faccessat(arg1, p, arg3, 0));
8095 unlock_user(p, arg2, 0);
8096 break;
8097 #endif
8098 #ifdef TARGET_NR_nice /* not on alpha */
8099 case TARGET_NR_nice:
8100 ret = get_errno(nice(arg1));
8101 break;
8102 #endif
8103 #ifdef TARGET_NR_ftime
8104 case TARGET_NR_ftime:
8105 goto unimplemented;
8106 #endif
8107 case TARGET_NR_sync:
8108 sync();
8109 ret = 0;
8110 break;
8111 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8112 case TARGET_NR_syncfs:
8113 ret = get_errno(syncfs(arg1));
8114 break;
8115 #endif
8116 case TARGET_NR_kill:
8117 ret = get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8118 break;
8119 #ifdef TARGET_NR_rename
8120 case TARGET_NR_rename:
8122 void *p2;
8123 p = lock_user_string(arg1);
8124 p2 = lock_user_string(arg2);
8125 if (!p || !p2)
8126 ret = -TARGET_EFAULT;
8127 else
8128 ret = get_errno(rename(p, p2));
8129 unlock_user(p2, arg2, 0);
8130 unlock_user(p, arg1, 0);
8132 break;
8133 #endif
8134 #if defined(TARGET_NR_renameat)
8135 case TARGET_NR_renameat:
8137 void *p2;
8138 p = lock_user_string(arg2);
8139 p2 = lock_user_string(arg4);
8140 if (!p || !p2)
8141 ret = -TARGET_EFAULT;
8142 else
8143 ret = get_errno(renameat(arg1, p, arg3, p2));
8144 unlock_user(p2, arg4, 0);
8145 unlock_user(p, arg2, 0);
8147 break;
8148 #endif
8149 #ifdef TARGET_NR_mkdir
8150 case TARGET_NR_mkdir:
8151 if (!(p = lock_user_string(arg1)))
8152 goto efault;
8153 ret = get_errno(mkdir(p, arg2));
8154 unlock_user(p, arg1, 0);
8155 break;
8156 #endif
8157 #if defined(TARGET_NR_mkdirat)
8158 case TARGET_NR_mkdirat:
8159 if (!(p = lock_user_string(arg2)))
8160 goto efault;
8161 ret = get_errno(mkdirat(arg1, p, arg3));
8162 unlock_user(p, arg2, 0);
8163 break;
8164 #endif
8165 #ifdef TARGET_NR_rmdir
8166 case TARGET_NR_rmdir:
8167 if (!(p = lock_user_string(arg1)))
8168 goto efault;
8169 ret = get_errno(rmdir(p));
8170 unlock_user(p, arg1, 0);
8171 break;
8172 #endif
8173 case TARGET_NR_dup:
8174 ret = get_errno(dup(arg1));
8175 if (ret >= 0) {
8176 fd_trans_dup(arg1, ret);
8178 break;
8179 #ifdef TARGET_NR_pipe
8180 case TARGET_NR_pipe:
8181 ret = do_pipe(cpu_env, arg1, 0, 0);
8182 break;
8183 #endif
8184 #ifdef TARGET_NR_pipe2
8185 case TARGET_NR_pipe2:
8186 ret = do_pipe(cpu_env, arg1,
8187 target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8188 break;
8189 #endif
8190 case TARGET_NR_times:
8192 struct target_tms *tmsp;
8193 struct tms tms;
8194 ret = get_errno(times(&tms));
8195 if (arg1) {
8196 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8197 if (!tmsp)
8198 goto efault;
8199 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8200 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8201 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8202 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8204 if (!is_error(ret))
8205 ret = host_to_target_clock_t(ret);
8207 break;
8208 #ifdef TARGET_NR_prof
8209 case TARGET_NR_prof:
8210 goto unimplemented;
8211 #endif
8212 #ifdef TARGET_NR_signal
8213 case TARGET_NR_signal:
8214 goto unimplemented;
8215 #endif
8216 case TARGET_NR_acct:
8217 if (arg1 == 0) {
8218 ret = get_errno(acct(NULL));
8219 } else {
8220 if (!(p = lock_user_string(arg1)))
8221 goto efault;
8222 ret = get_errno(acct(path(p)));
8223 unlock_user(p, arg1, 0);
8225 break;
8226 #ifdef TARGET_NR_umount2
8227 case TARGET_NR_umount2:
8228 if (!(p = lock_user_string(arg1)))
8229 goto efault;
8230 ret = get_errno(umount2(p, arg2));
8231 unlock_user(p, arg1, 0);
8232 break;
8233 #endif
8234 #ifdef TARGET_NR_lock
8235 case TARGET_NR_lock:
8236 goto unimplemented;
8237 #endif
8238 case TARGET_NR_ioctl:
8239 ret = do_ioctl(arg1, arg2, arg3);
8240 break;
8241 case TARGET_NR_fcntl:
8242 ret = do_fcntl(arg1, arg2, arg3);
8243 break;
8244 #ifdef TARGET_NR_mpx
8245 case TARGET_NR_mpx:
8246 goto unimplemented;
8247 #endif
8248 case TARGET_NR_setpgid:
8249 ret = get_errno(setpgid(arg1, arg2));
8250 break;
8251 #ifdef TARGET_NR_ulimit
8252 case TARGET_NR_ulimit:
8253 goto unimplemented;
8254 #endif
8255 #ifdef TARGET_NR_oldolduname
8256 case TARGET_NR_oldolduname:
8257 goto unimplemented;
8258 #endif
8259 case TARGET_NR_umask:
8260 ret = get_errno(umask(arg1));
8261 break;
8262 case TARGET_NR_chroot:
8263 if (!(p = lock_user_string(arg1)))
8264 goto efault;
8265 ret = get_errno(chroot(p));
8266 unlock_user(p, arg1, 0);
8267 break;
8268 #ifdef TARGET_NR_ustat
8269 case TARGET_NR_ustat:
8270 goto unimplemented;
8271 #endif
8272 #ifdef TARGET_NR_dup2
8273 case TARGET_NR_dup2:
8274 ret = get_errno(dup2(arg1, arg2));
8275 if (ret >= 0) {
8276 fd_trans_dup(arg1, arg2);
8278 break;
8279 #endif
8280 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8281 case TARGET_NR_dup3:
8282 ret = get_errno(dup3(arg1, arg2, arg3));
8283 if (ret >= 0) {
8284 fd_trans_dup(arg1, arg2);
8286 break;
8287 #endif
8288 #ifdef TARGET_NR_getppid /* not on alpha */
8289 case TARGET_NR_getppid:
8290 ret = get_errno(getppid());
8291 break;
8292 #endif
8293 #ifdef TARGET_NR_getpgrp
8294 case TARGET_NR_getpgrp:
8295 ret = get_errno(getpgrp());
8296 break;
8297 #endif
8298 case TARGET_NR_setsid:
8299 ret = get_errno(setsid());
8300 break;
8301 #ifdef TARGET_NR_sigaction
8302 case TARGET_NR_sigaction:
8304 #if defined(TARGET_ALPHA)
8305 struct target_sigaction act, oact, *pact = 0;
8306 struct target_old_sigaction *old_act;
8307 if (arg2) {
8308 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8309 goto efault;
8310 act._sa_handler = old_act->_sa_handler;
8311 target_siginitset(&act.sa_mask, old_act->sa_mask);
8312 act.sa_flags = old_act->sa_flags;
8313 act.sa_restorer = 0;
8314 unlock_user_struct(old_act, arg2, 0);
8315 pact = &act;
8317 ret = get_errno(do_sigaction(arg1, pact, &oact));
8318 if (!is_error(ret) && arg3) {
8319 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8320 goto efault;
8321 old_act->_sa_handler = oact._sa_handler;
8322 old_act->sa_mask = oact.sa_mask.sig[0];
8323 old_act->sa_flags = oact.sa_flags;
8324 unlock_user_struct(old_act, arg3, 1);
8326 #elif defined(TARGET_MIPS)
8327 struct target_sigaction act, oact, *pact, *old_act;
8329 if (arg2) {
8330 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8331 goto efault;
8332 act._sa_handler = old_act->_sa_handler;
8333 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8334 act.sa_flags = old_act->sa_flags;
8335 unlock_user_struct(old_act, arg2, 0);
8336 pact = &act;
8337 } else {
8338 pact = NULL;
8341 ret = get_errno(do_sigaction(arg1, pact, &oact));
8343 if (!is_error(ret) && arg3) {
8344 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8345 goto efault;
8346 old_act->_sa_handler = oact._sa_handler;
8347 old_act->sa_flags = oact.sa_flags;
8348 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8349 old_act->sa_mask.sig[1] = 0;
8350 old_act->sa_mask.sig[2] = 0;
8351 old_act->sa_mask.sig[3] = 0;
8352 unlock_user_struct(old_act, arg3, 1);
8354 #else
8355 struct target_old_sigaction *old_act;
8356 struct target_sigaction act, oact, *pact;
8357 if (arg2) {
8358 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8359 goto efault;
8360 act._sa_handler = old_act->_sa_handler;
8361 target_siginitset(&act.sa_mask, old_act->sa_mask);
8362 act.sa_flags = old_act->sa_flags;
8363 act.sa_restorer = old_act->sa_restorer;
8364 unlock_user_struct(old_act, arg2, 0);
8365 pact = &act;
8366 } else {
8367 pact = NULL;
8369 ret = get_errno(do_sigaction(arg1, pact, &oact));
8370 if (!is_error(ret) && arg3) {
8371 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8372 goto efault;
8373 old_act->_sa_handler = oact._sa_handler;
8374 old_act->sa_mask = oact.sa_mask.sig[0];
8375 old_act->sa_flags = oact.sa_flags;
8376 old_act->sa_restorer = oact.sa_restorer;
8377 unlock_user_struct(old_act, arg3, 1);
8379 #endif
8381 break;
8382 #endif
8383 case TARGET_NR_rt_sigaction:
8385 #if defined(TARGET_ALPHA)
8386 struct target_sigaction act, oact, *pact = 0;
8387 struct target_rt_sigaction *rt_act;
8389 if (arg4 != sizeof(target_sigset_t)) {
8390 ret = -TARGET_EINVAL;
8391 break;
8393 if (arg2) {
8394 if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
8395 goto efault;
8396 act._sa_handler = rt_act->_sa_handler;
8397 act.sa_mask = rt_act->sa_mask;
8398 act.sa_flags = rt_act->sa_flags;
8399 act.sa_restorer = arg5;
8400 unlock_user_struct(rt_act, arg2, 0);
8401 pact = &act;
8403 ret = get_errno(do_sigaction(arg1, pact, &oact));
8404 if (!is_error(ret) && arg3) {
8405 if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
8406 goto efault;
8407 rt_act->_sa_handler = oact._sa_handler;
8408 rt_act->sa_mask = oact.sa_mask;
8409 rt_act->sa_flags = oact.sa_flags;
8410 unlock_user_struct(rt_act, arg3, 1);
8412 #else
8413 struct target_sigaction *act;
8414 struct target_sigaction *oact;
8416 if (arg4 != sizeof(target_sigset_t)) {
8417 ret = -TARGET_EINVAL;
8418 break;
8420 if (arg2) {
8421 if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
8422 goto efault;
8423 } else
8424 act = NULL;
8425 if (arg3) {
8426 if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8427 ret = -TARGET_EFAULT;
8428 goto rt_sigaction_fail;
8430 } else
8431 oact = NULL;
8432 ret = get_errno(do_sigaction(arg1, act, oact));
8433 rt_sigaction_fail:
8434 if (act)
8435 unlock_user_struct(act, arg2, 0);
8436 if (oact)
8437 unlock_user_struct(oact, arg3, 1);
8438 #endif
8440 break;
8441 #ifdef TARGET_NR_sgetmask /* not on alpha */
8442 case TARGET_NR_sgetmask:
8444 sigset_t cur_set;
8445 abi_ulong target_set;
8446 ret = do_sigprocmask(0, NULL, &cur_set);
8447 if (!ret) {
8448 host_to_target_old_sigset(&target_set, &cur_set);
8449 ret = target_set;
8452 break;
8453 #endif
8454 #ifdef TARGET_NR_ssetmask /* not on alpha */
8455 case TARGET_NR_ssetmask:
8457 sigset_t set, oset, cur_set;
8458 abi_ulong target_set = arg1;
8459 /* We only have one word of the new mask so we must read
8460 * the rest of it with do_sigprocmask() and OR in this word.
8461 * We are guaranteed that a do_sigprocmask() that only queries
8462 * the signal mask will not fail.
8464 ret = do_sigprocmask(0, NULL, &cur_set);
8465 assert(!ret);
8466 target_to_host_old_sigset(&set, &target_set);
8467 sigorset(&set, &set, &cur_set);
8468 ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8469 if (!ret) {
8470 host_to_target_old_sigset(&target_set, &oset);
8471 ret = target_set;
8474 break;
8475 #endif
8476 #ifdef TARGET_NR_sigprocmask
8477 case TARGET_NR_sigprocmask:
8479 #if defined(TARGET_ALPHA)
8480 sigset_t set, oldset;
8481 abi_ulong mask;
8482 int how;
8484 switch (arg1) {
8485 case TARGET_SIG_BLOCK:
8486 how = SIG_BLOCK;
8487 break;
8488 case TARGET_SIG_UNBLOCK:
8489 how = SIG_UNBLOCK;
8490 break;
8491 case TARGET_SIG_SETMASK:
8492 how = SIG_SETMASK;
8493 break;
8494 default:
8495 ret = -TARGET_EINVAL;
8496 goto fail;
8498 mask = arg2;
8499 target_to_host_old_sigset(&set, &mask);
8501 ret = do_sigprocmask(how, &set, &oldset);
8502 if (!is_error(ret)) {
8503 host_to_target_old_sigset(&mask, &oldset);
8504 ret = mask;
8505 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
8507 #else
8508 sigset_t set, oldset, *set_ptr;
8509 int how;
8511 if (arg2) {
8512 switch (arg1) {
8513 case TARGET_SIG_BLOCK:
8514 how = SIG_BLOCK;
8515 break;
8516 case TARGET_SIG_UNBLOCK:
8517 how = SIG_UNBLOCK;
8518 break;
8519 case TARGET_SIG_SETMASK:
8520 how = SIG_SETMASK;
8521 break;
8522 default:
8523 ret = -TARGET_EINVAL;
8524 goto fail;
8526 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8527 goto efault;
8528 target_to_host_old_sigset(&set, p);
8529 unlock_user(p, arg2, 0);
8530 set_ptr = &set;
8531 } else {
8532 how = 0;
8533 set_ptr = NULL;
8535 ret = do_sigprocmask(how, set_ptr, &oldset);
8536 if (!is_error(ret) && arg3) {
8537 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8538 goto efault;
8539 host_to_target_old_sigset(p, &oldset);
8540 unlock_user(p, arg3, sizeof(target_sigset_t));
8542 #endif
8544 break;
8545 #endif
8546 case TARGET_NR_rt_sigprocmask:
8548 int how = arg1;
8549 sigset_t set, oldset, *set_ptr;
8551 if (arg4 != sizeof(target_sigset_t)) {
8552 ret = -TARGET_EINVAL;
8553 break;
8556 if (arg2) {
8557 switch(how) {
8558 case TARGET_SIG_BLOCK:
8559 how = SIG_BLOCK;
8560 break;
8561 case TARGET_SIG_UNBLOCK:
8562 how = SIG_UNBLOCK;
8563 break;
8564 case TARGET_SIG_SETMASK:
8565 how = SIG_SETMASK;
8566 break;
8567 default:
8568 ret = -TARGET_EINVAL;
8569 goto fail;
8571 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
8572 goto efault;
8573 target_to_host_sigset(&set, p);
8574 unlock_user(p, arg2, 0);
8575 set_ptr = &set;
8576 } else {
8577 how = 0;
8578 set_ptr = NULL;
8580 ret = do_sigprocmask(how, set_ptr, &oldset);
8581 if (!is_error(ret) && arg3) {
8582 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
8583 goto efault;
8584 host_to_target_sigset(p, &oldset);
8585 unlock_user(p, arg3, sizeof(target_sigset_t));
8588 break;
8589 #ifdef TARGET_NR_sigpending
8590 case TARGET_NR_sigpending:
8592 sigset_t set;
8593 ret = get_errno(sigpending(&set));
8594 if (!is_error(ret)) {
8595 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8596 goto efault;
8597 host_to_target_old_sigset(p, &set);
8598 unlock_user(p, arg1, sizeof(target_sigset_t));
8601 break;
8602 #endif
8603 case TARGET_NR_rt_sigpending:
8605 sigset_t set;
8607 /* Yes, this check is >, not != like most. We follow the kernel's
8608 * logic and it does it like this because it implements
8609 * NR_sigpending through the same code path, and in that case
8610 * the old_sigset_t is smaller in size.
8612 if (arg2 > sizeof(target_sigset_t)) {
8613 ret = -TARGET_EINVAL;
8614 break;
8617 ret = get_errno(sigpending(&set));
8618 if (!is_error(ret)) {
8619 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
8620 goto efault;
8621 host_to_target_sigset(p, &set);
8622 unlock_user(p, arg1, sizeof(target_sigset_t));
8625 break;
8626 #ifdef TARGET_NR_sigsuspend
8627 case TARGET_NR_sigsuspend:
8629 TaskState *ts = cpu->opaque;
8630 #if defined(TARGET_ALPHA)
8631 abi_ulong mask = arg1;
8632 target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
8633 #else
8634 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8635 goto efault;
8636 target_to_host_old_sigset(&ts->sigsuspend_mask, p);
8637 unlock_user(p, arg1, 0);
8638 #endif
8639 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8640 SIGSET_T_SIZE));
8641 if (ret != -TARGET_ERESTARTSYS) {
8642 ts->in_sigsuspend = 1;
8645 break;
8646 #endif
8647 case TARGET_NR_rt_sigsuspend:
8649 TaskState *ts = cpu->opaque;
8651 if (arg2 != sizeof(target_sigset_t)) {
8652 ret = -TARGET_EINVAL;
8653 break;
8655 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8656 goto efault;
8657 target_to_host_sigset(&ts->sigsuspend_mask, p);
8658 unlock_user(p, arg1, 0);
8659 ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
8660 SIGSET_T_SIZE));
8661 if (ret != -TARGET_ERESTARTSYS) {
8662 ts->in_sigsuspend = 1;
8665 break;
8666 case TARGET_NR_rt_sigtimedwait:
8668 sigset_t set;
8669 struct timespec uts, *puts;
8670 siginfo_t uinfo;
8672 if (arg4 != sizeof(target_sigset_t)) {
8673 ret = -TARGET_EINVAL;
8674 break;
8677 if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
8678 goto efault;
8679 target_to_host_sigset(&set, p);
8680 unlock_user(p, arg1, 0);
8681 if (arg3) {
8682 puts = &uts;
8683 target_to_host_timespec(puts, arg3);
8684 } else {
8685 puts = NULL;
8687 ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
8688 SIGSET_T_SIZE));
8689 if (!is_error(ret)) {
8690 if (arg2) {
8691 p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
8693 if (!p) {
8694 goto efault;
8696 host_to_target_siginfo(p, &uinfo);
8697 unlock_user(p, arg2, sizeof(target_siginfo_t));
8699 ret = host_to_target_signal(ret);
8702 break;
8703 case TARGET_NR_rt_sigqueueinfo:
8705 siginfo_t uinfo;
8707 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
8708 if (!p) {
8709 goto efault;
8711 target_to_host_siginfo(&uinfo, p);
8712 unlock_user(p, arg1, 0);
8713 ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
8715 break;
8716 #ifdef TARGET_NR_sigreturn
8717 case TARGET_NR_sigreturn:
8718 if (block_signals()) {
8719 ret = -TARGET_ERESTARTSYS;
8720 } else {
8721 ret = do_sigreturn(cpu_env);
8723 break;
8724 #endif
8725 case TARGET_NR_rt_sigreturn:
8726 if (block_signals()) {
8727 ret = -TARGET_ERESTARTSYS;
8728 } else {
8729 ret = do_rt_sigreturn(cpu_env);
8731 break;
8732 case TARGET_NR_sethostname:
8733 if (!(p = lock_user_string(arg1)))
8734 goto efault;
8735 ret = get_errno(sethostname(p, arg2));
8736 unlock_user(p, arg1, 0);
8737 break;
8738 case TARGET_NR_setrlimit:
8740 int resource = target_to_host_resource(arg1);
8741 struct target_rlimit *target_rlim;
8742 struct rlimit rlim;
8743 if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
8744 goto efault;
8745 rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
8746 rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
8747 unlock_user_struct(target_rlim, arg2, 0);
8748 ret = get_errno(setrlimit(resource, &rlim));
8750 break;
8751 case TARGET_NR_getrlimit:
8753 int resource = target_to_host_resource(arg1);
8754 struct target_rlimit *target_rlim;
8755 struct rlimit rlim;
8757 ret = get_errno(getrlimit(resource, &rlim));
8758 if (!is_error(ret)) {
8759 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
8760 goto efault;
8761 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
8762 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
8763 unlock_user_struct(target_rlim, arg2, 1);
8766 break;
8767 case TARGET_NR_getrusage:
8769 struct rusage rusage;
8770 ret = get_errno(getrusage(arg1, &rusage));
8771 if (!is_error(ret)) {
8772 ret = host_to_target_rusage(arg2, &rusage);
8775 break;
8776 case TARGET_NR_gettimeofday:
8778 struct timeval tv;
8779 ret = get_errno(gettimeofday(&tv, NULL));
8780 if (!is_error(ret)) {
8781 if (copy_to_user_timeval(arg1, &tv))
8782 goto efault;
8785 break;
8786 case TARGET_NR_settimeofday:
8788 struct timeval tv, *ptv = NULL;
8789 struct timezone tz, *ptz = NULL;
8791 if (arg1) {
8792 if (copy_from_user_timeval(&tv, arg1)) {
8793 goto efault;
8795 ptv = &tv;
8798 if (arg2) {
8799 if (copy_from_user_timezone(&tz, arg2)) {
8800 goto efault;
8802 ptz = &tz;
8805 ret = get_errno(settimeofday(ptv, ptz));
8807 break;
8808 #if defined(TARGET_NR_select)
8809 case TARGET_NR_select:
8810 #if defined(TARGET_WANT_NI_OLD_SELECT)
8811 /* some architectures used to have old_select here
8812 * but now ENOSYS it.
8814 ret = -TARGET_ENOSYS;
8815 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
8816 ret = do_old_select(arg1);
8817 #else
8818 ret = do_select(arg1, arg2, arg3, arg4, arg5);
8819 #endif
8820 break;
8821 #endif
8822 #ifdef TARGET_NR_pselect6
8823 case TARGET_NR_pselect6:
8825 abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
8826 fd_set rfds, wfds, efds;
8827 fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
8828 struct timespec ts, *ts_ptr;
8831 * The 6th arg is actually two args smashed together,
8832 * so we cannot use the C library.
8834 sigset_t set;
8835 struct {
8836 sigset_t *set;
8837 size_t size;
8838 } sig, *sig_ptr;
8840 abi_ulong arg_sigset, arg_sigsize, *arg7;
8841 target_sigset_t *target_sigset;
8843 n = arg1;
8844 rfd_addr = arg2;
8845 wfd_addr = arg3;
8846 efd_addr = arg4;
8847 ts_addr = arg5;
8849 ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
8850 if (ret) {
8851 goto fail;
8853 ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
8854 if (ret) {
8855 goto fail;
8857 ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
8858 if (ret) {
8859 goto fail;
8863 * This takes a timespec, and not a timeval, so we cannot
8864 * use the do_select() helper ...
8866 if (ts_addr) {
8867 if (target_to_host_timespec(&ts, ts_addr)) {
8868 goto efault;
8870 ts_ptr = &ts;
8871 } else {
8872 ts_ptr = NULL;
8875 /* Extract the two packed args for the sigset */
8876 if (arg6) {
8877 sig_ptr = &sig;
8878 sig.size = SIGSET_T_SIZE;
8880 arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
8881 if (!arg7) {
8882 goto efault;
8884 arg_sigset = tswapal(arg7[0]);
8885 arg_sigsize = tswapal(arg7[1]);
8886 unlock_user(arg7, arg6, 0);
8888 if (arg_sigset) {
8889 sig.set = &set;
8890 if (arg_sigsize != sizeof(*target_sigset)) {
8891 /* Like the kernel, we enforce correct size sigsets */
8892 ret = -TARGET_EINVAL;
8893 goto fail;
8895 target_sigset = lock_user(VERIFY_READ, arg_sigset,
8896 sizeof(*target_sigset), 1);
8897 if (!target_sigset) {
8898 goto efault;
8900 target_to_host_sigset(&set, target_sigset);
8901 unlock_user(target_sigset, arg_sigset, 0);
8902 } else {
8903 sig.set = NULL;
8905 } else {
8906 sig_ptr = NULL;
8909 ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
8910 ts_ptr, sig_ptr));
8912 if (!is_error(ret)) {
8913 if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
8914 goto efault;
8915 if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
8916 goto efault;
8917 if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
8918 goto efault;
8920 if (ts_addr && host_to_target_timespec(ts_addr, &ts))
8921 goto efault;
8924 break;
8925 #endif
8926 #ifdef TARGET_NR_symlink
8927 case TARGET_NR_symlink:
8929 void *p2;
8930 p = lock_user_string(arg1);
8931 p2 = lock_user_string(arg2);
8932 if (!p || !p2)
8933 ret = -TARGET_EFAULT;
8934 else
8935 ret = get_errno(symlink(p, p2));
8936 unlock_user(p2, arg2, 0);
8937 unlock_user(p, arg1, 0);
8939 break;
8940 #endif
8941 #if defined(TARGET_NR_symlinkat)
8942 case TARGET_NR_symlinkat:
8944 void *p2;
8945 p = lock_user_string(arg1);
8946 p2 = lock_user_string(arg3);
8947 if (!p || !p2)
8948 ret = -TARGET_EFAULT;
8949 else
8950 ret = get_errno(symlinkat(p, arg2, p2));
8951 unlock_user(p2, arg3, 0);
8952 unlock_user(p, arg1, 0);
8954 break;
8955 #endif
8956 #ifdef TARGET_NR_oldlstat
8957 case TARGET_NR_oldlstat:
8958 goto unimplemented;
8959 #endif
8960 #ifdef TARGET_NR_readlink
8961 case TARGET_NR_readlink:
8963 void *p2;
8964 p = lock_user_string(arg1);
8965 p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
8966 if (!p || !p2) {
8967 ret = -TARGET_EFAULT;
8968 } else if (!arg3) {
8969 /* Short circuit this for the magic exe check. */
8970 ret = -TARGET_EINVAL;
8971 } else if (is_proc_myself((const char *)p, "exe")) {
8972 char real[PATH_MAX], *temp;
8973 temp = realpath(exec_path, real);
8974 /* Return value is # of bytes that we wrote to the buffer. */
8975 if (temp == NULL) {
8976 ret = get_errno(-1);
8977 } else {
8978 /* Don't worry about sign mismatch as earlier mapping
8979 * logic would have thrown a bad address error. */
8980 ret = MIN(strlen(real), arg3);
8981 /* We cannot NUL terminate the string. */
8982 memcpy(p2, real, ret);
8984 } else {
8985 ret = get_errno(readlink(path(p), p2, arg3));
8987 unlock_user(p2, arg2, ret);
8988 unlock_user(p, arg1, 0);
8990 break;
8991 #endif
8992 #if defined(TARGET_NR_readlinkat)
8993 case TARGET_NR_readlinkat:
8995 void *p2;
8996 p = lock_user_string(arg2);
8997 p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
8998 if (!p || !p2) {
8999 ret = -TARGET_EFAULT;
9000 } else if (is_proc_myself((const char *)p, "exe")) {
9001 char real[PATH_MAX], *temp;
9002 temp = realpath(exec_path, real);
9003 ret = temp == NULL ? get_errno(-1) : strlen(real) ;
9004 snprintf((char *)p2, arg4, "%s", real);
9005 } else {
9006 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9008 unlock_user(p2, arg3, ret);
9009 unlock_user(p, arg2, 0);
9011 break;
9012 #endif
9013 #ifdef TARGET_NR_uselib
9014 case TARGET_NR_uselib:
9015 goto unimplemented;
9016 #endif
9017 #ifdef TARGET_NR_swapon
9018 case TARGET_NR_swapon:
9019 if (!(p = lock_user_string(arg1)))
9020 goto efault;
9021 ret = get_errno(swapon(p, arg2));
9022 unlock_user(p, arg1, 0);
9023 break;
9024 #endif
9025 case TARGET_NR_reboot:
9026 if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9027 /* arg4 must be ignored in all other cases */
9028 p = lock_user_string(arg4);
9029 if (!p) {
9030 goto efault;
9032 ret = get_errno(reboot(arg1, arg2, arg3, p));
9033 unlock_user(p, arg4, 0);
9034 } else {
9035 ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9037 break;
9038 #ifdef TARGET_NR_readdir
9039 case TARGET_NR_readdir:
9040 goto unimplemented;
9041 #endif
9042 #ifdef TARGET_NR_mmap
9043 case TARGET_NR_mmap:
9044 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9045 (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9046 defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9047 || defined(TARGET_S390X)
9049 abi_ulong *v;
9050 abi_ulong v1, v2, v3, v4, v5, v6;
9051 if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9052 goto efault;
9053 v1 = tswapal(v[0]);
9054 v2 = tswapal(v[1]);
9055 v3 = tswapal(v[2]);
9056 v4 = tswapal(v[3]);
9057 v5 = tswapal(v[4]);
9058 v6 = tswapal(v[5]);
9059 unlock_user(v, arg1, 0);
9060 ret = get_errno(target_mmap(v1, v2, v3,
9061 target_to_host_bitmask(v4, mmap_flags_tbl),
9062 v5, v6));
9064 #else
9065 ret = get_errno(target_mmap(arg1, arg2, arg3,
9066 target_to_host_bitmask(arg4, mmap_flags_tbl),
9067 arg5,
9068 arg6));
9069 #endif
9070 break;
9071 #endif
9072 #ifdef TARGET_NR_mmap2
9073 case TARGET_NR_mmap2:
9074 #ifndef MMAP_SHIFT
9075 #define MMAP_SHIFT 12
9076 #endif
9077 ret = get_errno(target_mmap(arg1, arg2, arg3,
9078 target_to_host_bitmask(arg4, mmap_flags_tbl),
9079 arg5,
9080 arg6 << MMAP_SHIFT));
9081 break;
9082 #endif
9083 case TARGET_NR_munmap:
9084 ret = get_errno(target_munmap(arg1, arg2));
9085 break;
9086 case TARGET_NR_mprotect:
9088 TaskState *ts = cpu->opaque;
9089 /* Special hack to detect libc making the stack executable. */
9090 if ((arg3 & PROT_GROWSDOWN)
9091 && arg1 >= ts->info->stack_limit
9092 && arg1 <= ts->info->start_stack) {
9093 arg3 &= ~PROT_GROWSDOWN;
9094 arg2 = arg2 + arg1 - ts->info->stack_limit;
9095 arg1 = ts->info->stack_limit;
9098 ret = get_errno(target_mprotect(arg1, arg2, arg3));
9099 break;
9100 #ifdef TARGET_NR_mremap
9101 case TARGET_NR_mremap:
9102 ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9103 break;
9104 #endif
9105 /* ??? msync/mlock/munlock are broken for softmmu. */
9106 #ifdef TARGET_NR_msync
9107 case TARGET_NR_msync:
9108 ret = get_errno(msync(g2h(arg1), arg2, arg3));
9109 break;
9110 #endif
9111 #ifdef TARGET_NR_mlock
9112 case TARGET_NR_mlock:
9113 ret = get_errno(mlock(g2h(arg1), arg2));
9114 break;
9115 #endif
9116 #ifdef TARGET_NR_munlock
9117 case TARGET_NR_munlock:
9118 ret = get_errno(munlock(g2h(arg1), arg2));
9119 break;
9120 #endif
9121 #ifdef TARGET_NR_mlockall
9122 case TARGET_NR_mlockall:
9123 ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9124 break;
9125 #endif
9126 #ifdef TARGET_NR_munlockall
9127 case TARGET_NR_munlockall:
9128 ret = get_errno(munlockall());
9129 break;
9130 #endif
9131 case TARGET_NR_truncate:
9132 if (!(p = lock_user_string(arg1)))
9133 goto efault;
9134 ret = get_errno(truncate(p, arg2));
9135 unlock_user(p, arg1, 0);
9136 break;
9137 case TARGET_NR_ftruncate:
9138 ret = get_errno(ftruncate(arg1, arg2));
9139 break;
9140 case TARGET_NR_fchmod:
9141 ret = get_errno(fchmod(arg1, arg2));
9142 break;
9143 #if defined(TARGET_NR_fchmodat)
9144 case TARGET_NR_fchmodat:
9145 if (!(p = lock_user_string(arg2)))
9146 goto efault;
9147 ret = get_errno(fchmodat(arg1, p, arg3, 0));
9148 unlock_user(p, arg2, 0);
9149 break;
9150 #endif
9151 case TARGET_NR_getpriority:
9152 /* Note that negative values are valid for getpriority, so we must
9153 differentiate based on errno settings. */
9154 errno = 0;
9155 ret = getpriority(arg1, arg2);
9156 if (ret == -1 && errno != 0) {
9157 ret = -host_to_target_errno(errno);
9158 break;
9160 #ifdef TARGET_ALPHA
9161 /* Return value is the unbiased priority. Signal no error. */
9162 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9163 #else
9164 /* Return value is a biased priority to avoid negative numbers. */
9165 ret = 20 - ret;
9166 #endif
9167 break;
9168 case TARGET_NR_setpriority:
9169 ret = get_errno(setpriority(arg1, arg2, arg3));
9170 break;
9171 #ifdef TARGET_NR_profil
9172 case TARGET_NR_profil:
9173 goto unimplemented;
9174 #endif
9175 case TARGET_NR_statfs:
9176 if (!(p = lock_user_string(arg1)))
9177 goto efault;
9178 ret = get_errno(statfs(path(p), &stfs));
9179 unlock_user(p, arg1, 0);
9180 convert_statfs:
9181 if (!is_error(ret)) {
9182 struct target_statfs *target_stfs;
9184 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9185 goto efault;
9186 __put_user(stfs.f_type, &target_stfs->f_type);
9187 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9188 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9189 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9190 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9191 __put_user(stfs.f_files, &target_stfs->f_files);
9192 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9193 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9194 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9195 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9196 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9197 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9198 unlock_user_struct(target_stfs, arg2, 1);
9200 break;
9201 case TARGET_NR_fstatfs:
9202 ret = get_errno(fstatfs(arg1, &stfs));
9203 goto convert_statfs;
9204 #ifdef TARGET_NR_statfs64
9205 case TARGET_NR_statfs64:
9206 if (!(p = lock_user_string(arg1)))
9207 goto efault;
9208 ret = get_errno(statfs(path(p), &stfs));
9209 unlock_user(p, arg1, 0);
9210 convert_statfs64:
9211 if (!is_error(ret)) {
9212 struct target_statfs64 *target_stfs;
9214 if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9215 goto efault;
9216 __put_user(stfs.f_type, &target_stfs->f_type);
9217 __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9218 __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9219 __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9220 __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9221 __put_user(stfs.f_files, &target_stfs->f_files);
9222 __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9223 __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9224 __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9225 __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9226 __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9227 memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9228 unlock_user_struct(target_stfs, arg3, 1);
9230 break;
9231 case TARGET_NR_fstatfs64:
9232 ret = get_errno(fstatfs(arg1, &stfs));
9233 goto convert_statfs64;
9234 #endif
9235 #ifdef TARGET_NR_ioperm
9236 case TARGET_NR_ioperm:
9237 goto unimplemented;
9238 #endif
9239 #ifdef TARGET_NR_socketcall
9240 case TARGET_NR_socketcall:
9241 ret = do_socketcall(arg1, arg2);
9242 break;
9243 #endif
9244 #ifdef TARGET_NR_accept
9245 case TARGET_NR_accept:
9246 ret = do_accept4(arg1, arg2, arg3, 0);
9247 break;
9248 #endif
9249 #ifdef TARGET_NR_accept4
9250 case TARGET_NR_accept4:
9251 ret = do_accept4(arg1, arg2, arg3, arg4);
9252 break;
9253 #endif
9254 #ifdef TARGET_NR_bind
9255 case TARGET_NR_bind:
9256 ret = do_bind(arg1, arg2, arg3);
9257 break;
9258 #endif
9259 #ifdef TARGET_NR_connect
9260 case TARGET_NR_connect:
9261 ret = do_connect(arg1, arg2, arg3);
9262 break;
9263 #endif
9264 #ifdef TARGET_NR_getpeername
9265 case TARGET_NR_getpeername:
9266 ret = do_getpeername(arg1, arg2, arg3);
9267 break;
9268 #endif
9269 #ifdef TARGET_NR_getsockname
9270 case TARGET_NR_getsockname:
9271 ret = do_getsockname(arg1, arg2, arg3);
9272 break;
9273 #endif
9274 #ifdef TARGET_NR_getsockopt
9275 case TARGET_NR_getsockopt:
9276 ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9277 break;
9278 #endif
9279 #ifdef TARGET_NR_listen
9280 case TARGET_NR_listen:
9281 ret = get_errno(listen(arg1, arg2));
9282 break;
9283 #endif
9284 #ifdef TARGET_NR_recv
9285 case TARGET_NR_recv:
9286 ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9287 break;
9288 #endif
9289 #ifdef TARGET_NR_recvfrom
9290 case TARGET_NR_recvfrom:
9291 ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9292 break;
9293 #endif
9294 #ifdef TARGET_NR_recvmsg
9295 case TARGET_NR_recvmsg:
9296 ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
9297 break;
9298 #endif
9299 #ifdef TARGET_NR_send
9300 case TARGET_NR_send:
9301 ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9302 break;
9303 #endif
9304 #ifdef TARGET_NR_sendmsg
9305 case TARGET_NR_sendmsg:
9306 ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
9307 break;
9308 #endif
9309 #ifdef TARGET_NR_sendmmsg
9310 case TARGET_NR_sendmmsg:
9311 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9312 break;
9313 case TARGET_NR_recvmmsg:
9314 ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9315 break;
9316 #endif
9317 #ifdef TARGET_NR_sendto
9318 case TARGET_NR_sendto:
9319 ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9320 break;
9321 #endif
9322 #ifdef TARGET_NR_shutdown
9323 case TARGET_NR_shutdown:
9324 ret = get_errno(shutdown(arg1, arg2));
9325 break;
9326 #endif
9327 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9328 case TARGET_NR_getrandom:
9329 p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9330 if (!p) {
9331 goto efault;
9333 ret = get_errno(getrandom(p, arg2, arg3));
9334 unlock_user(p, arg1, ret);
9335 break;
9336 #endif
9337 #ifdef TARGET_NR_socket
9338 case TARGET_NR_socket:
9339 ret = do_socket(arg1, arg2, arg3);
9340 fd_trans_unregister(ret);
9341 break;
9342 #endif
9343 #ifdef TARGET_NR_socketpair
9344 case TARGET_NR_socketpair:
9345 ret = do_socketpair(arg1, arg2, arg3, arg4);
9346 break;
9347 #endif
9348 #ifdef TARGET_NR_setsockopt
9349 case TARGET_NR_setsockopt:
9350 ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9351 break;
9352 #endif
9353 #if defined(TARGET_NR_syslog)
9354 case TARGET_NR_syslog:
9356 int len = arg2;
9358 switch (arg1) {
9359 case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
9360 case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
9361 case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
9362 case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
9363 case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
9364 case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9365 case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
9366 case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
9368 ret = get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9370 break;
9371 case TARGET_SYSLOG_ACTION_READ: /* Read from log */
9372 case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
9373 case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
9375 ret = -TARGET_EINVAL;
9376 if (len < 0) {
9377 goto fail;
9379 ret = 0;
9380 if (len == 0) {
9381 break;
9383 p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9384 if (!p) {
9385 ret = -TARGET_EFAULT;
9386 goto fail;
9388 ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9389 unlock_user(p, arg2, arg3);
9391 break;
9392 default:
9393 ret = -EINVAL;
9394 break;
9397 break;
9398 #endif
9399 case TARGET_NR_setitimer:
9401 struct itimerval value, ovalue, *pvalue;
9403 if (arg2) {
9404 pvalue = &value;
9405 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9406 || copy_from_user_timeval(&pvalue->it_value,
9407 arg2 + sizeof(struct target_timeval)))
9408 goto efault;
9409 } else {
9410 pvalue = NULL;
9412 ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9413 if (!is_error(ret) && arg3) {
9414 if (copy_to_user_timeval(arg3,
9415 &ovalue.it_interval)
9416 || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9417 &ovalue.it_value))
9418 goto efault;
9421 break;
9422 case TARGET_NR_getitimer:
9424 struct itimerval value;
9426 ret = get_errno(getitimer(arg1, &value));
9427 if (!is_error(ret) && arg2) {
9428 if (copy_to_user_timeval(arg2,
9429 &value.it_interval)
9430 || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9431 &value.it_value))
9432 goto efault;
9435 break;
9436 #ifdef TARGET_NR_stat
9437 case TARGET_NR_stat:
9438 if (!(p = lock_user_string(arg1)))
9439 goto efault;
9440 ret = get_errno(stat(path(p), &st));
9441 unlock_user(p, arg1, 0);
9442 goto do_stat;
9443 #endif
9444 #ifdef TARGET_NR_lstat
9445 case TARGET_NR_lstat:
9446 if (!(p = lock_user_string(arg1)))
9447 goto efault;
9448 ret = get_errno(lstat(path(p), &st));
9449 unlock_user(p, arg1, 0);
9450 goto do_stat;
9451 #endif
9452 case TARGET_NR_fstat:
9454 ret = get_errno(fstat(arg1, &st));
9455 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9456 do_stat:
9457 #endif
9458 if (!is_error(ret)) {
9459 struct target_stat *target_st;
9461 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9462 goto efault;
9463 memset(target_st, 0, sizeof(*target_st));
9464 __put_user(st.st_dev, &target_st->st_dev);
9465 __put_user(st.st_ino, &target_st->st_ino);
9466 __put_user(st.st_mode, &target_st->st_mode);
9467 __put_user(st.st_uid, &target_st->st_uid);
9468 __put_user(st.st_gid, &target_st->st_gid);
9469 __put_user(st.st_nlink, &target_st->st_nlink);
9470 __put_user(st.st_rdev, &target_st->st_rdev);
9471 __put_user(st.st_size, &target_st->st_size);
9472 __put_user(st.st_blksize, &target_st->st_blksize);
9473 __put_user(st.st_blocks, &target_st->st_blocks);
9474 __put_user(st.st_atime, &target_st->target_st_atime);
9475 __put_user(st.st_mtime, &target_st->target_st_mtime);
9476 __put_user(st.st_ctime, &target_st->target_st_ctime);
9477 unlock_user_struct(target_st, arg2, 1);
9480 break;
9481 #ifdef TARGET_NR_olduname
9482 case TARGET_NR_olduname:
9483 goto unimplemented;
9484 #endif
9485 #ifdef TARGET_NR_iopl
9486 case TARGET_NR_iopl:
9487 goto unimplemented;
9488 #endif
9489 case TARGET_NR_vhangup:
9490 ret = get_errno(vhangup());
9491 break;
9492 #ifdef TARGET_NR_idle
9493 case TARGET_NR_idle:
9494 goto unimplemented;
9495 #endif
9496 #ifdef TARGET_NR_syscall
9497 case TARGET_NR_syscall:
9498 ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9499 arg6, arg7, arg8, 0);
9500 break;
9501 #endif
9502 case TARGET_NR_wait4:
9504 int status;
9505 abi_long status_ptr = arg2;
9506 struct rusage rusage, *rusage_ptr;
9507 abi_ulong target_rusage = arg4;
9508 abi_long rusage_err;
9509 if (target_rusage)
9510 rusage_ptr = &rusage;
9511 else
9512 rusage_ptr = NULL;
9513 ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9514 if (!is_error(ret)) {
9515 if (status_ptr && ret) {
9516 status = host_to_target_waitstatus(status);
9517 if (put_user_s32(status, status_ptr))
9518 goto efault;
9520 if (target_rusage) {
9521 rusage_err = host_to_target_rusage(target_rusage, &rusage);
9522 if (rusage_err) {
9523 ret = rusage_err;
9528 break;
9529 #ifdef TARGET_NR_swapoff
9530 case TARGET_NR_swapoff:
9531 if (!(p = lock_user_string(arg1)))
9532 goto efault;
9533 ret = get_errno(swapoff(p));
9534 unlock_user(p, arg1, 0);
9535 break;
9536 #endif
9537 case TARGET_NR_sysinfo:
9539 struct target_sysinfo *target_value;
9540 struct sysinfo value;
9541 ret = get_errno(sysinfo(&value));
9542 if (!is_error(ret) && arg1)
9544 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
9545 goto efault;
9546 __put_user(value.uptime, &target_value->uptime);
9547 __put_user(value.loads[0], &target_value->loads[0]);
9548 __put_user(value.loads[1], &target_value->loads[1]);
9549 __put_user(value.loads[2], &target_value->loads[2]);
9550 __put_user(value.totalram, &target_value->totalram);
9551 __put_user(value.freeram, &target_value->freeram);
9552 __put_user(value.sharedram, &target_value->sharedram);
9553 __put_user(value.bufferram, &target_value->bufferram);
9554 __put_user(value.totalswap, &target_value->totalswap);
9555 __put_user(value.freeswap, &target_value->freeswap);
9556 __put_user(value.procs, &target_value->procs);
9557 __put_user(value.totalhigh, &target_value->totalhigh);
9558 __put_user(value.freehigh, &target_value->freehigh);
9559 __put_user(value.mem_unit, &target_value->mem_unit);
9560 unlock_user_struct(target_value, arg1, 1);
9563 break;
9564 #ifdef TARGET_NR_ipc
9565 case TARGET_NR_ipc:
9566 ret = do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
9567 break;
9568 #endif
9569 #ifdef TARGET_NR_semget
9570 case TARGET_NR_semget:
9571 ret = get_errno(semget(arg1, arg2, arg3));
9572 break;
9573 #endif
9574 #ifdef TARGET_NR_semop
9575 case TARGET_NR_semop:
9576 ret = do_semop(arg1, arg2, arg3);
9577 break;
9578 #endif
9579 #ifdef TARGET_NR_semctl
9580 case TARGET_NR_semctl:
9581 ret = do_semctl(arg1, arg2, arg3, arg4);
9582 break;
9583 #endif
9584 #ifdef TARGET_NR_msgctl
9585 case TARGET_NR_msgctl:
9586 ret = do_msgctl(arg1, arg2, arg3);
9587 break;
9588 #endif
9589 #ifdef TARGET_NR_msgget
9590 case TARGET_NR_msgget:
9591 ret = get_errno(msgget(arg1, arg2));
9592 break;
9593 #endif
9594 #ifdef TARGET_NR_msgrcv
9595 case TARGET_NR_msgrcv:
9596 ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
9597 break;
9598 #endif
9599 #ifdef TARGET_NR_msgsnd
9600 case TARGET_NR_msgsnd:
9601 ret = do_msgsnd(arg1, arg2, arg3, arg4);
9602 break;
9603 #endif
9604 #ifdef TARGET_NR_shmget
9605 case TARGET_NR_shmget:
9606 ret = get_errno(shmget(arg1, arg2, arg3));
9607 break;
9608 #endif
9609 #ifdef TARGET_NR_shmctl
9610 case TARGET_NR_shmctl:
9611 ret = do_shmctl(arg1, arg2, arg3);
9612 break;
9613 #endif
9614 #ifdef TARGET_NR_shmat
9615 case TARGET_NR_shmat:
9616 ret = do_shmat(cpu_env, arg1, arg2, arg3);
9617 break;
9618 #endif
9619 #ifdef TARGET_NR_shmdt
9620 case TARGET_NR_shmdt:
9621 ret = do_shmdt(arg1);
9622 break;
9623 #endif
9624 case TARGET_NR_fsync:
9625 ret = get_errno(fsync(arg1));
9626 break;
9627 case TARGET_NR_clone:
9628 /* Linux manages to have three different orderings for its
9629 * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
9630 * match the kernel's CONFIG_CLONE_* settings.
9631 * Microblaze is further special in that it uses a sixth
9632 * implicit argument to clone for the TLS pointer.
9634 #if defined(TARGET_MICROBLAZE)
9635 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
9636 #elif defined(TARGET_CLONE_BACKWARDS)
9637 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
9638 #elif defined(TARGET_CLONE_BACKWARDS2)
9639 ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
9640 #else
9641 ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
9642 #endif
9643 break;
9644 #ifdef __NR_exit_group
9645 /* new thread calls */
9646 case TARGET_NR_exit_group:
9647 #ifdef TARGET_GPROF
9648 _mcleanup();
9649 #endif
9650 gdb_exit(cpu_env, arg1);
9651 ret = get_errno(exit_group(arg1));
9652 break;
9653 #endif
9654 case TARGET_NR_setdomainname:
9655 if (!(p = lock_user_string(arg1)))
9656 goto efault;
9657 ret = get_errno(setdomainname(p, arg2));
9658 unlock_user(p, arg1, 0);
9659 break;
9660 case TARGET_NR_uname:
9661 /* no need to transcode because we use the linux syscall */
9663 struct new_utsname * buf;
9665 if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
9666 goto efault;
9667 ret = get_errno(sys_uname(buf));
9668 if (!is_error(ret)) {
9669 /* Overwrite the native machine name with whatever is being
9670 emulated. */
9671 strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
9672 /* Allow the user to override the reported release. */
9673 if (qemu_uname_release && *qemu_uname_release) {
9674 g_strlcpy(buf->release, qemu_uname_release,
9675 sizeof(buf->release));
9678 unlock_user_struct(buf, arg1, 1);
9680 break;
9681 #ifdef TARGET_I386
9682 case TARGET_NR_modify_ldt:
9683 ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
9684 break;
9685 #if !defined(TARGET_X86_64)
9686 case TARGET_NR_vm86old:
9687 goto unimplemented;
9688 case TARGET_NR_vm86:
9689 ret = do_vm86(cpu_env, arg1, arg2);
9690 break;
9691 #endif
9692 #endif
9693 case TARGET_NR_adjtimex:
9695 struct timex host_buf;
9697 if (target_to_host_timex(&host_buf, arg1) != 0) {
9698 goto efault;
9700 ret = get_errno(adjtimex(&host_buf));
9701 if (!is_error(ret)) {
9702 if (host_to_target_timex(arg1, &host_buf) != 0) {
9703 goto efault;
9707 break;
9708 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
9709 case TARGET_NR_clock_adjtime:
9711 struct timex htx, *phtx = &htx;
9713 if (target_to_host_timex(phtx, arg2) != 0) {
9714 goto efault;
9716 ret = get_errno(clock_adjtime(arg1, phtx));
9717 if (!is_error(ret) && phtx) {
9718 if (host_to_target_timex(arg2, phtx) != 0) {
9719 goto efault;
9723 break;
9724 #endif
9725 #ifdef TARGET_NR_create_module
9726 case TARGET_NR_create_module:
9727 #endif
9728 case TARGET_NR_init_module:
9729 case TARGET_NR_delete_module:
9730 #ifdef TARGET_NR_get_kernel_syms
9731 case TARGET_NR_get_kernel_syms:
9732 #endif
9733 goto unimplemented;
9734 case TARGET_NR_quotactl:
9735 goto unimplemented;
9736 case TARGET_NR_getpgid:
9737 ret = get_errno(getpgid(arg1));
9738 break;
9739 case TARGET_NR_fchdir:
9740 ret = get_errno(fchdir(arg1));
9741 break;
9742 #ifdef TARGET_NR_bdflush /* not on x86_64 */
9743 case TARGET_NR_bdflush:
9744 goto unimplemented;
9745 #endif
9746 #ifdef TARGET_NR_sysfs
9747 case TARGET_NR_sysfs:
9748 goto unimplemented;
9749 #endif
9750 case TARGET_NR_personality:
9751 ret = get_errno(personality(arg1));
9752 break;
9753 #ifdef TARGET_NR_afs_syscall
9754 case TARGET_NR_afs_syscall:
9755 goto unimplemented;
9756 #endif
9757 #ifdef TARGET_NR__llseek /* Not on alpha */
9758 case TARGET_NR__llseek:
9760 int64_t res;
9761 #if !defined(__NR_llseek)
9762 res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
9763 if (res == -1) {
9764 ret = get_errno(res);
9765 } else {
9766 ret = 0;
9768 #else
9769 ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
9770 #endif
9771 if ((ret == 0) && put_user_s64(res, arg4)) {
9772 goto efault;
9775 break;
9776 #endif
9777 #ifdef TARGET_NR_getdents
9778 case TARGET_NR_getdents:
9779 #ifdef __NR_getdents
9780 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
9782 struct target_dirent *target_dirp;
9783 struct linux_dirent *dirp;
9784 abi_long count = arg3;
9786 dirp = g_try_malloc(count);
9787 if (!dirp) {
9788 ret = -TARGET_ENOMEM;
9789 goto fail;
9792 ret = get_errno(sys_getdents(arg1, dirp, count));
9793 if (!is_error(ret)) {
9794 struct linux_dirent *de;
9795 struct target_dirent *tde;
9796 int len = ret;
9797 int reclen, treclen;
9798 int count1, tnamelen;
9800 count1 = 0;
9801 de = dirp;
9802 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9803 goto efault;
9804 tde = target_dirp;
9805 while (len > 0) {
9806 reclen = de->d_reclen;
9807 tnamelen = reclen - offsetof(struct linux_dirent, d_name);
9808 assert(tnamelen >= 0);
9809 treclen = tnamelen + offsetof(struct target_dirent, d_name);
9810 assert(count1 + treclen <= count);
9811 tde->d_reclen = tswap16(treclen);
9812 tde->d_ino = tswapal(de->d_ino);
9813 tde->d_off = tswapal(de->d_off);
9814 memcpy(tde->d_name, de->d_name, tnamelen);
9815 de = (struct linux_dirent *)((char *)de + reclen);
9816 len -= reclen;
9817 tde = (struct target_dirent *)((char *)tde + treclen);
9818 count1 += treclen;
9820 ret = count1;
9821 unlock_user(target_dirp, arg2, ret);
9823 g_free(dirp);
9825 #else
9827 struct linux_dirent *dirp;
9828 abi_long count = arg3;
9830 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9831 goto efault;
9832 ret = get_errno(sys_getdents(arg1, dirp, count));
9833 if (!is_error(ret)) {
9834 struct linux_dirent *de;
9835 int len = ret;
9836 int reclen;
9837 de = dirp;
9838 while (len > 0) {
9839 reclen = de->d_reclen;
9840 if (reclen > len)
9841 break;
9842 de->d_reclen = tswap16(reclen);
9843 tswapls(&de->d_ino);
9844 tswapls(&de->d_off);
9845 de = (struct linux_dirent *)((char *)de + reclen);
9846 len -= reclen;
9849 unlock_user(dirp, arg2, ret);
9851 #endif
9852 #else
9853 /* Implement getdents in terms of getdents64 */
9855 struct linux_dirent64 *dirp;
9856 abi_long count = arg3;
9858 dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
9859 if (!dirp) {
9860 goto efault;
9862 ret = get_errno(sys_getdents64(arg1, dirp, count));
9863 if (!is_error(ret)) {
9864 /* Convert the dirent64 structs to target dirent. We do this
9865 * in-place, since we can guarantee that a target_dirent is no
9866 * larger than a dirent64; however this means we have to be
9867 * careful to read everything before writing in the new format.
9869 struct linux_dirent64 *de;
9870 struct target_dirent *tde;
9871 int len = ret;
9872 int tlen = 0;
9874 de = dirp;
9875 tde = (struct target_dirent *)dirp;
9876 while (len > 0) {
9877 int namelen, treclen;
9878 int reclen = de->d_reclen;
9879 uint64_t ino = de->d_ino;
9880 int64_t off = de->d_off;
9881 uint8_t type = de->d_type;
9883 namelen = strlen(de->d_name);
9884 treclen = offsetof(struct target_dirent, d_name)
9885 + namelen + 2;
9886 treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
9888 memmove(tde->d_name, de->d_name, namelen + 1);
9889 tde->d_ino = tswapal(ino);
9890 tde->d_off = tswapal(off);
9891 tde->d_reclen = tswap16(treclen);
9892 /* The target_dirent type is in what was formerly a padding
9893 * byte at the end of the structure:
9895 *(((char *)tde) + treclen - 1) = type;
9897 de = (struct linux_dirent64 *)((char *)de + reclen);
9898 tde = (struct target_dirent *)((char *)tde + treclen);
9899 len -= reclen;
9900 tlen += treclen;
9902 ret = tlen;
9904 unlock_user(dirp, arg2, ret);
9906 #endif
9907 break;
9908 #endif /* TARGET_NR_getdents */
9909 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
9910 case TARGET_NR_getdents64:
9912 struct linux_dirent64 *dirp;
9913 abi_long count = arg3;
9914 if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
9915 goto efault;
9916 ret = get_errno(sys_getdents64(arg1, dirp, count));
9917 if (!is_error(ret)) {
9918 struct linux_dirent64 *de;
9919 int len = ret;
9920 int reclen;
9921 de = dirp;
9922 while (len > 0) {
9923 reclen = de->d_reclen;
9924 if (reclen > len)
9925 break;
9926 de->d_reclen = tswap16(reclen);
9927 tswap64s((uint64_t *)&de->d_ino);
9928 tswap64s((uint64_t *)&de->d_off);
9929 de = (struct linux_dirent64 *)((char *)de + reclen);
9930 len -= reclen;
9933 unlock_user(dirp, arg2, ret);
9935 break;
9936 #endif /* TARGET_NR_getdents64 */
9937 #if defined(TARGET_NR__newselect)
9938 case TARGET_NR__newselect:
9939 ret = do_select(arg1, arg2, arg3, arg4, arg5);
9940 break;
9941 #endif
9942 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
9943 # ifdef TARGET_NR_poll
9944 case TARGET_NR_poll:
9945 # endif
9946 # ifdef TARGET_NR_ppoll
9947 case TARGET_NR_ppoll:
9948 # endif
9950 struct target_pollfd *target_pfd;
9951 unsigned int nfds = arg2;
9952 struct pollfd *pfd;
9953 unsigned int i;
9955 pfd = NULL;
9956 target_pfd = NULL;
9957 if (nfds) {
9958 if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
9959 ret = -TARGET_EINVAL;
9960 break;
9963 target_pfd = lock_user(VERIFY_WRITE, arg1,
9964 sizeof(struct target_pollfd) * nfds, 1);
9965 if (!target_pfd) {
9966 goto efault;
9969 pfd = alloca(sizeof(struct pollfd) * nfds);
9970 for (i = 0; i < nfds; i++) {
9971 pfd[i].fd = tswap32(target_pfd[i].fd);
9972 pfd[i].events = tswap16(target_pfd[i].events);
9976 switch (num) {
9977 # ifdef TARGET_NR_ppoll
9978 case TARGET_NR_ppoll:
9980 struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
9981 target_sigset_t *target_set;
9982 sigset_t _set, *set = &_set;
9984 if (arg3) {
9985 if (target_to_host_timespec(timeout_ts, arg3)) {
9986 unlock_user(target_pfd, arg1, 0);
9987 goto efault;
9989 } else {
9990 timeout_ts = NULL;
9993 if (arg4) {
9994 if (arg5 != sizeof(target_sigset_t)) {
9995 unlock_user(target_pfd, arg1, 0);
9996 ret = -TARGET_EINVAL;
9997 break;
10000 target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
10001 if (!target_set) {
10002 unlock_user(target_pfd, arg1, 0);
10003 goto efault;
10005 target_to_host_sigset(set, target_set);
10006 } else {
10007 set = NULL;
10010 ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
10011 set, SIGSET_T_SIZE));
10013 if (!is_error(ret) && arg3) {
10014 host_to_target_timespec(arg3, timeout_ts);
10016 if (arg4) {
10017 unlock_user(target_set, arg4, 0);
10019 break;
10021 # endif
10022 # ifdef TARGET_NR_poll
10023 case TARGET_NR_poll:
10025 struct timespec ts, *pts;
10027 if (arg3 >= 0) {
10028 /* Convert ms to secs, ns */
10029 ts.tv_sec = arg3 / 1000;
10030 ts.tv_nsec = (arg3 % 1000) * 1000000LL;
10031 pts = &ts;
10032 } else {
10033 /* -ve poll() timeout means "infinite" */
10034 pts = NULL;
10036 ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
10037 break;
10039 # endif
10040 default:
10041 g_assert_not_reached();
10044 if (!is_error(ret)) {
10045 for(i = 0; i < nfds; i++) {
10046 target_pfd[i].revents = tswap16(pfd[i].revents);
10049 unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
10051 break;
10052 #endif
10053 case TARGET_NR_flock:
10054 /* NOTE: the flock constant seems to be the same for every
10055 Linux platform */
10056 ret = get_errno(safe_flock(arg1, arg2));
10057 break;
10058 case TARGET_NR_readv:
10060 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10061 if (vec != NULL) {
10062 ret = get_errno(safe_readv(arg1, vec, arg3));
10063 unlock_iovec(vec, arg2, arg3, 1);
10064 } else {
10065 ret = -host_to_target_errno(errno);
10068 break;
10069 case TARGET_NR_writev:
10071 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10072 if (vec != NULL) {
10073 ret = get_errno(safe_writev(arg1, vec, arg3));
10074 unlock_iovec(vec, arg2, arg3, 0);
10075 } else {
10076 ret = -host_to_target_errno(errno);
10079 break;
10080 #if defined(TARGET_NR_preadv)
10081 case TARGET_NR_preadv:
10083 struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10084 if (vec != NULL) {
10085 ret = get_errno(safe_preadv(arg1, vec, arg3, arg4, arg5));
10086 unlock_iovec(vec, arg2, arg3, 1);
10087 } else {
10088 ret = -host_to_target_errno(errno);
10091 break;
10092 #endif
10093 #if defined(TARGET_NR_pwritev)
10094 case TARGET_NR_pwritev:
10096 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10097 if (vec != NULL) {
10098 ret = get_errno(safe_pwritev(arg1, vec, arg3, arg4, arg5));
10099 unlock_iovec(vec, arg2, arg3, 0);
10100 } else {
10101 ret = -host_to_target_errno(errno);
10104 break;
10105 #endif
10106 case TARGET_NR_getsid:
10107 ret = get_errno(getsid(arg1));
10108 break;
10109 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10110 case TARGET_NR_fdatasync:
10111 ret = get_errno(fdatasync(arg1));
10112 break;
10113 #endif
10114 #ifdef TARGET_NR__sysctl
10115 case TARGET_NR__sysctl:
10116 /* We don't implement this, but ENOTDIR is always a safe
10117 return value. */
10118 ret = -TARGET_ENOTDIR;
10119 break;
10120 #endif
10121 case TARGET_NR_sched_getaffinity:
10123 unsigned int mask_size;
10124 unsigned long *mask;
10127 * sched_getaffinity needs multiples of ulong, so need to take
10128 * care of mismatches between target ulong and host ulong sizes.
10130 if (arg2 & (sizeof(abi_ulong) - 1)) {
10131 ret = -TARGET_EINVAL;
10132 break;
10134 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10136 mask = alloca(mask_size);
10137 ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10139 if (!is_error(ret)) {
10140 if (ret > arg2) {
10141 /* More data returned than the caller's buffer will fit.
10142 * This only happens if sizeof(abi_long) < sizeof(long)
10143 * and the caller passed us a buffer holding an odd number
10144 * of abi_longs. If the host kernel is actually using the
10145 * extra 4 bytes then fail EINVAL; otherwise we can just
10146 * ignore them and only copy the interesting part.
10148 int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10149 if (numcpus > arg2 * 8) {
10150 ret = -TARGET_EINVAL;
10151 break;
10153 ret = arg2;
10156 if (copy_to_user(arg3, mask, ret)) {
10157 goto efault;
10161 break;
10162 case TARGET_NR_sched_setaffinity:
10164 unsigned int mask_size;
10165 unsigned long *mask;
10168 * sched_setaffinity needs multiples of ulong, so need to take
10169 * care of mismatches between target ulong and host ulong sizes.
10171 if (arg2 & (sizeof(abi_ulong) - 1)) {
10172 ret = -TARGET_EINVAL;
10173 break;
10175 mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10177 mask = alloca(mask_size);
10178 if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
10179 goto efault;
10181 memcpy(mask, p, arg2);
10182 unlock_user_struct(p, arg2, 0);
10184 ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10186 break;
10187 case TARGET_NR_sched_setparam:
10189 struct sched_param *target_schp;
10190 struct sched_param schp;
10192 if (arg2 == 0) {
10193 return -TARGET_EINVAL;
10195 if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10196 goto efault;
10197 schp.sched_priority = tswap32(target_schp->sched_priority);
10198 unlock_user_struct(target_schp, arg2, 0);
10199 ret = get_errno(sched_setparam(arg1, &schp));
10201 break;
10202 case TARGET_NR_sched_getparam:
10204 struct sched_param *target_schp;
10205 struct sched_param schp;
10207 if (arg2 == 0) {
10208 return -TARGET_EINVAL;
10210 ret = get_errno(sched_getparam(arg1, &schp));
10211 if (!is_error(ret)) {
10212 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10213 goto efault;
10214 target_schp->sched_priority = tswap32(schp.sched_priority);
10215 unlock_user_struct(target_schp, arg2, 1);
10218 break;
10219 case TARGET_NR_sched_setscheduler:
10221 struct sched_param *target_schp;
10222 struct sched_param schp;
10223 if (arg3 == 0) {
10224 return -TARGET_EINVAL;
10226 if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10227 goto efault;
10228 schp.sched_priority = tswap32(target_schp->sched_priority);
10229 unlock_user_struct(target_schp, arg3, 0);
10230 ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
10232 break;
10233 case TARGET_NR_sched_getscheduler:
10234 ret = get_errno(sched_getscheduler(arg1));
10235 break;
10236 case TARGET_NR_sched_yield:
10237 ret = get_errno(sched_yield());
10238 break;
10239 case TARGET_NR_sched_get_priority_max:
10240 ret = get_errno(sched_get_priority_max(arg1));
10241 break;
10242 case TARGET_NR_sched_get_priority_min:
10243 ret = get_errno(sched_get_priority_min(arg1));
10244 break;
10245 case TARGET_NR_sched_rr_get_interval:
10247 struct timespec ts;
10248 ret = get_errno(sched_rr_get_interval(arg1, &ts));
10249 if (!is_error(ret)) {
10250 ret = host_to_target_timespec(arg2, &ts);
10253 break;
10254 case TARGET_NR_nanosleep:
10256 struct timespec req, rem;
10257 target_to_host_timespec(&req, arg1);
10258 ret = get_errno(safe_nanosleep(&req, &rem));
10259 if (is_error(ret) && arg2) {
10260 host_to_target_timespec(arg2, &rem);
10263 break;
10264 #ifdef TARGET_NR_query_module
10265 case TARGET_NR_query_module:
10266 goto unimplemented;
10267 #endif
10268 #ifdef TARGET_NR_nfsservctl
10269 case TARGET_NR_nfsservctl:
10270 goto unimplemented;
10271 #endif
10272 case TARGET_NR_prctl:
10273 switch (arg1) {
10274 case PR_GET_PDEATHSIG:
10276 int deathsig;
10277 ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10278 if (!is_error(ret) && arg2
10279 && put_user_ual(deathsig, arg2)) {
10280 goto efault;
10282 break;
10284 #ifdef PR_GET_NAME
10285 case PR_GET_NAME:
10287 void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10288 if (!name) {
10289 goto efault;
10291 ret = get_errno(prctl(arg1, (unsigned long)name,
10292 arg3, arg4, arg5));
10293 unlock_user(name, arg2, 16);
10294 break;
10296 case PR_SET_NAME:
10298 void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10299 if (!name) {
10300 goto efault;
10302 ret = get_errno(prctl(arg1, (unsigned long)name,
10303 arg3, arg4, arg5));
10304 unlock_user(name, arg2, 0);
10305 break;
10307 #endif
10308 default:
10309 /* Most prctl options have no pointer arguments */
10310 ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10311 break;
10313 break;
10314 #ifdef TARGET_NR_arch_prctl
10315 case TARGET_NR_arch_prctl:
10316 #if defined(TARGET_I386) && !defined(TARGET_ABI32)
10317 ret = do_arch_prctl(cpu_env, arg1, arg2);
10318 break;
10319 #else
10320 goto unimplemented;
10321 #endif
10322 #endif
10323 #ifdef TARGET_NR_pread64
10324 case TARGET_NR_pread64:
10325 if (regpairs_aligned(cpu_env)) {
10326 arg4 = arg5;
10327 arg5 = arg6;
10329 if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
10330 goto efault;
10331 ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10332 unlock_user(p, arg2, ret);
10333 break;
10334 case TARGET_NR_pwrite64:
10335 if (regpairs_aligned(cpu_env)) {
10336 arg4 = arg5;
10337 arg5 = arg6;
10339 if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
10340 goto efault;
10341 ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10342 unlock_user(p, arg2, 0);
10343 break;
10344 #endif
10345 case TARGET_NR_getcwd:
10346 if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10347 goto efault;
10348 ret = get_errno(sys_getcwd1(p, arg2));
10349 unlock_user(p, arg1, ret);
10350 break;
10351 case TARGET_NR_capget:
10352 case TARGET_NR_capset:
10354 struct target_user_cap_header *target_header;
10355 struct target_user_cap_data *target_data = NULL;
10356 struct __user_cap_header_struct header;
10357 struct __user_cap_data_struct data[2];
10358 struct __user_cap_data_struct *dataptr = NULL;
10359 int i, target_datalen;
10360 int data_items = 1;
10362 if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10363 goto efault;
10365 header.version = tswap32(target_header->version);
10366 header.pid = tswap32(target_header->pid);
10368 if (header.version != _LINUX_CAPABILITY_VERSION) {
10369 /* Version 2 and up takes pointer to two user_data structs */
10370 data_items = 2;
10373 target_datalen = sizeof(*target_data) * data_items;
10375 if (arg2) {
10376 if (num == TARGET_NR_capget) {
10377 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10378 } else {
10379 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10381 if (!target_data) {
10382 unlock_user_struct(target_header, arg1, 0);
10383 goto efault;
10386 if (num == TARGET_NR_capset) {
10387 for (i = 0; i < data_items; i++) {
10388 data[i].effective = tswap32(target_data[i].effective);
10389 data[i].permitted = tswap32(target_data[i].permitted);
10390 data[i].inheritable = tswap32(target_data[i].inheritable);
10394 dataptr = data;
10397 if (num == TARGET_NR_capget) {
10398 ret = get_errno(capget(&header, dataptr));
10399 } else {
10400 ret = get_errno(capset(&header, dataptr));
10403 /* The kernel always updates version for both capget and capset */
10404 target_header->version = tswap32(header.version);
10405 unlock_user_struct(target_header, arg1, 1);
10407 if (arg2) {
10408 if (num == TARGET_NR_capget) {
10409 for (i = 0; i < data_items; i++) {
10410 target_data[i].effective = tswap32(data[i].effective);
10411 target_data[i].permitted = tswap32(data[i].permitted);
10412 target_data[i].inheritable = tswap32(data[i].inheritable);
10414 unlock_user(target_data, arg2, target_datalen);
10415 } else {
10416 unlock_user(target_data, arg2, 0);
10419 break;
10421 case TARGET_NR_sigaltstack:
10422 ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
10423 break;
10425 #ifdef CONFIG_SENDFILE
10426 case TARGET_NR_sendfile:
10428 off_t *offp = NULL;
10429 off_t off;
10430 if (arg3) {
10431 ret = get_user_sal(off, arg3);
10432 if (is_error(ret)) {
10433 break;
10435 offp = &off;
10437 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10438 if (!is_error(ret) && arg3) {
10439 abi_long ret2 = put_user_sal(off, arg3);
10440 if (is_error(ret2)) {
10441 ret = ret2;
10444 break;
10446 #ifdef TARGET_NR_sendfile64
10447 case TARGET_NR_sendfile64:
10449 off_t *offp = NULL;
10450 off_t off;
10451 if (arg3) {
10452 ret = get_user_s64(off, arg3);
10453 if (is_error(ret)) {
10454 break;
10456 offp = &off;
10458 ret = get_errno(sendfile(arg1, arg2, offp, arg4));
10459 if (!is_error(ret) && arg3) {
10460 abi_long ret2 = put_user_s64(off, arg3);
10461 if (is_error(ret2)) {
10462 ret = ret2;
10465 break;
10467 #endif
10468 #else
10469 case TARGET_NR_sendfile:
10470 #ifdef TARGET_NR_sendfile64
10471 case TARGET_NR_sendfile64:
10472 #endif
10473 goto unimplemented;
10474 #endif
10476 #ifdef TARGET_NR_getpmsg
10477 case TARGET_NR_getpmsg:
10478 goto unimplemented;
10479 #endif
10480 #ifdef TARGET_NR_putpmsg
10481 case TARGET_NR_putpmsg:
10482 goto unimplemented;
10483 #endif
10484 #ifdef TARGET_NR_vfork
10485 case TARGET_NR_vfork:
10486 ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
10487 0, 0, 0, 0));
10488 break;
10489 #endif
10490 #ifdef TARGET_NR_ugetrlimit
10491 case TARGET_NR_ugetrlimit:
10493 struct rlimit rlim;
10494 int resource = target_to_host_resource(arg1);
10495 ret = get_errno(getrlimit(resource, &rlim));
10496 if (!is_error(ret)) {
10497 struct target_rlimit *target_rlim;
10498 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10499 goto efault;
10500 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10501 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10502 unlock_user_struct(target_rlim, arg2, 1);
10504 break;
10506 #endif
10507 #ifdef TARGET_NR_truncate64
10508 case TARGET_NR_truncate64:
10509 if (!(p = lock_user_string(arg1)))
10510 goto efault;
10511 ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
10512 unlock_user(p, arg1, 0);
10513 break;
10514 #endif
10515 #ifdef TARGET_NR_ftruncate64
10516 case TARGET_NR_ftruncate64:
10517 ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
10518 break;
10519 #endif
10520 #ifdef TARGET_NR_stat64
10521 case TARGET_NR_stat64:
10522 if (!(p = lock_user_string(arg1)))
10523 goto efault;
10524 ret = get_errno(stat(path(p), &st));
10525 unlock_user(p, arg1, 0);
10526 if (!is_error(ret))
10527 ret = host_to_target_stat64(cpu_env, arg2, &st);
10528 break;
10529 #endif
10530 #ifdef TARGET_NR_lstat64
10531 case TARGET_NR_lstat64:
10532 if (!(p = lock_user_string(arg1)))
10533 goto efault;
10534 ret = get_errno(lstat(path(p), &st));
10535 unlock_user(p, arg1, 0);
10536 if (!is_error(ret))
10537 ret = host_to_target_stat64(cpu_env, arg2, &st);
10538 break;
10539 #endif
10540 #ifdef TARGET_NR_fstat64
10541 case TARGET_NR_fstat64:
10542 ret = get_errno(fstat(arg1, &st));
10543 if (!is_error(ret))
10544 ret = host_to_target_stat64(cpu_env, arg2, &st);
10545 break;
10546 #endif
10547 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
10548 #ifdef TARGET_NR_fstatat64
10549 case TARGET_NR_fstatat64:
10550 #endif
10551 #ifdef TARGET_NR_newfstatat
10552 case TARGET_NR_newfstatat:
10553 #endif
10554 if (!(p = lock_user_string(arg2)))
10555 goto efault;
10556 ret = get_errno(fstatat(arg1, path(p), &st, arg4));
10557 if (!is_error(ret))
10558 ret = host_to_target_stat64(cpu_env, arg3, &st);
10559 break;
10560 #endif
10561 #ifdef TARGET_NR_lchown
10562 case TARGET_NR_lchown:
10563 if (!(p = lock_user_string(arg1)))
10564 goto efault;
10565 ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
10566 unlock_user(p, arg1, 0);
10567 break;
10568 #endif
10569 #ifdef TARGET_NR_getuid
10570 case TARGET_NR_getuid:
10571 ret = get_errno(high2lowuid(getuid()));
10572 break;
10573 #endif
10574 #ifdef TARGET_NR_getgid
10575 case TARGET_NR_getgid:
10576 ret = get_errno(high2lowgid(getgid()));
10577 break;
10578 #endif
10579 #ifdef TARGET_NR_geteuid
10580 case TARGET_NR_geteuid:
10581 ret = get_errno(high2lowuid(geteuid()));
10582 break;
10583 #endif
10584 #ifdef TARGET_NR_getegid
10585 case TARGET_NR_getegid:
10586 ret = get_errno(high2lowgid(getegid()));
10587 break;
10588 #endif
10589 case TARGET_NR_setreuid:
10590 ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
10591 break;
10592 case TARGET_NR_setregid:
10593 ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
10594 break;
10595 case TARGET_NR_getgroups:
10597 int gidsetsize = arg1;
10598 target_id *target_grouplist;
10599 gid_t *grouplist;
10600 int i;
10602 grouplist = alloca(gidsetsize * sizeof(gid_t));
10603 ret = get_errno(getgroups(gidsetsize, grouplist));
10604 if (gidsetsize == 0)
10605 break;
10606 if (!is_error(ret)) {
10607 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
10608 if (!target_grouplist)
10609 goto efault;
10610 for(i = 0;i < ret; i++)
10611 target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
10612 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
10615 break;
10616 case TARGET_NR_setgroups:
10618 int gidsetsize = arg1;
10619 target_id *target_grouplist;
10620 gid_t *grouplist = NULL;
10621 int i;
10622 if (gidsetsize) {
10623 grouplist = alloca(gidsetsize * sizeof(gid_t));
10624 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
10625 if (!target_grouplist) {
10626 ret = -TARGET_EFAULT;
10627 goto fail;
10629 for (i = 0; i < gidsetsize; i++) {
10630 grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
10632 unlock_user(target_grouplist, arg2, 0);
10634 ret = get_errno(setgroups(gidsetsize, grouplist));
10636 break;
10637 case TARGET_NR_fchown:
10638 ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
10639 break;
10640 #if defined(TARGET_NR_fchownat)
10641 case TARGET_NR_fchownat:
10642 if (!(p = lock_user_string(arg2)))
10643 goto efault;
10644 ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
10645 low2highgid(arg4), arg5));
10646 unlock_user(p, arg2, 0);
10647 break;
10648 #endif
10649 #ifdef TARGET_NR_setresuid
10650 case TARGET_NR_setresuid:
10651 ret = get_errno(sys_setresuid(low2highuid(arg1),
10652 low2highuid(arg2),
10653 low2highuid(arg3)));
10654 break;
10655 #endif
10656 #ifdef TARGET_NR_getresuid
10657 case TARGET_NR_getresuid:
10659 uid_t ruid, euid, suid;
10660 ret = get_errno(getresuid(&ruid, &euid, &suid));
10661 if (!is_error(ret)) {
10662 if (put_user_id(high2lowuid(ruid), arg1)
10663 || put_user_id(high2lowuid(euid), arg2)
10664 || put_user_id(high2lowuid(suid), arg3))
10665 goto efault;
10668 break;
10669 #endif
10670 #ifdef TARGET_NR_getresgid
10671 case TARGET_NR_setresgid:
10672 ret = get_errno(sys_setresgid(low2highgid(arg1),
10673 low2highgid(arg2),
10674 low2highgid(arg3)));
10675 break;
10676 #endif
10677 #ifdef TARGET_NR_getresgid
10678 case TARGET_NR_getresgid:
10680 gid_t rgid, egid, sgid;
10681 ret = get_errno(getresgid(&rgid, &egid, &sgid));
10682 if (!is_error(ret)) {
10683 if (put_user_id(high2lowgid(rgid), arg1)
10684 || put_user_id(high2lowgid(egid), arg2)
10685 || put_user_id(high2lowgid(sgid), arg3))
10686 goto efault;
10689 break;
10690 #endif
10691 #ifdef TARGET_NR_chown
10692 case TARGET_NR_chown:
10693 if (!(p = lock_user_string(arg1)))
10694 goto efault;
10695 ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
10696 unlock_user(p, arg1, 0);
10697 break;
10698 #endif
10699 case TARGET_NR_setuid:
10700 ret = get_errno(sys_setuid(low2highuid(arg1)));
10701 break;
10702 case TARGET_NR_setgid:
10703 ret = get_errno(sys_setgid(low2highgid(arg1)));
10704 break;
10705 case TARGET_NR_setfsuid:
10706 ret = get_errno(setfsuid(arg1));
10707 break;
10708 case TARGET_NR_setfsgid:
10709 ret = get_errno(setfsgid(arg1));
10710 break;
10712 #ifdef TARGET_NR_lchown32
10713 case TARGET_NR_lchown32:
10714 if (!(p = lock_user_string(arg1)))
10715 goto efault;
10716 ret = get_errno(lchown(p, arg2, arg3));
10717 unlock_user(p, arg1, 0);
10718 break;
10719 #endif
10720 #ifdef TARGET_NR_getuid32
10721 case TARGET_NR_getuid32:
10722 ret = get_errno(getuid());
10723 break;
10724 #endif
10726 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
10727 /* Alpha specific */
10728 case TARGET_NR_getxuid:
10730 uid_t euid;
10731 euid=geteuid();
10732 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
10734 ret = get_errno(getuid());
10735 break;
10736 #endif
10737 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
10738 /* Alpha specific */
10739 case TARGET_NR_getxgid:
10741 uid_t egid;
10742 egid=getegid();
10743 ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
10745 ret = get_errno(getgid());
10746 break;
10747 #endif
10748 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
10749 /* Alpha specific */
10750 case TARGET_NR_osf_getsysinfo:
10751 ret = -TARGET_EOPNOTSUPP;
10752 switch (arg1) {
10753 case TARGET_GSI_IEEE_FP_CONTROL:
10755 uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
10757 /* Copied from linux ieee_fpcr_to_swcr. */
10758 swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
10759 swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
10760 swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
10761 | SWCR_TRAP_ENABLE_DZE
10762 | SWCR_TRAP_ENABLE_OVF);
10763 swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
10764 | SWCR_TRAP_ENABLE_INE);
10765 swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
10766 swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
10768 if (put_user_u64 (swcr, arg2))
10769 goto efault;
10770 ret = 0;
10772 break;
10774 /* case GSI_IEEE_STATE_AT_SIGNAL:
10775 -- Not implemented in linux kernel.
10776 case GSI_UACPROC:
10777 -- Retrieves current unaligned access state; not much used.
10778 case GSI_PROC_TYPE:
10779 -- Retrieves implver information; surely not used.
10780 case GSI_GET_HWRPB:
10781 -- Grabs a copy of the HWRPB; surely not used.
10784 break;
10785 #endif
10786 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
10787 /* Alpha specific */
10788 case TARGET_NR_osf_setsysinfo:
10789 ret = -TARGET_EOPNOTSUPP;
10790 switch (arg1) {
10791 case TARGET_SSI_IEEE_FP_CONTROL:
10793 uint64_t swcr, fpcr, orig_fpcr;
10795 if (get_user_u64 (swcr, arg2)) {
10796 goto efault;
10798 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10799 fpcr = orig_fpcr & FPCR_DYN_MASK;
10801 /* Copied from linux ieee_swcr_to_fpcr. */
10802 fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
10803 fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
10804 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
10805 | SWCR_TRAP_ENABLE_DZE
10806 | SWCR_TRAP_ENABLE_OVF)) << 48;
10807 fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
10808 | SWCR_TRAP_ENABLE_INE)) << 57;
10809 fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
10810 fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
10812 cpu_alpha_store_fpcr(cpu_env, fpcr);
10813 ret = 0;
10815 break;
10817 case TARGET_SSI_IEEE_RAISE_EXCEPTION:
10819 uint64_t exc, fpcr, orig_fpcr;
10820 int si_code;
10822 if (get_user_u64(exc, arg2)) {
10823 goto efault;
10826 orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
10828 /* We only add to the exception status here. */
10829 fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
10831 cpu_alpha_store_fpcr(cpu_env, fpcr);
10832 ret = 0;
10834 /* Old exceptions are not signaled. */
10835 fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
10837 /* If any exceptions set by this call,
10838 and are unmasked, send a signal. */
10839 si_code = 0;
10840 if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
10841 si_code = TARGET_FPE_FLTRES;
10843 if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
10844 si_code = TARGET_FPE_FLTUND;
10846 if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
10847 si_code = TARGET_FPE_FLTOVF;
10849 if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
10850 si_code = TARGET_FPE_FLTDIV;
10852 if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
10853 si_code = TARGET_FPE_FLTINV;
10855 if (si_code != 0) {
10856 target_siginfo_t info;
10857 info.si_signo = SIGFPE;
10858 info.si_errno = 0;
10859 info.si_code = si_code;
10860 info._sifields._sigfault._addr
10861 = ((CPUArchState *)cpu_env)->pc;
10862 queue_signal((CPUArchState *)cpu_env, info.si_signo,
10863 QEMU_SI_FAULT, &info);
10866 break;
10868 /* case SSI_NVPAIRS:
10869 -- Used with SSIN_UACPROC to enable unaligned accesses.
10870 case SSI_IEEE_STATE_AT_SIGNAL:
10871 case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
10872 -- Not implemented in linux kernel
10875 break;
10876 #endif
10877 #ifdef TARGET_NR_osf_sigprocmask
10878 /* Alpha specific. */
10879 case TARGET_NR_osf_sigprocmask:
10881 abi_ulong mask;
10882 int how;
10883 sigset_t set, oldset;
10885 switch(arg1) {
10886 case TARGET_SIG_BLOCK:
10887 how = SIG_BLOCK;
10888 break;
10889 case TARGET_SIG_UNBLOCK:
10890 how = SIG_UNBLOCK;
10891 break;
10892 case TARGET_SIG_SETMASK:
10893 how = SIG_SETMASK;
10894 break;
10895 default:
10896 ret = -TARGET_EINVAL;
10897 goto fail;
10899 mask = arg2;
10900 target_to_host_old_sigset(&set, &mask);
10901 ret = do_sigprocmask(how, &set, &oldset);
10902 if (!ret) {
10903 host_to_target_old_sigset(&mask, &oldset);
10904 ret = mask;
10907 break;
10908 #endif
10910 #ifdef TARGET_NR_getgid32
10911 case TARGET_NR_getgid32:
10912 ret = get_errno(getgid());
10913 break;
10914 #endif
10915 #ifdef TARGET_NR_geteuid32
10916 case TARGET_NR_geteuid32:
10917 ret = get_errno(geteuid());
10918 break;
10919 #endif
10920 #ifdef TARGET_NR_getegid32
10921 case TARGET_NR_getegid32:
10922 ret = get_errno(getegid());
10923 break;
10924 #endif
10925 #ifdef TARGET_NR_setreuid32
10926 case TARGET_NR_setreuid32:
10927 ret = get_errno(setreuid(arg1, arg2));
10928 break;
10929 #endif
10930 #ifdef TARGET_NR_setregid32
10931 case TARGET_NR_setregid32:
10932 ret = get_errno(setregid(arg1, arg2));
10933 break;
10934 #endif
10935 #ifdef TARGET_NR_getgroups32
10936 case TARGET_NR_getgroups32:
10938 int gidsetsize = arg1;
10939 uint32_t *target_grouplist;
10940 gid_t *grouplist;
10941 int i;
10943 grouplist = alloca(gidsetsize * sizeof(gid_t));
10944 ret = get_errno(getgroups(gidsetsize, grouplist));
10945 if (gidsetsize == 0)
10946 break;
10947 if (!is_error(ret)) {
10948 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
10949 if (!target_grouplist) {
10950 ret = -TARGET_EFAULT;
10951 goto fail;
10953 for(i = 0;i < ret; i++)
10954 target_grouplist[i] = tswap32(grouplist[i]);
10955 unlock_user(target_grouplist, arg2, gidsetsize * 4);
10958 break;
10959 #endif
10960 #ifdef TARGET_NR_setgroups32
10961 case TARGET_NR_setgroups32:
10963 int gidsetsize = arg1;
10964 uint32_t *target_grouplist;
10965 gid_t *grouplist;
10966 int i;
10968 grouplist = alloca(gidsetsize * sizeof(gid_t));
10969 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
10970 if (!target_grouplist) {
10971 ret = -TARGET_EFAULT;
10972 goto fail;
10974 for(i = 0;i < gidsetsize; i++)
10975 grouplist[i] = tswap32(target_grouplist[i]);
10976 unlock_user(target_grouplist, arg2, 0);
10977 ret = get_errno(setgroups(gidsetsize, grouplist));
10979 break;
10980 #endif
10981 #ifdef TARGET_NR_fchown32
10982 case TARGET_NR_fchown32:
10983 ret = get_errno(fchown(arg1, arg2, arg3));
10984 break;
10985 #endif
10986 #ifdef TARGET_NR_setresuid32
10987 case TARGET_NR_setresuid32:
10988 ret = get_errno(sys_setresuid(arg1, arg2, arg3));
10989 break;
10990 #endif
10991 #ifdef TARGET_NR_getresuid32
10992 case TARGET_NR_getresuid32:
10994 uid_t ruid, euid, suid;
10995 ret = get_errno(getresuid(&ruid, &euid, &suid));
10996 if (!is_error(ret)) {
10997 if (put_user_u32(ruid, arg1)
10998 || put_user_u32(euid, arg2)
10999 || put_user_u32(suid, arg3))
11000 goto efault;
11003 break;
11004 #endif
11005 #ifdef TARGET_NR_setresgid32
11006 case TARGET_NR_setresgid32:
11007 ret = get_errno(sys_setresgid(arg1, arg2, arg3));
11008 break;
11009 #endif
11010 #ifdef TARGET_NR_getresgid32
11011 case TARGET_NR_getresgid32:
11013 gid_t rgid, egid, sgid;
11014 ret = get_errno(getresgid(&rgid, &egid, &sgid));
11015 if (!is_error(ret)) {
11016 if (put_user_u32(rgid, arg1)
11017 || put_user_u32(egid, arg2)
11018 || put_user_u32(sgid, arg3))
11019 goto efault;
11022 break;
11023 #endif
11024 #ifdef TARGET_NR_chown32
11025 case TARGET_NR_chown32:
11026 if (!(p = lock_user_string(arg1)))
11027 goto efault;
11028 ret = get_errno(chown(p, arg2, arg3));
11029 unlock_user(p, arg1, 0);
11030 break;
11031 #endif
11032 #ifdef TARGET_NR_setuid32
11033 case TARGET_NR_setuid32:
11034 ret = get_errno(sys_setuid(arg1));
11035 break;
11036 #endif
11037 #ifdef TARGET_NR_setgid32
11038 case TARGET_NR_setgid32:
11039 ret = get_errno(sys_setgid(arg1));
11040 break;
11041 #endif
11042 #ifdef TARGET_NR_setfsuid32
11043 case TARGET_NR_setfsuid32:
11044 ret = get_errno(setfsuid(arg1));
11045 break;
11046 #endif
11047 #ifdef TARGET_NR_setfsgid32
11048 case TARGET_NR_setfsgid32:
11049 ret = get_errno(setfsgid(arg1));
11050 break;
11051 #endif
11053 case TARGET_NR_pivot_root:
11054 goto unimplemented;
11055 #ifdef TARGET_NR_mincore
11056 case TARGET_NR_mincore:
11058 void *a;
11059 ret = -TARGET_EFAULT;
11060 if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
11061 goto efault;
11062 if (!(p = lock_user_string(arg3)))
11063 goto mincore_fail;
11064 ret = get_errno(mincore(a, arg2, p));
11065 unlock_user(p, arg3, ret);
11066 mincore_fail:
11067 unlock_user(a, arg1, 0);
11069 break;
11070 #endif
11071 #ifdef TARGET_NR_arm_fadvise64_64
11072 case TARGET_NR_arm_fadvise64_64:
11073 /* arm_fadvise64_64 looks like fadvise64_64 but
11074 * with different argument order: fd, advice, offset, len
11075 * rather than the usual fd, offset, len, advice.
11076 * Note that offset and len are both 64-bit so appear as
11077 * pairs of 32-bit registers.
11079 ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11080 target_offset64(arg5, arg6), arg2);
11081 ret = -host_to_target_errno(ret);
11082 break;
11083 #endif
11085 #if TARGET_ABI_BITS == 32
11087 #ifdef TARGET_NR_fadvise64_64
11088 case TARGET_NR_fadvise64_64:
11089 /* 6 args: fd, offset (high, low), len (high, low), advice */
11090 if (regpairs_aligned(cpu_env)) {
11091 /* offset is in (3,4), len in (5,6) and advice in 7 */
11092 arg2 = arg3;
11093 arg3 = arg4;
11094 arg4 = arg5;
11095 arg5 = arg6;
11096 arg6 = arg7;
11098 ret = -host_to_target_errno(posix_fadvise(arg1,
11099 target_offset64(arg2, arg3),
11100 target_offset64(arg4, arg5),
11101 arg6));
11102 break;
11103 #endif
11105 #ifdef TARGET_NR_fadvise64
11106 case TARGET_NR_fadvise64:
11107 /* 5 args: fd, offset (high, low), len, advice */
11108 if (regpairs_aligned(cpu_env)) {
11109 /* offset is in (3,4), len in 5 and advice in 6 */
11110 arg2 = arg3;
11111 arg3 = arg4;
11112 arg4 = arg5;
11113 arg5 = arg6;
11115 ret = -host_to_target_errno(posix_fadvise(arg1,
11116 target_offset64(arg2, arg3),
11117 arg4, arg5));
11118 break;
11119 #endif
11121 #else /* not a 32-bit ABI */
11122 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11123 #ifdef TARGET_NR_fadvise64_64
11124 case TARGET_NR_fadvise64_64:
11125 #endif
11126 #ifdef TARGET_NR_fadvise64
11127 case TARGET_NR_fadvise64:
11128 #endif
11129 #ifdef TARGET_S390X
11130 switch (arg4) {
11131 case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11132 case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11133 case 6: arg4 = POSIX_FADV_DONTNEED; break;
11134 case 7: arg4 = POSIX_FADV_NOREUSE; break;
11135 default: break;
11137 #endif
11138 ret = -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11139 break;
11140 #endif
11141 #endif /* end of 64-bit ABI fadvise handling */
11143 #ifdef TARGET_NR_madvise
11144 case TARGET_NR_madvise:
11145 /* A straight passthrough may not be safe because qemu sometimes
11146 turns private file-backed mappings into anonymous mappings.
11147 This will break MADV_DONTNEED.
11148 This is a hint, so ignoring and returning success is ok. */
11149 ret = get_errno(0);
11150 break;
11151 #endif
11152 #if TARGET_ABI_BITS == 32
11153 case TARGET_NR_fcntl64:
11155 int cmd;
11156 struct flock64 fl;
11157 from_flock64_fn *copyfrom = copy_from_user_flock64;
11158 to_flock64_fn *copyto = copy_to_user_flock64;
11160 #ifdef TARGET_ARM
11161 if (((CPUARMState *)cpu_env)->eabi) {
11162 copyfrom = copy_from_user_eabi_flock64;
11163 copyto = copy_to_user_eabi_flock64;
11165 #endif
11167 cmd = target_to_host_fcntl_cmd(arg2);
11168 if (cmd == -TARGET_EINVAL) {
11169 ret = cmd;
11170 break;
11173 switch(arg2) {
11174 case TARGET_F_GETLK64:
11175 ret = copyfrom(&fl, arg3);
11176 if (ret) {
11177 break;
11179 ret = get_errno(fcntl(arg1, cmd, &fl));
11180 if (ret == 0) {
11181 ret = copyto(arg3, &fl);
11183 break;
11185 case TARGET_F_SETLK64:
11186 case TARGET_F_SETLKW64:
11187 ret = copyfrom(&fl, arg3);
11188 if (ret) {
11189 break;
11191 ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11192 break;
11193 default:
11194 ret = do_fcntl(arg1, arg2, arg3);
11195 break;
11197 break;
11199 #endif
11200 #ifdef TARGET_NR_cacheflush
11201 case TARGET_NR_cacheflush:
11202 /* self-modifying code is handled automatically, so nothing needed */
11203 ret = 0;
11204 break;
11205 #endif
11206 #ifdef TARGET_NR_security
11207 case TARGET_NR_security:
11208 goto unimplemented;
11209 #endif
11210 #ifdef TARGET_NR_getpagesize
11211 case TARGET_NR_getpagesize:
11212 ret = TARGET_PAGE_SIZE;
11213 break;
11214 #endif
11215 case TARGET_NR_gettid:
11216 ret = get_errno(gettid());
11217 break;
11218 #ifdef TARGET_NR_readahead
11219 case TARGET_NR_readahead:
11220 #if TARGET_ABI_BITS == 32
11221 if (regpairs_aligned(cpu_env)) {
11222 arg2 = arg3;
11223 arg3 = arg4;
11224 arg4 = arg5;
11226 ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
11227 #else
11228 ret = get_errno(readahead(arg1, arg2, arg3));
11229 #endif
11230 break;
11231 #endif
11232 #ifdef CONFIG_ATTR
11233 #ifdef TARGET_NR_setxattr
11234 case TARGET_NR_listxattr:
11235 case TARGET_NR_llistxattr:
11237 void *p, *b = 0;
11238 if (arg2) {
11239 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11240 if (!b) {
11241 ret = -TARGET_EFAULT;
11242 break;
11245 p = lock_user_string(arg1);
11246 if (p) {
11247 if (num == TARGET_NR_listxattr) {
11248 ret = get_errno(listxattr(p, b, arg3));
11249 } else {
11250 ret = get_errno(llistxattr(p, b, arg3));
11252 } else {
11253 ret = -TARGET_EFAULT;
11255 unlock_user(p, arg1, 0);
11256 unlock_user(b, arg2, arg3);
11257 break;
11259 case TARGET_NR_flistxattr:
11261 void *b = 0;
11262 if (arg2) {
11263 b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11264 if (!b) {
11265 ret = -TARGET_EFAULT;
11266 break;
11269 ret = get_errno(flistxattr(arg1, b, arg3));
11270 unlock_user(b, arg2, arg3);
11271 break;
11273 case TARGET_NR_setxattr:
11274 case TARGET_NR_lsetxattr:
11276 void *p, *n, *v = 0;
11277 if (arg3) {
11278 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11279 if (!v) {
11280 ret = -TARGET_EFAULT;
11281 break;
11284 p = lock_user_string(arg1);
11285 n = lock_user_string(arg2);
11286 if (p && n) {
11287 if (num == TARGET_NR_setxattr) {
11288 ret = get_errno(setxattr(p, n, v, arg4, arg5));
11289 } else {
11290 ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11292 } else {
11293 ret = -TARGET_EFAULT;
11295 unlock_user(p, arg1, 0);
11296 unlock_user(n, arg2, 0);
11297 unlock_user(v, arg3, 0);
11299 break;
11300 case TARGET_NR_fsetxattr:
11302 void *n, *v = 0;
11303 if (arg3) {
11304 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11305 if (!v) {
11306 ret = -TARGET_EFAULT;
11307 break;
11310 n = lock_user_string(arg2);
11311 if (n) {
11312 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11313 } else {
11314 ret = -TARGET_EFAULT;
11316 unlock_user(n, arg2, 0);
11317 unlock_user(v, arg3, 0);
11319 break;
11320 case TARGET_NR_getxattr:
11321 case TARGET_NR_lgetxattr:
11323 void *p, *n, *v = 0;
11324 if (arg3) {
11325 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11326 if (!v) {
11327 ret = -TARGET_EFAULT;
11328 break;
11331 p = lock_user_string(arg1);
11332 n = lock_user_string(arg2);
11333 if (p && n) {
11334 if (num == TARGET_NR_getxattr) {
11335 ret = get_errno(getxattr(p, n, v, arg4));
11336 } else {
11337 ret = get_errno(lgetxattr(p, n, v, arg4));
11339 } else {
11340 ret = -TARGET_EFAULT;
11342 unlock_user(p, arg1, 0);
11343 unlock_user(n, arg2, 0);
11344 unlock_user(v, arg3, arg4);
11346 break;
11347 case TARGET_NR_fgetxattr:
11349 void *n, *v = 0;
11350 if (arg3) {
11351 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11352 if (!v) {
11353 ret = -TARGET_EFAULT;
11354 break;
11357 n = lock_user_string(arg2);
11358 if (n) {
11359 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11360 } else {
11361 ret = -TARGET_EFAULT;
11363 unlock_user(n, arg2, 0);
11364 unlock_user(v, arg3, arg4);
11366 break;
11367 case TARGET_NR_removexattr:
11368 case TARGET_NR_lremovexattr:
11370 void *p, *n;
11371 p = lock_user_string(arg1);
11372 n = lock_user_string(arg2);
11373 if (p && n) {
11374 if (num == TARGET_NR_removexattr) {
11375 ret = get_errno(removexattr(p, n));
11376 } else {
11377 ret = get_errno(lremovexattr(p, n));
11379 } else {
11380 ret = -TARGET_EFAULT;
11382 unlock_user(p, arg1, 0);
11383 unlock_user(n, arg2, 0);
11385 break;
11386 case TARGET_NR_fremovexattr:
11388 void *n;
11389 n = lock_user_string(arg2);
11390 if (n) {
11391 ret = get_errno(fremovexattr(arg1, n));
11392 } else {
11393 ret = -TARGET_EFAULT;
11395 unlock_user(n, arg2, 0);
11397 break;
11398 #endif
11399 #endif /* CONFIG_ATTR */
11400 #ifdef TARGET_NR_set_thread_area
11401 case TARGET_NR_set_thread_area:
11402 #if defined(TARGET_MIPS)
11403 ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
11404 ret = 0;
11405 break;
11406 #elif defined(TARGET_CRIS)
11407 if (arg1 & 0xff)
11408 ret = -TARGET_EINVAL;
11409 else {
11410 ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
11411 ret = 0;
11413 break;
11414 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
11415 ret = do_set_thread_area(cpu_env, arg1);
11416 break;
11417 #elif defined(TARGET_M68K)
11419 TaskState *ts = cpu->opaque;
11420 ts->tp_value = arg1;
11421 ret = 0;
11422 break;
11424 #else
11425 goto unimplemented_nowarn;
11426 #endif
11427 #endif
11428 #ifdef TARGET_NR_get_thread_area
11429 case TARGET_NR_get_thread_area:
11430 #if defined(TARGET_I386) && defined(TARGET_ABI32)
11431 ret = do_get_thread_area(cpu_env, arg1);
11432 break;
11433 #elif defined(TARGET_M68K)
11435 TaskState *ts = cpu->opaque;
11436 ret = ts->tp_value;
11437 break;
11439 #else
11440 goto unimplemented_nowarn;
11441 #endif
11442 #endif
11443 #ifdef TARGET_NR_getdomainname
11444 case TARGET_NR_getdomainname:
11445 goto unimplemented_nowarn;
11446 #endif
11448 #ifdef TARGET_NR_clock_gettime
11449 case TARGET_NR_clock_gettime:
11451 struct timespec ts;
11452 ret = get_errno(clock_gettime(arg1, &ts));
11453 if (!is_error(ret)) {
11454 host_to_target_timespec(arg2, &ts);
11456 break;
11458 #endif
11459 #ifdef TARGET_NR_clock_getres
11460 case TARGET_NR_clock_getres:
11462 struct timespec ts;
11463 ret = get_errno(clock_getres(arg1, &ts));
11464 if (!is_error(ret)) {
11465 host_to_target_timespec(arg2, &ts);
11467 break;
11469 #endif
11470 #ifdef TARGET_NR_clock_nanosleep
11471 case TARGET_NR_clock_nanosleep:
11473 struct timespec ts;
11474 target_to_host_timespec(&ts, arg3);
11475 ret = get_errno(safe_clock_nanosleep(arg1, arg2,
11476 &ts, arg4 ? &ts : NULL));
11477 if (arg4)
11478 host_to_target_timespec(arg4, &ts);
11480 #if defined(TARGET_PPC)
11481 /* clock_nanosleep is odd in that it returns positive errno values.
11482 * On PPC, CR0 bit 3 should be set in such a situation. */
11483 if (ret && ret != -TARGET_ERESTARTSYS) {
11484 ((CPUPPCState *)cpu_env)->crf[0] |= 1;
11486 #endif
11487 break;
11489 #endif
11491 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
11492 case TARGET_NR_set_tid_address:
11493 ret = get_errno(set_tid_address((int *)g2h(arg1)));
11494 break;
11495 #endif
11497 case TARGET_NR_tkill:
11498 ret = get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
11499 break;
11501 case TARGET_NR_tgkill:
11502 ret = get_errno(safe_tgkill((int)arg1, (int)arg2,
11503 target_to_host_signal(arg3)));
11504 break;
11506 #ifdef TARGET_NR_set_robust_list
11507 case TARGET_NR_set_robust_list:
11508 case TARGET_NR_get_robust_list:
11509 /* The ABI for supporting robust futexes has userspace pass
11510 * the kernel a pointer to a linked list which is updated by
11511 * userspace after the syscall; the list is walked by the kernel
11512 * when the thread exits. Since the linked list in QEMU guest
11513 * memory isn't a valid linked list for the host and we have
11514 * no way to reliably intercept the thread-death event, we can't
11515 * support these. Silently return ENOSYS so that guest userspace
11516 * falls back to a non-robust futex implementation (which should
11517 * be OK except in the corner case of the guest crashing while
11518 * holding a mutex that is shared with another process via
11519 * shared memory).
11521 goto unimplemented_nowarn;
11522 #endif
11524 #if defined(TARGET_NR_utimensat)
11525 case TARGET_NR_utimensat:
11527 struct timespec *tsp, ts[2];
11528 if (!arg3) {
11529 tsp = NULL;
11530 } else {
11531 target_to_host_timespec(ts, arg3);
11532 target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
11533 tsp = ts;
11535 if (!arg2)
11536 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
11537 else {
11538 if (!(p = lock_user_string(arg2))) {
11539 ret = -TARGET_EFAULT;
11540 goto fail;
11542 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
11543 unlock_user(p, arg2, 0);
11546 break;
11547 #endif
11548 case TARGET_NR_futex:
11549 ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
11550 break;
11551 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
11552 case TARGET_NR_inotify_init:
11553 ret = get_errno(sys_inotify_init());
11554 break;
11555 #endif
11556 #ifdef CONFIG_INOTIFY1
11557 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
11558 case TARGET_NR_inotify_init1:
11559 ret = get_errno(sys_inotify_init1(arg1));
11560 break;
11561 #endif
11562 #endif
11563 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
11564 case TARGET_NR_inotify_add_watch:
11565 p = lock_user_string(arg2);
11566 ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
11567 unlock_user(p, arg2, 0);
11568 break;
11569 #endif
11570 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
11571 case TARGET_NR_inotify_rm_watch:
11572 ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
11573 break;
11574 #endif
11576 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
11577 case TARGET_NR_mq_open:
11579 struct mq_attr posix_mq_attr;
11580 int host_flags;
11582 host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
11583 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
11584 goto efault;
11586 p = lock_user_string(arg1 - 1);
11587 if (!p) {
11588 goto efault;
11590 ret = get_errno(mq_open(p, host_flags, arg3, &posix_mq_attr));
11591 unlock_user (p, arg1, 0);
11593 break;
11595 case TARGET_NR_mq_unlink:
11596 p = lock_user_string(arg1 - 1);
11597 if (!p) {
11598 ret = -TARGET_EFAULT;
11599 break;
11601 ret = get_errno(mq_unlink(p));
11602 unlock_user (p, arg1, 0);
11603 break;
11605 case TARGET_NR_mq_timedsend:
11607 struct timespec ts;
11609 p = lock_user (VERIFY_READ, arg2, arg3, 1);
11610 if (arg5 != 0) {
11611 target_to_host_timespec(&ts, arg5);
11612 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
11613 host_to_target_timespec(arg5, &ts);
11614 } else {
11615 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
11617 unlock_user (p, arg2, arg3);
11619 break;
11621 case TARGET_NR_mq_timedreceive:
11623 struct timespec ts;
11624 unsigned int prio;
11626 p = lock_user (VERIFY_READ, arg2, arg3, 1);
11627 if (arg5 != 0) {
11628 target_to_host_timespec(&ts, arg5);
11629 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11630 &prio, &ts));
11631 host_to_target_timespec(arg5, &ts);
11632 } else {
11633 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
11634 &prio, NULL));
11636 unlock_user (p, arg2, arg3);
11637 if (arg4 != 0)
11638 put_user_u32(prio, arg4);
11640 break;
11642 /* Not implemented for now... */
11643 /* case TARGET_NR_mq_notify: */
11644 /* break; */
11646 case TARGET_NR_mq_getsetattr:
11648 struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
11649 ret = 0;
11650 if (arg3 != 0) {
11651 ret = mq_getattr(arg1, &posix_mq_attr_out);
11652 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
11654 if (arg2 != 0) {
11655 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
11656 ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
11660 break;
11661 #endif
11663 #ifdef CONFIG_SPLICE
11664 #ifdef TARGET_NR_tee
11665 case TARGET_NR_tee:
11667 ret = get_errno(tee(arg1,arg2,arg3,arg4));
11669 break;
11670 #endif
11671 #ifdef TARGET_NR_splice
11672 case TARGET_NR_splice:
11674 loff_t loff_in, loff_out;
11675 loff_t *ploff_in = NULL, *ploff_out = NULL;
11676 if (arg2) {
11677 if (get_user_u64(loff_in, arg2)) {
11678 goto efault;
11680 ploff_in = &loff_in;
11682 if (arg4) {
11683 if (get_user_u64(loff_out, arg4)) {
11684 goto efault;
11686 ploff_out = &loff_out;
11688 ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
11689 if (arg2) {
11690 if (put_user_u64(loff_in, arg2)) {
11691 goto efault;
11694 if (arg4) {
11695 if (put_user_u64(loff_out, arg4)) {
11696 goto efault;
11700 break;
11701 #endif
11702 #ifdef TARGET_NR_vmsplice
11703 case TARGET_NR_vmsplice:
11705 struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11706 if (vec != NULL) {
11707 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
11708 unlock_iovec(vec, arg2, arg3, 0);
11709 } else {
11710 ret = -host_to_target_errno(errno);
11713 break;
11714 #endif
11715 #endif /* CONFIG_SPLICE */
11716 #ifdef CONFIG_EVENTFD
11717 #if defined(TARGET_NR_eventfd)
11718 case TARGET_NR_eventfd:
11719 ret = get_errno(eventfd(arg1, 0));
11720 fd_trans_unregister(ret);
11721 break;
11722 #endif
11723 #if defined(TARGET_NR_eventfd2)
11724 case TARGET_NR_eventfd2:
11726 int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
11727 if (arg2 & TARGET_O_NONBLOCK) {
11728 host_flags |= O_NONBLOCK;
11730 if (arg2 & TARGET_O_CLOEXEC) {
11731 host_flags |= O_CLOEXEC;
11733 ret = get_errno(eventfd(arg1, host_flags));
11734 fd_trans_unregister(ret);
11735 break;
11737 #endif
11738 #endif /* CONFIG_EVENTFD */
11739 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
11740 case TARGET_NR_fallocate:
11741 #if TARGET_ABI_BITS == 32
11742 ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
11743 target_offset64(arg5, arg6)));
11744 #else
11745 ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
11746 #endif
11747 break;
11748 #endif
11749 #if defined(CONFIG_SYNC_FILE_RANGE)
11750 #if defined(TARGET_NR_sync_file_range)
11751 case TARGET_NR_sync_file_range:
11752 #if TARGET_ABI_BITS == 32
11753 #if defined(TARGET_MIPS)
11754 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11755 target_offset64(arg5, arg6), arg7));
11756 #else
11757 ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
11758 target_offset64(arg4, arg5), arg6));
11759 #endif /* !TARGET_MIPS */
11760 #else
11761 ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
11762 #endif
11763 break;
11764 #endif
11765 #if defined(TARGET_NR_sync_file_range2)
11766 case TARGET_NR_sync_file_range2:
11767 /* This is like sync_file_range but the arguments are reordered */
11768 #if TARGET_ABI_BITS == 32
11769 ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
11770 target_offset64(arg5, arg6), arg2));
11771 #else
11772 ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
11773 #endif
11774 break;
11775 #endif
11776 #endif
11777 #if defined(TARGET_NR_signalfd4)
11778 case TARGET_NR_signalfd4:
11779 ret = do_signalfd4(arg1, arg2, arg4);
11780 break;
11781 #endif
11782 #if defined(TARGET_NR_signalfd)
11783 case TARGET_NR_signalfd:
11784 ret = do_signalfd4(arg1, arg2, 0);
11785 break;
11786 #endif
11787 #if defined(CONFIG_EPOLL)
11788 #if defined(TARGET_NR_epoll_create)
11789 case TARGET_NR_epoll_create:
11790 ret = get_errno(epoll_create(arg1));
11791 break;
11792 #endif
11793 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
11794 case TARGET_NR_epoll_create1:
11795 ret = get_errno(epoll_create1(arg1));
11796 break;
11797 #endif
11798 #if defined(TARGET_NR_epoll_ctl)
11799 case TARGET_NR_epoll_ctl:
11801 struct epoll_event ep;
11802 struct epoll_event *epp = 0;
11803 if (arg4) {
11804 struct target_epoll_event *target_ep;
11805 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
11806 goto efault;
11808 ep.events = tswap32(target_ep->events);
11809 /* The epoll_data_t union is just opaque data to the kernel,
11810 * so we transfer all 64 bits across and need not worry what
11811 * actual data type it is.
11813 ep.data.u64 = tswap64(target_ep->data.u64);
11814 unlock_user_struct(target_ep, arg4, 0);
11815 epp = &ep;
11817 ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
11818 break;
11820 #endif
11822 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
11823 #if defined(TARGET_NR_epoll_wait)
11824 case TARGET_NR_epoll_wait:
11825 #endif
11826 #if defined(TARGET_NR_epoll_pwait)
11827 case TARGET_NR_epoll_pwait:
11828 #endif
11830 struct target_epoll_event *target_ep;
11831 struct epoll_event *ep;
11832 int epfd = arg1;
11833 int maxevents = arg3;
11834 int timeout = arg4;
11836 if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
11837 ret = -TARGET_EINVAL;
11838 break;
11841 target_ep = lock_user(VERIFY_WRITE, arg2,
11842 maxevents * sizeof(struct target_epoll_event), 1);
11843 if (!target_ep) {
11844 goto efault;
11847 ep = g_try_new(struct epoll_event, maxevents);
11848 if (!ep) {
11849 unlock_user(target_ep, arg2, 0);
11850 ret = -TARGET_ENOMEM;
11851 break;
11854 switch (num) {
11855 #if defined(TARGET_NR_epoll_pwait)
11856 case TARGET_NR_epoll_pwait:
11858 target_sigset_t *target_set;
11859 sigset_t _set, *set = &_set;
11861 if (arg5) {
11862 if (arg6 != sizeof(target_sigset_t)) {
11863 ret = -TARGET_EINVAL;
11864 break;
11867 target_set = lock_user(VERIFY_READ, arg5,
11868 sizeof(target_sigset_t), 1);
11869 if (!target_set) {
11870 ret = -TARGET_EFAULT;
11871 break;
11873 target_to_host_sigset(set, target_set);
11874 unlock_user(target_set, arg5, 0);
11875 } else {
11876 set = NULL;
11879 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11880 set, SIGSET_T_SIZE));
11881 break;
11883 #endif
11884 #if defined(TARGET_NR_epoll_wait)
11885 case TARGET_NR_epoll_wait:
11886 ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
11887 NULL, 0));
11888 break;
11889 #endif
11890 default:
11891 ret = -TARGET_ENOSYS;
11893 if (!is_error(ret)) {
11894 int i;
11895 for (i = 0; i < ret; i++) {
11896 target_ep[i].events = tswap32(ep[i].events);
11897 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
11899 unlock_user(target_ep, arg2,
11900 ret * sizeof(struct target_epoll_event));
11901 } else {
11902 unlock_user(target_ep, arg2, 0);
11904 g_free(ep);
11905 break;
11907 #endif
11908 #endif
11909 #ifdef TARGET_NR_prlimit64
11910 case TARGET_NR_prlimit64:
11912 /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
11913 struct target_rlimit64 *target_rnew, *target_rold;
11914 struct host_rlimit64 rnew, rold, *rnewp = 0;
11915 int resource = target_to_host_resource(arg2);
11916 if (arg3) {
11917 if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
11918 goto efault;
11920 rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
11921 rnew.rlim_max = tswap64(target_rnew->rlim_max);
11922 unlock_user_struct(target_rnew, arg3, 0);
11923 rnewp = &rnew;
11926 ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
11927 if (!is_error(ret) && arg4) {
11928 if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
11929 goto efault;
11931 target_rold->rlim_cur = tswap64(rold.rlim_cur);
11932 target_rold->rlim_max = tswap64(rold.rlim_max);
11933 unlock_user_struct(target_rold, arg4, 1);
11935 break;
11937 #endif
11938 #ifdef TARGET_NR_gethostname
11939 case TARGET_NR_gethostname:
11941 char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
11942 if (name) {
11943 ret = get_errno(gethostname(name, arg2));
11944 unlock_user(name, arg1, arg2);
11945 } else {
11946 ret = -TARGET_EFAULT;
11948 break;
11950 #endif
11951 #ifdef TARGET_NR_atomic_cmpxchg_32
11952 case TARGET_NR_atomic_cmpxchg_32:
11954 /* should use start_exclusive from main.c */
11955 abi_ulong mem_value;
11956 if (get_user_u32(mem_value, arg6)) {
11957 target_siginfo_t info;
11958 info.si_signo = SIGSEGV;
11959 info.si_errno = 0;
11960 info.si_code = TARGET_SEGV_MAPERR;
11961 info._sifields._sigfault._addr = arg6;
11962 queue_signal((CPUArchState *)cpu_env, info.si_signo,
11963 QEMU_SI_FAULT, &info);
11964 ret = 0xdeadbeef;
11967 if (mem_value == arg2)
11968 put_user_u32(arg1, arg6);
11969 ret = mem_value;
11970 break;
11972 #endif
11973 #ifdef TARGET_NR_atomic_barrier
11974 case TARGET_NR_atomic_barrier:
11976 /* Like the kernel implementation and the qemu arm barrier, no-op this? */
11977 ret = 0;
11978 break;
11980 #endif
11982 #ifdef TARGET_NR_timer_create
11983 case TARGET_NR_timer_create:
11985 /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
11987 struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
11989 int clkid = arg1;
11990 int timer_index = next_free_host_timer();
11992 if (timer_index < 0) {
11993 ret = -TARGET_EAGAIN;
11994 } else {
11995 timer_t *phtimer = g_posix_timers + timer_index;
11997 if (arg2) {
11998 phost_sevp = &host_sevp;
11999 ret = target_to_host_sigevent(phost_sevp, arg2);
12000 if (ret != 0) {
12001 break;
12005 ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12006 if (ret) {
12007 phtimer = NULL;
12008 } else {
12009 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12010 goto efault;
12014 break;
12016 #endif
12018 #ifdef TARGET_NR_timer_settime
12019 case TARGET_NR_timer_settime:
12021 /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12022 * struct itimerspec * old_value */
12023 target_timer_t timerid = get_timer_id(arg1);
12025 if (timerid < 0) {
12026 ret = timerid;
12027 } else if (arg3 == 0) {
12028 ret = -TARGET_EINVAL;
12029 } else {
12030 timer_t htimer = g_posix_timers[timerid];
12031 struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12033 target_to_host_itimerspec(&hspec_new, arg3);
12034 ret = get_errno(
12035 timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12036 host_to_target_itimerspec(arg2, &hspec_old);
12038 break;
12040 #endif
12042 #ifdef TARGET_NR_timer_gettime
12043 case TARGET_NR_timer_gettime:
12045 /* args: timer_t timerid, struct itimerspec *curr_value */
12046 target_timer_t timerid = get_timer_id(arg1);
12048 if (timerid < 0) {
12049 ret = timerid;
12050 } else if (!arg2) {
12051 ret = -TARGET_EFAULT;
12052 } else {
12053 timer_t htimer = g_posix_timers[timerid];
12054 struct itimerspec hspec;
12055 ret = get_errno(timer_gettime(htimer, &hspec));
12057 if (host_to_target_itimerspec(arg2, &hspec)) {
12058 ret = -TARGET_EFAULT;
12061 break;
12063 #endif
12065 #ifdef TARGET_NR_timer_getoverrun
12066 case TARGET_NR_timer_getoverrun:
12068 /* args: timer_t timerid */
12069 target_timer_t timerid = get_timer_id(arg1);
12071 if (timerid < 0) {
12072 ret = timerid;
12073 } else {
12074 timer_t htimer = g_posix_timers[timerid];
12075 ret = get_errno(timer_getoverrun(htimer));
12077 fd_trans_unregister(ret);
12078 break;
12080 #endif
12082 #ifdef TARGET_NR_timer_delete
12083 case TARGET_NR_timer_delete:
12085 /* args: timer_t timerid */
12086 target_timer_t timerid = get_timer_id(arg1);
12088 if (timerid < 0) {
12089 ret = timerid;
12090 } else {
12091 timer_t htimer = g_posix_timers[timerid];
12092 ret = get_errno(timer_delete(htimer));
12093 g_posix_timers[timerid] = 0;
12095 break;
12097 #endif
12099 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12100 case TARGET_NR_timerfd_create:
12101 ret = get_errno(timerfd_create(arg1,
12102 target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12103 break;
12104 #endif
12106 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12107 case TARGET_NR_timerfd_gettime:
12109 struct itimerspec its_curr;
12111 ret = get_errno(timerfd_gettime(arg1, &its_curr));
12113 if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12114 goto efault;
12117 break;
12118 #endif
12120 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12121 case TARGET_NR_timerfd_settime:
12123 struct itimerspec its_new, its_old, *p_new;
12125 if (arg3) {
12126 if (target_to_host_itimerspec(&its_new, arg3)) {
12127 goto efault;
12129 p_new = &its_new;
12130 } else {
12131 p_new = NULL;
12134 ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12136 if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12137 goto efault;
12140 break;
12141 #endif
12143 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
12144 case TARGET_NR_ioprio_get:
12145 ret = get_errno(ioprio_get(arg1, arg2));
12146 break;
12147 #endif
12149 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
12150 case TARGET_NR_ioprio_set:
12151 ret = get_errno(ioprio_set(arg1, arg2, arg3));
12152 break;
12153 #endif
12155 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
12156 case TARGET_NR_setns:
12157 ret = get_errno(setns(arg1, arg2));
12158 break;
12159 #endif
12160 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
12161 case TARGET_NR_unshare:
12162 ret = get_errno(unshare(arg1));
12163 break;
12164 #endif
12165 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
12166 case TARGET_NR_kcmp:
12167 ret = get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
12168 break;
12169 #endif
12171 default:
12172 unimplemented:
12173 gemu_log("qemu: Unsupported syscall: %d\n", num);
12174 #if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
12175 unimplemented_nowarn:
12176 #endif
12177 ret = -TARGET_ENOSYS;
12178 break;
12180 fail:
12181 #ifdef DEBUG
12182 gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
12183 #endif
12184 if(do_strace)
12185 print_syscall_ret(num, ret);
12186 trace_guest_user_syscall_ret(cpu, num, ret);
12187 return ret;
12188 efault:
12189 ret = -TARGET_EFAULT;
12190 goto fail;